From c688b7eafc6075bd9ce2dd34025d3688d5d7cbe3 Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 22 Jul 2024 22:56:26 +0800 Subject: [PATCH 001/115] chore(cli): use Quick strategy during chunk upload --- sn_cli/src/bin/subcommands/files.rs | 2 +- sn_client/src/api.rs | 4 ++-- sn_client/src/files.rs | 2 +- sn_client/src/files/download.rs | 2 +- sn_client/src/uploader/mod.rs | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sn_cli/src/bin/subcommands/files.rs b/sn_cli/src/bin/subcommands/files.rs index 84669b08d5..7d3163ad85 100644 --- a/sn_cli/src/bin/subcommands/files.rs +++ b/sn_cli/src/bin/subcommands/files.rs @@ -54,7 +54,7 @@ pub enum FilesCmds { /// /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Balanced, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] + #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] retry_strategy: RetryStrategy, }, Download { diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index 05ec3e67ff..e9531862ce 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -636,7 +636,7 @@ impl Client { /// * 'payee' - [PeerId] /// * 'payment' - [Payment] /// * 'verify_store' - Boolean - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Balanced by default + /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default /// pub(super) async fn store_chunk( &self, @@ -648,7 +648,7 @@ impl Client { ) -> Result<()> { info!("Store chunk: {:?}", chunk.address()); let key = chunk.network_address().to_record_key(); - let retry_strategy = Some(retry_strategy.unwrap_or(RetryStrategy::Balanced)); + let retry_strategy = Some(retry_strategy.unwrap_or(RetryStrategy::Quick)); let record_kind = RecordKind::ChunkWithPayment; let record = Record { diff --git a/sn_client/src/files.rs b/sn_client/src/files.rs index 36d743d319..8643b71961 100644 --- a/sn_client/src/files.rs +++ b/sn_client/src/files.rs @@ -114,7 +114,7 @@ impl FilesApi { /// Directly writes Chunks to the network in the /// form of immutable self encrypted chunks. /// - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Balanced by default + /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default pub async fn get_local_payment_and_upload_chunk( &self, chunk: Chunk, diff --git a/sn_client/src/files/download.rs b/sn_client/src/files/download.rs index 71ce616afd..a1f8de5f09 100644 --- a/sn_client/src/files/download.rs +++ b/sn_client/src/files/download.rs @@ -91,7 +91,7 @@ impl FilesDownload { /// Sets the RetryStrategy to increase the re-try on failure attempts. /// - /// By default, this option is set to RetryStrategy::Balanced + /// By default, this option is set to RetryStrategy::Quick pub fn set_retry_strategy(mut self, retry_strategy: RetryStrategy) -> Self { self.retry_strategy = retry_strategy; self diff --git a/sn_client/src/uploader/mod.rs b/sn_client/src/uploader/mod.rs index 2288c2a10b..99c6865b39 100644 --- a/sn_client/src/uploader/mod.rs +++ b/sn_client/src/uploader/mod.rs @@ -201,7 +201,7 @@ impl Uploader { /// This does not affect the retries during the Payment task. 
Use `set_max_repayments_for_failed_data` to
     /// configure the re-payment attempts.
     ///
-    /// By default, this option is set to RetryStrategy::Balanced
+    /// By default, this option is set to RetryStrategy::Quick
     pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) {
         self.inner
             .as_mut()

From 811ad29f21a4340e830eb7e2a47b13ff8970b97c Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 19 Jul 2024 20:18:39 +0530
Subject: [PATCH 002/115] refactor(network): reserve trace log level for tracking event statistics

---
 sn_cli/src/bin/main.rs                      |  2 +-
 sn_networking/src/cmd.rs                    | 24 ++++++-------
 sn_networking/src/driver.rs                 |  4 +--
 sn_networking/src/event/kad.rs              | 30 ++++++++--------
 sn_networking/src/event/request_response.rs | 12 +++----
 sn_networking/src/event/swarm.rs            | 38 ++++++++++-----------
 sn_networking/src/lib.rs                    | 14 ++++----
 sn_networking/src/record_store.rs           | 14 ++++----
 sn_networking/src/relay_manager.rs          | 19 +++++------
 sn_networking/src/replication_fetcher.rs    |  6 ++--
 sn_networking/src/transfers.rs              |  8 ++---
 11 files changed, 84 insertions(+), 87 deletions(-)

diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs
index 2e4546fb28..0ac03d458b 100644
--- a/sn_cli/src/bin/main.rs
+++ b/sn_cli/src/bin/main.rs
@@ -41,7 +41,7 @@ async fn main() -> Result<()> {
     let opt = Opt::parse();
     let logging_targets = vec![
         // TODO: Reset to nice and clean defaults once we have a better idea of what we want
-        ("sn_networking".to_string(), Level::DEBUG),
+        ("sn_networking".to_string(), Level::INFO),
         ("safe".to_string(), Level::TRACE),
         ("sn_build_info".to_string(), Level::TRACE),
         ("autonomi".to_string(), Level::TRACE),
diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index ba2014827e..3365adf02c 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -396,7 +396,7 @@ impl SwarmDriver {
             } => {
                 cmd_string = "PutRecord";
                 let record_key = PrettyPrintRecordKey::from(&record.key).into_owned();
-                trace!(
+                debug!(
                     "Putting record sized: {:?} to network {:?}",
                     record.value.len(),
                     record_key
@@ -408,7 +408,7 @@ impl SwarmDriver {
                     .put_record(record, quorum)
                 {
                     Ok(request_id) => {
-                        trace!("Sent record {record_key:?} to network. Request id: {request_id:?} to network");
+                        debug!("Sent record {record_key:?} to network. Request id: {request_id:?} to network");
                         Ok(())
                     }
                     Err(error) => {
@@ -429,7 +429,7 @@ impl SwarmDriver {
             } => {
                 cmd_string = "PutRecordTo";
                 let record_key = PrettyPrintRecordKey::from(&record.key).into_owned();
-                trace!(
+                debug!(
                     "Putting record {record_key:?} sized: {:?} to {peers:?}",
                     record.value.len(),
                 );
@@ -439,7 +439,7 @@ impl SwarmDriver {
                     peers.into_iter(),
                     quorum,
                 );
-                trace!("Sent record {record_key:?} to {peers_count:?} peers. Request id: {request_id:?}");
+                debug!("Sent record {record_key:?} to {peers_count:?} peers. Request id: {request_id:?}");
 
                 if let Err(err) = sender.send(Ok(())) {
                     error!("Could not send response to PutRecordTo cmd: {:?}", err);
@@ -669,7 +669,7 @@ impl SwarmDriver {
                 // be handled.
                 // `self` then handles the request and sends a response back again to itself.
                 if peer == *self.swarm.local_peer_id() {
-                    trace!("Sending query request to self");
+                    debug!("Sending query request to self");
                    if let Request::Query(query) = req {
                         self.send_event(NetworkEvent::QueryRequestReceived {
                             query,
@@ -678,7 +678,7 @@ impl SwarmDriver {
                     } else {
                         // We should never receive a Replicate request from ourselves.
                        // we already hold this data if we do...
so we can ignore - trace!("Replicate cmd to self received, ignoring"); + debug!("Replicate cmd to self received, ignoring"); } } else { let request_id = self @@ -686,10 +686,10 @@ impl SwarmDriver { .behaviour_mut() .request_response .send_request(&peer, req); - trace!("Sending request {request_id:?} to peer {peer:?}"); + debug!("Sending request {request_id:?} to peer {peer:?}"); let _ = self.pending_requests.insert(request_id, sender); - trace!("Pending Requests now: {:?}", self.pending_requests.len()); + debug!("Pending Requests now: {:?}", self.pending_requests.len()); } } SwarmCmd::SendResponse { resp, channel } => { @@ -697,7 +697,7 @@ impl SwarmDriver { match channel { // If the response is for `self`, send it directly through the oneshot channel. MsgResponder::FromSelf(channel) => { - trace!("Sending response to self"); + debug!("Sending response to self"); match channel { Some(channel) => { channel @@ -898,7 +898,7 @@ impl SwarmDriver { .collect(); if !all_records.is_empty() { - trace!( + debug!( "Sending a replication list of {} keys to {replicate_targets:?} ", all_records.len() ); @@ -912,13 +912,13 @@ impl SwarmDriver { .behaviour_mut() .request_response .send_request(&peer_id, request.clone()); - trace!("Sending request {request_id:?} to peer {peer_id:?}"); + debug!("Sending request {request_id:?} to peer {peer_id:?}"); let _ = self.pending_requests.insert(request_id, None); let _ = self .replication_targets .insert(peer_id, now + REPLICATION_TIMEOUT); } - trace!("Pending Requests now: {:?}", self.pending_requests.len()); + debug!("Pending Requests now: {:?}", self.pending_requests.len()); } Ok(()) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 65d8449028..dc9a77f08c 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -827,7 +827,7 @@ impl SwarmDriver { /// Dials the given multiaddress. If address contains a peer ID, simultaneous /// dials to that peer are prevented. pub(crate) fn dial(&mut self, mut addr: Multiaddr) -> Result<(), DialError> { - trace!(%addr, "Dialing manually"); + debug!(%addr, "Dialing manually"); let peer_id = multiaddr_pop_p2p(&mut addr); let opts = match peer_id { @@ -844,7 +844,7 @@ impl SwarmDriver { /// Dials with the `DialOpts` given. pub(crate) fn dial_with_opts(&mut self, opts: DialOpts) -> Result<(), DialError> { - trace!(?opts, "Dialing manually"); + debug!(?opts, "Dialing manually"); self.swarm.dial(opts) } diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index fac28268f2..ce839a8f5c 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -40,7 +40,7 @@ impl SwarmDriver { ref step, } => { event_string = "kad_event::get_closest_peers"; - trace!( + debug!( "Query task {id:?} of key {:?} returned with peers {:?}, {stats:?} - {step:?}", hex::encode(closest_peers.key.clone()), closest_peers.peers, @@ -68,7 +68,7 @@ impl SwarmDriver { } } } else { - trace!("Can't locate query task {id:?}, it has likely been completed already."); + debug!("Can't locate query task {id:?}, it has likely been completed already."); return Err(NetworkError::ReceivedKademliaEventDropped { query_id: id, event: "GetClosestPeers Ok".to_string(), @@ -87,7 +87,7 @@ impl SwarmDriver { let (get_closest_type, mut current_closest) = self.pending_get_closest_peers.remove(&id).ok_or_else(|| { - trace!( + debug!( "Can't locate query task {id:?}, it has likely been completed already." 
); NetworkError::ReceivedKademliaEventDropped { @@ -124,7 +124,7 @@ impl SwarmDriver { step, } => { event_string = "kad_event::get_record::found"; - trace!( + debug!( "Query task {id:?} returned with record {:?} from peer {:?}, {stats:?} - {step:?}", PrettyPrintRecordKey::from(&peer_record.record.key), peer_record.peer @@ -141,7 +141,7 @@ impl SwarmDriver { step, } => { event_string = "kad_event::get_record::finished_no_additional"; - trace!("Query task {id:?} of get_record completed with {stats:?} - {step:?} - {cache_candidates:?}"); + debug!("Query task {id:?} of get_record completed with {stats:?} - {step:?} - {cache_candidates:?}"); self.handle_get_record_finished(id, step)?; } kad::Event::OutboundQueryProgressed { @@ -224,7 +224,7 @@ impl SwarmDriver { step, } => { event_string = "kad_event::PutRecordOk"; - trace!( + debug!( "Query task {id:?} put record {:?} ok, {stats:?} - {step:?}", PrettyPrintRecordKey::from(&put_record_ok.key) ); @@ -239,7 +239,7 @@ impl SwarmDriver { event_string = "kad_event::OutboundQueryProgressed::Bootstrap"; // here BootstrapOk::num_remaining refers to the remaining random peer IDs to query, one per // bucket that still needs refreshing. - trace!("Kademlia Bootstrap with {id:?} progressed with {bootstrap_result:?} and step {step:?}"); + debug!("Kademlia Bootstrap with {id:?} progressed with {bootstrap_result:?} and step {step:?}"); } kad::Event::RoutingUpdated { peer, @@ -286,22 +286,22 @@ impl SwarmDriver { } => { event_string = "kad_event::InboundRequest::GetRecord"; if !present_locally && num_closer_peers < CLOSE_GROUP_SIZE { - trace!("InboundRequest::GetRecord doesn't have local record, with {num_closer_peers:?} closer_peers"); + debug!("InboundRequest::GetRecord doesn't have local record, with {num_closer_peers:?} closer_peers"); } } kad::Event::UnroutablePeer { peer } => { event_string = "kad_event::UnroutablePeer"; - trace!(peer_id = %peer, "kad::Event: UnroutablePeer"); + debug!(peer_id = %peer, "kad::Event: UnroutablePeer"); } kad::Event::RoutablePeer { peer, .. } => { // We get this when we don't add a peer via the identify step. // And we don't want to add these as they were rejected by identify for some reason. event_string = "kad_event::RoutablePeer"; - trace!(peer_id = %peer, "kad::Event: RoutablePeer"); + debug!(peer_id = %peer, "kad::Event: RoutablePeer"); } other => { event_string = "kad_event::Other"; - trace!("kad::Event ignored: {other:?}"); + debug!("kad::Event ignored: {other:?}"); } } @@ -383,7 +383,7 @@ impl SwarmDriver { let expected_answers = get_quorum_value(&cfg.get_quorum); - trace!("Expecting {expected_answers:?} answers for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); + debug!("Expecting {expected_answers:?} answers for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); if responded_peers >= expected_answers { if !cfg.expected_holders.is_empty() { @@ -503,7 +503,7 @@ impl SwarmDriver { } else { // We manually perform `query.finish()` if we return early from accumulate fn. // Thus we will still get FinishedWithNoAdditionalRecord. - trace!("Can't locate query task {query_id:?} during GetRecord finished. We might have already returned the result to the sender."); + debug!("Can't locate query task {query_id:?} during GetRecord finished. 
We might have already returned the result to the sender."); } Ok(()) } @@ -527,7 +527,7 @@ impl SwarmDriver { // return error if the entry cannot be found let (sender, _, cfg) = self.pending_get_record.remove(&query_id).ok_or_else(|| { - trace!("Can't locate query task {query_id:?}, it has likely been completed already."); + debug!("Can't locate query task {query_id:?}, it has likely been completed already."); NetworkError::ReceivedKademliaEventDropped { query_id, event: "GetRecordError NotFound or QuorumFailed".to_string(), @@ -548,7 +548,7 @@ impl SwarmDriver { let pretty_key = PrettyPrintRecordKey::from(key); let (sender, result_map, cfg) = self.pending_get_record.remove(&query_id).ok_or_else(|| { - trace!( + debug!( "Can't locate query task {query_id:?} for {pretty_key:?}, it has likely been completed already." ); NetworkError::ReceivedKademliaEventDropped { diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 8d6ecfd0a2..8f31112f19 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -32,7 +32,7 @@ impl SwarmDriver { request_id, .. } => { - trace!("Received request {request_id:?} from peer {peer:?}, req: {request:?}"); + debug!("Received request {request_id:?} from peer {peer:?}, req: {request:?}"); // If the request is replication or quote verification, // we can handle it and send the OK response here. // As the handle result is unimportant to the sender. @@ -108,7 +108,7 @@ impl SwarmDriver { request_id, response, } => { - trace!("Got response {request_id:?} from peer {peer:?}, res: {response}."); + debug!("Got response {request_id:?} from peer {peer:?}, res: {response}."); if let Some(sender) = self.pending_requests.remove(&request_id) { // The sender will be provided if the caller (Requester) is awaiting for a response // at the call site. 
@@ -168,7 +168,7 @@ impl SwarmDriver { warn!("RequestResponse: InboundFailure for request_id: {request_id:?} and peer: {peer:?}, with error: {error:?}"); } request_response::Event::ResponseSent { peer, request_id } => { - trace!("ResponseSent for request_id: {request_id:?} and peer: {peer:?}"); + debug!("ResponseSent for request_id: {request_id:?} and peer: {peer:?}"); } } Ok(()) @@ -186,7 +186,7 @@ impl SwarmDriver { return; }; - trace!( + debug!( "Received replication list from {holder:?} of {} keys", incoming_keys.len() ); @@ -195,7 +195,7 @@ impl SwarmDriver { // giving us some margin for replication let closest_k_peers = self.get_closest_k_value_local_peers(); if !closest_k_peers.contains(&holder) || holder == self.self_peer_id { - trace!("Holder {holder:?} is self or not in replication range."); + debug!("Holder {holder:?} is self or not in replication range."); return; } @@ -219,7 +219,7 @@ impl SwarmDriver { .replication_fetcher .add_keys(holder, incoming_keys, all_keys); if keys_to_fetch.is_empty() { - trace!("no waiting keys to fetch from the network"); + debug!("no waiting keys to fetch from the network"); } else { self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch)); } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 3efbcbc692..04d23ead05 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -128,7 +128,7 @@ impl SwarmDriver { match *iden { libp2p::identify::Event::Received { peer_id, info } => { - trace!(%peer_id, ?info, "identify: received info"); + debug!(%peer_id, ?info, "identify: received info"); if info.protocol_version != IDENTIFY_PROTOCOL_STR.to_string() { warn!(?info.protocol_version, "identify: {peer_id:?} does not have the same protocol. Our IDENTIFY_PROTOCOL_STR: {:?}", IDENTIFY_PROTOCOL_STR.as_str()); @@ -227,10 +227,10 @@ impl SwarmDriver { }; if kbucket_full { - trace!("received identify for a full bucket {ilog2:?}, not dialing {peer_id:?} on {addrs:?}"); + debug!("received identify for a full bucket {ilog2:?}, not dialing {peer_id:?} on {addrs:?}"); return Ok(()); } else if already_present_in_rt { - trace!("received identify for {peer_id:?} that is already part of the RT. Not dialing {peer_id:?} on {addrs:?}"); + debug!("received identify for {peer_id:?} that is already part of the RT. Not dialing {peer_id:?} on {addrs:?}"); return Ok(()); } @@ -263,7 +263,7 @@ impl SwarmDriver { }); } - trace!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table"); + debug!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table"); // Attempt to add the addresses to the routing table. for multiaddr in addrs { @@ -280,9 +280,9 @@ impl SwarmDriver { ); } // Log the other Identify events. - libp2p::identify::Event::Sent { .. } => trace!("identify: {iden:?}"), - libp2p::identify::Event::Pushed { .. } => trace!("identify: {iden:?}"), - libp2p::identify::Event::Error { .. } => trace!("identify: {iden:?}"), + libp2p::identify::Event::Sent { .. } => debug!("identify: {iden:?}"), + libp2p::identify::Event::Pushed { .. } => debug!("identify: {iden:?}"), + libp2p::identify::Event::Error { .. 
} => debug!("identify: {iden:?}"), } } #[cfg(feature = "local-discovery")] @@ -304,7 +304,7 @@ impl SwarmDriver { } } mdns::Event::Expired(peer) => { - trace!("mdns peer {peer:?} expired"); + debug!("mdns peer {peer:?} expired"); } } } @@ -367,7 +367,7 @@ impl SwarmDriver { send_back_addr, } => { event_string = "incoming"; - trace!("IncomingConnection ({connection_id:?}) with local_addr: {local_addr:?} send_back_addr: {send_back_addr:?}"); + debug!("IncomingConnection ({connection_id:?}) with local_addr: {local_addr:?} send_back_addr: {send_back_addr:?}"); } SwarmEvent::ConnectionEstablished { peer_id, @@ -378,7 +378,7 @@ impl SwarmDriver { established_in, } => { event_string = "ConnectionEstablished"; - trace!(%peer_id, num_established, ?concurrent_dial_errors, "ConnectionEstablished ({connection_id:?}) in {established_in:?}: {}", endpoint_str(&endpoint)); + debug!(%peer_id, num_established, ?concurrent_dial_errors, "ConnectionEstablished ({connection_id:?}) in {established_in:?}: {}", endpoint_str(&endpoint)); let _ = self.live_connected_peers.insert( connection_id, @@ -398,7 +398,7 @@ impl SwarmDriver { connection_id, } => { event_string = "ConnectionClosed"; - trace!(%peer_id, ?connection_id, ?cause, num_established, "ConnectionClosed: {}", endpoint_str(&endpoint)); + debug!(%peer_id, ?connection_id, ?cause, num_established, "ConnectionClosed: {}", endpoint_str(&endpoint)); let _ = self.live_connected_peers.remove(&connection_id); self.record_connection_metrics(); } @@ -538,7 +538,7 @@ impl SwarmDriver { connection_id, } => { event_string = "Dialing"; - trace!("Dialing {peer_id:?} on {connection_id:?}"); + debug!("Dialing {peer_id:?} on {connection_id:?}"); } SwarmEvent::NewExternalAddrCandidate { address } => { event_string = "NewExternalAddrCandidate"; @@ -563,12 +563,12 @@ impl SwarmDriver { info!(%address, "external address: new candidate has the same configured port, adding it."); self.swarm.add_external_address(address); - if tracing::level_enabled!(tracing::Level::TRACE) { + if tracing::level_enabled!(tracing::Level::DEBUG) { let all_external_addresses = self.swarm.external_addresses().collect_vec(); let all_listeners = self.swarm.listeners().collect_vec(); - trace!("All our listeners: {all_listeners:?}"); - trace!( + debug!("All our listeners: {all_listeners:?}"); + debug!( "All our external addresses: {all_external_addresses:?}" ); } @@ -577,7 +577,7 @@ impl SwarmDriver { } } } else { - trace!("external address: listen port not set. This has to be set if you're running a node"); + debug!("external address: listen port not set. This has to be set if you're running a node"); } } } @@ -592,7 +592,7 @@ impl SwarmDriver { other => { event_string = "Other"; - trace!("SwarmEvent has been ignored: {other:?}") + debug!("SwarmEvent has been ignored: {other:?}") } } self.remove_outdated_connections(); @@ -677,11 +677,11 @@ impl SwarmDriver { self.record_connection_metrics(); - trace!( + debug!( "Current libp2p peers pool stats is {:?}", self.swarm.network_info() ); - trace!( + debug!( "Removed {removed_conns} outdated live connections, still have {} left.", self.live_connected_peers.len() ); diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 848c8210a8..2430249805 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -71,9 +71,7 @@ use tokio::sync::{ mpsc::{self, Sender}, oneshot, }; - use tokio::time::Duration; -use tracing::trace; /// The type of quote for a selected payee. 
pub type PayeeQuote = (PeerId, MainPubkey, PaymentQuote); @@ -565,7 +563,7 @@ impl Network { } } if result.is_err() { - trace!("Getting record from network of {pretty_key:?} via backoff..."); + debug!("Getting record from network of {pretty_key:?} via backoff..."); } result.map_err(|err| BackoffError::Transient { err: NetworkError::from(err), @@ -735,7 +733,7 @@ impl Network { /// Put `Record` to the local RecordStore /// Must be called after the validations are performed on the Record pub fn put_local_record(&self, record: Record) { - trace!( + debug!( "Writing Record locally, for {:?} - length {:?}", PrettyPrintRecordKey::from(&record.key), record.value.len() @@ -830,7 +828,7 @@ impl Network { key: &NetworkAddress, client: bool, ) -> Result> { - trace!("Getting the closest peers to {key:?}"); + debug!("Getting the closest peers to {key:?}"); let (sender, receiver) = oneshot::channel(); self.send_swarm_cmd(SwarmCmd::GetClosestPeersToAddressFromNetwork { key: key.clone(), @@ -845,7 +843,7 @@ impl Network { // remove our peer id from the calculations here: closest_peers.retain(|&x| x != self.peer_id()); } - if tracing::level_enabled!(tracing::Level::TRACE) { + if tracing::level_enabled!(tracing::Level::DEBUG) { let close_peers_pretty_print: Vec<_> = closest_peers .iter() .map(|peer_id| { @@ -856,7 +854,7 @@ impl Network { }) .collect(); - trace!("Network knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}"); + debug!("Network knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}"); } let closest_peers = sort_peers_by_address(&closest_peers, key, CLOSE_GROUP_SIZE)?; @@ -922,7 +920,7 @@ fn get_fees_from_store_cost_responses( ); // get the lowest cost - trace!("Got all costs: {all_costs:?}"); + debug!("Got all costs: {all_costs:?}"); let payee = all_costs .into_iter() .next() diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 582179e54a..beae192828 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -149,7 +149,7 @@ impl NodeRecordStore { let process_entry = |entry: &DirEntry| -> _ { let path = entry.path(); if path.is_file() { - trace!("Existing record found: {path:?}"); + debug!("Existing record found: {path:?}"); // if we've got a file, lets try and read it let filename = match path.file_name().and_then(|n| n.to_str()) { Some(file_name) => file_name, @@ -517,7 +517,7 @@ impl NodeRecordStore { /// this avoids us returning half-written data or registering it as stored before it is. pub(crate) fn put_verified(&mut self, r: Record, record_type: RecordType) -> Result<()> { let record_key = PrettyPrintRecordKey::from(&r.key).into_owned(); - trace!("PUT a verified Record: {record_key:?}"); + debug!("PUT a verified Record: {record_key:?}"); // if the cache already has this record in it (eg, a conflicting spend) // remove it from the cache @@ -666,7 +666,7 @@ impl RecordStore for NodeRecordStore { } if !self.records.contains_key(k) { - trace!("Record not found locally: {key:?}"); + debug!("Record not found locally: {key:?}"); return None; } @@ -692,7 +692,7 @@ impl RecordStore for NodeRecordStore { Ok(record_header) => { match record_header.kind { RecordKind::ChunkWithPayment | RecordKind::RegisterWithPayment => { - trace!("Record {record_key:?} with payment shall always be processed."); + debug!("Record {record_key:?} with payment shall always be processed."); } _ => { // Chunk with existing key do not to be stored again. 
@@ -701,13 +701,13 @@ impl RecordStore for NodeRecordStore { // double spend to be detected or register op update. match self.records.get(&record.key) { Some((_addr, RecordType::Chunk)) => { - trace!("Chunk {record_key:?} already exists."); + debug!("Chunk {record_key:?} already exists."); return Ok(()); } Some((_addr, RecordType::NonChunk(existing_content_hash))) => { let content_hash = XorName::from_content(&record.value); if content_hash == *existing_content_hash { - trace!("A non-chunk record {record_key:?} with same content_hash {content_hash:?} already exists."); + debug!("A non-chunk record {record_key:?} with same content_hash {content_hash:?} already exists."); return Ok(()); } } @@ -722,7 +722,7 @@ impl RecordStore for NodeRecordStore { } } - trace!("Unverified Record {record_key:?} try to validate and store"); + debug!("Unverified Record {record_key:?} try to validate and store"); let event_sender = self.network_event_sender.clone(); // push the event off thread so as to be non-blocking let _handle = spawn(async move { diff --git a/sn_networking/src/relay_manager.rs b/sn_networking/src/relay_manager.rs index ddd65b3745..8628b08151 100644 --- a/sn_networking/src/relay_manager.rs +++ b/sn_networking/src/relay_manager.rs @@ -74,7 +74,6 @@ impl RelayManager { stream_protocols: &Vec, ) { if self.candidates.len() >= MAX_POTENTIAL_CANDIDATES { - trace!("Got max relay candidates"); return; } @@ -89,7 +88,7 @@ impl RelayManager { } } } else { - trace!("Peer {peer_id:?} does not support relay server protocol"); + debug!("Peer {peer_id:?} does not support relay server protocol"); } } @@ -120,7 +119,7 @@ impl RelayManager { // Pick a random candidate from the vector. Check if empty, or `gen_range` panics for empty range. let index = if self.candidates.is_empty() { - trace!("No more relay candidates."); + debug!("No more relay candidates."); break; } else { rand::thread_rng().gen_range(0..self.candidates.len()) @@ -130,7 +129,7 @@ impl RelayManager { // skip if detected as a bad node if let Some((_, is_bad)) = bad_nodes.get(&peer_id) { if *is_bad { - trace!("Peer {peer_id:?} is considered as a bad node. Skipping it."); + debug!("Peer {peer_id:?} is considered as a bad node. Skipping it."); continue; } } @@ -138,7 +137,7 @@ impl RelayManager { if self.connected_relays.contains_key(&peer_id) || self.waiting_for_reservation.contains_key(&peer_id) { - trace!("We are already using {peer_id:?} as a relay server. Skipping."); + debug!("We are already using {peer_id:?} as a relay server. 
Skipping."); continue; } @@ -154,7 +153,7 @@ impl RelayManager { } } } else { - trace!("No more relay candidates."); + debug!("No more relay candidates."); break; } } @@ -176,11 +175,11 @@ impl RelayManager { peer_id: &PeerId, swarm: &mut Swarm, ) { - if tracing::level_enabled!(tracing::Level::TRACE) { + if tracing::level_enabled!(tracing::Level::DEBUG) { let all_external_addresses = swarm.external_addresses().collect_vec(); let all_listeners = swarm.listeners().collect_vec(); - trace!("All our listeners: {all_listeners:?}"); - trace!("All our external addresses: {all_external_addresses:?}"); + debug!("All our listeners: {all_listeners:?}"); + debug!("All our external addresses: {all_external_addresses:?}"); } match self.waiting_for_reservation.remove(peer_id) { @@ -221,7 +220,7 @@ impl RelayManager { } if let Some(addr) = self.waiting_for_reservation.remove(&peer_id) { info!("Removed peer form waiting_for_reservation as the listener has been closed {peer_id:?}: {addr:?}"); - trace!( + debug!( "waiting_for_reservation len: {:?}", self.waiting_for_reservation.len() ) diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 7312c967da..8aedbc525c 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -107,7 +107,7 @@ impl ReplicationFetcher { info!("Node is full, among {total_incoming_keys} incoming replications from {holder:?}, found {} beyond current farthest", out_of_range_keys.len()); for addr in out_of_range_keys.iter() { - trace!("Node is full, the incoming record_key {addr:?} is beyond current farthest record"); + debug!("Node is full, the incoming record_key {addr:?} is beyond current farthest record"); } } @@ -149,7 +149,7 @@ impl ReplicationFetcher { info!("Among {total_incoming_keys} incoming replications from {holder:?}, found {} out of range", out_of_range_keys.len()); for addr in out_of_range_keys.iter() { let ilog2_distance = self_address.distance(addr).ilog2(); - trace!("The incoming record_key {addr:?} is out of range with ilog2_distance being {ilog2_distance:?}, do not fetch it from {holder:?}"); + debug!("The incoming record_key {addr:?} is out of range with ilog2_distance being {ilog2_distance:?}, do not fetch it from {holder:?}"); } } @@ -237,7 +237,7 @@ impl ReplicationFetcher { pub(crate) fn next_keys_to_fetch(&mut self) -> Vec<(PeerId, RecordKey)> { self.prune_expired_keys_and_slow_nodes(); - trace!("Next to fetch...."); + debug!("Next to fetch...."); if self.on_going_fetches.len() >= MAX_PARALLEL_FETCH { warn!("Replication Fetcher doesn't have free fetch capacity. 
Currently has {} entries in queue.", diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index f8566511d8..8f240c156b 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -104,7 +104,7 @@ impl Network { wallet: &HotWallet, ) -> Result> { // get CashNoteRedemptions from encrypted Transfer - trace!("Decyphering Transfer"); + debug!("Decyphering Transfer"); let cashnote_redemptions = wallet.unwrap_transfer(transfer)?; self.verify_cash_notes_redemptions(wallet.address(), &cashnote_redemptions) @@ -122,7 +122,7 @@ impl Network { cashnote_redemptions: &[CashNoteRedemption], ) -> Result> { // get the parent transactions - trace!( + debug!( "Getting parent Tx for validation from {:?}", cashnote_redemptions.len() ); @@ -179,7 +179,7 @@ impl Network { } // check Txs and parent spends are valid - trace!("Validating parent spends"); + debug!("Validating parent spends"); for tx in parent_txs { let tx_inputs_keys: Vec<_> = tx.inputs.iter().map(|i| i.unique_pubkey()).collect(); @@ -246,7 +246,7 @@ pub fn get_signed_spend_from_record( Err(NetworkError::NoSpendFoundInsideRecord(*address)) } [one] => { - trace!("Spend get for address: {address:?} successful"); + debug!("Spend get for address: {address:?} successful"); Ok(one.clone()) } _double_spends => { From ef4e5bb3a346194ff95ed8d34954cc93a80cb425 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 19 Jul 2024 20:23:12 +0530 Subject: [PATCH 003/115] chore(network): remove unused events and cmds --- sn_networking/src/cmd.rs | 54 +--------------------------------- sn_networking/src/driver.rs | 19 ------------ sn_networking/src/event/mod.rs | 5 ---- sn_networking/src/lib.rs | 50 ------------------------------- sn_node/src/node.rs | 11 ------- 5 files changed, 1 insertion(+), 138 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 3365adf02c..0752f236f4 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -10,7 +10,7 @@ use crate::{ driver::{PendingGetClosestType, SwarmDriver}, error::{NetworkError, Result}, event::TerminateNodeReason, - multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE, + multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, REPLICATION_PEERS_COUNT, }; use libp2p::{ @@ -18,7 +18,6 @@ use libp2p::{ store::{Error as StoreError, RecordStore}, Quorum, Record, RecordKey, }, - swarm::dial_opts::DialOpts, Multiaddr, PeerId, }; use sn_protocol::{ @@ -63,15 +62,6 @@ pub enum SwarmCmd { addr: Multiaddr, sender: oneshot::Sender>, }, - DialWithOpts { - opts: DialOpts, - sender: oneshot::Sender>, - }, - // Returns all the peers from all the k-buckets from the local Routing Table. - // This includes our PeerId as well. - GetAllLocalPeers { - sender: oneshot::Sender>, - }, /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. GetKBuckets { @@ -87,11 +77,6 @@ pub enum SwarmCmd { key: NetworkAddress, sender: oneshot::Sender>, }, - // Get closest peers from the local RoutingTable - GetCloseGroupLocalPeers { - key: NetworkAddress, - sender: oneshot::Sender>, - }, GetSwarmLocalState(oneshot::Sender), // Send Request to the PeerId. SendRequest { @@ -238,18 +223,12 @@ impl Debug for SwarmCmd { SwarmCmd::TriggerIntervalReplication => { write!(f, "SwarmCmd::TriggerIntervalReplication") } - SwarmCmd::DialWithOpts { opts, .. 
} => { - write!(f, "SwarmCmd::DialWithOpts {{ opts: {opts:?} }}") - } SwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { write!(f, "SwarmCmd::GetClosestPeers {{ key: {key:?} }}") } SwarmCmd::GetClosestKLocalPeers { .. } => { write!(f, "SwarmCmd::GetClosestKLocalPeers") } - SwarmCmd::GetCloseGroupLocalPeers { key, .. } => { - write!(f, "SwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}") - } SwarmCmd::GetLocalStoreCost { .. } => { write!(f, "SwarmCmd::GetLocalStoreCost") } @@ -266,9 +245,6 @@ impl Debug for SwarmCmd { SwarmCmd::GetAllLocalRecordAddresses { .. } => { write!(f, "SwarmCmd::GetAllLocalRecordAddresses") } - SwarmCmd::GetAllLocalPeers { .. } => { - write!(f, "SwarmCmd::GetAllLocalPeers") - } SwarmCmd::GetKBuckets { .. } => { write!(f, "SwarmCmd::GetKBuckets") } @@ -598,13 +574,6 @@ impl SwarmDriver { Err(e) => sender.send(Err(e.into())), }; } - SwarmCmd::DialWithOpts { opts, sender } => { - cmd_string = "DialWithOpts"; - let _ = match self.dial_with_opts(opts) { - Ok(_) => sender.send(Ok(())), - Err(e) => sender.send(Err(e.into())), - }; - } SwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { cmd_string = "GetClosestPeersToAddressFromNetwork"; let query_id = self @@ -620,10 +589,6 @@ impl SwarmDriver { ), ); } - SwarmCmd::GetAllLocalPeers { sender } => { - cmd_string = "GetAllLocalPeers"; - let _ = sender.send(self.get_all_local_peers()); - } SwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); @@ -642,23 +607,6 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - SwarmCmd::GetCloseGroupLocalPeers { key, sender } => { - cmd_string = "GetCloseGroupLocalPeers"; - let key = key.as_kbucket_key(); - // calls `kbuckets.closest_keys(key)` internally, which orders the peers by - // increasing distance - // Note it will return all peers, heance a chop down is required. - let closest_peers = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&key) - .map(|peer| peer.into_preimage()) - .take(CLOSE_GROUP_SIZE) - .collect(); - - let _ = sender.send(closest_peers); - } SwarmCmd::GetClosestKLocalPeers { sender } => { cmd_string = "GetClosestKLocalPeers"; let _ = sender.send(self.get_closest_k_value_local_peers()); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index dc9a77f08c..02b414dc99 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -790,18 +790,6 @@ impl SwarmDriver { }); } - // get all the peers from our local RoutingTable. Contains self - pub(crate) fn get_all_local_peers(&mut self) -> Vec { - let mut all_peers: Vec = vec![]; - for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { - for entry in kbucket.iter() { - all_peers.push(entry.node.key.clone().into_preimage()); - } - } - all_peers.push(self.self_peer_id); - all_peers - } - /// get closest k_value the peers from our local RoutingTable. Contains self. /// Is sorted for closeness to self. pub(crate) fn get_closest_k_value_local_peers(&mut self) -> Vec { @@ -842,13 +830,6 @@ impl SwarmDriver { self.swarm.dial(opts) } - /// Dials with the `DialOpts` given. - pub(crate) fn dial_with_opts(&mut self, opts: DialOpts) -> Result<(), DialError> { - debug!(?opts, "Dialing manually"); - - self.swarm.dial(opts) - } - /// Record one handling time. /// Log for every 100 received. 
pub(crate) fn log_handling(&mut self, handle_string: String, handle_time: Duration) { diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 992f9e4716..4457f2f071 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -150,8 +150,6 @@ pub enum NetworkEvent { TerminateNode { reason: TerminateNodeReason }, /// List of peer nodes that failed to fetch replication copy from. FailedToFetchHolders(BTreeSet), - /// A peer in RT that supposed to be verified. - BadNodeVerification { peer_id: PeerId }, /// Quotes to be verified QuoteVerification { quotes: Vec<(PeerId, PaymentQuote)> }, /// Carry out chunk proof check against the specified record and peer @@ -219,9 +217,6 @@ impl Debug for NetworkEvent { NetworkEvent::FailedToFetchHolders(bad_nodes) => { write!(f, "NetworkEvent::FailedToFetchHolders({bad_nodes:?})") } - NetworkEvent::BadNodeVerification { peer_id } => { - write!(f, "NetworkEvent::BadNodeVerification({peer_id:?})") - } NetworkEvent::QuoteVerification { quotes } => { write!( f, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 2430249805..1d3c10f70c 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -251,56 +251,6 @@ impl Network { .map_err(|_e| NetworkError::InternalMsgChannelDropped) } - /// Returns the closest peers to the given `NetworkAddress` that is fetched from the local - /// Routing Table. It is ordered by increasing distance of the peers - /// Note self peer_id is not included in the result. - pub async fn get_close_group_local_peers(&self, key: &NetworkAddress) -> Result> { - let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetCloseGroupLocalPeers { - key: key.clone(), - sender, - }); - - match receiver.await { - Ok(close_peers) => { - // Only perform the pretty print and tracing if tracing is enabled - if tracing::level_enabled!(tracing::Level::TRACE) { - let close_peers_pretty_print: Vec<_> = close_peers - .iter() - .map(|peer_id| { - format!( - "{peer_id:?}({:?})", - PrettyPrintKBucketKey( - NetworkAddress::from_peer(*peer_id).as_kbucket_key() - ) - ) - }) - .collect(); - - trace!( - "Local knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}" - ); - } - Ok(close_peers) - } - Err(err) => { - error!("When getting local knowledge of close peers to {key:?}, failed with error {err:?}"); - Err(NetworkError::InternalMsgChannelDropped) - } - } - } - - /// Returns all the PeerId from all the KBuckets from our local Routing Table - /// Also contains our own PeerId. - pub async fn get_all_local_peers(&self) -> Result> { - let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetAllLocalPeers { sender }); - - receiver - .await - .map_err(|_e| NetworkError::InternalMsgChannelDropped) - } - /// Returns all the PeerId from all the KBuckets from our local Routing Table /// Also contains our own PeerId. 
    pub async fn get_closest_k_value_local_peers(&self) -> Result<Vec<PeerId>> {
diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs
index 73176b53ca..3eb2019e4e 100644
--- a/sn_node/src/node.rs
+++ b/sn_node/src/node.rs
@@ -570,17 +570,6 @@ impl Node {
                     }
                 });
             }
-            NetworkEvent::BadNodeVerification { peer_id } => {
-                event_header = "BadNodeVerification";
-                let network = self.network().clone();
-
-                trace!("Need to verify whether peer {peer_id:?} is a bad node");
-                let _handle = spawn(async move {
-                    if Self::close_nodes_shunning_peer(&network, peer_id).await {
-                        network.record_node_issues(peer_id, NodeIssue::CloseNodesShunning);
-                    }
-                });
-            }
             NetworkEvent::QuoteVerification { quotes } => {
                 event_header = "QuoteVerification";
                 let network = self.network().clone();

From d5d3be9a927c3427ffde3a8598a9f5c33ed051c0 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Mon, 22 Jul 2024 11:27:04 +0530
Subject: [PATCH 004/115] refactor(node): move useful logs to debug level

---
 sn_logging/src/layers.rs         | 16 +++++++++--
 sn_node/src/bin/safenode/main.rs |  2 +-
 sn_node/src/event.rs             |  2 +-
 sn_node/src/node.rs              | 46 ++++++++++++++++----------------
 sn_node/src/put_validation.rs    | 38 +++++++++++++-------------
 sn_node/src/replication.rs       | 20 +++++++-------
 6 files changed, 68 insertions(+), 56 deletions(-)

diff --git a/sn_logging/src/layers.rs b/sn_logging/src/layers.rs
index b10e3cec51..4fbd3c07ea 100644
--- a/sn_logging/src/layers.rs
+++ b/sn_logging/src/layers.rs
@@ -278,7 +278,6 @@ fn get_logging_targets(logging_env_value: &str) -> Result<Vec<(String, Level)>> {
         ("sn_client".to_string(), Level::TRACE),
         ("sn_faucet".to_string(), Level::TRACE),
         ("sn_logging".to_string(), Level::TRACE),
-        ("sn_node".to_string(), Level::TRACE),
         ("sn_node_manager".to_string(), Level::TRACE),
         ("sn_node_rpc_client".to_string(), Level::TRACE),
         ("sn_peers_acquisition".to_string(), Level::TRACE),
@@ -292,8 +291,21 @@ fn get_logging_targets(logging_env_value: &str) -> Result<Vec<(String, Level)>> {
     if !t.contains_key("sn_networking") {
         if contains_keyword_all_sn_logs {
             t.insert("sn_networking".to_string(), Level::TRACE)
-        } else {
+        } else if contains_keyword_verbose_sn_logs {
             t.insert("sn_networking".to_string(), Level::DEBUG)
+        } else {
+            t.insert("sn_networking".to_string(), Level::INFO)
         };
     }
+
+    // Override sn_node if it was not specified.
+ if !t.contains_key("sn_node") { + if contains_keyword_all_sn_logs { + t.insert("sn_node".to_string(), Level::TRACE) + } else if contains_keyword_verbose_sn_logs { + t.insert("sn_node".to_string(), Level::DEBUG) + } else { + t.insert("sn_node".to_string(), Level::INFO) }; } t diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index eaf734380e..2d72552da7 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -395,7 +395,7 @@ fn monitor_node_events(mut node_events_rx: NodeEventsReceiver, ctrl_tx: mpsc::Se } Ok(event) => { /* we ignore other events */ - trace!("Currently ignored node event {event:?}"); + debug!("Currently ignored node event {event:?}"); } Err(RecvError::Lagged(n)) => { warn!("Skipped {n} node events!"); diff --git a/sn_node/src/event.rs b/sn_node/src/event.rs index 0f74995770..c3e9857bad 100644 --- a/sn_node/src/event.rs +++ b/sn_node/src/event.rs @@ -39,7 +39,7 @@ impl NodeEventsChannel { pub(crate) fn broadcast(&self, event: NodeEvent) { let event_string = format!("{event:?}"); if let Err(err) = self.0.send(event) { - trace!( + debug!( "Error occurred when trying to broadcast a node event ({event_string:?}): {err}" ); } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 3eb2019e4e..bd708a1e7f 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -353,7 +353,7 @@ impl Node { // runs every replication_interval time _ = replication_interval.tick() => { let start = Instant::now(); - trace!("Periodic replication triggered"); + debug!("Periodic replication triggered"); let network = self.network().clone(); self.record_metrics(Marker::IntervalReplicationTriggered); @@ -365,7 +365,7 @@ impl Node { // runs every bad_nodes_check_time time _ = bad_nodes_check_interval.tick() => { let start = Instant::now(); - trace!("Periodic bad_nodes check triggered"); + debug!("Periodic bad_nodes check triggered"); let network = self.network().clone(); self.record_metrics(Marker::IntervalBadNodesCheckTriggered); @@ -385,7 +385,7 @@ impl Node { if cfg!(feature = "reward-forward") { if let Some(owner) = self.owner() { let start = Instant::now(); - trace!("Periodic balance forward triggered"); + debug!("Periodic balance forward triggered"); let network = self.network().clone(); let forwarding_reason = owner.clone(); @@ -441,7 +441,7 @@ impl Node { let start = Instant::now(); let event_string = format!("{event:?}"); let event_header; - trace!("Handling NetworkEvent {event_string:?}"); + debug!("Handling NetworkEvent {event_string:?}"); match event { NetworkEvent::PeerAdded(peer_id, connected_peers) => { @@ -510,14 +510,14 @@ impl Node { } NetworkEvent::ResponseReceived { res } => { event_header = "ResponseReceived"; - trace!("NetworkEvent::ResponseReceived {res:?}"); + debug!("NetworkEvent::ResponseReceived {res:?}"); if let Err(err) = self.handle_response(res) { error!("Error while handling NetworkEvent::ResponseReceived {err:?}"); } } NetworkEvent::KeysToFetchForReplication(keys) => { event_header = "KeysToFetchForReplication"; - trace!("Going to fetch {:?} keys for replication", keys.len()); + debug!("Going to fetch {:?} keys for replication", keys.len()); self.record_metrics(Marker::fetching_keys_for_replication(&keys)); if let Err(err) = self.fetch_replication_keys_without_wait(keys) { @@ -531,7 +531,7 @@ impl Node { let _handle = spawn(async move { let res = Self::handle_query(&network, query, payment_address).await; - trace!("Sending response {res:?}"); + debug!("Sending response {res:?}"); 
network.send_response(res, channel); }); @@ -543,7 +543,7 @@ impl Node { let _handle = spawn(async move { let key = PrettyPrintRecordKey::from(&record.key).into_owned(); match self_clone.validate_and_store_record(record).await { - Ok(()) => trace!("UnverifiedRecord {key} has been stored"), + Ok(()) => debug!("UnverifiedRecord {key} has been stored"), Err(err) => { self_clone.record_metrics(Marker::RecordRejected(&key, &err)); } @@ -585,7 +585,7 @@ impl Node { event_header = "ChunkProofVerification"; let network = self.network().clone(); - trace!("Going to verify chunk {keys_to_verify:?} against peer {peer_id:?}"); + debug!("Going to verify chunk {keys_to_verify:?} against peer {peer_id:?}"); let _handle = spawn(async move { // To avoid the peer is in the process of getting the copy via replication, @@ -643,7 +643,7 @@ impl Node { let req_copy = req.clone(); let network_copy = network.clone(); let handle: JoinHandle = spawn(async move { - trace!("getting node_status of {peer_id:?} from {peer:?}"); + debug!("getting node_status of {peer_id:?} from {peer:?}"); if let Ok(resp) = network_copy.send_request(req_copy, peer).await { match resp { Response::Query(QueryResponse::CheckNodeInProblem { @@ -695,7 +695,7 @@ impl Node { ) -> Response { let resp: QueryResponse = match query { Query::GetStoreCost(address) => { - trace!("Got GetStoreCost request for {address:?}"); + debug!("Got GetStoreCost request for {address:?}"); let record_key = address.to_record_key(); let self_id = network.peer_id(); @@ -732,7 +732,7 @@ impl Node { } } Query::GetReplicatedRecord { requester, key } => { - trace!("Got GetReplicatedRecord from {requester:?} regarding {key:?}"); + debug!("Got GetReplicatedRecord from {requester:?} regarding {key:?}"); let our_address = NetworkAddress::from_peer(network.peer_id()); let mut result = Err(ProtocolError::ReplicatedRecordNotFound { @@ -750,15 +750,15 @@ impl Node { QueryResponse::GetReplicatedRecord(result) } Query::GetChunkExistenceProof { key, nonce } => { - trace!("Got GetChunkExistenceProof for chunk {key:?}"); + debug!("Got GetChunkExistenceProof for chunk {key:?}"); let mut result = Err(ProtocolError::ChunkDoesNotExist(key.clone())); if let Ok(Some(record)) = network.get_local_record(&key.to_record_key()).await { let proof = ChunkProof::new(&record.value, nonce); - trace!("Chunk proof for {key:?} is {proof:?}"); + debug!("Chunk proof for {key:?} is {proof:?}"); result = Ok(proof) } else { - trace!( + debug!( "Could not get ChunkProof for {key:?} as we don't have the record locally." 
); } @@ -766,13 +766,13 @@ impl Node { QueryResponse::GetChunkExistenceProof(result) } Query::CheckNodeInProblem(target_address) => { - trace!("Got CheckNodeInProblem for peer {target_address:?}"); + debug!("Got CheckNodeInProblem for peer {target_address:?}"); let is_in_trouble = if let Ok(result) = network.is_peer_shunned(target_address.clone()).await { result } else { - trace!("Could not get status of {target_address:?}."); + debug!("Could not get status of {target_address:?}."); false }; @@ -950,7 +950,7 @@ impl Node { let balance_file_path = network.root_dir_path().join(FORWARDED_BALANCE_FILE_NAME); let old_balance = read_forwarded_balance_value(&balance_file_path); let updated_balance = old_balance + total_forwarded_amount; - trace!("Updating forwarded balance to {updated_balance}"); + debug!("Updating forwarded balance to {updated_balance}"); write_forwarded_balance_value(&balance_file_path, updated_balance)?; Ok(updated_balance) @@ -958,14 +958,14 @@ impl Node { } fn read_forwarded_balance_value(balance_file_path: &PathBuf) -> u64 { - trace!("Reading forwarded balance from file {balance_file_path:?}"); + debug!("Reading forwarded balance from file {balance_file_path:?}"); match std::fs::read_to_string(balance_file_path) { Ok(balance) => balance.parse::().unwrap_or_else(|_| { - trace!("The balance from file is not a valid number"); + debug!("The balance from file is not a valid number"); 0 }), Err(_) => { - trace!("Error while reading to string, setting the balance to 0. This can happen at node init."); + debug!("Error while reading to string, setting the balance to 0. This can happen at node init."); 0 } } @@ -991,7 +991,7 @@ async fn chunk_proof_verify_peer( { let nonce = thread_rng().gen::(); let expected_proof = ChunkProof::new(&record.value, nonce); - trace!("To verify peer {peer_id:?}, chunk_proof for {key:?} is {expected_proof:?}"); + debug!("To verify peer {peer_id:?}, chunk_proof for {key:?} is {expected_proof:?}"); let request = Request::Query(Query::GetChunkExistenceProof { key: key.clone(), @@ -1031,7 +1031,7 @@ fn received_valid_chunk_proof( ) -> Option<()> { if let Ok(Response::Query(QueryResponse::GetChunkExistenceProof(Ok(proof)))) = resp { if expected_proof.verify(&proof) { - trace!( + debug!( "Got a valid ChunkProof of {key:?} from {peer:?}, during peer chunk proof check." ); Some(()) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 656eadc8da..64635bf18b 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -56,7 +56,7 @@ impl Node { // we eagery retry replicaiton as it seems like other nodes are having trouble // did not manage to get this chunk as yet self.replicate_valid_fresh_record(record_key, RecordType::Chunk); - trace!( + debug!( "Chunk with addr {:?} already exists: {already_exists}, payment extracted.", chunk.network_address() ); @@ -109,16 +109,16 @@ impl Node { let net_addr = NetworkAddress::from_register_address(*register.address()); let key = net_addr.to_record_key(); let pretty_key = PrettyPrintRecordKey::from(&key); - trace!("Got record to store without payment for register at {pretty_key:?}"); + debug!("Got record to store without payment for register at {pretty_key:?}"); if !self.validate_key_and_existence(&net_addr, &key).await? 
{ - trace!("Ignore store without payment for register at {pretty_key:?}"); + debug!("Ignore store without payment for register at {pretty_key:?}"); return Err(Error::InvalidPutWithoutPayment( PrettyPrintRecordKey::from(&record.key).into_owned(), )); } // store the update - trace!("Store update without payment as we already had register at {pretty_key:?}"); + debug!("Store update without payment as we already had register at {pretty_key:?}"); let result = self.validate_and_store_register(register, true).await; if result.is_ok() { @@ -154,7 +154,7 @@ impl Node { .await { if already_exists { - trace!("Payment of the incoming exists register {pretty_key:?} having error {err:?}"); + debug!("Payment of the incoming exists register {pretty_key:?} having error {err:?}"); } else { error!("Payment of the incoming non-exist register {pretty_key:?} having error {err:?}"); return Err(err); @@ -168,7 +168,7 @@ impl Node { /// Store a pre-validated, and already paid record to the RecordStore pub(crate) async fn store_replicated_in_record(&self, record: Record) -> Result<()> { - trace!("Storing record which was replicated to us {:?}", record.key); + debug!("Storing record which was replicated to us {:?}", record.key); let record_header = RecordHeader::from_record(&record)?; match record_header.kind { // A separate flow handles payment for chunks and registers @@ -186,7 +186,7 @@ impl Node { .validate_key_and_existence(&chunk.network_address(), &record_key) .await?; if already_exists { - trace!( + debug!( "Chunk with addr {:?} already exists?: {already_exists}, do nothing", chunk.network_address() ); @@ -271,7 +271,7 @@ impl Node { }; // finally store the Record directly into the local storage - trace!("Storing chunk {chunk_name:?} as Record locally"); + debug!("Storing chunk {chunk_name:?} as Record locally"); self.network().put_local_record(record); self.record_metrics(Marker::ValidChunkRecordPutFromNetwork(&pretty_key)); @@ -444,7 +444,7 @@ impl Node { { Ok(cash_notes) => { let received_royalties = total_cash_notes_amount(&cash_notes)?; - trace!( + debug!( "{} network royalties payment cash notes found for record {pretty_key} for a total value of {received_royalties:?}", cash_notes.len() ); @@ -487,14 +487,14 @@ impl Node { ) -> Result<()> { let key = address.to_record_key(); let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); - trace!("Validating record payment for {pretty_key}"); + debug!("Validating record payment for {pretty_key}"); // load wallet let mut wallet = HotWallet::load_from(self.network().root_dir_path())?; let old_balance = wallet.balance().as_nano(); // unpack transfer - trace!("Unpacking incoming Transfers for record {pretty_key}"); + debug!("Unpacking incoming Transfers for record {pretty_key}"); let (received_fee, mut cash_notes, royalties_cash_notes_r) = self .cash_notes_from_transfers(payment.transfers, &wallet, pretty_key.clone()) .await?; @@ -518,7 +518,7 @@ impl Node { return Err(Error::ReusedPayment); } - trace!("Received payment of {received_fee:?} for {pretty_key}"); + debug!("Received payment of {received_fee:?} for {pretty_key}"); // Notify `record_store` that the node received a payment. 
self.network().notify_payment_received(); @@ -546,7 +546,7 @@ impl Node { // check if the quote is valid let storecost = payment.quote.cost; verify_quote_for_storecost(self.network(), payment.quote, address)?; - trace!("Payment quote valid for record {pretty_key}"); + debug!("Payment quote valid for record {pretty_key}"); // Let's check payment is sufficient both for our store cost and for network royalties // Since the storage payment is made to a single node, we can calculate the royalties fee based on that single payment. @@ -558,7 +558,7 @@ impl Node { // finally, (after we accept any payments to us as they are ours now anyway) // lets check they actually paid enough if received_fee < expected_fee { - trace!("Payment insufficient for record {pretty_key}. {received_fee:?} is less than {expected_fee:?}"); + debug!("Payment insufficient for record {pretty_key}. {received_fee:?} is less than {expected_fee:?}"); return Err(Error::PaymentProofInsufficientAmount { paid: received_fee, expected: expected_fee, @@ -584,7 +584,7 @@ impl Node { debug!("Register with addr {reg_addr:?} is valid and doesn't exist locally"); return Ok(Some(register.to_owned())); } - trace!("Register with addr {reg_addr:?} exists locally, comparing with local version"); + debug!("Register with addr {reg_addr:?} exists locally, comparing with local version"); let key = NetworkAddress::from_register_address(*reg_addr).to_record_key(); @@ -605,10 +605,10 @@ impl Node { let mut merged_register = local_register.clone(); merged_register.verified_merge(register)?; if merged_register == local_register { - trace!("Register with addr {reg_addr:?} is the same as the local version"); + debug!("Register with addr {reg_addr:?} is the same as the local version"); Ok(None) } else { - trace!("Register with addr {reg_addr:?} is different from the local version"); + debug!("Register with addr {reg_addr:?} is different from the local version"); Ok(Some(merged_register)) } } @@ -803,11 +803,11 @@ impl Node { while let Some(res) = tasks.join_next().await { match res { Ok((spend, Ok(_descendant))) => { - trace!("Spend {spend:?} has a live descendant"); + debug!("Spend {spend:?} has a live descendant"); let _inserted = live_spends.insert(spend); } Ok((spend, Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)))) => { - trace!("Spend {spend:?} descendant was not found, continuing..."); + debug!("Spend {spend:?} descendant was not found, continuing..."); } Ok((spend, Err(e))) => { warn!( diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 728014a754..070a858228 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -35,7 +35,7 @@ impl Node { let requester = NetworkAddress::from_peer(self.network().peer_id()); let _handle: JoinHandle> = spawn(async move { let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); - trace!("Fetching record {pretty_key:?} from node {holder:?}"); + debug!("Fetching record {pretty_key:?} from node {holder:?}"); let req = Request::Query(Query::GetReplicatedRecord { requester, key: NetworkAddress::from_record_key(&key), @@ -46,12 +46,12 @@ impl Node { { Ok((_holder, record_content)) => Some(record_content), Err(err) => { - trace!("Failed fetch record {pretty_key:?} from node {holder:?}, with error {err:?}"); + debug!("Failed fetch record {pretty_key:?} from node {holder:?}, with error {err:?}"); None } }, other => { - trace!("Cannot fetch record {pretty_key:?} from node {holder:?}, with response {other:?}"); + debug!("Cannot fetch record {pretty_key:?} from 
node {holder:?}, with response {other:?}"); None } } @@ -62,7 +62,7 @@ impl Node { let record = if let Some(record_content) = record_opt { Record::new(key, record_content.to_vec()) } else { - trace!( + debug!( "Can not fetch record {pretty_key:?} from node {holder:?}, fetching from the network" ); let get_cfg = GetRecordCfg { @@ -76,11 +76,11 @@ impl Node { .await? }; - trace!( + debug!( "Got Replication Record {pretty_key:?} from network, validating and storing it" ); node.store_replicated_in_record(record).await?; - trace!("Completed storing Replication Record {pretty_key:?} from network."); + debug!("Completed storing Replication Record {pretty_key:?} from network."); Ok(()) }); @@ -104,7 +104,7 @@ impl Node { // first we wait until our own network store can return the record // otherwise it may not be fully written yet let mut retry_count = 0; - trace!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); + debug!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); loop { let record = match network.get_local_record(&paid_key).await { Ok(record) => record, @@ -131,7 +131,7 @@ impl Node { tokio::time::sleep(std::time::Duration::from_millis(100)).await; } - trace!("Start replication of fresh record {pretty_key:?} from store"); + debug!("Start replication of fresh record {pretty_key:?} from store"); // Already contains self_peer_id let mut closest_k_peers = match network.get_closest_k_value_local_peers().await { @@ -167,7 +167,7 @@ impl Node { let keys = vec![(data_addr.clone(), record_type.clone())]; for peer_id in sorted_based_on_addr { - trace!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); + debug!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); let request = Request::Cmd(Cmd::Replicate { holder: our_address.clone(), keys: keys.clone(), @@ -175,7 +175,7 @@ impl Node { network.send_req_ignore_reply(request, *peer_id); } - trace!( + debug!( "Completed replicate fresh record {pretty_key:?} on store, in {:?}", start.elapsed() ); From cd5f8d40b5bfac81ef290ebebdc416f0905a8a48 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 22 Jul 2024 19:58:40 +0530 Subject: [PATCH 005/115] chore: remove unused node code --- sn_node/src/lib.rs | 5 +---- sn_node/src/log_markers.rs | 17 ++--------------- sn_node/src/metrics.rs | 2 +- sn_node/src/node.rs | 24 +----------------------- 4 files changed, 5 insertions(+), 43 deletions(-) diff --git a/sn_node/src/lib.rs b/sn_node/src/lib.rs index cc4344b87e..6fe51b0816 100644 --- a/sn_node/src/lib.rs +++ b/sn_node/src/lib.rs @@ -40,7 +40,7 @@ mod replication; pub use self::{ event::{NodeEvent, NodeEventsChannel, NodeEventsReceiver}, log_markers::Marker, - node::{NodeBuilder, NodeCmd, PERIODIC_REPLICATION_INTERVAL_MAX_S}, + node::{NodeBuilder, PERIODIC_REPLICATION_INTERVAL_MAX_S}, }; use crate::error::{Error, Result}; @@ -53,7 +53,6 @@ use std::{ collections::{BTreeMap, HashSet}, path::PathBuf, }; -use tokio::sync::broadcast; /// Once a node is started and running, the user obtains /// a `NodeRunning` object which can be used to interact with it. 
@@ -61,8 +60,6 @@ use tokio::sync::broadcast; pub struct RunningNode { network: Network, node_events_channel: NodeEventsChannel, - #[allow(dead_code)] - node_cmds: broadcast::Sender, } impl RunningNode { diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs index 4d545d8c43..61bec97fec 100644 --- a/sn_node/src/log_markers.rs +++ b/sn_node/src/log_markers.rs @@ -6,11 +6,9 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use libp2p::{kad::RecordKey, PeerId}; -use sn_protocol::{messages::Cmd, PrettyPrintRecordKey}; -use std::time::Duration; -// this gets us to_string easily enough use crate::Error; +use libp2p::{kad::RecordKey, PeerId}; +use sn_protocol::PrettyPrintRecordKey; use strum::Display; /// Public Markers for generating log output, @@ -21,15 +19,6 @@ pub enum Marker<'a> { /// The node has started NodeConnectedToNetwork, - /// No network activity in some time - NoNetworkActivity(Duration), - - /// Forced Replication by simulating a churned out node within close range. - ForcedReplication, - - /// Network Cmd message received - NodeCmdReceived(&'a Cmd), - /// Peer was added to the routing table PeerAddedToRoutingTable(PeerId), @@ -39,8 +28,6 @@ pub enum Marker<'a> { /// The number of peers in the routing table PeersInRoutingTable(usize), - /// Replication trigger was fired - ReplicationTriggered, /// Interval based replication IntervalReplicationTriggered, diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 604b65b2af..d7c3cbaa17 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -169,7 +169,7 @@ impl NodeMetrics { let _ = self.put_record_err.inc(); } - Marker::ReplicationTriggered => { + Marker::IntervalReplicationTriggered => { let _ = self.replication_triggered.inc(); } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index bd708a1e7f..c42db1ef8c 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -42,7 +42,7 @@ use std::{ time::Duration, }; use tokio::{ - sync::{broadcast, mpsc::Receiver}, + sync::mpsc::Receiver, task::{spawn, JoinHandle}, }; @@ -171,12 +171,10 @@ impl NodeBuilder { let (network, network_event_receiver, swarm_driver) = network_builder.build_node()?; let node_events_channel = NodeEventsChannel::default(); - let (node_cmds, _) = broadcast::channel(10); let node = NodeInner { network: network.clone(), events_channel: node_events_channel.clone(), - node_cmds: node_cmds.clone(), initial_peers: self.initial_peers, reward_address, #[cfg(feature = "open-metrics")] @@ -189,7 +187,6 @@ impl NodeBuilder { let running_node = RunningNode { network, node_events_channel, - node_cmds, }; // Run the node @@ -199,10 +196,6 @@ impl NodeBuilder { } } -/// Commands that can be sent by the user to the Node instance, e.g. to mutate some settings. -#[derive(Clone, Debug)] -pub enum NodeCmd {} - /// `Node` represents a single node in the distributed network. It handles /// network events, processes incoming requests, interacts with the data /// storage, and broadcasts node-related events. @@ -218,7 +211,6 @@ struct NodeInner { // Peers that are dialed at startup of node. 
initial_peers: Vec, network: Network, - node_cmds: broadcast::Sender, #[cfg(feature = "open-metrics")] node_metrics: Option, /// Node owner's discord username, in readable format @@ -243,11 +235,6 @@ impl Node { &self.inner.network } - /// Returns the NodeCmds channel - pub(crate) fn node_cmds(&self) -> &broadcast::Sender { - &self.inner.node_cmds - } - #[cfg(feature = "open-metrics")] /// Returns a reference to the NodeMetrics if the `open-metrics` feature flag is enabled pub(crate) fn node_metrics(&self) -> Option<&NodeMetrics> { @@ -269,7 +256,6 @@ impl Node { let mut rng = StdRng::from_entropy(); let peers_connected = Arc::new(AtomicUsize::new(0)); - let mut cmds_receiver = self.node_cmds().subscribe(); // read the forwarded balance from the file and set the metric. // This is done initially because reward forwarding takes a while to kick in @@ -410,14 +396,6 @@ impl Node { let _ = node_metrics.uptime.set(node_metrics.started_instant.elapsed().as_secs() as i64); } } - node_cmd = cmds_receiver.recv() => { - match node_cmd { - Ok(cmd) => { - info!("{cmd:?} received... unhandled") - } - Err(err) => error!("When trying to read from the NodeCmds channel/receiver: {err:?}") - } - } } } }); From 1e269c77a4b24a358d4da2e24a9fcd0ec5b50d56 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 23 Jul 2024 17:42:13 +0530 Subject: [PATCH 006/115] chore(node): increase the bad node detection interval to 10mins --- sn_node/src/node.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index c42db1ef8c..b1d5e01936 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -57,6 +57,14 @@ use sn_protocol::storage::{try_serialize_record, RecordKind, SpendAddress}; /// This is the max time it should take. Minimum interval at any node will be half this pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45; +/// Interval to trigger bad node detection. +/// This is the max time it should take. Minimum interval at any node will be half this +const PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S: u64 = 600; + +/// Interval to trigger reward forwarding. +/// This is the max time it should take. Minimum interval at any node will be half this +const PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S: u64 = 450; + /// Max number of attempts that chunk proof verification will be carried out against certain target, /// before classifying peer as a bad peer. const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3; @@ -286,8 +294,9 @@ impl Node { let _ = replication_interval.tick().await; // first tick completes immediately // use a random timeout to ensure not sync when transmit messages. - let bad_nodes_check_interval: u64 = 5 * rng.gen_range( - PERIODIC_REPLICATION_INTERVAL_MAX_S / 2..PERIODIC_REPLICATION_INTERVAL_MAX_S, + let bad_nodes_check_interval: u64 = rng.gen_range( + PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S / 2 + ..PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S, ); let bad_nodes_check_time = Duration::from_secs(bad_nodes_check_interval); debug!("BadNodesCheck interval set to {bad_nodes_check_time:?}"); @@ -298,10 +307,9 @@ impl Node { let mut rolling_index = 0; // use a random timeout to ensure not sync when transmit messages. 
- let balance_forward_interval: u64 = 10 - * rng.gen_range( - PERIODIC_REPLICATION_INTERVAL_MAX_S / 2..PERIODIC_REPLICATION_INTERVAL_MAX_S, - ); + let balance_forward_interval: u64 = rng.gen_range( + PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S / 2..PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S, + ); let balance_forward_time = Duration::from_secs(balance_forward_interval); debug!( "BalanceForward interval set to {balance_forward_time:?} to: {:?}", From a48b8007f4d7325e1689d3ff9a19d6127faba390 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 19 Jul 2024 19:42:52 +0530 Subject: [PATCH 007/115] chore: remove dcutr --- Cargo.lock | 40 +++----------------------------- sn_networking/Cargo.toml | 1 - sn_networking/src/driver.rs | 2 -- sn_networking/src/event/mod.rs | 6 ----- sn_networking/src/event/swarm.rs | 12 ---------- sn_networking/src/metrics/mod.rs | 6 ----- 6 files changed, 3 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a116a5fb90..cea5629e5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3736,7 +3736,6 @@ dependencies = [ "libp2p-autonat", "libp2p-connection-limits", "libp2p-core", - "libp2p-dcutr", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3833,29 +3832,6 @@ dependencies = [ "void", ] -[[package]] -name = "libp2p-dcutr" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4f7bb7fa2b9e6cad9c30a6f67e3ff5c1e4b658c62b6375e35861a85f9c97bf3" -dependencies = [ - "asynchronous-codec 0.6.2", - "either", - "futures", - "futures-bounded", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-identity", - "libp2p-swarm", - "lru 0.11.1", - "quick-protobuf", - "quick-protobuf-codec 0.2.0", - "thiserror", - "tracing", - "void", -] - [[package]] name = "libp2p-dns" version = "0.41.1" @@ -3917,7 +3893,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru 0.12.3", + "lru", "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", @@ -4003,7 +3979,6 @@ dependencies = [ "futures", "instant", "libp2p-core", - "libp2p-dcutr", "libp2p-identify", "libp2p-identity", "libp2p-kad", @@ -4125,7 +4100,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru 0.12.3", + "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -4297,15 +4272,6 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" -[[package]] -name = "lru" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" -dependencies = [ - "hashbrown 0.14.5", -] - [[package]] name = "lru" version = "0.12.3" @@ -5998,7 +5964,7 @@ dependencies = [ "compact_str", "crossterm", "itertools 0.12.1", - "lru 0.12.3", + "lru", "paste", "serde", "stability", diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 1004fbc36d..ee5a975d00 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -30,7 +30,6 @@ libp2p = { version = "0.53", features = [ "request-response", "cbor", "identify", - "dcutr", "tcp", "relay", "noise", diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 02b414dc99..a5c290d9f2 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -206,7 +206,6 @@ pub(super) struct NodeBehaviour { pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle, pub(super) relay_client: libp2p::relay::client::Behaviour, pub(super) 
relay_server: libp2p::relay::Behaviour, - pub(super) dcutr: libp2p::dcutr::Behaviour, pub(super) kademlia: kad::Behaviour, pub(super) request_response: request_response::cbor::Behaviour, } @@ -583,7 +582,6 @@ impl NetworkBuilder { identify, #[cfg(feature = "local-discovery")] mdns, - dcutr: libp2p::dcutr::Behaviour::new(peer_id), }; #[cfg(not(target_arch = "wasm32"))] diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 4457f2f071..85e0d65400 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -42,7 +42,6 @@ pub(super) enum NodeEvent { #[cfg(feature = "local-discovery")] Mdns(Box), Identify(Box), - Dcutr(Box), RelayClient(Box), RelayServer(Box), Void(void::Void), @@ -79,11 +78,6 @@ impl From for NodeEvent { NodeEvent::Identify(Box::new(event)) } } -impl From for NodeEvent { - fn from(event: libp2p::dcutr::Event) -> Self { - NodeEvent::Dcutr(Box::new(event)) - } -} impl From for NodeEvent { fn from(event: libp2p::relay::client::Event) -> Self { NodeEvent::RelayClient(Box::new(event)) diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 04d23ead05..00f8f2cba9 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -59,18 +59,6 @@ impl SwarmDriver { event_string = "kad_event"; self.handle_kad_event(kad_event)?; } - SwarmEvent::Behaviour(NodeEvent::Dcutr(event)) => { - #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&(*event)); - } - - event_string = "dcutr_event"; - info!( - "Dcutr with remote peer: {:?} is: {:?}", - event.remote_peer_id, event.result - ); - } SwarmEvent::Behaviour(NodeEvent::RelayClient(event)) => { event_string = "relay_client_event"; diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index cdb351407e..ba8cdebad0 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -165,12 +165,6 @@ impl Recorder for NetworkMetrics { } } -impl Recorder for NetworkMetrics { - fn record(&self, event: &libp2p::dcutr::Event) { - self.libp2p_metrics.record(event) - } -} - impl Recorder for NetworkMetrics { fn record(&self, event: &libp2p::relay::Event) { self.libp2p_metrics.record(event) From 8ac2493a092c93b7c04760e2005c3c39e012e724 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 23 Jul 2024 17:59:03 +0530 Subject: [PATCH 008/115] chore(node): increase the bootstrap interval --- sn_networking/src/bootstrap.rs | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index 9692584987..608f8116af 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -12,8 +12,9 @@ use tokio::time::Duration; use crate::target_arch::{interval, Instant, Interval}; -/// The interval in which kad.bootstrap is called -pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(5); +/// The default interval at which NetworkDiscovery is triggered. The interval is increased as more peers are added to the +/// routing table. 
+pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(10); /// Every BOOTSTRAP_CONNECTED_PEERS_STEP connected peer, we step up the BOOTSTRAP_INTERVAL to slow down bootstrapping /// process @@ -27,9 +28,12 @@ const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); const LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(30); /// The bootstrap interval to use if we haven't added any new peers in a while. -const NO_PEER_ADDED_SLOWDOWN_INTERVAL: u64 = 300; +const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; impl SwarmDriver { + /// This functions triggers network discovery based on when the last peer was added to the RT and the number of + /// peers in RT. The function also returns a new bootstrap interval that is proportional to the number of + /// peers in RT, so more peers in RT, the longer the interval. pub(crate) async fn run_bootstrap_continuously( &mut self, current_bootstrap_interval: Duration, @@ -41,12 +45,6 @@ impl SwarmDriver { if should_bootstrap { self.trigger_network_discovery(); } - if let Some(new_interval) = &new_interval { - debug!( - "The new bootstrap_interval has been updated to {:?}", - new_interval.period() - ); - } new_interval } @@ -114,7 +112,6 @@ impl ContinuousBootstrap { peers_in_rt: u32, current_interval: Duration, ) -> (bool, Option) { - // kad bootstrap process needs at least one peer in the RT be carried out. let is_ongoing = if let Some(last_bootstrap_triggered) = self.last_bootstrap_triggered { last_bootstrap_triggered.elapsed() < LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT } else { @@ -126,16 +123,16 @@ impl ContinuousBootstrap { // the bootstrapping process. // Don't slow down if we haven't even added one peer to our RT. if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { - info!( - "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process" + // To avoid a heart beat like cpu usage due to the 1K candidates generation, + // randomize the interval within certain range + let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( + NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, ); - - // TO avoid a heart beat like cpu usage due to the 1K candidates generation, - // randomlize the interval within certain range - let no_peer_added_slowdown_interval: u64 = OsRng - .gen_range(NO_PEER_ADDED_SLOWDOWN_INTERVAL..NO_PEER_ADDED_SLOWDOWN_INTERVAL * 2); let no_peer_added_slowdown_interval_duration = Duration::from_secs(no_peer_added_slowdown_interval); + info!( + "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" + ); let mut new_interval = interval(no_peer_added_slowdown_interval_duration); new_interval.tick().await; // the first tick completes immediately @@ -147,7 +144,7 @@ impl ContinuousBootstrap { let step = std::cmp::max(1, step); let new_interval = BOOTSTRAP_INTERVAL * step; let new_interval = if new_interval > current_interval { - info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process"); + info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process. 
Old interval: {current_interval:?}, New interval: {new_interval:?}"); let mut interval = interval(new_interval); interval.tick().await; // the first tick completes immediately Some(interval) From b181dd413f2f414d9b02c80804f77790f8d976b9 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 23 Jul 2024 17:15:34 +0100 Subject: [PATCH 009/115] chore: remove push triggers for release workflow For now we just want to trigger releases manually. The whole release workflow was refactored in another PR, but I'm also going to disable push-based triggers in the short term too. --- .github/workflows/release.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 865301da6c..338cab6eb0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,12 +8,6 @@ concurrency: group: "version-bump-release-${{ github.ref }}" on: - push: - branches: - - stable* - - alpha* - - beta* - - rc* workflow_dispatch: inputs: network_version_mode: From 36f266bb658ca9144f05d948c5861e831d2b8a69 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Thu, 25 Jul 2024 16:44:25 +0200 Subject: [PATCH 010/115] refactor: remove some lazy_static deps; unused dep --- Cargo.lock | 4 ---- node-launchpad/Cargo.toml | 1 - sn_client/Cargo.toml | 4 +--- sn_client/src/test_utils.rs | 7 ++----- sn_node/Cargo.toml | 1 - sn_node/tests/common/client.rs | 5 +---- sn_transfers/src/wallet/encryption.rs | 15 +++++++-------- 7 files changed, 11 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cea5629e5a..a95719c1a5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4657,7 +4657,6 @@ dependencies = [ "human-panic", "itertools 0.12.1", "json5", - "lazy_static", "libc", "log", "pretty_assertions", @@ -7075,7 +7074,6 @@ dependencies = [ "getrandom 0.2.15", "hex 0.4.3", "itertools 0.12.1", - "lazy_static", "libp2p", "libp2p-identity", "petgraph", @@ -7085,7 +7083,6 @@ dependencies = [ "rmp-serde", "self_encryption", "serde", - "serde_json", "sn_bls_ckd", "sn_client", "sn_curv", @@ -7275,7 +7272,6 @@ dependencies = [ "futures", "hex 0.4.3", "itertools 0.12.1", - "lazy_static", "libp2p", "prometheus-client", "prost 0.9.0", diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index e3376cad89..eae344f53d 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -37,7 +37,6 @@ fs_extra = "1.3.0" human-panic = "1.2.0" itertools = "~0.12.1" json5 = "0.4.1" -lazy_static = "1.4.0" libc = "0.2.148" log = "0.4.20" pretty_assertions = "1.4.0" diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index 6aa122ca3d..dd91d65e2f 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -14,7 +14,7 @@ version = "0.108.0" default = [] local-discovery = ["sn_networking/local-discovery"] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] -test-utils = ["sn_peers_acquisition", "lazy_static", "eyre"] +test-utils = ["sn_peers_acquisition", "eyre"] # required to pass on flag to node builds websockets = ["sn_networking/websockets", "sn_protocol/websockets"] @@ -51,7 +51,6 @@ self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } sn_networking = { path = "../sn_networking", version = "0.17.0" } sn_protocol = { path = "../sn_protocol", version = "0.17.5" } -serde_json = "1.0" sn_registers = { path = "../sn_registers", version = "0.3.15" } sn_transfers = { path = "../sn_transfers", version = "0.18.8" } tempfile = "3.6.0" @@ -61,7 +60,6 @@ tracing = { version = "~0.1.26" } xor_name = 
"5.0.0" sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0", optional = true } eyre = { version = "0.6.8", optional = true } -lazy_static = { version = "~1.4.0", optional = true } [dev-dependencies] assert_matches = "1.5.0" diff --git a/sn_client/src/test_utils.rs b/sn_client/src/test_utils.rs index 965763e748..bce997d510 100644 --- a/sn_client/src/test_utils.rs +++ b/sn_client/src/test_utils.rs @@ -17,7 +17,6 @@ use sn_transfers::{HotWallet, NanoTokens}; use bls::SecretKey; use bytes::Bytes; use eyre::{bail, Result}; -use lazy_static::lazy_static; use rand::distributions::{Distribution, Standard}; use std::path::Path; use tokio::{ @@ -32,10 +31,8 @@ pub const AMOUNT_TO_FUND_WALLETS: u64 = 100 * 1_000_000_000; // The number of times to try to load the faucet wallet const LOAD_FAUCET_WALLET_RETRIES: usize = 6; -lazy_static! { - // mutex to restrict access to faucet wallet from concurrent tests - static ref FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::new(()); -} +// mutex to restrict access to faucet wallet from concurrent tests +static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); /// Get a new Client for testing pub async fn get_new_client(owner_sk: SecretKey) -> Result { diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index f1b0bfb98c..0cc94cc3ac 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -39,7 +39,6 @@ file-rotate = "0.7.3" futures = "~0.3.13" hex = "~0.4.3" itertools = "~0.12.1" -lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["tokio", "dns", "kad", "macros"] } prometheus-client = { version = "0.22", optional = true } # watch out updating this, protoc compiler needs to be installed on all build systems diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index e00ff36058..ce0b34e477 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -7,7 +7,6 @@ // permissions and limitations relating to use of the SAFE Network Software. use eyre::{bail, OptionExt, Result}; -use lazy_static::lazy_static; use libp2p::PeerId; use sn_client::{ acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, @@ -45,10 +44,8 @@ pub const LOCAL_NODE_COUNT: usize = 25; // The number of times to try to load the faucet wallet const LOAD_FAUCET_WALLET_RETRIES: usize = 6; -lazy_static! { // mutex to restrict access to faucet wallet from concurrent tests - static ref FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::new(()); -} + static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); /// Load HotWallet from dir pub fn get_wallet(root_dir: &Path) -> HotWallet { diff --git a/sn_transfers/src/wallet/encryption.rs b/sn_transfers/src/wallet/encryption.rs index da142adf71..c0ae28aaa1 100644 --- a/sn_transfers/src/wallet/encryption.rs +++ b/sn_transfers/src/wallet/encryption.rs @@ -11,7 +11,6 @@ use crate::wallet::Result; use crate::MainSecretKey; use bls::SecretKey; use hex::encode; -use lazy_static::lazy_static; use rand::Rng; use ring::aead::{BoundKey, Nonce, NonceSequence}; use ring::error::Unspecified; @@ -20,11 +19,11 @@ use std::io::Read; use std::num::NonZeroU32; use std::path::Path; -lazy_static! { - /// Number of iterations for pbkdf2. - static ref ITERATIONS: NonZeroU32 = - NonZeroU32::new(100_000).expect("ITERATIONS should be > 0."); -} +/// Number of iterations for pbkdf2. +const ITERATIONS: NonZeroU32 = match NonZeroU32::new(100_000) { + Some(v) => v, + None => panic!("`100_000` is not be zero"), +}; /// Filename for the encrypted secret key. 
pub const ENCRYPTED_MAIN_SECRET_KEY_FILENAME: &str = "main_secret_key.encrypted"; @@ -94,7 +93,7 @@ impl EncryptedSecretKey { // Reconstruct the key from salt and password ring::pbkdf2::derive( ring::pbkdf2::PBKDF2_HMAC_SHA512, - *ITERATIONS, + ITERATIONS, &salt, password.as_bytes(), &mut key, @@ -168,7 +167,7 @@ pub(crate) fn encrypt_secret_key( // HMAC is used as the pseudorandom function for its security properties ring::pbkdf2::derive( ring::pbkdf2::PBKDF2_HMAC_SHA512, - *ITERATIONS, + ITERATIONS, &salt, password.as_bytes(), &mut key, From c7cb7db1b888e285ef624c9725668b27a66ff656 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 18 Jul 2024 13:00:11 +0900 Subject: [PATCH 011/115] chore(networking): rename SwarmCmd -> NetworkSwarmCmd Precursor to seperating out local operations vs netwoking IO, so that we can prioritise the local operaitons. --- sn_networking/src/cmd.rs | 98 +++++++++++++++---------------- sn_networking/src/driver.rs | 8 +-- sn_networking/src/event/swarm.rs | 4 +- sn_networking/src/lib.rs | 58 +++++++++--------- sn_networking/src/record_store.rs | 10 ++-- 5 files changed, 89 insertions(+), 89 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 0752f236f4..f93429526e 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -57,7 +57,7 @@ pub enum NodeIssue { /// Commands to send to the Swarm #[allow(clippy::large_enum_variant)] -pub enum SwarmCmd { +pub enum NetworkSwarmCmd { Dial { addr: Multiaddr, sender: oneshot::Sender>, @@ -172,111 +172,111 @@ pub enum SwarmCmd { /// Debug impl for SwarmCmd to avoid printing full Record, instead only RecodKey /// and RecordKind are printed. -impl Debug for SwarmCmd { +impl Debug for NetworkSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - SwarmCmd::Dial { addr, .. } => { + NetworkSwarmCmd::Dial { addr, .. } => { write!(f, "SwarmCmd::Dial {{ addr: {addr:?} }}") } - SwarmCmd::GetNetworkRecord { key, cfg, .. } => { + NetworkSwarmCmd::GetNetworkRecord { key, cfg, .. } => { write!( f, "SwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::PutRecord { record, .. } => { + NetworkSwarmCmd::PutRecord { record, .. } => { write!( f, "SwarmCmd::PutRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } - SwarmCmd::PutRecordTo { peers, record, .. } => { + NetworkSwarmCmd::PutRecordTo { peers, record, .. } => { write!( f, "SwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } - SwarmCmd::PutLocalRecord { record } => { + NetworkSwarmCmd::PutLocalRecord { record } => { write!( f, "SwarmCmd::PutLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } - SwarmCmd::RemoveFailedLocalRecord { key } => { + NetworkSwarmCmd::RemoveFailedLocalRecord { key } => { write!( f, "SwarmCmd::RemoveFailedLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::AddLocalRecordAsStored { key, record_type } => { + NetworkSwarmCmd::AddLocalRecordAsStored { key, record_type } => { write!( f, "SwarmCmd::AddLocalRecordAsStored {{ key: {:?}, record_type: {record_type:?} }}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::TriggerIntervalReplication => { + NetworkSwarmCmd::TriggerIntervalReplication => { write!(f, "SwarmCmd::TriggerIntervalReplication") } - SwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { + NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. 
} => { write!(f, "SwarmCmd::GetClosestPeers {{ key: {key:?} }}") } - SwarmCmd::GetClosestKLocalPeers { .. } => { + NetworkSwarmCmd::GetClosestKLocalPeers { .. } => { write!(f, "SwarmCmd::GetClosestKLocalPeers") } - SwarmCmd::GetLocalStoreCost { .. } => { + NetworkSwarmCmd::GetLocalStoreCost { .. } => { write!(f, "SwarmCmd::GetLocalStoreCost") } - SwarmCmd::PaymentReceived => { + NetworkSwarmCmd::PaymentReceived { .. } => { write!(f, "SwarmCmd::PaymentReceived") } - SwarmCmd::GetLocalRecord { key, .. } => { + NetworkSwarmCmd::GetLocalRecord { key, .. } => { write!( f, "SwarmCmd::GetLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::GetAllLocalRecordAddresses { .. } => { + NetworkSwarmCmd::GetAllLocalRecordAddresses { .. } => { write!(f, "SwarmCmd::GetAllLocalRecordAddresses") } - SwarmCmd::GetKBuckets { .. } => { + NetworkSwarmCmd::GetKBuckets { .. } => { write!(f, "SwarmCmd::GetKBuckets") } - SwarmCmd::GetSwarmLocalState { .. } => { + NetworkSwarmCmd::GetSwarmLocalState { .. } => { write!(f, "SwarmCmd::GetSwarmLocalState") } - SwarmCmd::RecordStoreHasKey { key, .. } => { + NetworkSwarmCmd::RecordStoreHasKey { key, .. } => { write!( f, "SwarmCmd::RecordStoreHasKey {:?}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::SendResponse { resp, .. } => { + NetworkSwarmCmd::SendResponse { resp, .. } => { write!(f, "SwarmCmd::SendResponse resp: {resp:?}") } - SwarmCmd::SendRequest { req, peer, .. } => { + NetworkSwarmCmd::SendRequest { req, peer, .. } => { write!(f, "SwarmCmd::SendRequest req: {req:?}, peer: {peer:?}") } - SwarmCmd::RecordNodeIssue { peer_id, issue } => { + NetworkSwarmCmd::RecordNodeIssue { peer_id, issue } => { write!( f, "SwarmCmd::SendNodeStatus peer {peer_id:?}, issue: {issue:?}" ) } - SwarmCmd::IsPeerShunned { target, .. } => { + NetworkSwarmCmd::IsPeerShunned { target, .. 
} => { write!(f, "SwarmCmd::IsPeerInTrouble target: {target:?}") } - SwarmCmd::QuoteVerification { quotes } => { + NetworkSwarmCmd::QuoteVerification { quotes } => { write!(f, "SwarmCmd::QuoteVerification of {} quotes", quotes.len()) } - SwarmCmd::FetchCompleted(key) => { + NetworkSwarmCmd::FetchCompleted(key) => { write!( f, "SwarmCmd::FetchCompleted({:?})", @@ -296,15 +296,15 @@ pub struct SwarmLocalState { } impl SwarmDriver { - pub(crate) fn handle_cmd(&mut self, cmd: SwarmCmd) -> Result<(), NetworkError> { + pub(crate) fn handle_cmd(&mut self, cmd: NetworkSwarmCmd) -> Result<(), NetworkError> { let start = Instant::now(); let mut cmd_string; match cmd { - SwarmCmd::TriggerIntervalReplication => { + NetworkSwarmCmd::TriggerIntervalReplication => { cmd_string = "TriggerIntervalReplication"; self.try_interval_replication()?; } - SwarmCmd::GetNetworkRecord { key, sender, cfg } => { + NetworkSwarmCmd::GetNetworkRecord { key, sender, cfg } => { cmd_string = "GetNetworkRecord"; let query_id = self.swarm.behaviour_mut().kademlia.get_record(key.clone()); @@ -331,7 +331,7 @@ impl SwarmDriver { info!("We now have {} pending get record attempts and cached {total_records} fetched copies", self.pending_get_record.len()); } - SwarmCmd::GetLocalStoreCost { key, sender } => { + NetworkSwarmCmd::GetLocalStoreCost { key, sender } => { cmd_string = "GetLocalStoreCost"; let cost = self .swarm @@ -346,7 +346,7 @@ impl SwarmDriver { let _res = sender.send(cost); } - SwarmCmd::PaymentReceived => { + NetworkSwarmCmd::PaymentReceived => { cmd_string = "PaymentReceived"; self.swarm .behaviour_mut() @@ -354,7 +354,7 @@ impl SwarmDriver { .store_mut() .payment_received(); } - SwarmCmd::GetLocalRecord { key, sender } => { + NetworkSwarmCmd::GetLocalRecord { key, sender } => { cmd_string = "GetLocalRecord"; let record = self .swarm @@ -365,7 +365,7 @@ impl SwarmDriver { .map(|rec| rec.into_owned()); let _ = sender.send(record); } - SwarmCmd::PutRecord { + NetworkSwarmCmd::PutRecord { record, sender, quorum, @@ -397,7 +397,7 @@ impl SwarmDriver { error!("Could not send response to PutRecord cmd: {:?}", err); } } - SwarmCmd::PutRecordTo { + NetworkSwarmCmd::PutRecordTo { peers, record, sender, @@ -421,7 +421,7 @@ impl SwarmDriver { error!("Could not send response to PutRecordTo cmd: {:?}", err); } } - SwarmCmd::PutLocalRecord { record } => { + NetworkSwarmCmd::PutLocalRecord { record } => { cmd_string = "PutLocalRecord"; let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); @@ -510,7 +510,7 @@ impl SwarmDriver { return Err(err.into()); }; } - SwarmCmd::AddLocalRecordAsStored { key, record_type } => { + NetworkSwarmCmd::AddLocalRecordAsStored { key, record_type } => { info!( "Adding Record locally, for {:?} and {record_type:?}", PrettyPrintRecordKey::from(&key) @@ -524,7 +524,7 @@ impl SwarmDriver { // Reset counter on any success HDD write. 
self.hard_disk_write_error = 0; } - SwarmCmd::RemoveFailedLocalRecord { key } => { + NetworkSwarmCmd::RemoveFailedLocalRecord { key } => { info!("Removing Record locally, for {key:?}"); cmd_string = "RemoveFailedLocalRecord"; self.swarm.behaviour_mut().kademlia.store_mut().remove(&key); @@ -537,7 +537,7 @@ impl SwarmDriver { }); } } - SwarmCmd::RecordStoreHasKey { key, sender } => { + NetworkSwarmCmd::RecordStoreHasKey { key, sender } => { cmd_string = "RecordStoreHasKey"; let has_key = self .swarm @@ -547,7 +547,7 @@ impl SwarmDriver { .contains(&key); let _ = sender.send(has_key); } - SwarmCmd::GetAllLocalRecordAddresses { sender } => { + NetworkSwarmCmd::GetAllLocalRecordAddresses { sender } => { cmd_string = "GetAllLocalRecordAddresses"; #[allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress let addresses = self @@ -558,7 +558,7 @@ impl SwarmDriver { .record_addresses(); let _ = sender.send(addresses); } - SwarmCmd::Dial { addr, sender } => { + NetworkSwarmCmd::Dial { addr, sender } => { cmd_string = "Dial"; if let Some(peer_id) = multiaddr_pop_p2p(&mut addr.clone()) { @@ -574,7 +574,7 @@ impl SwarmDriver { Err(e) => sender.send(Err(e.into())), }; } - SwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { + NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { cmd_string = "GetClosestPeersToAddressFromNetwork"; let query_id = self .swarm @@ -589,7 +589,7 @@ impl SwarmDriver { ), ); } - SwarmCmd::GetKBuckets { sender } => { + NetworkSwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { @@ -607,11 +607,11 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - SwarmCmd::GetClosestKLocalPeers { sender } => { + NetworkSwarmCmd::GetClosestKLocalPeers { sender } => { cmd_string = "GetClosestKLocalPeers"; let _ = sender.send(self.get_closest_k_value_local_peers()); } - SwarmCmd::SendRequest { req, peer, sender } => { + NetworkSwarmCmd::SendRequest { req, peer, sender } => { cmd_string = "SendRequest"; // If `self` is the recipient, forward the request directly to our upper layer to // be handled. @@ -640,7 +640,7 @@ impl SwarmDriver { debug!("Pending Requests now: {:?}", self.pending_requests.len()); } } - SwarmCmd::SendResponse { resp, channel } => { + NetworkSwarmCmd::SendResponse { resp, channel } => { cmd_string = "SendResponse"; match channel { // If the response is for `self`, send it directly through the oneshot channel. 
@@ -668,7 +668,7 @@ impl SwarmDriver { } } } - SwarmCmd::GetSwarmLocalState(sender) => { + NetworkSwarmCmd::GetSwarmLocalState(sender) => { cmd_string = "GetSwarmLocalState"; let current_state = SwarmLocalState { connected_peers: self.swarm.connected_peers().cloned().collect(), @@ -680,11 +680,11 @@ impl SwarmDriver { .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } - SwarmCmd::RecordNodeIssue { peer_id, issue } => { + NetworkSwarmCmd::RecordNodeIssue { peer_id, issue } => { cmd_string = "RecordNodeIssues"; self.record_node_issue(peer_id, issue); } - SwarmCmd::IsPeerShunned { target, sender } => { + NetworkSwarmCmd::IsPeerShunned { target, sender } => { cmd_string = "IsPeerInTrouble"; let is_bad = if let Some(peer_id) = target.as_peer_id() { if let Some((_issues, is_bad)) = self.bad_nodes.get(&peer_id) { @@ -697,7 +697,7 @@ impl SwarmDriver { }; let _ = sender.send(is_bad); } - SwarmCmd::QuoteVerification { quotes } => { + NetworkSwarmCmd::QuoteVerification { quotes } => { cmd_string = "QuoteVerification"; for (peer_id, quote) in quotes { // Do nothing if already being bad @@ -709,7 +709,7 @@ impl SwarmDriver { self.verify_peer_quote(peer_id, quote); } } - SwarmCmd::FetchCompleted(key) => { + NetworkSwarmCmd::FetchCompleted(key) => { info!( "Fetch {:?} early completed, may fetched an old version record.", PrettyPrintRecordKey::from(&key) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index d87a2d766a..e10f3d748e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -13,7 +13,7 @@ use crate::metrics_service::run_metrics_server; use crate::{ bootstrap::{ContinuousBootstrap, BOOTSTRAP_INTERVAL}, circular_vec::CircularVec, - cmd::SwarmCmd, + cmd::NetworkSwarmCmd, error::{NetworkError, Result}, event::{NetworkEvent, NodeEvent}, multiaddr_pop_p2p, @@ -604,7 +604,7 @@ impl NetworkBuilder { replication_fetcher, #[cfg(feature = "open-metrics")] network_metrics, - cmd_receiver: swarm_cmd_receiver, + network_cmd_receiver: swarm_cmd_receiver, event_sender: network_event_sender, pending_get_closest_peers: Default::default(), pending_requests: Default::default(), @@ -646,7 +646,7 @@ pub struct SwarmDriver { #[cfg(feature = "open-metrics")] pub(crate) network_metrics: Option, - cmd_receiver: mpsc::Receiver, + network_cmd_receiver: mpsc::Receiver, event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. /// Trackers for underlying behaviour related events @@ -694,7 +694,7 @@ impl SwarmDriver { warn!("Error while handling swarm event: {err}"); } }, - some_cmd = self.cmd_receiver.recv() => match some_cmd { + some_cmd = self.network_cmd_receiver.recv() => match some_cmd { Some(cmd) => { let start = Instant::now(); let cmd_string = format!("{cmd:?}"); diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 00f8f2cba9..e0b506ff06 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::{ - cmd::SwarmCmd, + cmd::NetworkSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, @@ -503,7 +503,7 @@ impl SwarmDriver { { self.update_on_peer_removal(*dead_peer.node.key.preimage()); - self.handle_cmd(SwarmCmd::RecordNodeIssue { + self.handle_cmd(NetworkSwarmCmd::RecordNodeIssue { peer_id: failed_peer_id, issue: crate::NodeIssue::ConnectionIssue, })?; diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 1d3c10f70c..4efcbec55b 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -45,7 +45,7 @@ pub use self::{ transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record}, }; -use self::{cmd::SwarmCmd, error::Result}; +use self::{cmd::NetworkSwarmCmd, error::Result}; use backoff::{Error as BackoffError, ExponentialBackoff}; use futures::future::select_all; use libp2p::{ @@ -161,7 +161,7 @@ pub struct Network { /// The actual implementation of the Network. The other is just a wrapper around this, so that we don't expose /// the Arc from the interface. struct NetworkInner { - swarm_cmd_sender: mpsc::Sender, + swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, root_dir_path: PathBuf, keypair: Keypair, @@ -169,7 +169,7 @@ struct NetworkInner { impl Network { pub fn new( - swarm_cmd_sender: mpsc::Sender, + swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, root_dir_path: PathBuf, keypair: Keypair, @@ -200,7 +200,7 @@ impl Network { } /// Get the sender to send a `SwarmCmd` to the underlying `Swarm`. - pub(crate) fn swarm_cmd_sender(&self) -> &mpsc::Sender { + pub(crate) fn swarm_cmd_sender(&self) -> &mpsc::Sender { &self.inner.swarm_cmd_sender } @@ -223,7 +223,7 @@ impl Network { /// This function will only be called for the bootstrap nodes. pub async fn dial(&self, addr: Multiaddr) -> Result<()> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::Dial { addr, sender }); + self.send_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); receiver.await? } @@ -245,7 +245,7 @@ impl Network { /// Does not include self pub async fn get_kbuckets(&self) -> Result>> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetKBuckets { sender }); + self.send_swarm_cmd(NetworkSwarmCmd::GetKBuckets { sender }); receiver .await .map_err(|_e| NetworkError::InternalMsgChannelDropped) @@ -255,7 +255,7 @@ impl Network { /// Also contains our own PeerId. pub async fn get_closest_k_value_local_peers(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetClosestKLocalPeers { sender }); + self.send_swarm_cmd(NetworkSwarmCmd::GetClosestKLocalPeers { sender }); receiver .await @@ -435,7 +435,7 @@ impl Network { let pretty_key = PrettyPrintRecordKey::from(&key); info!("Getting record from network of {pretty_key:?}. with cfg {cfg:?}",); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetNetworkRecord { + self.send_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { key: key.clone(), sender, cfg: cfg.clone(), @@ -471,7 +471,7 @@ impl Network { let pretty_key = PrettyPrintRecordKey::from(&key); info!("Getting record from network of {pretty_key:?}. 
with cfg {cfg:?}",); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetNetworkRecord { + self.send_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { key: key.clone(), sender, cfg: cfg.clone(), @@ -530,7 +530,7 @@ impl Network { key: RecordKey, ) -> Result<(NanoTokens, QuotingMetrics)> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetLocalStoreCost { key, sender }); + self.send_swarm_cmd(NetworkSwarmCmd::GetLocalStoreCost { key, sender }); receiver .await @@ -539,13 +539,13 @@ impl Network { /// Notify the node receicced a payment. pub fn notify_payment_received(&self) { - self.send_swarm_cmd(SwarmCmd::PaymentReceived); + self.send_swarm_cmd(NetworkSwarmCmd::PaymentReceived); } /// Get `Record` from the local RecordStore pub async fn get_local_record(&self, key: &RecordKey) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetLocalRecord { + self.send_swarm_cmd(NetworkSwarmCmd::GetLocalRecord { key: key.clone(), sender, }); @@ -558,7 +558,7 @@ impl Network { /// Whether the target peer is considered blacklisted by self pub async fn is_peer_shunned(&self, target: NetworkAddress) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::IsPeerShunned { target, sender }); + self.send_swarm_cmd(NetworkSwarmCmd::IsPeerShunned { target, sender }); receiver .await @@ -609,14 +609,14 @@ impl Network { // Waiting for a response to avoid flushing to network too quick that causing choke let (sender, receiver) = oneshot::channel(); if let Some(put_record_to_peers) = &cfg.use_put_record_to { - self.send_swarm_cmd(SwarmCmd::PutRecordTo { + self.send_swarm_cmd(NetworkSwarmCmd::PutRecordTo { peers: put_record_to_peers.clone(), record: record.clone(), sender, quorum: cfg.put_quorum, }); } else { - self.send_swarm_cmd(SwarmCmd::PutRecord { + self.send_swarm_cmd(NetworkSwarmCmd::PutRecord { record: record.clone(), sender, quorum: cfg.put_quorum, @@ -677,7 +677,7 @@ impl Network { /// Notify ReplicationFetch a fetch attempt is completed. /// (but it won't trigger any real writes to disk, say fetched an old version of register) pub fn notify_fetch_completed(&self, key: RecordKey) { - self.send_swarm_cmd(SwarmCmd::FetchCompleted(key)) + self.send_swarm_cmd(NetworkSwarmCmd::FetchCompleted(key)) } /// Put `Record` to the local RecordStore @@ -688,13 +688,13 @@ impl Network { PrettyPrintRecordKey::from(&record.key), record.value.len() ); - self.send_swarm_cmd(SwarmCmd::PutLocalRecord { record }) + self.send_swarm_cmd(NetworkSwarmCmd::PutLocalRecord { record }) } /// Returns true if a RecordKey is present locally in the RecordStore pub async fn is_record_key_present_locally(&self, key: &RecordKey) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::RecordStoreHasKey { + self.send_swarm_cmd(NetworkSwarmCmd::RecordStoreHasKey { key: key.clone(), sender, }); @@ -709,7 +709,7 @@ impl Network { &self, ) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetAllLocalRecordAddresses { sender }); + self.send_swarm_cmd(NetworkSwarmCmd::GetAllLocalRecordAddresses { sender }); receiver .await @@ -722,7 +722,7 @@ impl Network { /// layers. 
pub async fn send_request(&self, req: Request, peer: PeerId) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::SendRequest { + self.send_swarm_cmd(NetworkSwarmCmd::SendRequest { req, peer, sender: Some(sender), @@ -733,7 +733,7 @@ impl Network { /// Send `Request` to the given `PeerId` and do _not_ await a response here. /// Instead the Response will be handled by the common `response_handler` pub fn send_req_ignore_reply(&self, req: Request, peer: PeerId) { - let swarm_cmd = SwarmCmd::SendRequest { + let swarm_cmd = NetworkSwarmCmd::SendRequest { req, peer, sender: None, @@ -743,31 +743,31 @@ impl Network { /// Send a `Response` through the channel opened by the requester. pub fn send_response(&self, resp: Response, channel: MsgResponder) { - self.send_swarm_cmd(SwarmCmd::SendResponse { resp, channel }) + self.send_swarm_cmd(NetworkSwarmCmd::SendResponse { resp, channel }) } /// Return a `SwarmLocalState` with some information obtained from swarm's local state. pub async fn get_swarm_local_state(&self) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetSwarmLocalState(sender)); + self.send_swarm_cmd(NetworkSwarmCmd::GetSwarmLocalState(sender)); let state = receiver.await?; Ok(state) } pub fn trigger_interval_replication(&self) { - self.send_swarm_cmd(SwarmCmd::TriggerIntervalReplication) + self.send_swarm_cmd(NetworkSwarmCmd::TriggerIntervalReplication) } pub fn record_node_issues(&self, peer_id: PeerId, issue: NodeIssue) { - self.send_swarm_cmd(SwarmCmd::RecordNodeIssue { peer_id, issue }); + self.send_swarm_cmd(NetworkSwarmCmd::RecordNodeIssue { peer_id, issue }); } pub fn historical_verify_quotes(&self, quotes: Vec<(PeerId, PaymentQuote)>) { - self.send_swarm_cmd(SwarmCmd::QuoteVerification { quotes }); + self.send_swarm_cmd(NetworkSwarmCmd::QuoteVerification { quotes }); } // Helper to send SwarmCmd - fn send_swarm_cmd(&self, cmd: SwarmCmd) { + fn send_swarm_cmd(&self, cmd: NetworkSwarmCmd) { send_swarm_cmd(self.swarm_cmd_sender().clone(), cmd); } @@ -780,7 +780,7 @@ impl Network { ) -> Result> { debug!("Getting the closest peers to {key:?}"); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetClosestPeersToAddressFromNetwork { + self.send_swarm_cmd(NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key: key.clone(), sender, }); @@ -953,7 +953,7 @@ pub(crate) fn multiaddr_strip_p2p(multiaddr: &Multiaddr) -> Multiaddr { } } -pub(crate) fn send_swarm_cmd(swarm_cmd_sender: Sender, cmd: SwarmCmd) { +pub(crate) fn send_swarm_cmd(swarm_cmd_sender: Sender, cmd: NetworkSwarmCmd) { let capacity = swarm_cmd_sender.capacity(); if capacity == 0 { diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index beae192828..0ff98e30ae 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -10,7 +10,7 @@ use crate::driver::MAX_PACKET_SIZE; use crate::target_arch::{spawn, Instant}; use crate::CLOSE_GROUP_SIZE; -use crate::{cmd::SwarmCmd, event::NetworkEvent, log_markers::Marker, send_swarm_cmd}; +use crate::{cmd::NetworkSwarmCmd, event::NetworkEvent, log_markers::Marker, send_swarm_cmd}; use aes_gcm_siv::{ aead::{Aead, KeyInit, OsRng}, Aes256GcmSiv, Nonce, @@ -76,7 +76,7 @@ pub struct NodeRecordStore { /// Send network events to the node layer. network_event_sender: mpsc::Sender, /// Send cmds to the network layer. Used to interact with self in an async fashion. 
- swarm_cmd_sender: mpsc::Sender, + swarm_cmd_sender: mpsc::Sender, /// ilog2 distance range of responsible records /// AKA: how many buckets of data do we consider "close" /// None means accept all records. @@ -248,7 +248,7 @@ impl NodeRecordStore { local_id: PeerId, config: NodeRecordStoreConfig, network_event_sender: mpsc::Sender, - swarm_cmd_sender: mpsc::Sender, + swarm_cmd_sender: mpsc::Sender, ) -> Self { let key = Aes256GcmSiv::generate_key(&mut OsRng); let cipher = Aes256GcmSiv::new(&key); @@ -550,13 +550,13 @@ impl NodeRecordStore { // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): info!("Wrote record {record_key:?} to disk! filename: {filename}"); - SwarmCmd::AddLocalRecordAsStored { key, record_type } + NetworkSwarmCmd::AddLocalRecordAsStored { key, record_type } } Err(err) => { error!( "Error writing record {record_key:?} filename: {filename}, error: {err:?}" ); - SwarmCmd::RemoveFailedLocalRecord { key } + NetworkSwarmCmd::RemoveFailedLocalRecord { key } } }; From 2fec1ba3eb2a3faf1bf8bf2477384f61badf42a4 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 18 Jul 2024 13:07:35 +0900 Subject: [PATCH 012/115] feat(networking): split SwarmCmds into local and network Handle local cmds with higher priority. --- sn_networking/src/cmd.rs | 534 +++++++++++++++++------------- sn_networking/src/driver.rs | 52 ++- sn_networking/src/event/swarm.rs | 4 +- sn_networking/src/lib.rs | 99 ++++-- sn_networking/src/record_store.rs | 19 +- 5 files changed, 419 insertions(+), 289 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index f93429526e..261780f7c3 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -10,7 +10,7 @@ use crate::{ driver::{PendingGetClosestType, SwarmDriver}, error::{NetworkError, Result}, event::TerminateNodeReason, - multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, + multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE, REPLICATION_PEERS_COUNT, }; use libp2p::{ @@ -56,12 +56,7 @@ pub enum NodeIssue { } /// Commands to send to the Swarm -#[allow(clippy::large_enum_variant)] -pub enum NetworkSwarmCmd { - Dial { - addr: Multiaddr, - sender: oneshot::Sender>, - }, +pub enum LocalSwarmCmd { /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. GetKBuckets { @@ -72,29 +67,12 @@ pub enum NetworkSwarmCmd { GetClosestKLocalPeers { sender: oneshot::Sender>, }, - // Get closest peers from the network - GetClosestPeersToAddressFromNetwork { + // Get closest peers from the local RoutingTable + GetCloseGroupLocalPeers { key: NetworkAddress, sender: oneshot::Sender>, }, GetSwarmLocalState(oneshot::Sender), - // Send Request to the PeerId. - SendRequest { - req: Request, - peer: PeerId, - - // If a `sender` is provided, the requesting node will await for a `Response` from the - // Peer. The result is then returned at the call site. - // - // If a `sender` is not provided, the requesting node will not wait for the Peer's - // response. 
Instead we trigger a `NetworkEvent::ResponseReceived` which calls the common - // `response_handler` - sender: Option>>, - }, - SendResponse { - resp: Response, - channel: MsgResponder, - }, /// Check if the local RecordStore contains the provided key RecordStoreHasKey { key: RecordKey, @@ -104,11 +82,10 @@ pub enum NetworkSwarmCmd { GetAllLocalRecordAddresses { sender: oneshot::Sender>, }, - /// Get Record from the Kad network - GetNetworkRecord { + /// Get data from the local RecordStore + GetLocalRecord { key: RecordKey, - sender: oneshot::Sender>, - cfg: GetRecordCfg, + sender: oneshot::Sender>, }, /// GetLocalStoreCost for this node GetLocalStoreCost { @@ -117,24 +94,6 @@ pub enum NetworkSwarmCmd { }, /// Notify the node received a payment. PaymentReceived, - /// Get data from the local RecordStore - GetLocalRecord { - key: RecordKey, - sender: oneshot::Sender>, - }, - /// Put record to network - PutRecord { - record: Record, - sender: oneshot::Sender>, - quorum: Quorum, - }, - /// Put record to specific node - PutRecordTo { - peers: Vec, - record: Record, - sender: oneshot::Sender>, - quorum: Quorum, - }, /// Put record to the local RecordStore PutLocalRecord { record: Record, @@ -150,8 +109,6 @@ pub enum NetworkSwarmCmd { key: RecordKey, record_type: RecordType, }, - /// Triggers interval repliation - TriggerIntervalReplication, /// Notify whether peer is in trouble RecordNodeIssue { peer_id: PeerId, @@ -170,113 +127,137 @@ pub enum NetworkSwarmCmd { FetchCompleted(RecordKey), } -/// Debug impl for SwarmCmd to avoid printing full Record, instead only RecodKey +/// Commands to send to the Swarm +pub enum NetworkSwarmCmd { + Dial { + addr: Multiaddr, + sender: oneshot::Sender>, + }, + // Get closest peers from the network + GetClosestPeersToAddressFromNetwork { + key: NetworkAddress, + sender: oneshot::Sender>, + }, + + // Send Request to the PeerId. + SendRequest { + req: Request, + peer: PeerId, + + // If a `sender` is provided, the requesting node will await for a `Response` from the + // Peer. The result is then returned at the call site. + // + // If a `sender` is not provided, the requesting node will not wait for the Peer's + // response. Instead we trigger a `NetworkEvent::ResponseReceived` which calls the common + // `response_handler` + sender: Option>>, + }, + SendResponse { + resp: Response, + channel: MsgResponder, + }, + + /// Get Record from the Kad network + GetNetworkRecord { + key: RecordKey, + sender: oneshot::Sender>, + cfg: GetRecordCfg, + }, + + /// Put record to network + PutRecord { + record: Record, + sender: oneshot::Sender>, + quorum: Quorum, + }, + /// Put record to specific node + PutRecordTo { + peers: Vec, + record: Record, + sender: oneshot::Sender>, + quorum: Quorum, + }, + + /// Triggers interval repliation + TriggerIntervalReplication, +} + +/// Debug impl for LocalSwarmCmd to avoid printing full Record, instead only RecodKey /// and RecordKind are printed. -impl Debug for NetworkSwarmCmd { +impl Debug for LocalSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - NetworkSwarmCmd::Dial { addr, .. } => { - write!(f, "SwarmCmd::Dial {{ addr: {addr:?} }}") - } - NetworkSwarmCmd::GetNetworkRecord { key, cfg, .. } => { - write!( - f, - "SwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?}", - PrettyPrintRecordKey::from(key) - ) - } - NetworkSwarmCmd::PutRecord { record, .. 
} => { - write!( - f, - "SwarmCmd::PutRecord {{ key: {:?} }}", - PrettyPrintRecordKey::from(&record.key) - ) - } - NetworkSwarmCmd::PutRecordTo { peers, record, .. } => { - write!( - f, - "SwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", - PrettyPrintRecordKey::from(&record.key) - ) - } - NetworkSwarmCmd::PutLocalRecord { record } => { + LocalSwarmCmd::PutLocalRecord { record } => { write!( f, "SwarmCmd::PutLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } - NetworkSwarmCmd::RemoveFailedLocalRecord { key } => { + LocalSwarmCmd::RemoveFailedLocalRecord { key } => { write!( f, "SwarmCmd::RemoveFailedLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } - NetworkSwarmCmd::AddLocalRecordAsStored { key, record_type } => { + LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } => { write!( f, "SwarmCmd::AddLocalRecordAsStored {{ key: {:?}, record_type: {record_type:?} }}", PrettyPrintRecordKey::from(key) ) } - NetworkSwarmCmd::TriggerIntervalReplication => { - write!(f, "SwarmCmd::TriggerIntervalReplication") - } - NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { - write!(f, "SwarmCmd::GetClosestPeers {{ key: {key:?} }}") - } - NetworkSwarmCmd::GetClosestKLocalPeers { .. } => { + + LocalSwarmCmd::GetClosestKLocalPeers { .. } => { write!(f, "SwarmCmd::GetClosestKLocalPeers") } - NetworkSwarmCmd::GetLocalStoreCost { .. } => { + LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => { + write!(f, "SwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}") + } + LocalSwarmCmd::GetLocalStoreCost { .. } => { write!(f, "SwarmCmd::GetLocalStoreCost") } - NetworkSwarmCmd::PaymentReceived { .. } => { + LocalSwarmCmd::PaymentReceived => { write!(f, "SwarmCmd::PaymentReceived") } - NetworkSwarmCmd::GetLocalRecord { key, .. } => { + LocalSwarmCmd::GetLocalRecord { key, .. } => { write!( f, "SwarmCmd::GetLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } - NetworkSwarmCmd::GetAllLocalRecordAddresses { .. } => { + LocalSwarmCmd::GetAllLocalRecordAddresses { .. } => { write!(f, "SwarmCmd::GetAllLocalRecordAddresses") } - NetworkSwarmCmd::GetKBuckets { .. } => { + LocalSwarmCmd::GetKBuckets { .. } => { write!(f, "SwarmCmd::GetKBuckets") } - NetworkSwarmCmd::GetSwarmLocalState { .. } => { + LocalSwarmCmd::GetSwarmLocalState { .. } => { write!(f, "SwarmCmd::GetSwarmLocalState") } - NetworkSwarmCmd::RecordStoreHasKey { key, .. } => { + LocalSwarmCmd::RecordStoreHasKey { key, .. } => { write!( f, "SwarmCmd::RecordStoreHasKey {:?}", PrettyPrintRecordKey::from(key) ) } - NetworkSwarmCmd::SendResponse { resp, .. } => { - write!(f, "SwarmCmd::SendResponse resp: {resp:?}") - } - NetworkSwarmCmd::SendRequest { req, peer, .. } => { - write!(f, "SwarmCmd::SendRequest req: {req:?}, peer: {peer:?}") - } - NetworkSwarmCmd::RecordNodeIssue { peer_id, issue } => { + + LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { write!( f, "SwarmCmd::SendNodeStatus peer {peer_id:?}, issue: {issue:?}" ) } - NetworkSwarmCmd::IsPeerShunned { target, .. } => { + LocalSwarmCmd::IsPeerShunned { target, .. 
} => { write!(f, "SwarmCmd::IsPeerInTrouble target: {target:?}") } - NetworkSwarmCmd::QuoteVerification { quotes } => { + LocalSwarmCmd::QuoteVerification { quotes } => { write!(f, "SwarmCmd::QuoteVerification of {} quotes", quotes.len()) } - NetworkSwarmCmd::FetchCompleted(key) => { + LocalSwarmCmd::FetchCompleted(key) => { write!( f, "SwarmCmd::FetchCompleted({:?})", @@ -286,6 +267,52 @@ impl Debug for NetworkSwarmCmd { } } } + +/// Debug impl for NetworkSwarmCmd to avoid printing full Record, instead only RecodKey +/// and RecordKind are printed. +impl Debug for NetworkSwarmCmd { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + NetworkSwarmCmd::Dial { addr, .. } => { + write!(f, "SwarmCmd::Dial {{ addr: {addr:?} }}") + } + NetworkSwarmCmd::GetNetworkRecord { key, cfg, .. } => { + write!( + f, + "SwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?}", + PrettyPrintRecordKey::from(key) + ) + } + NetworkSwarmCmd::PutRecord { record, .. } => { + write!( + f, + "SwarmCmd::PutRecord {{ key: {:?} }}", + PrettyPrintRecordKey::from(&record.key) + ) + } + NetworkSwarmCmd::PutRecordTo { peers, record, .. } => { + write!( + f, + "SwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", + PrettyPrintRecordKey::from(&record.key) + ) + } + + NetworkSwarmCmd::TriggerIntervalReplication => { + write!(f, "SwarmCmd::TriggerIntervalReplication") + } + NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { + write!(f, "SwarmCmd::GetClosestPeers {{ key: {key:?} }}") + } + NetworkSwarmCmd::SendResponse { resp, .. } => { + write!(f, "SwarmCmd::SendResponse resp: {resp:?}") + } + NetworkSwarmCmd::SendRequest { req, peer, .. } => { + write!(f, "SwarmCmd::SendRequest req: {req:?}, peer: {peer:?}") + } + } + } +} /// Snapshot of information kept in the Swarm's local state #[derive(Debug, Clone)] pub struct SwarmLocalState { @@ -296,9 +323,9 @@ pub struct SwarmLocalState { } impl SwarmDriver { - pub(crate) fn handle_cmd(&mut self, cmd: NetworkSwarmCmd) -> Result<(), NetworkError> { + pub(crate) fn handle_network_cmd(&mut self, cmd: NetworkSwarmCmd) -> Result<(), NetworkError> { let start = Instant::now(); - let mut cmd_string; + let cmd_string; match cmd { NetworkSwarmCmd::TriggerIntervalReplication => { cmd_string = "TriggerIntervalReplication"; @@ -331,40 +358,6 @@ impl SwarmDriver { info!("We now have {} pending get record attempts and cached {total_records} fetched copies", self.pending_get_record.len()); } - NetworkSwarmCmd::GetLocalStoreCost { key, sender } => { - cmd_string = "GetLocalStoreCost"; - let cost = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .store_cost(&key); - #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - let _ = metrics.store_cost.set(cost.0.as_nano() as i64); - } - - let _res = sender.send(cost); - } - NetworkSwarmCmd::PaymentReceived => { - cmd_string = "PaymentReceived"; - self.swarm - .behaviour_mut() - .kademlia - .store_mut() - .payment_received(); - } - NetworkSwarmCmd::GetLocalRecord { key, sender } => { - cmd_string = "GetLocalRecord"; - let record = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .get(&key) - .map(|rec| rec.into_owned()); - let _ = sender.send(record); - } NetworkSwarmCmd::PutRecord { record, sender, @@ -421,7 +414,142 @@ impl SwarmDriver { error!("Could not send response to PutRecordTo cmd: {:?}", err); } } - NetworkSwarmCmd::PutLocalRecord { record } => { + + NetworkSwarmCmd::Dial { addr, sender } => { + cmd_string = "Dial"; + + if let 
Some(peer_id) = multiaddr_pop_p2p(&mut addr.clone()) { + // Only consider the dial peer is bootstrap node when proper PeerId is provided. + if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(peer_id) { + let ilog2 = kbucket.range().0.ilog2(); + let peers = self.bootstrap_peers.entry(ilog2).or_default(); + peers.insert(peer_id); + } + } + let _ = match self.dial(addr) { + Ok(_) => sender.send(Ok(())), + Err(e) => sender.send(Err(e.into())), + }; + } + NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { + cmd_string = "GetClosestPeersToAddressFromNetwork"; + let query_id = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_peers(key.as_bytes()); + let _ = self.pending_get_closest_peers.insert( + query_id, + ( + PendingGetClosestType::FunctionCall(sender), + Default::default(), + ), + ); + } + + NetworkSwarmCmd::SendRequest { req, peer, sender } => { + cmd_string = "SendRequest"; + // If `self` is the recipient, forward the request directly to our upper layer to + // be handled. + // `self` then handles the request and sends a response back again to itself. + if peer == *self.swarm.local_peer_id() { + trace!("Sending query request to self"); + if let Request::Query(query) = req { + self.send_event(NetworkEvent::QueryRequestReceived { + query, + channel: MsgResponder::FromSelf(sender), + }); + } else { + // We should never receive a Replicate request from ourselves. + // we already hold this data if we do... so we can ignore + trace!("Replicate cmd to self received, ignoring"); + } + } else { + let request_id = self + .swarm + .behaviour_mut() + .request_response + .send_request(&peer, req); + trace!("Sending request {request_id:?} to peer {peer:?}"); + let _ = self.pending_requests.insert(request_id, sender); + + trace!("Pending Requests now: {:?}", self.pending_requests.len()); + } + } + NetworkSwarmCmd::SendResponse { resp, channel } => { + cmd_string = "SendResponse"; + match channel { + // If the response is for `self`, send it directly through the oneshot channel. 
+ MsgResponder::FromSelf(channel) => { + trace!("Sending response to self"); + match channel { + Some(channel) => { + channel + .send(Ok(resp)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } + None => { + // responses that are not awaited at the call site must be handled + // separately + self.send_event(NetworkEvent::ResponseReceived { res: resp }); + } + } + } + MsgResponder::FromPeer(channel) => { + self.swarm + .behaviour_mut() + .request_response + .send_response(channel, resp) + .map_err(NetworkError::OutgoingResponseDropped)?; + } + } + } + } + + self.log_handling(cmd_string.to_string(), start.elapsed()); + + Ok(()) + } + pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> { + let start = Instant::now(); + let mut cmd_string; + match cmd { + LocalSwarmCmd::GetLocalStoreCost { key, sender } => { + cmd_string = "GetLocalStoreCost"; + let cost = self + .swarm + .behaviour_mut() + .kademlia + .store_mut() + .store_cost(&key); + #[cfg(feature = "open-metrics")] + if let Some(metrics) = &self.network_metrics { + let _ = metrics.store_cost.set(cost.0.as_nano() as i64); + } + + let _res = sender.send(cost); + } + LocalSwarmCmd::PaymentReceived => { + cmd_string = "PaymentReceived"; + self.swarm + .behaviour_mut() + .kademlia + .store_mut() + .payment_received(); + } + LocalSwarmCmd::GetLocalRecord { key, sender } => { + cmd_string = "GetLocalRecord"; + let record = self + .swarm + .behaviour_mut() + .kademlia + .store_mut() + .get(&key) + .map(|rec| rec.into_owned()); + let _ = sender.send(record); + } + + LocalSwarmCmd::PutLocalRecord { record } => { cmd_string = "PutLocalRecord"; let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); @@ -510,7 +638,7 @@ impl SwarmDriver { return Err(err.into()); }; } - NetworkSwarmCmd::AddLocalRecordAsStored { key, record_type } => { + LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } => { info!( "Adding Record locally, for {:?} and {record_type:?}", PrettyPrintRecordKey::from(&key) @@ -524,7 +652,7 @@ impl SwarmDriver { // Reset counter on any success HDD write. self.hard_disk_write_error = 0; } - NetworkSwarmCmd::RemoveFailedLocalRecord { key } => { + LocalSwarmCmd::RemoveFailedLocalRecord { key } => { info!("Removing Record locally, for {key:?}"); cmd_string = "RemoveFailedLocalRecord"; self.swarm.behaviour_mut().kademlia.store_mut().remove(&key); @@ -537,7 +665,7 @@ impl SwarmDriver { }); } } - NetworkSwarmCmd::RecordStoreHasKey { key, sender } => { + LocalSwarmCmd::RecordStoreHasKey { key, sender } => { cmd_string = "RecordStoreHasKey"; let has_key = self .swarm @@ -547,7 +675,7 @@ impl SwarmDriver { .contains(&key); let _ = sender.send(has_key); } - NetworkSwarmCmd::GetAllLocalRecordAddresses { sender } => { + LocalSwarmCmd::GetAllLocalRecordAddresses { sender } => { cmd_string = "GetAllLocalRecordAddresses"; #[allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress let addresses = self @@ -558,38 +686,7 @@ impl SwarmDriver { .record_addresses(); let _ = sender.send(addresses); } - NetworkSwarmCmd::Dial { addr, sender } => { - cmd_string = "Dial"; - - if let Some(peer_id) = multiaddr_pop_p2p(&mut addr.clone()) { - // Only consider the dial peer is bootstrap node when proper PeerId is provided. 
- if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(peer_id) { - let ilog2 = kbucket.range().0.ilog2(); - let peers = self.bootstrap_peers.entry(ilog2).or_default(); - peers.insert(peer_id); - } - } - let _ = match self.dial(addr) { - Ok(_) => sender.send(Ok(())), - Err(e) => sender.send(Err(e.into())), - }; - } - NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { - cmd_string = "GetClosestPeersToAddressFromNetwork"; - let query_id = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_peers(key.as_bytes()); - let _ = self.pending_get_closest_peers.insert( - query_id, - ( - PendingGetClosestType::FunctionCall(sender), - Default::default(), - ), - ); - } - NetworkSwarmCmd::GetKBuckets { sender } => { + LocalSwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { @@ -607,68 +704,29 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - NetworkSwarmCmd::GetClosestKLocalPeers { sender } => { + LocalSwarmCmd::GetCloseGroupLocalPeers { key, sender } => { + cmd_string = "GetCloseGroupLocalPeers"; + let key = key.as_kbucket_key(); + // calls `kbuckets.closest_keys(key)` internally, which orders the peers by + // increasing distance + // Note it will return all peers, heance a chop down is required. + let closest_peers = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&key) + .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) + .collect(); + + let _ = sender.send(closest_peers); + } + LocalSwarmCmd::GetClosestKLocalPeers { sender } => { cmd_string = "GetClosestKLocalPeers"; let _ = sender.send(self.get_closest_k_value_local_peers()); } - NetworkSwarmCmd::SendRequest { req, peer, sender } => { - cmd_string = "SendRequest"; - // If `self` is the recipient, forward the request directly to our upper layer to - // be handled. - // `self` then handles the request and sends a response back again to itself. - if peer == *self.swarm.local_peer_id() { - debug!("Sending query request to self"); - if let Request::Query(query) = req { - self.send_event(NetworkEvent::QueryRequestReceived { - query, - channel: MsgResponder::FromSelf(sender), - }); - } else { - // We should never receive a Replicate request from ourselves. - // we already hold this data if we do... so we can ignore - debug!("Replicate cmd to self received, ignoring"); - } - } else { - let request_id = self - .swarm - .behaviour_mut() - .request_response - .send_request(&peer, req); - debug!("Sending request {request_id:?} to peer {peer:?}"); - let _ = self.pending_requests.insert(request_id, sender); - debug!("Pending Requests now: {:?}", self.pending_requests.len()); - } - } - NetworkSwarmCmd::SendResponse { resp, channel } => { - cmd_string = "SendResponse"; - match channel { - // If the response is for `self`, send it directly through the oneshot channel. 
- MsgResponder::FromSelf(channel) => { - debug!("Sending response to self"); - match channel { - Some(channel) => { - channel - .send(Ok(resp)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } - None => { - // responses that are not awaited at the call site must be handled - // separately - self.send_event(NetworkEvent::ResponseReceived { res: resp }); - } - } - } - MsgResponder::FromPeer(channel) => { - self.swarm - .behaviour_mut() - .request_response - .send_response(channel, resp) - .map_err(NetworkError::OutgoingResponseDropped)?; - } - } - } - NetworkSwarmCmd::GetSwarmLocalState(sender) => { + LocalSwarmCmd::GetSwarmLocalState(sender) => { cmd_string = "GetSwarmLocalState"; let current_state = SwarmLocalState { connected_peers: self.swarm.connected_peers().cloned().collect(), @@ -680,11 +738,11 @@ impl SwarmDriver { .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } - NetworkSwarmCmd::RecordNodeIssue { peer_id, issue } => { + LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { cmd_string = "RecordNodeIssues"; self.record_node_issue(peer_id, issue); } - NetworkSwarmCmd::IsPeerShunned { target, sender } => { + LocalSwarmCmd::IsPeerShunned { target, sender } => { cmd_string = "IsPeerInTrouble"; let is_bad = if let Some(peer_id) = target.as_peer_id() { if let Some((_issues, is_bad)) = self.bad_nodes.get(&peer_id) { @@ -697,7 +755,7 @@ impl SwarmDriver { }; let _ = sender.send(is_bad); } - NetworkSwarmCmd::QuoteVerification { quotes } => { + LocalSwarmCmd::QuoteVerification { quotes } => { cmd_string = "QuoteVerification"; for (peer_id, quote) in quotes { // Do nothing if already being bad @@ -709,7 +767,7 @@ impl SwarmDriver { self.verify_peer_quote(peer_id, quote); } } - NetworkSwarmCmd::FetchCompleted(key) => { + LocalSwarmCmd::FetchCompleted(key) => { info!( "Fetch {:?} early completed, may fetched an old version record.", PrettyPrintRecordKey::from(&key) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index e10f3d748e..2e5e96ce23 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -13,7 +13,7 @@ use crate::metrics_service::run_metrics_server; use crate::{ bootstrap::{ContinuousBootstrap, BOOTSTRAP_INTERVAL}, circular_vec::CircularVec, - cmd::NetworkSwarmCmd, + cmd::{LocalSwarmCmd, NetworkSwarmCmd}, error::{NetworkError, Result}, event::{NetworkEvent, NodeEvent}, multiaddr_pop_p2p, @@ -62,7 +62,10 @@ use std::{ num::NonZeroUsize, path::PathBuf, }; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{ + mpsc::{self, error::TryRecvError}, + oneshot, +}; use tokio::time::Duration; use tracing::warn; use xor_name::XorName; @@ -457,7 +460,10 @@ impl NetworkBuilder { }; let (network_event_sender, network_event_receiver) = mpsc::channel(NETWORKING_CHANNEL_SIZE); - let (swarm_cmd_sender, swarm_cmd_receiver) = mpsc::channel(NETWORKING_CHANNEL_SIZE); + let (network_swarm_cmd_sender, network_swarm_cmd_receiver) = + mpsc::channel(NETWORKING_CHANNEL_SIZE); + let (local_swarm_cmd_sender, local_swarm_cmd_receiver) = + mpsc::channel(NETWORKING_CHANNEL_SIZE); // Kademlia Behaviour let kademlia = { @@ -467,7 +473,7 @@ impl NetworkBuilder { peer_id, store_cfg, network_event_sender.clone(), - swarm_cmd_sender.clone(), + local_swarm_cmd_sender.clone(), ); #[cfg(feature = "open-metrics")] let mut node_record_store = node_record_store; @@ -604,7 +610,8 @@ impl NetworkBuilder { replication_fetcher, #[cfg(feature = "open-metrics")] network_metrics, - network_cmd_receiver: swarm_cmd_receiver, + network_cmd_receiver: 
network_swarm_cmd_receiver, + local_cmd_receiver: local_swarm_cmd_receiver, event_sender: network_event_sender, pending_get_closest_peers: Default::default(), pending_requests: Default::default(), @@ -623,7 +630,13 @@ impl NetworkBuilder { replication_targets: Default::default(), }; - let network = Network::new(swarm_cmd_sender, peer_id, self.root_dir, self.keypair); + let network = Network::new( + network_swarm_cmd_sender, + local_swarm_cmd_sender, + peer_id, + self.root_dir, + self.keypair, + ); Ok((network, network_event_receiver, swarm_driver)) } @@ -646,6 +659,7 @@ pub struct SwarmDriver { #[cfg(feature = "open-metrics")] pub(crate) network_metrics: Option, + local_cmd_receiver: mpsc::Receiver, network_cmd_receiver: mpsc::Receiver, event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. @@ -686,6 +700,30 @@ impl SwarmDriver { let mut relay_manager_reservation_interval = interval(RELAY_MANAGER_RESERVATION_INTERVAL); loop { + // Prioritise any local cmds pending. + // https://github.com/libp2p/rust-libp2p/blob/master/docs/coding-guidelines.md#prioritize-local-work-over-new-work-from-a-remote + match self.local_cmd_receiver.try_recv() { + Ok(cmd) => { + let start = Instant::now(); + let cmd_string = format!("{cmd:?}"); + if let Err(err) = self.handle_local_cmd(cmd) { + warn!("Error while handling local cmd: {err}"); + } + trace!("LocalCmd handled in {:?}: {cmd_string:?}", start.elapsed()); + + continue; + } + Err(error) => match error { + TryRecvError::Empty => { + // no local cmds pending, continue + } + TryRecvError::Disconnected => { + error!("LocalCmd channel disconnected, shutting down SwarmDriver"); + return; + } + }, + } + tokio::select! { swarm_event = self.swarm.select_next_some() => { // logging for handling events happens inside handle_swarm_events @@ -698,7 +736,7 @@ impl SwarmDriver { Some(cmd) => { let start = Instant::now(); let cmd_string = format!("{cmd:?}"); - if let Err(err) = self.handle_cmd(cmd) { + if let Err(err) = self.handle_network_cmd(cmd) { warn!("Error while handling cmd: {err}"); } trace!("SwarmCmd handled in {:?}: {cmd_string:?}", start.elapsed()); diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index e0b506ff06..9db2195ece 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, + cmd::LocalSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, @@ -503,7 +503,7 @@ impl SwarmDriver { { self.update_on_peer_removal(*dead_peer.node.key.preimage()); - self.handle_cmd(NetworkSwarmCmd::RecordNodeIssue { + self.handle_local_cmd(LocalSwarmCmd::RecordNodeIssue { peer_id: failed_peer_id, issue: crate::NodeIssue::ConnectionIssue, })?; diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 4efcbec55b..d1ff25bb14 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -31,6 +31,7 @@ mod transfers; mod transport; pub mod version; +use cmd::LocalSwarmCmd; // re-export arch dependent deps for use in the crate, or above pub use target_arch::{interval, sleep, spawn, Instant, Interval}; @@ -161,7 +162,8 @@ pub struct Network { /// The actual implementation of the Network. The other is just a wrapper around this, so that we don't expose /// the Arc from the interface. 
struct NetworkInner { - swarm_cmd_sender: mpsc::Sender, + network_swarm_cmd_sender: mpsc::Sender, + local_swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, root_dir_path: PathBuf, keypair: Keypair, @@ -169,14 +171,16 @@ struct NetworkInner { impl Network { pub fn new( - swarm_cmd_sender: mpsc::Sender, + network_swarm_cmd_sender: mpsc::Sender, + local_swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, root_dir_path: PathBuf, keypair: Keypair, ) -> Self { Self { inner: Arc::new(NetworkInner { - swarm_cmd_sender, + network_swarm_cmd_sender, + local_swarm_cmd_sender, peer_id, root_dir_path, keypair, @@ -199,9 +203,13 @@ impl Network { &self.inner.root_dir_path } - /// Get the sender to send a `SwarmCmd` to the underlying `Swarm`. - pub(crate) fn swarm_cmd_sender(&self) -> &mpsc::Sender { - &self.inner.swarm_cmd_sender + /// Get the sender to send a `NetworkSwarmCmd` to the underlying `Swarm`. + pub(crate) fn network_swarm_cmd_sender(&self) -> &mpsc::Sender { + &self.inner.network_swarm_cmd_sender + } + /// Get the sender to send a `LocalSwarmCmd` to the underlying `Swarm`. + pub(crate) fn local_swarm_cmd_sender(&self) -> &mpsc::Sender { + &self.inner.local_swarm_cmd_sender } /// Signs the given data with the node's keypair. @@ -223,7 +231,7 @@ impl Network { /// This function will only be called for the bootstrap nodes. pub async fn dial(&self, addr: Multiaddr) -> Result<()> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); + self.send_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); receiver.await? } @@ -245,7 +253,7 @@ impl Network { /// Does not include self pub async fn get_kbuckets(&self) -> Result>> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetKBuckets { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetKBuckets { sender }); receiver .await .map_err(|_e| NetworkError::InternalMsgChannelDropped) @@ -255,7 +263,7 @@ impl Network { /// Also contains our own PeerId. pub async fn get_closest_k_value_local_peers(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetClosestKLocalPeers { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetClosestKLocalPeers { sender }); receiver .await @@ -435,7 +443,7 @@ impl Network { let pretty_key = PrettyPrintRecordKey::from(&key); info!("Getting record from network of {pretty_key:?}. with cfg {cfg:?}",); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { + self.send_network_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { key: key.clone(), sender, cfg: cfg.clone(), @@ -471,7 +479,7 @@ impl Network { let pretty_key = PrettyPrintRecordKey::from(&key); info!("Getting record from network of {pretty_key:?}. with cfg {cfg:?}",); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { + self.send_network_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { key: key.clone(), sender, cfg: cfg.clone(), @@ -530,7 +538,7 @@ impl Network { key: RecordKey, ) -> Result<(NanoTokens, QuotingMetrics)> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetLocalStoreCost { key, sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalStoreCost { key, sender }); receiver .await @@ -539,13 +547,13 @@ impl Network { /// Notify the node receicced a payment. 
pub fn notify_payment_received(&self) { - self.send_swarm_cmd(NetworkSwarmCmd::PaymentReceived); + self.send_local_swarm_cmd(LocalSwarmCmd::PaymentReceived); } /// Get `Record` from the local RecordStore pub async fn get_local_record(&self, key: &RecordKey) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetLocalRecord { + self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalRecord { key: key.clone(), sender, }); @@ -558,7 +566,7 @@ impl Network { /// Whether the target peer is considered blacklisted by self pub async fn is_peer_shunned(&self, target: NetworkAddress) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::IsPeerShunned { target, sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::IsPeerShunned { target, sender }); receiver .await @@ -609,14 +617,14 @@ impl Network { // Waiting for a response to avoid flushing to network too quick that causing choke let (sender, receiver) = oneshot::channel(); if let Some(put_record_to_peers) = &cfg.use_put_record_to { - self.send_swarm_cmd(NetworkSwarmCmd::PutRecordTo { + self.send_network_swarm_cmd(NetworkSwarmCmd::PutRecordTo { peers: put_record_to_peers.clone(), record: record.clone(), sender, quorum: cfg.put_quorum, }); } else { - self.send_swarm_cmd(NetworkSwarmCmd::PutRecord { + self.send_network_swarm_cmd(NetworkSwarmCmd::PutRecord { record: record.clone(), sender, quorum: cfg.put_quorum, @@ -677,7 +685,7 @@ impl Network { /// Notify ReplicationFetch a fetch attempt is completed. /// (but it won't trigger any real writes to disk, say fetched an old version of register) pub fn notify_fetch_completed(&self, key: RecordKey) { - self.send_swarm_cmd(NetworkSwarmCmd::FetchCompleted(key)) + self.send_local_swarm_cmd(LocalSwarmCmd::FetchCompleted(key)) } /// Put `Record` to the local RecordStore @@ -688,13 +696,13 @@ impl Network { PrettyPrintRecordKey::from(&record.key), record.value.len() ); - self.send_swarm_cmd(NetworkSwarmCmd::PutLocalRecord { record }) + self.send_local_swarm_cmd(LocalSwarmCmd::PutLocalRecord { record }) } /// Returns true if a RecordKey is present locally in the RecordStore pub async fn is_record_key_present_locally(&self, key: &RecordKey) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::RecordStoreHasKey { + self.send_local_swarm_cmd(LocalSwarmCmd::RecordStoreHasKey { key: key.clone(), sender, }); @@ -709,7 +717,7 @@ impl Network { &self, ) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetAllLocalRecordAddresses { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetAllLocalRecordAddresses { sender }); receiver .await @@ -722,7 +730,7 @@ impl Network { /// layers. pub async fn send_request(&self, req: Request, peer: PeerId) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::SendRequest { + self.send_network_swarm_cmd(NetworkSwarmCmd::SendRequest { req, peer, sender: Some(sender), @@ -738,37 +746,41 @@ impl Network { peer, sender: None, }; - self.send_swarm_cmd(swarm_cmd) + self.send_network_swarm_cmd(swarm_cmd) } /// Send a `Response` through the channel opened by the requester. pub fn send_response(&self, resp: Response, channel: MsgResponder) { - self.send_swarm_cmd(NetworkSwarmCmd::SendResponse { resp, channel }) + self.send_network_swarm_cmd(NetworkSwarmCmd::SendResponse { resp, channel }) } /// Return a `SwarmLocalState` with some information obtained from swarm's local state. 
pub async fn get_swarm_local_state(&self) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetSwarmLocalState(sender)); + self.send_local_swarm_cmd(LocalSwarmCmd::GetSwarmLocalState(sender)); let state = receiver.await?; Ok(state) } pub fn trigger_interval_replication(&self) { - self.send_swarm_cmd(NetworkSwarmCmd::TriggerIntervalReplication) + self.send_network_swarm_cmd(NetworkSwarmCmd::TriggerIntervalReplication) } pub fn record_node_issues(&self, peer_id: PeerId, issue: NodeIssue) { - self.send_swarm_cmd(NetworkSwarmCmd::RecordNodeIssue { peer_id, issue }); + self.send_local_swarm_cmd(LocalSwarmCmd::RecordNodeIssue { peer_id, issue }); } pub fn historical_verify_quotes(&self, quotes: Vec<(PeerId, PaymentQuote)>) { - self.send_swarm_cmd(NetworkSwarmCmd::QuoteVerification { quotes }); + self.send_local_swarm_cmd(LocalSwarmCmd::QuoteVerification { quotes }); } - // Helper to send SwarmCmd - fn send_swarm_cmd(&self, cmd: NetworkSwarmCmd) { - send_swarm_cmd(self.swarm_cmd_sender().clone(), cmd); + /// Helper to send NetworkSwarmCmd + fn send_network_swarm_cmd(&self, cmd: NetworkSwarmCmd) { + send_network_swarm_cmd(self.network_swarm_cmd_sender().clone(), cmd); + } + /// Helper to send LocalSwarmCmd + fn send_local_swarm_cmd(&self, cmd: LocalSwarmCmd) { + send_local_swarm_cmd(self.local_swarm_cmd_sender().clone(), cmd); } /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. @@ -780,7 +792,7 @@ impl Network { ) -> Result> { debug!("Getting the closest peers to {key:?}"); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { + self.send_network_swarm_cmd(NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key: key.clone(), sender, }); @@ -953,7 +965,28 @@ pub(crate) fn multiaddr_strip_p2p(multiaddr: &Multiaddr) -> Multiaddr { } } -pub(crate) fn send_swarm_cmd(swarm_cmd_sender: Sender, cmd: NetworkSwarmCmd) { +pub(crate) fn send_local_swarm_cmd(swarm_cmd_sender: Sender, cmd: LocalSwarmCmd) { + let capacity = swarm_cmd_sender.capacity(); + + if capacity == 0 { + error!( + "SwarmCmd channel is full. Await capacity to send: {:?}", + cmd + ); + } + + // Spawn a task to send the SwarmCmd and keep this fn sync + let _handle = spawn(async move { + if let Err(error) = swarm_cmd_sender.send(cmd).await { + error!("Failed to send SwarmCmd: {}", error); + } + }); +} + +pub(crate) fn send_network_swarm_cmd( + swarm_cmd_sender: Sender, + cmd: NetworkSwarmCmd, +) { let capacity = swarm_cmd_sender.capacity(); if capacity == 0 { diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 0ff98e30ae..7939ce7e25 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -7,10 +7,11 @@ // permissions and limitations relating to use of the SAFE Network Software. #![allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress +use crate::cmd::LocalSwarmCmd; use crate::driver::MAX_PACKET_SIZE; use crate::target_arch::{spawn, Instant}; -use crate::CLOSE_GROUP_SIZE; -use crate::{cmd::NetworkSwarmCmd, event::NetworkEvent, log_markers::Marker, send_swarm_cmd}; +use crate::{event::NetworkEvent, log_markers::Marker}; +use crate::{send_local_swarm_cmd, CLOSE_GROUP_SIZE}; use aes_gcm_siv::{ aead::{Aead, KeyInit, OsRng}, Aes256GcmSiv, Nonce, @@ -76,7 +77,7 @@ pub struct NodeRecordStore { /// Send network events to the node layer. network_event_sender: mpsc::Sender, /// Send cmds to the network layer. 
Used to interact with self in an async fashion. - swarm_cmd_sender: mpsc::Sender, + local_swarm_cmd_sender: mpsc::Sender, /// ilog2 distance range of responsible records /// AKA: how many buckets of data do we consider "close" /// None means accept all records. @@ -248,7 +249,7 @@ impl NodeRecordStore { local_id: PeerId, config: NodeRecordStoreConfig, network_event_sender: mpsc::Sender, - swarm_cmd_sender: mpsc::Sender, + swarm_cmd_sender: mpsc::Sender, ) -> Self { let key = Aes256GcmSiv::generate_key(&mut OsRng); let cipher = Aes256GcmSiv::new(&key); @@ -280,7 +281,7 @@ impl NodeRecordStore { records, records_cache: VecDeque::with_capacity(cache_size), network_event_sender, - swarm_cmd_sender, + local_swarm_cmd_sender: swarm_cmd_sender, responsible_distance_range: None, #[cfg(feature = "open-metrics")] record_count_metric: None, @@ -541,7 +542,7 @@ impl NodeRecordStore { } let encryption_details = self.encryption_details.clone(); - let cloned_cmd_sender = self.swarm_cmd_sender.clone(); + let cloned_cmd_sender = self.local_swarm_cmd_sender.clone(); spawn(async move { let key = r.key.clone(); if let Some(bytes) = Self::prepare_record_bytes(r, encryption_details) { @@ -550,17 +551,17 @@ impl NodeRecordStore { // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): info!("Wrote record {record_key:?} to disk! filename: {filename}"); - NetworkSwarmCmd::AddLocalRecordAsStored { key, record_type } + LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } } Err(err) => { error!( "Error writing record {record_key:?} filename: {filename}, error: {err:?}" ); - NetworkSwarmCmd::RemoveFailedLocalRecord { key } + LocalSwarmCmd::RemoveFailedLocalRecord { key } } }; - send_swarm_cmd(cloned_cmd_sender, cmd); + send_local_swarm_cmd(cloned_cmd_sender, cmd); } }); From be756d8c992e08ccd4a12f6571e8de8e3ba6d622 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 18 Jul 2024 16:31:17 +0900 Subject: [PATCH 013/115] fix(node): fix double spend attempt test now accumulation may not be in the ms instant. we should be tolerant and accept any error indicative of the failed tx --- sn_node/tests/double_spend.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 3f6296c490..be22115fd4 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -365,10 +365,13 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() .await?; info!("Verifying the transfers from A -> X wallet... It should error out."); let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone(); + let result = client.verify_cashnote(&cash_notes_for_x[0]).await; info!("Got result while verifying double spend from A -> X: {result:?}"); + assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) was detected"); + assert!(spend_did_not_happen); }); // poisoned // Try to double spend from B -> Y @@ -401,20 +404,29 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() info!("Verifying the transfers from B -> Y wallet... 
It should error out."); let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone(); + let result = client.verify_cashnote(&cash_notes_for_y[0]).await; info!("Got result while verifying double spend from B -> Y: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) was detected"); + assert!(spend_did_not_happen); }); info!("Verifying the original cashnote of A -> B"); + let result = client.verify_cashnote(&cash_notes_for_b[0]).await; info!("Got result while verifying the original spend from A -> B: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + + let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) was detected"); + assert!(spend_did_not_happen); }); println!("Verifying the original cashnote of B -> C"); + + // arbitrary time sleep to allow for network accumulation + tokio::time::sleep(Duration::from_secs(1)).await; + let result = client.verify_cashnote(&cash_notes_for_c[0]).await; info!("Got result while verifying the original spend from B -> C: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { From 665f21fd11f51ae143a3217ced6c3a809a4e2426 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 18 Jul 2024 16:40:17 +0900 Subject: [PATCH 014/115] fix(node): fix double spend attempt test to ensure a spend fails We don't always expect both to error out with a double spend, one might pass before acumulation, but both sould never pass --- sn_node/tests/double_spend.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index be22115fd4..13fb45922d 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -239,11 +239,17 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { client .send_spends(transfer_to_3.all_spend_requests.iter(), false) .await?; - info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); + let cash_notes_for_3: Vec<_> = transfer_to_3.cash_notes_for_recipient.clone(); - assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned - info!("Verifying the original transfers from 1 -> 2 wallet... It should error out."); - assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + + info!("Verifying the transfers from 1 -> 3 wallet aand 1-> 2... 
One should error out."); + let for3_failed = client.verify_cashnote(&cash_notes_for_3[0]).await.is_err(); + let for2_failed = client.verify_cashnote(&cash_notes_for_2[0]).await.is_err(); + // Both cannot pass + assert!( + for2_failed || for3_failed, + "one transaction must be invalid" + ); // the old spend has been poisoned // The old spend has been poisoned, but spends from 22 -> 222 should still work let wallet_dir_222 = TempDir::new()?; @@ -266,6 +272,7 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { client .send_spends(transfer_to_222.all_spend_requests.iter(), false) .await?; + info!("Verifying the transfers from 22 -> 222 wallet..."); let cash_notes_for_222: Vec<_> = transfer_to_222.cash_notes_for_recipient.clone(); client.verify_cashnote(&cash_notes_for_222[0]).await?; From 93407fe8813c44545a29534caa685ac266fd6bda Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 19 Jul 2024 09:35:03 +0900 Subject: [PATCH 015/115] test(node): improve DoubleSpendAttempt test with final error check --- sn_networking/src/error.rs | 2 +- sn_node/tests/double_spend.rs | 63 ++++++++++++++++++++++++++++++----- 2 files changed, 55 insertions(+), 10 deletions(-) diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 103a79a9a3..de5cb56c3f 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -138,7 +138,7 @@ pub enum NetworkError { // ---------- Spend Errors #[error("Spend not found: {0:?}")] NoSpendFoundInsideRecord(SpendAddress), - #[error("Double spend(s) was detected. The signed spends are: {0:?}")] + #[error("Double spend(s) attempt was detected. The signed spends are: {0:?}")] DoubleSpendAttempt(Vec), // ---------- Store Error diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 13fb45922d..d06aa9b60f 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -11,7 +11,7 @@ mod common; use assert_fs::TempDir; use assert_matches::assert_matches; use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::Result; +use eyre::{bail, Result}; use itertools::Itertools; use sn_logging::LogBuilder; use sn_networking::NetworkError; @@ -82,10 +82,10 @@ async fn cash_note_transfer_double_spend_fail() -> Result<()> { info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); assert!(should_err1.is_err() && should_err2.is_err()); assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + assert!(str.starts_with("Network Error Double spend(s) attempt was detected")); }); assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + assert!(str.starts_with("Network Error Double spend(s) attempt was detected")); }); Ok(()) @@ -277,6 +277,35 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { let cash_notes_for_222: Vec<_> = transfer_to_222.cash_notes_for_recipient.clone(); client.verify_cashnote(&cash_notes_for_222[0]).await?; + // finally assert that we have a double spend attempt error here + // we wait 1s to ensure that the double spend attempt is detected and accumulated + tokio::time::sleep(Duration::from_secs(1)).await; + + match client.verify_cashnote(&cash_notes_for_2[0]).await { + Ok(_) => bail!("Cashnote verification should have failed"), + Err(e) => { + println!("Error verifying cashnote: {:?}", e); + + assert!( + 
e.to_string() + .contains("Network Error Double spend(s) attempt was detected"), + "error should reflect double spend attempt", + ); + } + } + + match client.verify_cashnote(&cash_notes_for_3[0]).await { + Ok(_) => bail!("Cashnote verification should have failed"), + Err(e) => { + println!("Error verifying cashnote: {:?}", e); + + assert!( + e.to_string() + .contains("Network Error Double spend(s) attempt was detected"), + "error should reflect double spend attempt", + ); + } + } Ok(()) } @@ -377,7 +406,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() info!("Got result while verifying double spend from A -> X: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) was detected"); + let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) attempt was detected"); assert!(spend_did_not_happen); }); // poisoned @@ -415,7 +444,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() let result = client.verify_cashnote(&cash_notes_for_y[0]).await; info!("Got result while verifying double spend from B -> Y: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) was detected"); + let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) attempt was detected"); assert!(spend_did_not_happen); }); @@ -425,7 +454,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() info!("Got result while verifying the original spend from A -> B: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) was detected"); + let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) attempt was detected"); assert!(spend_did_not_happen); }); @@ -437,7 +466,15 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() let result = client.verify_cashnote(&cash_notes_for_c[0]).await; info!("Got result while verifying the original spend from B -> C: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for c should show double spend attempt"); + }); + let result = client.verify_cashnote(&cash_notes_for_y[0]).await; + assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { + assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for y should show double spend attempt"); + }); + let result = client.verify_cashnote(&cash_notes_for_b[0]).await; + assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { + assert!(str.starts_with("Network Error Double 
spend(s) attempt was detected"), "cashnote for y should show double spend attempt"); }); Ok(()) @@ -541,10 +578,14 @@ async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { .await?; info!("Verifying the transfers from A -> X wallet... It should error out."); let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone(); + + // sleep for a bit to allow the network to process and accumulate the double spend + tokio::time::sleep(Duration::from_millis(500)).await; + let result = client.verify_cashnote(&cash_notes_for_x[0]).await; info!("Got result while verifying double spend from A -> X: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + assert!(str.starts_with("Network Error Double spend(s) attempt was detected")); }); // the original A should still be present as one of the double spends @@ -584,10 +625,14 @@ async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { .await?; info!("Verifying the transfers from A -> Y wallet... It should error out."); let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone(); + + // sleep for a bit to allow the network to process and accumulate the double spend + tokio::time::sleep(Duration::from_millis(500)).await; + let result = client.verify_cashnote(&cash_notes_for_y[0]).await; info!("Got result while verifying double spend from A -> Y: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) was detected")); + assert!(str.starts_with("Network Error Double spend(s) attempt was detected")); }); // the original A should still be present as one of the double spends From db445f22d34fa9a00c48a6860fb355baf2b4744b Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 19 Jul 2024 10:31:20 +0900 Subject: [PATCH 016/115] chore(networking): update SwarmCmd logging --- sn_networking/src/cmd.rs | 58 +++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 261780f7c3..481cdd2568 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -190,57 +190,60 @@ impl Debug for LocalSwarmCmd { LocalSwarmCmd::PutLocalRecord { record } => { write!( f, - "SwarmCmd::PutLocalRecord {{ key: {:?} }}", + "LocalSwarmCmd::PutLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } LocalSwarmCmd::RemoveFailedLocalRecord { key } => { write!( f, - "SwarmCmd::RemoveFailedLocalRecord {{ key: {:?} }}", + "LocalSwarmCmd::RemoveFailedLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } => { write!( f, - "SwarmCmd::AddLocalRecordAsStored {{ key: {:?}, record_type: {record_type:?} }}", + "LocalSwarmCmd::AddLocalRecordAsStored {{ key: {:?}, record_type: {record_type:?} }}", PrettyPrintRecordKey::from(key) ) } LocalSwarmCmd::GetClosestKLocalPeers { .. } => { - write!(f, "SwarmCmd::GetClosestKLocalPeers") + write!(f, "LocalSwarmCmd::GetClosestKLocalPeers") } LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => { - write!(f, "SwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}") + write!( + f, + "LocalSwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}" + ) } LocalSwarmCmd::GetLocalStoreCost { .. 
} => { - write!(f, "SwarmCmd::GetLocalStoreCost") + write!(f, "LocalSwarmCmd::GetLocalStoreCost") } LocalSwarmCmd::PaymentReceived => { - write!(f, "SwarmCmd::PaymentReceived") + write!(f, "LocalSwarmCmd::PaymentReceived") } LocalSwarmCmd::GetLocalRecord { key, .. } => { write!( f, - "SwarmCmd::GetLocalRecord {{ key: {:?} }}", + "LocalSwarmCmd::GetLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } LocalSwarmCmd::GetAllLocalRecordAddresses { .. } => { - write!(f, "SwarmCmd::GetAllLocalRecordAddresses") + write!(f, "LocalSwarmCmd::GetAllLocalRecordAddresses") } LocalSwarmCmd::GetKBuckets { .. } => { - write!(f, "SwarmCmd::GetKBuckets") + write!(f, "LocalSwarmCmd::GetKBuckets") } LocalSwarmCmd::GetSwarmLocalState { .. } => { - write!(f, "SwarmCmd::GetSwarmLocalState") + write!(f, "LocalSwarmCmd::GetSwarmLocalState") } LocalSwarmCmd::RecordStoreHasKey { key, .. } => { write!( f, - "SwarmCmd::RecordStoreHasKey {:?}", + "LocalSwarmCmd::RecordStoreHasKey {:?}", PrettyPrintRecordKey::from(key) ) } @@ -248,19 +251,23 @@ impl Debug for LocalSwarmCmd { LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { write!( f, - "SwarmCmd::SendNodeStatus peer {peer_id:?}, issue: {issue:?}" + "LocalSwarmCmd::SendNodeStatus peer {peer_id:?}, issue: {issue:?}" ) } LocalSwarmCmd::IsPeerShunned { target, .. } => { - write!(f, "SwarmCmd::IsPeerInTrouble target: {target:?}") + write!(f, "LocalSwarmCmd::IsPeerInTrouble target: {target:?}") } LocalSwarmCmd::QuoteVerification { quotes } => { - write!(f, "SwarmCmd::QuoteVerification of {} quotes", quotes.len()) + write!( + f, + "LocalSwarmCmd::QuoteVerification of {} quotes", + quotes.len() + ) } LocalSwarmCmd::FetchCompleted(key) => { write!( f, - "SwarmCmd::FetchCompleted({:?})", + "LocalSwarmCmd::FetchCompleted({:?})", PrettyPrintRecordKey::from(key) ) } @@ -274,41 +281,44 @@ impl Debug for NetworkSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { NetworkSwarmCmd::Dial { addr, .. } => { - write!(f, "SwarmCmd::Dial {{ addr: {addr:?} }}") + write!(f, "NetworkSwarmCmd::Dial {{ addr: {addr:?} }}") } NetworkSwarmCmd::GetNetworkRecord { key, cfg, .. } => { write!( f, - "SwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?}", + "NetworkSwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?}", PrettyPrintRecordKey::from(key) ) } NetworkSwarmCmd::PutRecord { record, .. } => { write!( f, - "SwarmCmd::PutRecord {{ key: {:?} }}", + "NetworkSwarmCmd::PutRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } NetworkSwarmCmd::PutRecordTo { peers, record, .. } => { write!( f, - "SwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", + "NetworkSwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", PrettyPrintRecordKey::from(&record.key) ) } NetworkSwarmCmd::TriggerIntervalReplication => { - write!(f, "SwarmCmd::TriggerIntervalReplication") + write!(f, "NetworkSwarmCmd::TriggerIntervalReplication") } NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { - write!(f, "SwarmCmd::GetClosestPeers {{ key: {key:?} }}") + write!(f, "NetworkSwarmCmd::GetClosestPeers {{ key: {key:?} }}") } NetworkSwarmCmd::SendResponse { resp, .. } => { - write!(f, "SwarmCmd::SendResponse resp: {resp:?}") + write!(f, "NetworkSwarmCmd::SendResponse resp: {resp:?}") } NetworkSwarmCmd::SendRequest { req, peer, .. 
} => {
-                write!(f, "SwarmCmd::SendRequest req: {req:?}, peer: {peer:?}")
+                write!(
+                    f,
+                    "NetworkSwarmCmd::SendRequest req: {req:?}, peer: {peer:?}"
+                )
             }
         }
     }
 }

From 9b732c7ff5727b130cbf9d2e184a60fe5bb7d492 Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Fri, 19 Jul 2024 10:41:06 +0900
Subject: [PATCH 017/115] fix(networking): make TriggerIntervalReplication
 LocalSwarmCmd

Although it produces network messaging, it's triggered internally in the
node, so it seems safe. This fixes some test errors due to data being
insufficiently replicated
---
 sn_networking/src/cmd.rs      | 21 ++++++++++-----------
 sn_networking/src/lib.rs      |  2 +-
 sn_node/tests/double_spend.rs |  4 ----
 3 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index 481cdd2568..432045b2e7 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -125,6 +125,9 @@ pub enum LocalSwarmCmd {
     },
     // Notify a fetch completion
     FetchCompleted(RecordKey),
+    /// Triggers interval repliation
+    /// NOTE: This does result in outgoing messages, but is produced locally
+    TriggerIntervalReplication,
 }
 
 /// Commands to send to the Swarm
@@ -177,9 +180,6 @@ pub enum NetworkSwarmCmd {
         sender: oneshot::Sender>,
         quorum: Quorum,
     },
-
-    /// Triggers interval repliation
-    TriggerIntervalReplication,
 }
 
 /// Debug impl for LocalSwarmCmd to avoid printing full Record, instead only RecodKey
@@ -271,6 +271,9 @@ impl Debug for LocalSwarmCmd {
                     PrettyPrintRecordKey::from(key)
                 )
             }
+            LocalSwarmCmd::TriggerIntervalReplication => {
+                write!(f, "LocalSwarmCmd::TriggerIntervalReplication")
+            }
         }
     }
 }
@@ -304,10 +307,6 @@ impl Debug for NetworkSwarmCmd {
                     PrettyPrintRecordKey::from(&record.key)
                 )
             }
-
-            NetworkSwarmCmd::TriggerIntervalReplication => {
-                write!(f, "NetworkSwarmCmd::TriggerIntervalReplication")
-            }
             NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. 
} => { write!(f, "NetworkSwarmCmd::GetClosestPeers {{ key: {key:?} }}") } @@ -337,10 +336,6 @@ impl SwarmDriver { let start = Instant::now(); let cmd_string; match cmd { - NetworkSwarmCmd::TriggerIntervalReplication => { - cmd_string = "TriggerIntervalReplication"; - self.try_interval_replication()?; - } NetworkSwarmCmd::GetNetworkRecord { key, sender, cfg } => { cmd_string = "GetNetworkRecord"; let query_id = self.swarm.behaviour_mut().kademlia.get_record(key.clone()); @@ -524,6 +519,10 @@ impl SwarmDriver { let start = Instant::now(); let mut cmd_string; match cmd { + LocalSwarmCmd::TriggerIntervalReplication => { + cmd_string = "TriggerIntervalReplication"; + self.try_interval_replication()?; + } LocalSwarmCmd::GetLocalStoreCost { key, sender } => { cmd_string = "GetLocalStoreCost"; let cost = self diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index d1ff25bb14..030d0b4a6f 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -763,7 +763,7 @@ impl Network { } pub fn trigger_interval_replication(&self) { - self.send_network_swarm_cmd(NetworkSwarmCmd::TriggerIntervalReplication) + self.send_local_swarm_cmd(LocalSwarmCmd::TriggerIntervalReplication) } pub fn record_node_issues(&self, peer_id: PeerId, issue: NodeIssue) { diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index d06aa9b60f..3a30cf6d93 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -284,8 +284,6 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { match client.verify_cashnote(&cash_notes_for_2[0]).await { Ok(_) => bail!("Cashnote verification should have failed"), Err(e) => { - println!("Error verifying cashnote: {:?}", e); - assert!( e.to_string() .contains("Network Error Double spend(s) attempt was detected"), @@ -297,8 +295,6 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { match client.verify_cashnote(&cash_notes_for_3[0]).await { Ok(_) => bail!("Cashnote verification should have failed"), Err(e) => { - println!("Error verifying cashnote: {:?}", e); - assert!( e.to_string() .contains("Network Error Double spend(s) attempt was detected"), From a15b1e7fe5c4c923ba6fbaee79168c8e0b9a66db Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 22 Jul 2024 11:31:08 +0900 Subject: [PATCH 018/115] test(networking): increase sleep interval before asserting double spend --- sn_node/tests/double_spend.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 3a30cf6d93..61e949bfec 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -279,7 +279,7 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { // finally assert that we have a double spend attempt error here // we wait 1s to ensure that the double spend attempt is detected and accumulated - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(5)).await; match client.verify_cashnote(&cash_notes_for_2[0]).await { Ok(_) => bail!("Cashnote verification should have failed"), @@ -456,14 +456,15 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() println!("Verifying the original cashnote of B -> C"); - // arbitrary time sleep to allow for network accumulation - tokio::time::sleep(Duration::from_secs(1)).await; + // arbitrary time sleep to allow for network accumulation of double spend. 
+ tokio::time::sleep(Duration::from_secs(5)).await; let result = client.verify_cashnote(&cash_notes_for_c[0]).await; info!("Got result while verifying the original spend from B -> C: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for c should show double spend attempt"); }); + let result = client.verify_cashnote(&cash_notes_for_y[0]).await; assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for y should show double spend attempt"); @@ -576,7 +577,7 @@ async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone(); // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_millis(500)).await; + tokio::time::sleep(Duration::from_secs(5)).await; let result = client.verify_cashnote(&cash_notes_for_x[0]).await; info!("Got result while verifying double spend from A -> X: {result:?}"); From e02f21788357116142c6fdcea01e090235c3e8c8 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 23 Jul 2024 12:47:23 +0900 Subject: [PATCH 019/115] feat(networking): compare record_types when marking fetches as complete --- sn_client/tests/folders_api.rs | 6 +++ sn_networking/src/cmd.rs | 14 ++++--- sn_networking/src/lib.rs | 4 +- sn_networking/src/replication_fetcher.rs | 17 +++++++- sn_node/src/put_validation.rs | 52 ++++++++++++++++++++---- 5 files changed, 75 insertions(+), 18 deletions(-) diff --git a/sn_client/tests/folders_api.rs b/sn_client/tests/folders_api.rs index 4b7c74fc9f..8340c3ad32 100644 --- a/sn_client/tests/folders_api.rs +++ b/sn_client/tests/folders_api.rs @@ -190,6 +190,9 @@ async fn test_folder_remove_replace_entries() -> Result<()> { #[tokio::test] async fn test_folder_retrieve() -> Result<()> { + let _log_guards = + sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_retrieve", false); + let owner_sk = SecretKey::random(); let client = get_new_client(owner_sk).await?; let tmp_dir = tempfile::tempdir()?; @@ -267,6 +270,9 @@ async fn test_folder_retrieve() -> Result<()> { #[tokio::test] async fn test_folder_merge_changes() -> Result<()> { + let _log_guards = + sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_merge_changes", false); + let owner_sk = SecretKey::random(); let client = get_new_client(owner_sk.clone()).await?; let tmp_dir = tempfile::tempdir()?; diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 432045b2e7..9b3417d1ed 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -124,7 +124,7 @@ pub enum LocalSwarmCmd { quotes: Vec<(PeerId, PaymentQuote)>, }, // Notify a fetch completion - FetchCompleted(RecordKey), + FetchCompleted((RecordKey, RecordType)), /// Triggers interval repliation /// NOTE: This does result in outgoing messages, but is produced locally TriggerIntervalReplication, @@ -264,10 +264,10 @@ impl Debug for LocalSwarmCmd { quotes.len() ) } - LocalSwarmCmd::FetchCompleted(key) => { + LocalSwarmCmd::FetchCompleted((key, record_type)) => { write!( f, - "LocalSwarmCmd::FetchCompleted({:?})", + "LocalSwarmCmd::FetchCompleted({record_type:?} : {:?})", PrettyPrintRecordKey::from(key) ) } @@ -776,13 +776,15 @@ impl SwarmDriver { self.verify_peer_quote(peer_id, quote); } } - LocalSwarmCmd::FetchCompleted(key) 
=> { + LocalSwarmCmd::FetchCompleted((key, record_type)) => { info!( - "Fetch {:?} early completed, may fetched an old version record.", + "Fetch of {record_type:?} {:?} early completed, may have fetched an old version of the record.", PrettyPrintRecordKey::from(&key) ); cmd_string = "FetchCompleted"; - let new_keys_to_fetch = self.replication_fetcher.notify_fetch_early_completed(key); + let new_keys_to_fetch = self + .replication_fetcher + .notify_fetch_early_completed(key, record_type); if !new_keys_to_fetch.is_empty() { self.send_event(NetworkEvent::KeysToFetchForReplication(new_keys_to_fetch)); } diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 030d0b4a6f..af7165e0a3 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -684,8 +684,8 @@ impl Network { /// Notify ReplicationFetch a fetch attempt is completed. /// (but it won't trigger any real writes to disk, say fetched an old version of register) - pub fn notify_fetch_completed(&self, key: RecordKey) { - self.send_local_swarm_cmd(LocalSwarmCmd::FetchCompleted(key)) + pub fn notify_fetch_completed(&self, key: RecordKey, record_type: RecordType) { + self.send_local_swarm_cmd(LocalSwarmCmd::FetchCompleted((key, record_type))) } /// Put `Record` to the local RecordStore diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 8aedbc525c..1b90ac9a53 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -223,10 +223,23 @@ impl ReplicationFetcher { pub(crate) fn notify_fetch_early_completed( &mut self, key_in: RecordKey, + record_type: RecordType, ) -> Vec<(PeerId, RecordKey)> { - self.to_be_fetched.retain(|(key, _t, _), _| key != &key_in); + self.to_be_fetched.retain(|(key, current_type, _), _| { + if current_type == &record_type { + key != &key_in + } else { + true + } + }); - self.on_going_fetches.retain(|(key, _t), _| key != &key_in); + self.on_going_fetches.retain(|(key, current_type), _| { + if current_type == &record_type { + key != &key_in + } else { + true + } + }); self.next_keys_to_fetch() } diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 64635bf18b..8ea30d4d0f 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -30,11 +30,6 @@ impl Node { pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> { let record_header = RecordHeader::from_record(&record)?; - // Notify replication_fetcher to mark the attempt as completed. - // Send the notification earlier to avoid it got skipped due to: - // the record becomes stored during the fetch because of other interleaved process. - self.network().notify_fetch_completed(record.key.clone()); - match record_header.kind { RecordKind::ChunkWithPayment => { let record_key = record.key.clone(); @@ -56,6 +51,13 @@ impl Node { // we eagery retry replicaiton as it seems like other nodes are having trouble // did not manage to get this chunk as yet self.replicate_valid_fresh_record(record_key, RecordType::Chunk); + + // Notify replication_fetcher to mark the attempt as completed. + // Send the notification earlier to avoid it got skipped due to: + // the record becomes stored during the fetch because of other interleaved process. 
+ self.network() + .notify_fetch_completed(record.key.clone(), RecordType::Chunk); + debug!( "Chunk with addr {:?} already exists: {already_exists}, payment extracted.", chunk.network_address() @@ -75,6 +77,12 @@ impl Node { Marker::ValidPaidChunkPutFromClient(&PrettyPrintRecordKey::from(&record.key)) .log(); self.replicate_valid_fresh_record(record_key, RecordType::Chunk); + + // Notify replication_fetcher to mark the attempt as completed. + // Send the notification earlier to avoid it got skipped due to: + // the record becomes stored during the fetch because of other interleaved process. + self.network() + .notify_fetch_completed(record.key.clone(), RecordType::Chunk); } store_chunk_result @@ -99,6 +107,14 @@ impl Node { record_key, RecordType::NonChunk(content_hash), ); + + // Notify replication_fetcher to mark the attempt as completed. + // Send the notification earlier to avoid it got skipped due to: + // the record becomes stored during the fetch because of other interleaved process. + self.network().notify_fetch_completed( + record.key.clone(), + RecordType::NonChunk(content_hash), + ); } result } @@ -125,6 +141,16 @@ impl Node { Marker::ValidPaidRegisterPutFromClient(&pretty_key).log(); // we dont try and force replicaiton here as there's state to be kept in sync // which we leave up to the client to enforce + + let content_hash = XorName::from_content(&record.value); + + // Notify replication_fetcher to mark the attempt as completed. + // Send the notification earlier to avoid it got skipped due to: + // the record becomes stored during the fetch because of other interleaved process. + self.network().notify_fetch_completed( + record.key.clone(), + RecordType::NonChunk(content_hash), + ); } result } @@ -161,7 +187,19 @@ impl Node { } } - self.validate_and_store_register(register, true).await + let res = self.validate_and_store_register(register, true).await; + if res.is_ok() { + let content_hash = XorName::from_content(&record.value); + + // Notify replication_fetcher to mark the attempt as completed. + // Send the notification earlier to avoid it got skipped due to: + // the record becomes stored during the fetch because of other interleaved process. + self.network().notify_fetch_completed( + record.key.clone(), + RecordType::NonChunk(content_hash), + ); + } + res } } } @@ -300,8 +338,6 @@ impl Node { let updated_register = match self.register_validation(®ister, present_locally).await? { Some(reg) => reg, None => { - // Notify replication_fetcher to mark the attempt as completed. - self.network().notify_fetch_completed(key.clone()); return Ok(()); } }; From 499211dc93d09c7ef21f688fb45af893dddc7f46 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 23 Jul 2024 15:08:31 +0900 Subject: [PATCH 020/115] feat(networking): basic retry on send_req OutboundError --- sn_networking/src/lib.rs | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index af7165e0a3..49e40d65c0 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -53,6 +53,7 @@ use libp2p::{ identity::Keypair, kad::{KBucketDistance, KBucketKey, Quorum, Record, RecordKey}, multiaddr::Protocol, + request_response::OutboundFailure, Multiaddr, PeerId, }; use rand::Rng; @@ -728,14 +729,45 @@ impl Network { /// then the `Request` is forwarded to itself and handled, and a corresponding `Response` is created /// and returned to itself. 
Hence the flow remains the same and there is no branching at the upper /// layers. + /// + /// If an outbound issue is raised, we retry once more to send the request before returning an error. pub async fn send_request(&self, req: Request, peer: PeerId) -> Result { let (sender, receiver) = oneshot::channel(); self.send_network_swarm_cmd(NetworkSwarmCmd::SendRequest { - req, + req: req.clone(), peer, sender: Some(sender), }); - receiver.await? + let mut r = receiver.await?; + + if let Err(error) = &r { + error!("Error in response: {:?}", error); + + match error { + NetworkError::OutboundError(OutboundFailure::Io(_)) + | NetworkError::OutboundError(OutboundFailure::ConnectionClosed) => { + warn!( + "Outbound failed for {req:?} .. {error:?}, redialing once and reattempting" + ); + let (sender, receiver) = oneshot::channel(); + + debug!("Reattempting to send_request {req:?} to {peer:?}"); + self.send_network_swarm_cmd(NetworkSwarmCmd::SendRequest { + req, + peer, + sender: Some(sender), + }); + + r = receiver.await?; + } + _ => { + // If the record is found, we should log the error and continue + warn!("Error in response: {:?}", error); + } + } + } + + r } /// Send `Request` to the given `PeerId` and do _not_ await a response here. From eeb6987ad5e74bf082fce6c526938c53c1dfb3ee Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 23 Jul 2024 17:30:53 +0900 Subject: [PATCH 021/115] test(networking): increase wait before verifications --- sn_node/tests/double_spend.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 61e949bfec..ce1e9515e5 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -77,6 +77,9 @@ async fn cash_note_transfer_double_spend_fail() -> Result<()> { let cash_notes_for_2: Vec<_> = transfer_to_2.cash_notes_for_recipient.clone(); let cash_notes_for_3: Vec<_> = transfer_to_3.cash_notes_for_recipient.clone(); + // we wait 5s to ensure that the double spend attempt is detected and accumulated + tokio::time::sleep(Duration::from_secs(5)).await; + let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); @@ -457,22 +460,22 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() println!("Verifying the original cashnote of B -> C"); // arbitrary time sleep to allow for network accumulation of double spend. 
- tokio::time::sleep(Duration::from_secs(5)).await; + tokio::time::sleep(Duration::from_secs(15)).await; let result = client.verify_cashnote(&cash_notes_for_c[0]).await; info!("Got result while verifying the original spend from B -> C: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for c should show double spend attempt"); - }); + }, "result should be verify error, it was {result:?}"); let result = client.verify_cashnote(&cash_notes_for_y[0]).await; assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for y should show double spend attempt"); - }); + }, "result should be verify error, it was {result:?}"); let result = client.verify_cashnote(&cash_notes_for_b[0]).await; assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for y should show double spend attempt"); - }); + }, "result should be verify error, it was {result:?}"); Ok(()) } @@ -577,12 +580,12 @@ async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone(); // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(5)).await; + tokio::time::sleep(Duration::from_secs(10)).await; let result = client.verify_cashnote(&cash_notes_for_x[0]).await; info!("Got result while verifying double spend from A -> X: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected")); + assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "non double spend error found: {str:?}"); }); // the original A should still be present as one of the double spends From 3263d930c737793080113aea5fcdad987028abe8 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 24 Jul 2024 10:16:44 +0900 Subject: [PATCH 022/115] ci: disable spend simulation while unstable --- .github/workflows/merge.yml | 142 ++++++++++++++++++------------------ 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 011fd53640..7f9f79a5ab 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -5,9 +5,9 @@ on: # on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors # the merge run checks should show on master and enable this clear test/passing history merge_group: - branches: [ main, alpha*, beta*, rc* ] + branches: [main, alpha*, beta*, rc*] pull_request: - branches: [ "*" ] + branches: ["*"] env: CARGO_INCREMENTAL: 0 # bookkeeping for incremental builds has overhead, not useful in CI. 
@@ -95,7 +95,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 @@ -342,7 +342,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 @@ -415,72 +415,72 @@ jobs: log_file_prefix: safe_test_logs_spend platform: ${{ matrix.os }} - # runs with increased node count - spend_simulation: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: spend simulation - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode - timeout-minutes: 30 - - - name: Build faucet binary - run: cargo build --release --bin faucet --features="local-discovery,gifting" - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-count: 50 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: execute the spend simulation - run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend_simulation - platform: ${{ matrix.os }} + # # runs with increased node count + # spend_simulation: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: spend simulation + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ ubuntu-latest, windows-latest, macos-latest ] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --features=local-discovery --bin safenode + # timeout-minutes: 30 + + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # timeout-minutes: 30 + + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-count: 50 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - name: execute the spend simulation + # run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_spend_simulation + # platform: ${{ matrix.os }} token_distribution_test: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" @@ -488,7 +488,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 From a0ca40a6350acdef7c3782bc2d618cc91f276830 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 24 Jul 2024 10:50:45 +0900 Subject: [PATCH 023/115] feat(networking): increase speed of register validation This should allow more PUT attempts in the same RetryStrategy period --- sn_client/src/register.rs | 2 +- sn_networking/src/lib.rs | 2 ++ sn_node/src/put_validation.rs | 9 ++++++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs index 430d12092b..a4e21469d5 100644 --- a/sn_client/src/register.rs +++ b/sn_client/src/register.rs @@ -843,7 +843,7 @@ impl ClientRegister { let verification_cfg = GetRecordCfg { get_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Balanced), + retry_strategy: Some(RetryStrategy::Quick), target_record: record_to_verify, expected_holders, }; diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 49e40d65c0..1c92230546 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -580,6 +580,8 @@ impl Network { pub async fn put_record(&self, record: Record, cfg: &PutRecordCfg) -> Result<()> { let pretty_key = PrettyPrintRecordKey::from(&record.key); + // Here we only retry after a failed validation. + // So a long validation time will limit the number of PUT retries we attempt here. 
let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( ExponentialBackoff { diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 8ea30d4d0f..602312f443 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -138,6 +138,7 @@ impl Node { let result = self.validate_and_store_register(register, true).await; if result.is_ok() { + debug!("Successfully stored register update at {pretty_key:?}"); Marker::ValidPaidRegisterPutFromClient(&pretty_key).log(); // we dont try and force replicaiton here as there's state to be kept in sync // which we leave up to the client to enforce @@ -151,6 +152,8 @@ impl Node { record.key.clone(), RecordType::NonChunk(content_hash), ); + } else { + warn!("Failed to store register update at {pretty_key:?}"); } result } @@ -336,8 +339,12 @@ impl Node { // check register and merge if needed let updated_register = match self.register_validation(®ister, present_locally).await? { - Some(reg) => reg, + Some(reg) => { + debug!("Register needed to be updated"); + reg + } None => { + debug!("No update needed for register"); return Ok(()); } }; From 809892e3acd0d566f9b9bd273ad9486ee5f3e67a Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 25 Jul 2024 09:39:36 +0900 Subject: [PATCH 024/115] feat(networking): use tokio::select! biased to gain simpler prioritisation --- sn_networking/src/driver.rs | 61 ++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 34 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 2e5e96ce23..51a133089e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -62,10 +62,7 @@ use std::{ num::NonZeroUsize, path::PathBuf, }; -use tokio::sync::{ - mpsc::{self, error::TryRecvError}, - oneshot, -}; +use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tracing::warn; use xor_name::XorName; @@ -700,38 +697,24 @@ impl SwarmDriver { let mut relay_manager_reservation_interval = interval(RELAY_MANAGER_RESERVATION_INTERVAL); loop { - // Prioritise any local cmds pending. - // https://github.com/libp2p/rust-libp2p/blob/master/docs/coding-guidelines.md#prioritize-local-work-over-new-work-from-a-remote - match self.local_cmd_receiver.try_recv() { - Ok(cmd) => { - let start = Instant::now(); - let cmd_string = format!("{cmd:?}"); - if let Err(err) = self.handle_local_cmd(cmd) { - warn!("Error while handling local cmd: {err}"); - } - trace!("LocalCmd handled in {:?}: {cmd_string:?}", start.elapsed()); - - continue; - } - Err(error) => match error { - TryRecvError::Empty => { - // no local cmds pending, continue - } - TryRecvError::Disconnected => { - error!("LocalCmd channel disconnected, shutting down SwarmDriver"); - return; - } - }, - } - tokio::select! { - swarm_event = self.swarm.select_next_some() => { - // logging for handling events happens inside handle_swarm_events - // otherwise we're rewriting match statements etc around this anwyay - if let Err(err) = self.handle_swarm_events(swarm_event) { - warn!("Error while handling swarm event: {err}"); - } + // polls futures in order they appear here (as opposed to random) + biased; + + // Prioritise any local cmds pending. 
+ // https://github.com/libp2p/rust-libp2p/blob/master/docs/coding-guidelines.md#prioritize-local-work-over-new-work-from-a-remote + local_cmd = self.local_cmd_receiver.recv() => match local_cmd { + Some(cmd) => { + let start = Instant::now(); + let cmd_string = format!("{cmd:?}"); + if let Err(err) = self.handle_local_cmd(cmd) { + warn!("Error while handling local cmd: {err}"); + } + trace!("LocalCmd handled in {:?}: {cmd_string:?}", start.elapsed()); + }, + None => continue, }, + // next check if we have locally generated network cmds some_cmd = self.network_cmd_receiver.recv() => match some_cmd { Some(cmd) => { let start = Instant::now(); @@ -743,6 +726,16 @@ impl SwarmDriver { }, None => continue, }, + // next take and react to external swarm events + swarm_event = self.swarm.select_next_some() => { + // logging for handling events happens inside handle_swarm_events + // otherwise we're rewriting match statements etc around this anwyay + if let Err(err) = self.handle_swarm_events(swarm_event) { + warn!("Error while handling swarm event: {err}"); + } + }, + // thereafter we can check our intervals + // runs every bootstrap_interval time _ = bootstrap_interval.tick() => { if let Some(new_interval) = self.run_bootstrap_continuously(bootstrap_interval.period()).await { From 9133522744a02ab1593ca88b670fef056ac3ced8 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 26 Jul 2024 10:40:54 +0900 Subject: [PATCH 025/115] fix(client): verify register when retrieving By default we should verify, this avoids errors arising from stale data if it comes form one faster machine --- sn_client/src/api.rs | 2 +- sn_client/src/register.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index c877228db0..93418ec44f 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -450,7 +450,7 @@ impl Client { ) -> Result { let key = NetworkAddress::from_register_address(address).to_record_key(); let get_quorum = if is_verifying { - Quorum::N(NonZeroUsize::new(2).ok_or(Error::NonZeroUsizeWasInitialisedAsZero)?) + Quorum::Majority } else { Quorum::One }; diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs index a4e21469d5..16efd3db1d 100644 --- a/sn_client/src/register.rs +++ b/sn_client/src/register.rs @@ -865,7 +865,7 @@ impl ClientRegister { ) -> Result { debug!("Retrieving Register from: {address}"); let reg = client - .get_signed_register_from_network(address, false) + .get_signed_register_from_network(address, true) .await?; reg.verify_with_address(address)?; Ok(reg.register()?) From 192ab7529c0d38916bc4f5f93106fc1f7cb1f700 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 26 Jul 2024 13:55:10 +0900 Subject: [PATCH 026/115] chore: clippy fixes for rust 1.80 --- sn_client/src/lib.rs | 10 +++++----- sn_transfers/src/wallet/wallet_file.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs index 5505008e43..daf06d6f4c 100644 --- a/sn_client/src/lib.rs +++ b/sn_client/src/lib.rs @@ -16,19 +16,19 @@ //! Here are the key functionalities provided by this crate: //! //! 1. **Network Communication**: It handles communication with the Safe Network, enabling clients to -//! send and receive messages from the decentralized nodes that make up the network. +//! send and receive messages from the decentralized nodes that make up the network. //! //! 2. **Data Storage and Retrieval**: to store and retrieve data on the Safe Network. -//! 
This includes both private and public data, ensuring privacy and security. +//! This includes both private and public data, ensuring privacy and security. //! //! 3. **Authentication and Access Control**: It provides mechanisms for authenticating users and -//! managing access to data, ensuring that only authorized users can access sensitive information. +//! managing access to data, ensuring that only authorized users can access sensitive information. //! //! 4. **File Management**: The crate supports operations related to file management, such as uploading, -//! downloading, and managing files and directories on the Safe Network. +//! downloading, and managing files and directories on the Safe Network. //! //! 5. **Token Management**: It includes functionality for managing Safe Network tokens, which can be -//! used for various purposes within the network, including paying for storage and services. +//! used for various purposes within the network, including paying for storage and services. //! //! ## Quick links //! - [Crates.io](https://crates.io/crates/sn_client) diff --git a/sn_transfers/src/wallet/wallet_file.rs b/sn_transfers/src/wallet/wallet_file.rs index a39c911507..58b4827663 100644 --- a/sn_transfers/src/wallet/wallet_file.rs +++ b/sn_transfers/src/wallet/wallet_file.rs @@ -69,7 +69,7 @@ pub(super) fn remove_unconfirmed_spend_requests( let spend_hex_name = spend.address().to_hex(); let spend_file_path = spends_dir.join(&spend_hex_name); debug!("Writing spend to: {spend_file_path:?}"); - fs::write(spend_file_path, &spend.to_bytes())?; + fs::write(spend_file_path, spend.to_bytes())?; } let unconfirmed_spend_requests_path = wallet_dir.join(UNCONFIRMED_TX_NAME); From 5a8ae482256aa2cd000d54b5835461bef31cc325 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 26 Jul 2024 09:46:57 +0200 Subject: [PATCH 027/115] style(node): cargo fmt --- sn_node/tests/common/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index ce0b34e477..bff2c8d333 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -44,8 +44,8 @@ pub const LOCAL_NODE_COUNT: usize = 25; // The number of times to try to load the faucet wallet const LOAD_FAUCET_WALLET_RETRIES: usize = 6; - // mutex to restrict access to faucet wallet from concurrent tests - static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); +// mutex to restrict access to faucet wallet from concurrent tests +static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); /// Load HotWallet from dir pub fn get_wallet(root_dir: &Path) -> HotWallet { From 74f36fa9e344dacd37eaf4a3a894dcafd7c3ddd5 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Fri, 26 Jul 2024 18:38:31 +0200 Subject: [PATCH 028/115] feat(launchpad): user interface tweaks --- node-launchpad/src/components/footer.rs | 4 +- node-launchpad/src/components/home.rs | 76 ++++++++++++++----------- 2 files changed, 46 insertions(+), 34 deletions(-) diff --git a/node-launchpad/src/components/footer.rs b/node-launchpad/src/components/footer.rs index 6022220bfe..31296c8d82 100644 --- a/node-launchpad/src/components/footer.rs +++ b/node-launchpad/src/components/footer.rs @@ -50,13 +50,13 @@ impl Component for Footer { // for the rest of the home scene Constraint::Min(1), // our footer - Constraint::Max(5), + Constraint::Max(4), ], ) .split(area); let border = Paragraph::new("").block( Block::default() - .title("Key Commands") + .title(" Key Commands ") 
.borders(Borders::ALL) .border_style(Style::default().fg(COOL_GREY)), ); diff --git a/node-launchpad/src/components/home.rs b/node-launchpad/src/components/home.rs index ad1078e135..49e5884b13 100644 --- a/node-launchpad/src/components/home.rs +++ b/node-launchpad/src/components/home.rs @@ -378,11 +378,11 @@ impl Component for Home { // header Constraint::Max(1), // device status - Constraint::Max(10), + Constraint::Max(6), // node status Constraint::Min(3), // footer - Constraint::Max(5), + Constraint::Max(4), ], ) .split(area); @@ -391,30 +391,19 @@ impl Component for Home { let layer_one_header = Layout::new( Direction::Horizontal, - vec![Constraint::Min(40), Constraint::Fill(20)], + [Constraint::Percentage(100), Constraint::Percentage(0)], // We leave space for future tabs ) .split(layer_zero[0]); f.render_widget( Paragraph::new(format!( - "Autonomi Node Launchpad (v{})", + " Autonomi Node Launchpad (v{})", self.launchpad_version_str )) .alignment(Alignment::Left) .fg(LIGHT_PERIWINKLE), layer_one_header[0], ); - let discord_user_name_text = if self.discord_username.is_empty() { - "".to_string() - } else { - format!("Discord Username: {} ", &self.discord_username) - }; - f.render_widget( - Paragraph::new(discord_user_name_text) - .alignment(Alignment::Right) - .fg(VERY_LIGHT_AZURE), - layer_one_header[1], - ); // ==== Device Status ===== @@ -434,7 +423,7 @@ impl Component for Home { f.render_widget( Paragraph::new(vec![line1, line2]).block( Block::default() - .title("Device Status") + .title(" Device Status ") .title_style(Style::new().fg(GHOST_WHITE)) .borders(Borders::ALL) .padding(Padding::uniform(1)) @@ -462,26 +451,49 @@ impl Component for Home { Cell::new("Memory Use".to_string()).fg(GHOST_WHITE), Cell::new(memory_use_val).fg(GHOST_WHITE), ]); - let total_nanos_earned_row = Row::new(vec![ + + // Combine "Total Nanos Earned" and "Discord Username" into a single row + let mut username_color = GHOST_WHITE; + let total_nanos_earned_and_discord = Row::new(vec![ Cell::new("Total Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), Cell::new(self.node_stats.forwarded_rewards.to_string()) .fg(VIVID_SKY_BLUE) .bold(), + Cell::new("".to_string()), + Cell::new("Discord Username:".to_string()).fg(VIVID_SKY_BLUE), + Cell::new(if self.discord_username.is_empty() { + "[Ctrl+B] to set".to_string() + } else { + username_color = VIVID_SKY_BLUE; + self.discord_username.clone() + }) + .fg(username_color) + .bold(), ]); + let stats_rows = vec![ storage_allocated_row, - memory_use_row.bottom_margin(2), - total_nanos_earned_row, + memory_use_row.bottom_margin(1), + total_nanos_earned_and_discord, ]; - let stats_width = [Constraint::Max(25), Constraint::Min(5)]; - let stats_table = Table::new(stats_rows, stats_width).block( - Block::default() - .title("Device Status") - .title_style(Style::default().fg(GHOST_WHITE)) - .borders(Borders::ALL) - .padding(Padding::uniform(1)) - .style(Style::default().fg(VERY_LIGHT_AZURE)), - ); + let stats_width = [Constraint::Length(5)]; + let column_constraints = [ + Constraint::Percentage(25), + Constraint::Percentage(5), + Constraint::Percentage(35), // empty cell to avoid alignment left <> right + Constraint::Percentage(20), + Constraint::Percentage(15), + ]; + let stats_table = Table::new(stats_rows, stats_width) + .block( + Block::default() + .title(" Device Status ") + .title_style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .padding(Padding::horizontal(1)) + .style(Style::default().fg(VERY_LIGHT_AZURE)), + ) + .widths(column_constraints); 
f.render_widget(stats_table, layer_zero[1]); }; @@ -515,7 +527,7 @@ impl Component for Home { .fg(LIGHT_PERIWINKLE) .block( Block::default() - .title("Node Status") + .title(" Node Status ") .title_style(Style::default().fg(LIGHT_PERIWINKLE)) .borders(Borders::ALL) .border_style(style::Style::default().fg(COOL_GREY)) @@ -526,8 +538,8 @@ impl Component for Home { } else { let node_widths = [ Constraint::Max(15), - Constraint::Min(30), - Constraint::Max(20), + Constraint::Min(40), + Constraint::Max(10), Constraint::Max(10), ]; let table = Table::new(node_rows, node_widths) @@ -535,7 +547,7 @@ impl Component for Home { .highlight_style(Style::new().reversed()) .block( Block::default() - .title("Node Status") + .title(" Node Status ") .padding(Padding::new(2, 2, 1, 1)) .title_style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) From c932392e5c47612ac36e81a3be8cab8a81e2f109 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 23 Jul 2024 10:03:53 +0530 Subject: [PATCH 029/115] feat(manager): optionally set the different ports when running local testnet --- sn_node_manager/src/add_services/config.rs | 50 +++++++-- sn_node_manager/src/add_services/mod.rs | 79 +------------ sn_node_manager/src/add_services/tests.rs | 6 +- sn_node_manager/src/bin/cli/main.rs | 125 +++++++++++++++++++-- sn_node_manager/src/cmd/local.rs | 47 ++++++-- sn_node_manager/src/helpers.rs | 55 ++++++++- sn_node_manager/src/local.rs | 121 +++++++++++++++++--- 7 files changed, 360 insertions(+), 123 deletions(-) diff --git a/sn_node_manager/src/add_services/config.rs b/sn_node_manager/src/add_services/config.rs index 79295aac6d..83b34a17be 100644 --- a/sn_node_manager/src/add_services/config.rs +++ b/sn_node_manager/src/add_services/config.rs @@ -23,20 +23,46 @@ pub enum PortRange { Range(u16, u16), } -pub fn parse_port_range(s: &str) -> Result { - if let Ok(port) = u16::from_str(s) { - Ok(PortRange::Single(port)) - } else { - let parts: Vec<&str> = s.split('-').collect(); - if parts.len() != 2 { - return Err(eyre!("Port range must be in the format 'start-end'")); +impl PortRange { + pub fn parse(s: &str) -> Result { + if let Ok(port) = u16::from_str(s) { + Ok(Self::Single(port)) + } else { + let parts: Vec<&str> = s.split('-').collect(); + if parts.len() != 2 { + return Err(eyre!("Port range must be in the format 'start-end'")); + } + let start = parts[0].parse::()?; + let end = parts[1].parse::()?; + if start >= end { + return Err(eyre!("End port must be greater than start port")); + } + Ok(Self::Range(start, end)) } - let start = parts[0].parse::()?; - let end = parts[1].parse::()?; - if start >= end { - return Err(eyre!("End port must be greater than start port")); + } + + /// Validate the port range against a count to make sure the correct number of ports are provided. 
+ pub fn validate(&self, count: u16) -> Result<()> { + match self { + Self::Single(_) => { + if count != 1 { + error!("The count ({count}) does not match the number of ports (1)"); + return Err(eyre!( + "The count ({count}) does not match the number of ports (1)" + )); + } + } + Self::Range(start, end) => { + let port_count = end - start + 1; + if count != port_count { + error!("The count ({count}) does not match the number of ports ({port_count})"); + return Err(eyre!( + "The count ({count}) does not match the number of ports ({port_count})" + )); + } + } } - Ok(PortRange::Range(start, end)) + Ok(()) } } diff --git a/sn_node_manager/src/add_services/mod.rs b/sn_node_manager/src/add_services/mod.rs index 03df580249..a1657640ee 100644 --- a/sn_node_manager/src/add_services/mod.rs +++ b/sn_node_manager/src/add_services/mod.rs @@ -12,10 +12,11 @@ mod tests; use self::config::{ AddAuditorServiceOptions, AddDaemonServiceOptions, AddFaucetServiceOptions, AddNodeServiceOptions, InstallAuditorServiceCtxBuilder, InstallFaucetServiceCtxBuilder, - InstallNodeServiceCtxBuilder, PortRange, + InstallNodeServiceCtxBuilder, }; use crate::{ config::{create_owned_dir, get_user_safenode_data_dir}, + helpers::{check_port_availability, get_start_port_if_applicable, increment_port_option}, VerbosityLevel, DAEMON_SERVICE_NAME, }; use color_eyre::{ @@ -62,39 +63,18 @@ pub async fn add_node( } } - if let Some(ref port_range) = options.node_port { - match port_range { - PortRange::Single(_) => { - let count = options.count.unwrap_or(1); - if count != 1 { - error!("The number of services to add ({count}) does not match the number of ports (1)"); - return Err(eyre!( - "The number of services to add ({count}) does not match the number of ports (1)" - )); - } - } - PortRange::Range(start, end) => { - let port_count = end - start + 1; - let service_count = options.count.unwrap_or(1); - if port_count != service_count { - error!("The number of services to add ({service_count}) does not match the number of ports ({port_count})"); - return Err(eyre!( - "The number of services to add ({service_count}) does not match the number of ports ({port_count})" - )); - } - } - } - } - if let Some(port_option) = &options.node_port { + port_option.validate(options.count.unwrap_or(1))?; check_port_availability(port_option, &node_registry.nodes)?; } if let Some(port_option) = &options.metrics_port { + port_option.validate(options.count.unwrap_or(1))?; check_port_availability(port_option, &node_registry.nodes)?; } if let Some(port_option) = &options.rpc_port { + port_option.validate(options.count.unwrap_or(1))?; check_port_availability(port_option, &node_registry.nodes)?; } @@ -575,52 +555,3 @@ pub fn add_faucet( } } } - -fn get_start_port_if_applicable(range: Option) -> Option { - if let Some(port) = range { - match port { - PortRange::Single(val) => return Some(val), - PortRange::Range(start, _) => return Some(start), - } - } - None -} - -fn increment_port_option(port: Option) -> Option { - if let Some(port) = port { - let incremented_port = port + 1; - return Some(incremented_port); - } - None -} - -fn check_port_availability(port_option: &PortRange, nodes: &[NodeServiceData]) -> Result<()> { - let mut all_ports = Vec::new(); - for node in nodes { - if let Some(port) = node.metrics_port { - all_ports.push(port); - } - if let Some(port) = node.node_port { - all_ports.push(port); - } - all_ports.push(node.rpc_socket_addr.port()); - } - - match port_option { - PortRange::Single(port) => { - if all_ports.iter().any(|p| *p == *port) { - 
error!("Port {port} is being used by another service"); - return Err(eyre!("Port {port} is being used by another service")); - } - } - PortRange::Range(start, end) => { - for i in *start..=*end { - if all_ports.iter().any(|p| *p == i) { - error!("Port {i} is being used by another service"); - return Err(eyre!("Port {i} is being used by another service")); - } - } - } - } - Ok(()) -} diff --git a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index 1cfe8e959c..bf1649df4c 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -1568,7 +1568,7 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - Ok(_) => panic!("This test should result in an error"), Err(e) => { assert_eq!( - format!("The number of services to add (2) does not match the number of ports (3)"), + format!("The count (2) does not match the number of ports (3)"), e.to_string() ) } @@ -1639,7 +1639,7 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with Ok(_) => panic!("This test should result in an error"), Err(e) => { assert_eq!( - format!("The number of services to add (2) does not match the number of ports (1)"), + format!("The count (2) does not match the number of ports (1)"), e.to_string() ) } @@ -2514,7 +2514,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i auto_restart: false, auto_set_nat_flags: false, bootstrap_peers: vec![], - count: None, + count: Some(2), delete_safenode_src: true, enable_metrics_server: false, env_variables: None, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 4598ea8f4d..e57c820d0a 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -11,7 +11,7 @@ use color_eyre::{eyre::eyre, Result}; use libp2p::Multiaddr; use sn_logging::{LogBuilder, LogFormat}; use sn_node_manager::{ - add_services::config::{parse_port_range, PortRange}, + add_services::config::PortRange, cmd::{self}, VerbosityLevel, }; @@ -93,6 +93,9 @@ pub enum SubCmd { data_dir_path: Option, /// Set this flag to enable the metrics server. The ports will be selected at random. /// + /// If you're passing the compiled safenode via --path, make sure to enable the open-metrics feature + /// when compiling. + /// /// If you want to specify the ports, use the --metrics-port argument. #[clap(long)] enable_metrics_server: bool, @@ -132,8 +135,8 @@ pub enum SubCmd { log_format: Option, /// Specify a port for the open metrics server. /// - /// This argument should only be used with a safenode binary that has the open-metrics - /// feature enabled. + /// If you're passing the compiled safenode via --node-path, make sure to enable the open-metrics feature + /// when compiling. /// /// If not set, metrics server will not be started. Use --enable-metrics-server to start /// the metrics server without specifying a port. @@ -141,7 +144,7 @@ pub enum SubCmd { /// If multiple services are being added and this argument is used, you must specify a /// range. For example, '12000-12004'. The length of the range must match the number of /// services, which in this case would be 5. The range must also go from lower to higher. - #[clap(long, value_parser = parse_port_range)] + #[clap(long, value_parser = PortRange::parse)] metrics_port: Option, /// Specify a port for the safenode service(s). 
/// @@ -150,7 +153,7 @@ pub enum SubCmd { /// If multiple services are being added and this argument is used, you must specify a /// range. For example, '12000-12004'. The length of the range must match the number of /// services, which in this case would be 5. The range must also go from lower to higher. - #[clap(long, value_parser = parse_port_range)] + #[clap(long, value_parser = PortRange::parse)] node_port: Option, /// Provide a path for the safenode binary to be used by the service. /// @@ -182,7 +185,7 @@ pub enum SubCmd { /// If multiple services are being added and this argument is used, you must specify a /// range. For example, '12000-12004'. The length of the range must match the number of /// services, which in this case would be 5. The range must also go from lower to higher. - #[clap(long, value_parser = parse_port_range)] + #[clap(long, value_parser = PortRange::parse)] rpc_port: Option, /// Try to use UPnP to open a port in the home router and allow incoming connections. /// @@ -728,6 +731,15 @@ pub enum LocalSubCmd { /// The number of nodes to run. #[clap(long, default_value_t = DEFAULT_NODE_COUNT)] count: u16, + /// Set this flag to enable the metrics server. The ports will be selected at random. + /// + /// If you're passing the compiled safenode via --node-path, make sure to enable the open-metrics feature flag + /// on the safenode when compiling. If you're using --build, then make sure to enable the feature flag on the + /// safenode-manager. + /// + /// If you want to specify the ports, use the --metrics-port argument. + #[clap(long)] + enable_metrics_server: bool, /// Path to a faucet binary /// /// The path and version arguments are mutually exclusive. @@ -752,11 +764,36 @@ pub enum LocalSubCmd { /// If the argument is not used, the default format will be applied. #[clap(long, value_parser = LogFormat::parse_from_str, verbatim_doc_comment)] log_format: Option, - /// Path to a safenode binary + /// Specify a port for the open metrics server. + /// + /// If you're passing the compiled safenode via --node-path, make sure to enable the open-metrics feature flag + /// on the safenode when compiling. If you're using --build, then make sure to enable the feature flag on the + /// safenode-manager. + /// + /// If not set, metrics server will not be started. Use --enable-metrics-server to start + /// the metrics server without specifying a port. + /// + /// If multiple services are being added and this argument is used, you must specify a + /// range. For example, '12000-12004'. The length of the range must match the number of + /// services, which in this case would be 5. The range must also go from lower to higher. + #[clap(long, value_parser = PortRange::parse)] + metrics_port: Option, + /// Path to a safenode binary. + /// + /// Make sure to enable the local-discovery feature flag on the safenode when compiling the binary. /// /// The path and version arguments are mutually exclusive. #[clap(long, conflicts_with = "node_version")] node_path: Option, + /// Specify a port for the safenode service(s). + /// + /// If not used, ports will be selected at random. + /// + /// If multiple services are being added and this argument is used, you must specify a + /// range. For example, '12000-12004'. The length of the range must match the number of + /// services, which in this case would be 5. The range must also go from lower to higher. + #[clap(long, value_parser = PortRange::parse)] + node_port: Option, /// The version of safenode to use. 
/// /// The version number should be in the form X.Y.Z, with no 'v' prefix. @@ -779,6 +816,15 @@ pub enum LocalSubCmd { /// The argument exists to support testing scenarios. #[clap(long, conflicts_with = "owner")] owner_prefix: Option, + /// Specify a port for the RPC service(s). + /// + /// If not used, ports will be selected at random. + /// + /// If multiple services are being added and this argument is used, you must specify a + /// range. For example, '12000-12004'. The length of the range must match the number of + /// services, which in this case would be 5. The range must also go from lower to higher. + #[clap(long, value_parser = PortRange::parse)] + rpc_port: Option, /// Set to skip the network validation process #[clap(long)] skip_validation: bool, @@ -803,6 +849,15 @@ pub enum LocalSubCmd { /// The number of nodes to run. #[clap(long, default_value_t = DEFAULT_NODE_COUNT)] count: u16, + /// Set this flag to enable the metrics server. The ports will be selected at random. + /// + /// If you're passing the compiled safenode via --node-path, make sure to enable the open-metrics feature flag + /// on the safenode when compiling. If you're using --build, then make sure to enable the feature flag on the + /// safenode-manager. + /// + /// If you want to specify the ports, use the --metrics-port argument. + #[clap(long)] + enable_metrics_server: bool, /// Path to a faucet binary. /// /// The path and version arguments are mutually exclusive. @@ -827,11 +882,36 @@ pub enum LocalSubCmd { /// If the argument is not used, the default format will be applied. #[clap(long, value_parser = LogFormat::parse_from_str, verbatim_doc_comment)] log_format: Option, + /// Specify a port for the open metrics server. + /// + /// If you're passing the compiled safenode via --node-path, make sure to enable the open-metrics feature flag + /// on the safenode when compiling. If you're using --build, then make sure to enable the feature flag on the + /// safenode-manager. + /// + /// If not set, metrics server will not be started. Use --enable-metrics-server to start + /// the metrics server without specifying a port. + /// + /// If multiple services are being added and this argument is used, you must specify a + /// range. For example, '12000-12004'. The length of the range must match the number of + /// services, which in this case would be 5. The range must also go from lower to higher. + #[clap(long, value_parser = PortRange::parse)] + metrics_port: Option, /// Path to a safenode binary /// + /// Make sure to enable the local-discovery feature flag on the safenode when compiling the binary. + /// /// The path and version arguments are mutually exclusive. #[clap(long, conflicts_with = "node_version", conflicts_with = "build")] node_path: Option, + /// Specify a port for the safenode service(s). + /// + /// If not used, ports will be selected at random. + /// + /// If multiple services are being added and this argument is used, you must specify a + /// range. For example, '12000-12004'. The length of the range must match the number of + /// services, which in this case would be 5. The range must also go from lower to higher. + #[clap(long, value_parser = PortRange::parse)] + node_port: Option, /// The version of safenode to use. /// /// The version number should be in the form X.Y.Z, with no 'v' prefix. @@ -853,6 +933,15 @@ pub enum LocalSubCmd { #[clap(long)] #[clap(long, conflicts_with = "owner")] owner_prefix: Option, + /// Specify a port for the RPC service(s). 
+ /// + /// If not used, ports will be selected at random. + /// + /// If multiple services are being added and this argument is used, you must specify a + /// range. For example, '12000-12004'. The length of the range must match the number of + /// services, which in this case would be 5. The range must also go from lower to higher. + #[clap(long, value_parser = PortRange::parse)] + rpc_port: Option, /// Set to skip the network validation process #[clap(long)] skip_validation: bool, @@ -1034,29 +1123,37 @@ async fn main() -> Result<()> { LocalSubCmd::Join { build, count, + enable_metrics_server, faucet_path, faucet_version, interval, + metrics_port, node_path, + node_port, node_version, log_format, owner, owner_prefix, peers, + rpc_port, skip_validation: _, } => { cmd::local::join( build, count, + enable_metrics_server, faucet_path, faucet_version, interval, + metrics_port, node_path, + node_port, node_version, log_format, owner, owner_prefix, peers, + rpc_port, true, verbosity, ) @@ -1067,28 +1164,36 @@ async fn main() -> Result<()> { build, clean, count, + enable_metrics_server, faucet_path, faucet_version, interval, - owner, - owner_prefix, + log_format, + metrics_port, node_path, + node_port, node_version, - log_format, + owner, + owner_prefix, + rpc_port, skip_validation: _, } => { cmd::local::run( build, clean, count, + enable_metrics_server, faucet_path, faucet_version, interval, + metrics_port, node_path, + node_port, node_version, log_format, owner, owner_prefix, + rpc_port, true, verbosity, ) diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index ccef08613c..1dccb70cb7 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -10,6 +10,7 @@ use super::get_bin_path; use crate::{ + add_services::config::PortRange, local::{kill_network, run_network, LocalNetworkOptions}, print_banner, status_report, VerbosityLevel, }; @@ -25,15 +26,19 @@ use std::path::PathBuf; pub async fn join( build: bool, count: u16, + enable_metrics_server: bool, faucet_path: Option, faucet_version: Option, interval: u64, + metrics_port: Option, node_path: Option, + node_port: Option, node_version: Option, log_format: Option, owner: Option, owner_prefix: Option, peers_args: PeersArgs, + rpc_port: Option, skip_validation: bool, verbosity: VerbosityLevel, ) -> Result<(), Report> { @@ -42,11 +47,18 @@ pub async fn join( } info!("Joining local network"); + if (enable_metrics_server || metrics_port.is_some()) && !cfg!(feature = "open-metrics") && build + { + return Err(eyre!( + "Metrics server is not available. Please enable the open-metrics feature flag. 
Run the command with the --features open-metrics" + )); + } + let local_node_reg_path = &get_local_node_registry_path()?; let mut local_node_registry = NodeRegistry::load(local_node_reg_path)?; let release_repo = ::default_config(); - let faucet_path = get_bin_path( + let faucet_bin_path = get_bin_path( build, faucet_path, ReleaseType::Faucet, @@ -55,7 +67,7 @@ pub async fn join( verbosity, ) .await?; - let node_path = get_bin_path( + let safenode_bin_path = get_bin_path( build, node_path, ReleaseType::Safenode, @@ -81,14 +93,18 @@ pub async fn join( }, }; let options = LocalNetworkOptions { - faucet_bin_path: faucet_path, + enable_metrics_server, + faucet_bin_path, interval, join: true, + metrics_port, node_count: count, + node_port, owner, owner_prefix, peers, - safenode_bin_path: node_path, + rpc_port, + safenode_bin_path, skip_validation, log_format, }; @@ -117,17 +133,28 @@ pub async fn run( build: bool, clean: bool, count: u16, + enable_metrics_server: bool, faucet_path: Option, faucet_version: Option, interval: u64, + metrics_port: Option, node_path: Option, + node_port: Option, node_version: Option, log_format: Option, owner: Option, owner_prefix: Option, + rpc_port: Option, skip_validation: bool, verbosity: VerbosityLevel, ) -> Result<(), Report> { + if (enable_metrics_server || metrics_port.is_some()) && !cfg!(feature = "open-metrics") && build + { + return Err(eyre!( + "Metrics server is not available. Please enable the open-metrics feature flag. Run the command with the --features open-metrics" + )); + } + // In the clean case, the node registry must be loaded *after* the existing network has // been killed, which clears it out. let local_node_reg_path = &get_local_node_registry_path()?; @@ -158,7 +185,7 @@ pub async fn run( info!("Launching local network"); let release_repo = ::default_config(); - let faucet_path = get_bin_path( + let faucet_bin_path = get_bin_path( build, faucet_path, ReleaseType::Faucet, @@ -167,7 +194,7 @@ pub async fn run( verbosity, ) .await?; - let node_path = get_bin_path( + let safenode_bin_path = get_bin_path( build, node_path, ReleaseType::Safenode, @@ -178,14 +205,18 @@ pub async fn run( .await?; let options = LocalNetworkOptions { - faucet_bin_path: faucet_path, + enable_metrics_server, + faucet_bin_path, join: false, interval, + metrics_port, + node_port, node_count: count, owner, owner_prefix, peers: None, - safenode_bin_path: node_path, + rpc_port, + safenode_bin_path, skip_validation, log_format, }; diff --git a/sn_node_manager/src/helpers.rs b/sn_node_manager/src/helpers.rs index 2e3fae01bf..a841b54e6f 100644 --- a/sn_node_manager/src/helpers.rs +++ b/sn_node_manager/src/helpers.rs @@ -13,6 +13,7 @@ use color_eyre::{ use indicatif::{ProgressBar, ProgressStyle}; use semver::Version; use sn_releases::{get_running_platform, ArchiveType, ReleaseType, SafeReleaseRepoActions}; +use sn_service_management::NodeServiceData; use std::{ io::Read, path::{Path, PathBuf}, @@ -20,7 +21,7 @@ use std::{ sync::Arc, }; -use crate::{config, VerbosityLevel}; +use crate::{add_services::config::PortRange, config, VerbosityLevel}; const MAX_DOWNLOAD_RETRIES: u8 = 3; @@ -325,3 +326,55 @@ pub fn create_temp_dir() -> Result { .inspect_err(|err| error!("Failed to crete temp dir: {err:?}"))?; Ok(new_temp_dir) } + +/// Get the start port from the `PortRange` if applicable. 
+pub fn get_start_port_if_applicable(range: Option) -> Option { + if let Some(port) = range { + match port { + PortRange::Single(val) => return Some(val), + PortRange::Range(start, _) => return Some(start), + } + } + None +} + +/// Increment the port by 1. +pub fn increment_port_option(port: Option) -> Option { + if let Some(port) = port { + let incremented_port = port + 1; + return Some(incremented_port); + } + None +} + +/// Make sure the port is not already in use by another node. +pub fn check_port_availability(port_option: &PortRange, nodes: &[NodeServiceData]) -> Result<()> { + let mut all_ports = Vec::new(); + for node in nodes { + if let Some(port) = node.metrics_port { + all_ports.push(port); + } + if let Some(port) = node.node_port { + all_ports.push(port); + } + all_ports.push(node.rpc_socket_addr.port()); + } + + match port_option { + PortRange::Single(port) => { + if all_ports.iter().any(|p| *p == *port) { + error!("Port {port} is being used by another service"); + return Err(eyre!("Port {port} is being used by another service")); + } + } + PortRange::Range(start, end) => { + for i in *start..=*end { + if all_ports.iter().any(|p| *p == i) { + error!("Port {i} is being used by another service"); + return Err(eyre!("Port {i} is being used by another service")); + } + } + } + } + Ok(()) +} diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index aea74eb106..4fc4fbeb97 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -6,7 +6,11 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::helpers::{get_bin_version, get_username}; +use crate::add_services::config::PortRange; +use crate::helpers::{ + check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, + increment_port_option, +}; use color_eyre::eyre::OptionExt; use color_eyre::{eyre::eyre, Result}; use colored::Colorize; @@ -35,10 +39,12 @@ pub trait Launcher { fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result; fn launch_node( &self, - owner: Option, - rpc_socket_addr: SocketAddr, bootstrap_peers: Vec, log_format: Option, + metrics_port: Option, + node_port: Option, + owner: Option, + rpc_socket_addr: SocketAddr, ) -> Result<()>; fn wait(&self, delay: u64); } @@ -71,10 +77,12 @@ impl Launcher for LocalSafeLauncher { fn launch_node( &self, - owner: Option, - rpc_socket_addr: SocketAddr, bootstrap_peers: Vec, log_format: Option, + metrics_port: Option, + node_port: Option, + owner: Option, + rpc_socket_addr: SocketAddr, ) -> Result<()> { let mut args = Vec::new(); @@ -97,6 +105,16 @@ impl Launcher for LocalSafeLauncher { args.push(log_format.as_str().to_string()); } + if let Some(metrics_port) = metrics_port { + args.push("--metrics-server-port".to_string()); + args.push(metrics_port.to_string()); + } + + if let Some(node_port) = node_port { + args.push("--port".to_string()); + args.push(node_port.to_string()); + } + args.push("--local".to_string()); args.push("--rpc".to_string()); args.push(rpc_socket_addr.to_string()); @@ -186,13 +204,17 @@ pub fn kill_network(node_registry: &NodeRegistry, keep_directories: bool) -> Res } pub struct LocalNetworkOptions { + pub enable_metrics_server: bool, pub faucet_bin_path: PathBuf, pub join: bool, pub interval: u64, + pub metrics_port: Option, + pub node_port: Option, pub node_count: u16, pub owner: Option, pub owner_prefix: Option, pub peers: Option>, + pub rpc_port: 
Option, pub safenode_bin_path: PathBuf, pub skip_validation: bool, pub log_format: Option, @@ -205,11 +227,32 @@ pub async fn run_network( ) -> Result<()> { info!("Running local network"); + // Check port availability when joining a local network. + if let Some(port_range) = &options.node_port { + port_range.validate(options.node_count)?; + check_port_availability(port_range, &node_registry.nodes)?; + } + + if let Some(port_range) = &options.metrics_port { + port_range.validate(options.node_count)?; + check_port_availability(port_range, &node_registry.nodes)?; + } + + if let Some(port_range) = &options.rpc_port { + port_range.validate(options.node_count)?; + check_port_availability(port_range, &node_registry.nodes)?; + } + let launcher = LocalSafeLauncher { safenode_bin_path: options.safenode_bin_path.to_path_buf(), faucet_bin_path: options.faucet_bin_path.to_path_buf(), }; + let mut node_port = get_start_port_if_applicable(options.node_port); + let mut metrics_port = get_start_port_if_applicable(options.metrics_port); + let mut rpc_port = get_start_port_if_applicable(options.rpc_port); + + // Start the bootstrap node if it doesnt exist. let (bootstrap_peers, start) = if options.join { if let Some(peers) = options.peers { (peers, 1) @@ -222,8 +265,20 @@ pub async fn run_network( (peer, 1) } } else { - let rpc_port = service_control.get_available_port()?; - let rpc_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_port); + let rpc_free_port = if let Some(port) = rpc_port { + port + } else { + service_control.get_available_port()? + }; + let metrics_free_port = if let Some(port) = metrics_port { + Some(port) + } else if options.enable_metrics_server { + Some(service_control.get_available_port()?) + } else { + None + }; + let rpc_socket_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_free_port); let rpc_client = RpcClient::from_socket_addr(rpc_socket_addr); let number = (node_registry.nodes.len() as u16) + 1; @@ -232,6 +287,8 @@ pub async fn run_network( RunNodeOptions { bootstrap_peers: vec![], genesis: true, + metrics_port: metrics_free_port, + node_port, interval: options.interval, log_format: options.log_format, number, @@ -247,13 +304,28 @@ pub async fn run_network( let bootstrap_peers = node .listen_addr .ok_or_eyre("The listen address was not set")?; + node_port = increment_port_option(node_port); + metrics_port = increment_port_option(metrics_port); + rpc_port = increment_port_option(rpc_port); (bootstrap_peers, 2) }; node_registry.save()?; for _ in start..=options.node_count { - let rpc_port = service_control.get_available_port()?; - let rpc_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_port); + let rpc_free_port = if let Some(port) = rpc_port { + port + } else { + service_control.get_available_port()? + }; + let metrics_free_port = if let Some(port) = metrics_port { + Some(port) + } else if options.enable_metrics_server { + Some(service_control.get_available_port()?) 
+ } else { + None + }; + let rpc_socket_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_free_port); let rpc_client = RpcClient::from_socket_addr(rpc_socket_addr); let number = (node_registry.nodes.len() as u16) + 1; @@ -262,6 +334,8 @@ pub async fn run_network( RunNodeOptions { bootstrap_peers: bootstrap_peers.clone(), genesis: false, + metrics_port: metrics_free_port, + node_port, interval: options.interval, log_format: options.log_format, number, @@ -280,6 +354,10 @@ pub async fn run_network( // `kill` command for the nodes that we did spin up. The `kill` command works on the basis // of what's in the node registry. node_registry.save()?; + + node_port = increment_port_option(node_port); + metrics_port = increment_port_option(metrics_port); + rpc_port = increment_port_option(rpc_port); } if !options.skip_validation { @@ -314,6 +392,8 @@ pub struct RunNodeOptions { pub genesis: bool, pub interval: u64, pub log_format: Option, + pub metrics_port: Option, + pub node_port: Option, pub number: u16, pub owner: Option, pub rpc_socket_addr: SocketAddr, @@ -328,10 +408,12 @@ pub async fn run_node( info!("Launching node {}...", run_options.number); println!("Launching node {}...", run_options.number); launcher.launch_node( - run_options.owner.clone(), - run_options.rpc_socket_addr, run_options.bootstrap_peers.clone(), run_options.log_format, + run_options.metrics_port, + run_options.node_port, + run_options.owner.clone(), + run_options.rpc_socket_addr, )?; launcher.wait(run_options.interval); @@ -355,8 +437,8 @@ pub async fn run_node( local: true, log_dir_path: node_info.log_path, log_format: run_options.log_format, - metrics_port: None, - node_port: None, + metrics_port: run_options.metrics_port, + node_port: run_options.node_port, number: run_options.number, owner: run_options.owner, peer_id: Some(peer_id), @@ -471,9 +553,16 @@ mod tests { let rpc_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 13000); mock_launcher .expect_launch_node() - .with(eq(None), eq(rpc_socket_addr), eq(vec![]), eq(None)) + .with( + eq(vec![]), + eq(None), + eq(None), + eq(None), + eq(None), + eq(rpc_socket_addr), + ) .times(1) - .returning(|_, _, _, _| Ok(())); + .returning(|_, _, _, _, _, _| Ok(())); mock_launcher .expect_wait() .with(eq(100)) @@ -514,6 +603,8 @@ mod tests { genesis: true, interval: 100, log_format: None, + metrics_port: None, + node_port: None, number: 1, owner: None, rpc_socket_addr, From c6ff468ffdbc068eb1604e48cb2fef305f13e13c Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 25 Jul 2024 21:27:53 +0800 Subject: [PATCH 030/115] chore(client): debug resend spend --- sn_client/src/api.rs | 2 +- sn_client/src/wallet.rs | 65 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index 93418ec44f..11057f0201 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -895,7 +895,7 @@ impl Client { expected_holders, }; let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, + put_quorum: Quorum::Majority, retry_strategy: Some(RetryStrategy::Persistent), use_put_record_to: None, verification: Some((VerificationKind::Network, verification_cfg)), diff --git a/sn_client/src/wallet.rs b/sn_client/src/wallet.rs index 50858604c9..398833e79f 100644 --- a/sn_client/src/wallet.rs +++ b/sn_client/src/wallet.rs @@ -705,6 +705,29 @@ impl WalletClient { .unconfirmed_spend_requests() .iter() .map(|s| { + info!( + "Unconfirmed spend {:?} of amount {}", + s.spend.unique_pubkey, 
s.spend.amount + ); + info!("====== spent_tx.inputs : {:?} ", s.spend.spent_tx.inputs); + info!("====== spent_tx.outputs : {:?} ", s.spend.spent_tx.outputs); + info!("====== parent_tx.inputs : {:?} ", s.spend.parent_tx.inputs); + info!( + "====== parent_tx.outputs : {:?} ", + s.spend.parent_tx.outputs + ); + println!( + "Unconfirmed spend {:?} of amount {}", + s.spend.unique_pubkey, s.spend.amount + ); + println!("====== spent_tx.inputs : {:?} ", s.spend.spent_tx.inputs); + println!("====== spent_tx.outputs : {:?} ", s.spend.spent_tx.outputs); + println!("====== parent_tx.inputs : {:?} ", s.spend.parent_tx.inputs); + println!( + "====== parent_tx.outputs : {:?} ", + s.spend.parent_tx.outputs + ); + let parent_spends: BTreeSet<_> = s .spend .parent_tx @@ -737,9 +760,49 @@ impl WalletClient { if let Some(parent_spends) = spends_to_check.get(&addr) { for parent_addr in parent_spends.iter() { match self.client.peek_a_spend(*parent_addr).await { - Ok(_) => { + Ok(s) => { info!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} is find having at least one copy in the network !"); println!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} is find having at least one copy in the network !"); + info!( + "Parent spend {:?} of amount {}", + s.spend.unique_pubkey, s.spend.amount + ); + info!( + "====== spent_tx.inputs : {:?} ", + s.spend.spent_tx.inputs + ); + info!( + "====== spent_tx.outputs : {:?} ", + s.spend.spent_tx.outputs + ); + info!( + "====== parent_tx.inputs : {:?} ", + s.spend.parent_tx.inputs + ); + info!( + "====== parent_tx.outputs : {:?} ", + s.spend.parent_tx.outputs + ); + println!( + "Parent spend {:?} of amount {}", + s.spend.unique_pubkey, s.spend.amount + ); + println!( + "====== spent_tx.inputs : {:?} ", + s.spend.spent_tx.inputs + ); + println!( + "====== spent_tx.outputs : {:?} ", + s.spend.spent_tx.outputs + ); + println!( + "====== parent_tx.inputs : {:?} ", + s.spend.parent_tx.inputs + ); + println!( + "====== parent_tx.outputs : {:?} ", + s.spend.parent_tx.outputs + ); } Err(err) => { warn!( From 71433c2a13b2a1799cb36d30a276792ff0949e79 Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 25 Jul 2024 23:50:00 +0800 Subject: [PATCH 031/115] chore(node): trust a parent spend with majority --- sn_networking/src/transfers.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 8f240c156b..be8e9dd12b 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -66,10 +66,11 @@ impl Network { expected, got, })) => { - // if majority holds the spend, it might be worth it to try again. + // if majority holds the spend, it might be worth to be trusted. if got >= close_group_majority() { - debug!("At least a majority nodes hold the spend {address:?}, so trying to get it again."); - get_cfg.retry_strategy = Some(RetryStrategy::Persistent); + debug!("At least a majority nodes hold the spend {address:?}, going to trust it if can fetch with majority again."); + get_cfg.get_quorum = Quorum::Majority; + get_cfg.retry_strategy = Some(RetryStrategy::Balanced); self.get_record_from_network(key, &get_cfg).await? 
} else { return Err(NetworkError::GetRecordError( From cbcba45f1087efc706885420f00d459fd448c403 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 29 Jul 2024 12:52:08 +0200 Subject: [PATCH 032/115] chore(launchpad): popup crate and del unused code --- node-launchpad/src/app.rs | 7 +- node-launchpad/src/components.rs | 7 +- node-launchpad/src/components/home.rs | 6 +- node-launchpad/src/components/options.rs | 119 ------------------ node-launchpad/src/components/popup.rs | 12 ++ .../components/{ => popup}/beta_programme.rs | 5 +- .../src/components/{ => popup}/help.rs | 23 ++-- .../components/{ => popup}/manage_nodes.rs | 4 +- .../{reset_popup.rs => popup/reset.rs} | 2 +- node-launchpad/src/components/tab.rs | 100 --------------- 10 files changed, 41 insertions(+), 244 deletions(-) delete mode 100644 node-launchpad/src/components/options.rs create mode 100644 node-launchpad/src/components/popup.rs rename node-launchpad/src/components/{ => popup}/beta_programme.rs (99%) rename node-launchpad/src/components/{ => popup}/help.rs (89%) rename node-launchpad/src/components/{ => popup}/manage_nodes.rs (99%) rename node-launchpad/src/components/{reset_popup.rs => popup/reset.rs} (99%) delete mode 100644 node-launchpad/src/components/tab.rs diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index 6a3e7e9cd7..130cbc664c 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -10,10 +10,11 @@ use std::path::PathBuf; use crate::{ action::Action, - components::{ - beta_programme::BetaProgramme, footer::Footer, help::HelpPopUp, home::Home, - manage_nodes::ManageNodes, reset_popup::ResetNodesPopup, Component, + components::popup::{ + beta_programme::BetaProgramme, help::HelpPopUp, manage_nodes::ManageNodes, + reset::ResetNodesPopup, }, + components::{footer::Footer, home::Home, Component}, config::{AppData, Config}, mode::{InputMode, Scene}, style::SPACE_CADET, diff --git a/node-launchpad/src/components.rs b/node-launchpad/src/components.rs index 71534dca28..9353fb5bc7 100644 --- a/node-launchpad/src/components.rs +++ b/node-launchpad/src/components.rs @@ -17,14 +17,9 @@ use crate::{ tui::{Event, Frame}, }; -pub mod beta_programme; pub mod footer; -pub mod help; pub mod home; -pub mod manage_nodes; -pub mod options; -pub mod reset_popup; -pub mod tab; +pub mod popup; pub mod utils; /// `Component` is a trait that represents a visual and interactive element of the user interface. diff --git a/node-launchpad/src/components/home.rs b/node-launchpad/src/components/home.rs index 49e5884b13..8b878da1a9 100644 --- a/node-launchpad/src/components/home.rs +++ b/node-launchpad/src/components/home.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use super::{manage_nodes::GB_PER_NODE, utils::centered_rect_fixed, Component, Frame}; +use super::{popup::manage_nodes::GB_PER_NODE, utils::centered_rect_fixed, Component, Frame}; use crate::{ action::{Action, HomeActions}, config::Config, @@ -527,7 +527,7 @@ impl Component for Home { .fg(LIGHT_PERIWINKLE) .block( Block::default() - .title(" Node Status ") + .title(format!(" Nodes ({}) ", self.nodes_to_start)) .title_style(Style::default().fg(LIGHT_PERIWINKLE)) .borders(Borders::ALL) .border_style(style::Style::default().fg(COOL_GREY)) @@ -547,7 +547,7 @@ impl Component for Home { .highlight_style(Style::new().reversed()) .block( Block::default() - .title(" Node Status ") + .title(format!(" Nodes ({}) ", self.nodes_to_start)) .padding(Padding::new(2, 2, 1, 1)) .title_style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs deleted file mode 100644 index 574fb0d8df..0000000000 --- a/node-launchpad/src/components/options.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::Component; -use crate::{ - action::Action, - mode::{InputMode, Scene}, -}; -use color_eyre::Result; -use crossterm::event::{Event, KeyCode, KeyEvent}; -use ratatui::{prelude::*, widgets::*}; -use tui_input::{backend::crossterm::EventHandler, Input}; - -#[derive(Default)] -pub struct Options { - // state - show_scene: bool, - input_mode: InputMode, - input: Input, -} - -impl Component for Options { - fn handle_key_events(&mut self, key: KeyEvent) -> Result> { - // while in entry mode, keybinds are not captured, so gotta exit entry mode from here - match key.code { - KeyCode::Esc => { - return Ok(vec![Action::SwitchInputMode(InputMode::Navigation)]); - } - KeyCode::Down => { - // self.select_next_input_field(); - } - KeyCode::Up => { - // self.select_previous_input_field(); - } - _ => {} - } - self.input.handle_event(&Event::Key(key)); - Ok(vec![]) - } - - fn update(&mut self, action: Action) -> Result> { - match action { - Action::SwitchScene(scene) => match scene { - Scene::Options => self.show_scene = true, - _ => self.show_scene = false, - }, - Action::SwitchInputMode(mode) => self.input_mode = mode, - _ => {} - }; - Ok(None) - } - - fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { - if !self.show_scene { - return Ok(()); - } - - // index 0 is reserved for tab; 2 is for keybindings - let layer_zero = Layout::new( - Direction::Vertical, - [Constraint::Max(1), Constraint::Min(15), Constraint::Max(3)], - ) - .split(area); - - // break the index 1 into sub sections - let layer_one = Layout::new( - Direction::Vertical, - [ - Constraint::Length(3), - Constraint::Length(3), - Constraint::Length(3), - Constraint::Length(3), - Constraint::Length(3), - Constraint::Length(3), - ], - ) - .split(layer_zero[1]); - - let input = Paragraph::new(self.input.value()) - .style(Style::default()) - .block( - Block::default() - .borders(Borders::ALL) - .title("Peer MultiAddress"), - ); - 
f.render_widget(input, layer_one[0]); - let input = Paragraph::new(self.input.value()) - .style(Style::default()) - .block(Block::default().borders(Borders::ALL).title("Home Network")); - f.render_widget(input, layer_one[1]); - let input = Paragraph::new(self.input.value()) - .style(Style::default()) - .block( - Block::default() - .borders(Borders::ALL) - .title("Data dir Path"), - ); - f.render_widget(input, layer_one[2]); - let input = Paragraph::new(self.input.value()) - .style(Style::default()) - .block(Block::default().borders(Borders::ALL).title("Log dir Path")); - f.render_widget(input, layer_one[3]); - let input = Paragraph::new(self.input.value()) - .style(Style::default()) - .block(Block::default().borders(Borders::ALL).title("Node Version")); - f.render_widget(input, layer_one[4]); - let input = Paragraph::new(self.input.value()) - .style(Style::default()) - .block(Block::default().borders(Borders::ALL).title("RPC Address")); - f.render_widget(input, layer_one[5]); - - Ok(()) - } -} diff --git a/node-launchpad/src/components/popup.rs b/node-launchpad/src/components/popup.rs new file mode 100644 index 0000000000..48d2044a04 --- /dev/null +++ b/node-launchpad/src/components/popup.rs @@ -0,0 +1,12 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +pub mod beta_programme; +pub mod help; +pub mod manage_nodes; +pub mod reset; diff --git a/node-launchpad/src/components/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs similarity index 99% rename from node-launchpad/src/components/beta_programme.rs rename to node-launchpad/src/components/popup/beta_programme.rs index 5030932fc8..5d253a4a58 100644 --- a/node-launchpad/src/components/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -6,7 +6,8 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::{utils::centered_rect_fixed, Component}; +use super::super::utils::centered_rect_fixed; +use super::super::Component; use crate::{ action::Action, mode::{InputMode, Scene}, @@ -181,7 +182,7 @@ impl Component for BetaProgramme { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) - .title("Beta Rewards Program") + .title(" Beta Rewards Program ") .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)), diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/popup/help.rs similarity index 89% rename from node-launchpad/src/components/help.rs rename to node-launchpad/src/components/popup/help.rs index 945ff9d350..8d9150f8d5 100644 --- a/node-launchpad/src/components/help.rs +++ b/node-launchpad/src/components/popup/help.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use super::{utils::centered_rect_fixed, Component}; +use super::super::{utils::centered_rect_fixed, Component}; use crate::{ action::Action, mode::{InputMode, Scene}, @@ -95,38 +95,45 @@ impl Component for HelpPopUp { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) - .title("Get Help") + .title(" Get Help ") .title_style(style::Style::default().fg(EUCALYPTUS)) .border_style(Style::new().fg(EUCALYPTUS)), ); - let line1 = Paragraph::new(" See the quick start guides:"); + let line1 = Paragraph::new(" See the quick start guides:") + .block(Block::default().padding(Padding::horizontal(1))); f.render_widget(line1.fg(GHOST_WHITE), layer_one[1]); + let link1 = Hyperlink::new( Span::styled( - " https://autonomi.com/getting-started", + " https://autonomi.com/getting-started", Style::default().fg(VIVID_SKY_BLUE), ), "https://autonomi.com/getting-started", ); f.render_widget_ref(link1, layer_one[2]); - let line2 = Paragraph::new(" Get direct help via Discord:").fg(GHOST_WHITE); + let line2 = Paragraph::new(" Get direct help via Discord:") + .fg(GHOST_WHITE) + .block(Block::default().padding(Padding::horizontal(1))); f.render_widget(line2, layer_one[3]); + let link2 = Hyperlink::new( Span::styled( - " https://discord.gg/autonomi", + " https://discord.gg/autonomi", Style::default().fg(VIVID_SKY_BLUE), ), "https://discord.gg/autonomi", ); f.render_widget_ref(link2, layer_one[4]); - let line3 = Paragraph::new(" To join the Beta Rewards Program:").fg(GHOST_WHITE); + let line3 = Paragraph::new(" To join the Beta Rewards Program:") + .fg(GHOST_WHITE) + .block(Block::default().padding(Padding::horizontal(1))); f.render_widget(line3, layer_one[5]); let link3 = Hyperlink::new( Span::styled( - " https://autonomi.com/beta", + " https://autonomi.com/beta", Style::default().fg(VIVID_SKY_BLUE), ), "https://autonomi.com/beta", diff --git a/node-launchpad/src/components/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs similarity index 99% rename from node-launchpad/src/components/manage_nodes.rs rename to node-launchpad/src/components/popup/manage_nodes.rs index 5249251e6b..9d3959647e 100644 --- a/node-launchpad/src/components/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -19,7 +19,7 @@ use crate::{ style::{clear_area, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, }; -use super::{utils::centered_rect_fixed, Component}; +use super::super::{utils::centered_rect_fixed, Component}; pub const GB_PER_NODE: usize = 5; pub const MB: usize = 1000 * 1000; @@ -240,7 +240,7 @@ impl Component for ManageNodes { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) - .title("Manage Nodes") + .title(" Manage Nodes ") .title_style(Style::new().fg(GHOST_WHITE)) .title_style(Style::new().fg(EUCALYPTUS)) .padding(Padding::uniform(2)) diff --git a/node-launchpad/src/components/reset_popup.rs b/node-launchpad/src/components/popup/reset.rs similarity index 99% rename from node-launchpad/src/components/reset_popup.rs rename to node-launchpad/src/components/popup/reset.rs index 4cc408c4ac..2545ed3bf6 100644 --- a/node-launchpad/src/components/reset_popup.rs +++ b/node-launchpad/src/components/popup/reset.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use super::{utils::centered_rect_fixed, Component}; +use super::super::{utils::centered_rect_fixed, Component}; use crate::{ action::{Action, HomeActions}, mode::{InputMode, Scene}, diff --git a/node-launchpad/src/components/tab.rs b/node-launchpad/src/components/tab.rs deleted file mode 100644 index 537c478d30..0000000000 --- a/node-launchpad/src/components/tab.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::Component; -use crate::{ - action::{Action, TabActions}, - mode::Scene, -}; -use color_eyre::Result; -use ratatui::{ - layout::{Constraint, Direction, Layout}, - style::{Style, Stylize}, - widgets::Tabs, -}; - -pub struct Tab { - scene_list: Vec, - current_tab_index: usize, -} - -impl Default for Tab { - fn default() -> Self { - Self { - scene_list: vec![Scene::Home], - current_tab_index: 0, - } - } -} - -impl Tab { - pub fn get_current_scene(&self) -> Scene { - self.scene_list[self.current_tab_index] - } -} - -impl Component for Tab { - fn update(&mut self, action: Action) -> Result> { - let send_back = match action { - Action::TabActions(TabActions::NextTab) => { - trace!(?self.current_tab_index, "Got Next tab"); - let mut new_index = self.current_tab_index + 1; - if new_index >= self.scene_list.len() { - new_index = 0; - } - self.current_tab_index = new_index; - let new_scene = self.scene_list[self.current_tab_index]; - trace!(?new_scene, "Updated tab:"); - Some(Action::SwitchScene(new_scene)) - } - - Action::TabActions(TabActions::PreviousTab) => { - trace!(?self.current_tab_index, "Got PreviousTab"); - let new_index = if self.current_tab_index == 0 { - self.scene_list.len() - 1 - } else { - self.current_tab_index - 1 - }; - self.current_tab_index = new_index; - - let new_scene = self.scene_list[self.current_tab_index]; - trace!(?new_scene, "Updated tab:"); - Some(Action::SwitchScene(new_scene)) - } - _ => None, - }; - Ok(send_back) - } - - fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: ratatui::prelude::Rect) -> Result<()> { - let layer_zero = Layout::new( - Direction::Vertical, - [ - Constraint::Max(1), - Constraint::Min(5), - Constraint::Min(3), - Constraint::Max(3), - ], - ) - .split(area); - let tab_items = self - .scene_list - .iter() - .map(|item| format!("{item:?}")) - .collect::>(); - let tab = Tabs::new(tab_items) - .style(Style::default().white()) - .highlight_style(Style::default().yellow()) - .select(self.current_tab_index) - .divider("|") - .padding(" ", " "); - f.render_widget(tab, layer_zero[0]); - - Ok(()) - } -} From e9d536dc53b081991da2bd2f6ec35c0cb5ecad62 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 30 Jul 2024 10:22:36 +0900 Subject: [PATCH 033/115] feat(networking): keep NetworkSwarmCmd sender to avoid blocking thread --- sn_networking/src/driver.rs | 24 ++++++++++++++++ sn_networking/src/event/request_response.rs | 32 ++++++++++----------- 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 51a133089e..1e99995a51 100644 --- 
a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -607,6 +607,9 @@ impl NetworkBuilder { replication_fetcher, #[cfg(feature = "open-metrics")] network_metrics, + // kept here to ensure we can push messages to the channel + // and not block the processing thread unintentionally + network_cmd_sender: network_swarm_cmd_sender.clone(), network_cmd_receiver: network_swarm_cmd_receiver, local_cmd_receiver: local_swarm_cmd_receiver, event_sender: network_event_sender, @@ -656,6 +659,7 @@ pub struct SwarmDriver { #[cfg(feature = "open-metrics")] pub(crate) network_metrics: Option, + network_cmd_sender: mpsc::Sender, local_cmd_receiver: mpsc::Receiver, network_cmd_receiver: mpsc::Receiver, event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. @@ -790,6 +794,26 @@ impl SwarmDriver { farthest_distance } + /// Pushes NetworkSwarmCmd off thread so as to be non-blocking + /// this is a wrapper around the `mpsc::Sender::send` call + pub(crate) fn queue_network_swarm_cmd(&self, event: NetworkSwarmCmd) { + let event_sender = self.network_cmd_sender.clone(); + let capacity = event_sender.capacity(); + + // push the event off thread so as to be non-blocking + let _handle = spawn(async move { + if capacity == 0 { + warn!( + "NetworkSwarmCmd channel is full. Await capacity to send: {:?}", + event + ); + } + if let Err(error) = event_sender.send(event).await { + error!("SwarmDriver failed to send event: {}", error); + } + }); + } + /// Sends an event after pushing it off thread so as to be non-blocking /// this is a wrapper around the `mpsc::Sender::send` call pub(crate) fn send_event(&self, event: NetworkEvent) { diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 8f31112f19..6d069e22e5 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -7,7 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - sort_peers_by_address, MsgResponder, NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, sort_peers_by_address, MsgResponder, NetworkError, NetworkEvent, + SwarmDriver, CLOSE_GROUP_SIZE, }; use itertools::Itertools; use libp2p::request_response::{self, Message}; @@ -41,11 +42,11 @@ impl SwarmDriver { let response = Response::Cmd( sn_protocol::messages::CmdResponse::Replicate(Ok(())), ); - self.swarm - .behaviour_mut() - .request_response - .send_response(channel, response) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendResponse { + resp: response, + channel: MsgResponder::FromPeer(channel), + }); self.add_keys_to_replication_fetcher(holder, keys); } @@ -56,11 +57,10 @@ impl SwarmDriver { let response = Response::Cmd( sn_protocol::messages::CmdResponse::QuoteVerification(Ok(())), ); - self.swarm - .behaviour_mut() - .request_response - .send_response(channel, response) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendResponse { + resp: response, + channel: MsgResponder::FromPeer(channel), + }); // The keypair is required to verify the quotes, // hence throw it up to Network layer for further actions. 
@@ -82,11 +82,11 @@ impl SwarmDriver { let response = Response::Cmd( sn_protocol::messages::CmdResponse::PeerConsideredAsBad(Ok(())), ); - self.swarm - .behaviour_mut() - .request_response - .send_response(channel, response) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendResponse { + resp: response, + channel: MsgResponder::FromPeer(channel), + }); if bad_peer == NetworkAddress::from_peer(self.self_peer_id) { warn!("Peer {detected_by:?} consider us as BAD, due to {bad_behaviour:?}."); From ba065fa4a8d42769d6580fa48af1772e69da4e3b Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 30 Jul 2024 10:31:33 +0900 Subject: [PATCH 034/115] feat(networking): move more send_request off thread --- sn_networking/src/cmd.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 9b3417d1ed..65599c6b62 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -924,18 +924,16 @@ impl SwarmDriver { keys: all_records, }); for peer_id in replicate_targets { - let request_id = self - .swarm - .behaviour_mut() - .request_response - .send_request(&peer_id, request.clone()); - debug!("Sending request {request_id:?} to peer {peer_id:?}"); - let _ = self.pending_requests.insert(request_id, None); + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest { + req: request.clone(), + peer: peer_id, + sender: None, + }); + let _ = self .replication_targets .insert(peer_id, now + REPLICATION_TIMEOUT); } - debug!("Pending Requests now: {:?}", self.pending_requests.len()); } Ok(()) From 561e533552b2c58f14adb1b70dae8facee65902b Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 30 Jul 2024 11:58:48 +0900 Subject: [PATCH 035/115] feat(networking): maintain record_store cache_map to increase efficiency retain/remove/pop could take a while with a large cache --- sn_networking/src/cmd.rs | 1 + sn_networking/src/record_store.rs | 44 ++++++++++++++++++++++++------- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 65599c6b62..95729116f7 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -623,6 +623,7 @@ impl SwarmDriver { let new_keys_to_fetch = self .replication_fetcher .notify_about_new_put(key.clone(), record_type); + if !new_keys_to_fetch.is_empty() { self.send_event(NetworkEvent::KeysToFetchForReplication(new_keys_to_fetch)); } diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 7939ce7e25..7d47a896a2 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -74,6 +74,9 @@ pub struct NodeRecordStore { records: HashMap, /// FIFO simple cache of records to reduce read times records_cache: VecDeque, + /// A map from record keys to their indices in the cache + /// allowing for more efficient cache management + records_cache_map: HashMap, /// Send network events to the node layer. network_event_sender: mpsc::Sender, /// Send cmds to the network layer. Used to interact with self in an async fashion. 
@@ -280,6 +283,7 @@ impl NodeRecordStore { config, records, records_cache: VecDeque::with_capacity(cache_size), + records_cache_map: HashMap::with_capacity(cache_size), network_event_sender, local_swarm_cmd_sender: swarm_cmd_sender, responsible_distance_range: None, @@ -517,23 +521,34 @@ impl NodeRecordStore { /// The record is marked as written to disk once `mark_as_stored` is called, /// this avoids us returning half-written data or registering it as stored before it is. pub(crate) fn put_verified(&mut self, r: Record, record_type: RecordType) -> Result<()> { + let key = &r.key; let record_key = PrettyPrintRecordKey::from(&r.key).into_owned(); - debug!("PUT a verified Record: {record_key:?}"); + debug!("PUTting a verified Record: {record_key:?}"); // if the cache already has this record in it (eg, a conflicting spend) // remove it from the cache - self.records_cache.retain(|record| record.key != r.key); + // self.records_cache.retain(|record| record.key != r.key); + // Remove from cache if it already exists + if let Some(&index) = self.records_cache_map.get(key) { + self.records_cache.remove(index); + self.update_cache_indices(index); + } - // store in the FIFO records cache, removing the oldest if needed - if self.records_cache.len() > self.config.records_cache_size { - self.records_cache.pop_front(); + // Store in the FIFO records cache, removing the oldest if needed + if self.records_cache.len() >= self.config.records_cache_size { + if let Some(old_record) = self.records_cache.pop_front() { + self.records_cache_map.remove(&old_record.key); + } } + // Push the new record to the back of the cache self.records_cache.push_back(r.clone()); + self.records_cache_map + .insert(r.key.clone(), self.records_cache.len() - 1); - self.prune_records_if_needed(&r.key)?; + self.prune_records_if_needed(key)?; - let filename = Self::generate_filename(&r.key); + let filename = Self::generate_filename(key); let file_path = self.config.storage_dir.join(&filename); #[cfg(feature = "open-metrics")] @@ -543,19 +558,21 @@ impl NodeRecordStore { let encryption_details = self.encryption_details.clone(); let cloned_cmd_sender = self.local_swarm_cmd_sender.clone(); + + let record_key2 = record_key.clone(); spawn(async move { let key = r.key.clone(); if let Some(bytes) = Self::prepare_record_bytes(r, encryption_details) { let cmd = match fs::write(&file_path, bytes) { Ok(_) => { // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): - info!("Wrote record {record_key:?} to disk! filename: {filename}"); + info!("Wrote record {record_key2:?} to disk! 
filename: {filename}"); LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } } Err(err) => { error!( - "Error writing record {record_key:?} filename: {filename}, error: {err:?}" + "Error writing record {record_key2:?} filename: {filename}, error: {err:?}" ); LocalSwarmCmd::RemoveFailedLocalRecord { key } } @@ -568,6 +585,15 @@ impl NodeRecordStore { Ok(()) } + /// Update the cache indices after removing an element + fn update_cache_indices(&mut self, start_index: usize) { + for index in start_index..self.records_cache.len() { + if let Some(record) = self.records_cache.get(index) { + self.records_cache_map.insert(record.key.clone(), index); + } + } + } + /// Calculate the cost to store data for our current store state #[allow(clippy::mutable_key_type)] pub(crate) fn store_cost(&self, key: &Key) -> (NanoTokens, QuotingMetrics) { From 6c503f08288c6e197e6a255230a8766fa820643a Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 30 Jul 2024 12:07:06 +0900 Subject: [PATCH 036/115] feat(networking): early exit on put if same record exists --- sn_networking/src/record_store.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 7d47a896a2..a32c8c4b2a 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -530,7 +530,16 @@ impl NodeRecordStore { // self.records_cache.retain(|record| record.key != r.key); // Remove from cache if it already exists if let Some(&index) = self.records_cache_map.get(key) { - self.records_cache.remove(index); + if let Some(existing_record) = self.records_cache.remove(index) { + if existing_record.value == r.value { + // we actually just want to keep what we have, and can assume it's been stored properly. + + // so we put it back in the cache + self.records_cache.insert(index, existing_record); + // and exit early. + return Ok(()); + } + } self.update_cache_indices(index); } @@ -544,7 +553,7 @@ impl NodeRecordStore { // Push the new record to the back of the cache self.records_cache.push_back(r.clone()); self.records_cache_map - .insert(r.key.clone(), self.records_cache.len() - 1); + .insert(key.clone(), self.records_cache.len() - 1); self.prune_records_if_needed(key)?; From 232c49beefc36a3428e66088bc4fb12097a7858d Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 30 Jul 2024 13:37:00 +0900 Subject: [PATCH 037/115] feat(networking): dedupe outgoing queries against existing inflight queries --- sn_networking/src/cmd.rs | 20 +++++++- sn_networking/src/driver.rs | 5 +- sn_networking/src/error.rs | 1 + sn_networking/src/event/kad.rs | 85 +++++++++++++++++++++------------- 4 files changed, 74 insertions(+), 37 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 95729116f7..ce926d20bc 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -338,6 +338,22 @@ impl SwarmDriver { match cmd { NetworkSwarmCmd::GetNetworkRecord { key, sender, cfg } => { cmd_string = "GetNetworkRecord"; + + for (pending_query, (inflight_record_query_key, senders, _, _)) in + self.pending_get_record.iter_mut() + { + if *inflight_record_query_key == key { + debug!( + "GetNetworkRecord for {:?} is already in progress. 
Adding sender to {pending_query:?}", + PrettyPrintRecordKey::from(&key) + ); + senders.push(sender); + + // early exit as we're already processing this query + return Ok(()); + } + } + let query_id = self.swarm.behaviour_mut().kademlia.get_record(key.clone()); debug!( @@ -348,7 +364,7 @@ impl SwarmDriver { if self .pending_get_record - .insert(query_id, (sender, Default::default(), cfg)) + .insert(query_id, (key, vec![sender], Default::default(), cfg)) .is_some() { warn!("An existing get_record task {query_id:?} got replaced"); @@ -358,7 +374,7 @@ impl SwarmDriver { let total_records: usize = self .pending_get_record .iter() - .map(|(_, (_, result_map, _))| result_map.len()) + .map(|(_, (_, _, result_map, _))| result_map.len()) .sum(); info!("We now have {} pending get record attempts and cached {total_records} fetched copies", self.pending_get_record.len()); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 1e99995a51..9ad9deefbd 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -38,7 +38,7 @@ use libp2p::Transport as _; use libp2p::{core::muxing::StreamMuxerBox, relay}; use libp2p::{ identity::Keypair, - kad::{self, QueryId, Quorum, Record, K_VALUE}, + kad::{self, QueryId, Quorum, Record, RecordKey, K_VALUE}, multiaddr::Protocol, request_response::{self, Config as RequestResponseConfig, OutboundRequestId, ProtocolSupport}, swarm::{ @@ -89,7 +89,8 @@ type GetRecordResultMap = HashMap)>; pub(crate) type PendingGetRecord = HashMap< QueryId, ( - oneshot::Sender>, + RecordKey, // record we're fetching, to dedupe repeat requests + Vec>>, // vec of senders waiting for this record GetRecordResultMap, GetRecordCfg, ), diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index de5cb56c3f..970ea47798 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -29,6 +29,7 @@ pub(super) type Result = std::result::Result; /// GetRecord Query errors #[derive(Error)] #[allow(missing_docs)] +#[derive(Clone)] pub enum GetRecordError { #[error("Get Record completed with non enough copies")] NotEnoughCopies { diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index ce839a8f5c..b9d0aef3d9 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -357,7 +357,7 @@ impl SwarmDriver { let pretty_key = PrettyPrintRecordKey::from(&peer_record.record.key).into_owned(); if let Entry::Occupied(mut entry) = self.pending_get_record.entry(query_id) { - let (_sender, result_map, cfg) = entry.get_mut(); + let (_key, _senders, result_map, cfg) = entry.get_mut(); if !cfg.expected_holders.is_empty() { if cfg.expected_holders.remove(&peer_id) { @@ -392,10 +392,10 @@ impl SwarmDriver { let cfg = cfg.clone(); // Remove the query task and consume the variables. 
- let (sender, result_map, _) = entry.remove(); + let (_key, senders, result_map, _) = entry.remove(); if result_map.len() == 1 { - Self::send_record_after_checking_target(sender, peer_record.record, &cfg)?; + Self::send_record_after_checking_target(senders, peer_record.record, &cfg)?; } else { debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with split record"); let mut accumulated_spends = BTreeSet::new(); @@ -422,13 +422,20 @@ impl SwarmDriver { publisher: None, expires: None, }; - sender - .send(Ok(new_accumulated_record)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + for sender in senders { + let new_accumulated_record = new_accumulated_record.clone(); + + sender + .send(Ok(new_accumulated_record)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } } else { - sender - .send(Err(GetRecordError::SplitRecord { result_map })) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + for sender in senders { + let result_map = result_map.clone(); + sender + .send(Err(GetRecordError::SplitRecord { result_map })) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } } } @@ -459,7 +466,7 @@ impl SwarmDriver { /// SplitRecord if there are multiple content hash versions. fn handle_get_record_finished(&mut self, query_id: QueryId, step: ProgressStep) -> Result<()> { // return error if the entry cannot be found - if let Some((sender, result_map, cfg)) = self.pending_get_record.remove(&query_id) { + if let Some((_key, senders, result_map, cfg)) = self.pending_get_record.remove(&query_id) { let num_of_versions = result_map.len(); let (result, log_string) = if let Some((record, from_peers)) = result_map.values().next() @@ -497,9 +504,11 @@ impl SwarmDriver { ); } - sender - .send(result) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + for sender in senders { + sender + .send(result.clone()) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } } else { // We manually perform `query.finish()` if we return early from accumulate fn. // Thus we will still get FinishedWithNoAdditionalRecord. @@ -525,7 +534,7 @@ impl SwarmDriver { match &get_record_err { kad::GetRecordError::NotFound { .. } | kad::GetRecordError::QuorumFailed { .. } => { // return error if the entry cannot be found - let (sender, _, cfg) = + let (_key, senders, _, cfg) = self.pending_get_record.remove(&query_id).ok_or_else(|| { debug!("Can't locate query task {query_id:?}, it has likely been completed already."); NetworkError::ReceivedKademliaEventDropped { @@ -539,14 +548,16 @@ impl SwarmDriver { } else { debug!("Get record task {query_id:?} failed with {:?} expected holders not responded, error {get_record_err:?}", cfg.expected_holders); } - sender - .send(Err(GetRecordError::RecordNotFound)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + for sender in senders { + sender + .send(Err(GetRecordError::RecordNotFound)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } } kad::GetRecordError::Timeout { key } => { // return error if the entry cannot be found let pretty_key = PrettyPrintRecordKey::from(key); - let (sender, result_map, cfg) = + let (_key, senders, result_map, cfg) = self.pending_get_record.remove(&query_id).ok_or_else(|| { debug!( "Can't locate query task {query_id:?} for {pretty_key:?}, it has likely been completed already." 
@@ -566,9 +577,11 @@ impl SwarmDriver { warn!( "Get record task {query_id:?} for {pretty_key:?} timed out with split result map" ); - sender - .send(Err(GetRecordError::QueryTimeout)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + for sender in senders { + sender + .send(Err(GetRecordError::QueryTimeout)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } return Ok(()); } @@ -576,16 +589,18 @@ impl SwarmDriver { // if we have enough responses here, we can return the record if let Some((record, peers)) = result_map.values().next() { if peers.len() >= required_response_count { - Self::send_record_after_checking_target(sender, record.clone(), &cfg)?; + Self::send_record_after_checking_target(senders, record.clone(), &cfg)?; return Ok(()); } } warn!("Get record task {query_id:?} for {pretty_key:?} returned insufficient responses. {:?} did not return record", cfg.expected_holders); - // Otherwise report the timeout - sender - .send(Err(GetRecordError::QueryTimeout)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + for sender in senders { + // Otherwise report the timeout + sender + .send(Err(GetRecordError::QueryTimeout)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } } } @@ -593,18 +608,22 @@ impl SwarmDriver { } fn send_record_after_checking_target( - sender: oneshot::Sender>, + senders: Vec>>, record: Record, cfg: &GetRecordCfg, ) -> Result<()> { - if cfg.target_record.is_none() || cfg.does_target_match(&record) { - sender - .send(Ok(record)) - .map_err(|_| NetworkError::InternalMsgChannelDropped) + let res = if cfg.target_record.is_none() || cfg.does_target_match(&record) { + Ok(record) } else { + Err(GetRecordError::RecordDoesNotMatch(record)) + }; + + for sender in senders { sender - .send(Err(GetRecordError::RecordDoesNotMatch(record))) - .map_err(|_| NetworkError::InternalMsgChannelDropped) + .send(res.clone()) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } + + Ok(()) } } From 14f8186c87653b59b11463215a76453617584a10 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 31 Jul 2024 17:11:58 +0800 Subject: [PATCH 038/115] chore: correct logging of cash_note and confirmed_spend disk ops --- sn_transfers/src/wallet/wallet_file.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sn_transfers/src/wallet/wallet_file.rs b/sn_transfers/src/wallet/wallet_file.rs index 58b4827663..2ecc5fd482 100644 --- a/sn_transfers/src/wallet/wallet_file.rs +++ b/sn_transfers/src/wallet/wallet_file.rs @@ -68,7 +68,7 @@ pub(super) fn remove_unconfirmed_spend_requests( for spend in unconfirmed_spend_requests.iter() { let spend_hex_name = spend.address().to_hex(); let spend_file_path = spends_dir.join(&spend_hex_name); - debug!("Writing spend to: {spend_file_path:?}"); + debug!("Writing confirmed_spend instance to: {spend_file_path:?}"); fs::write(spend_file_path, spend.to_bytes())?; } @@ -87,6 +87,7 @@ pub(super) fn get_confirmed_spend( let spends_dir = wallet_dir.join(CONFIRMED_SPENDS_DIR_NAME); let spend_hex_name = spend_addr.to_hex(); let spend_file_path = spends_dir.join(spend_hex_name); + debug!("Try to getting a confirmed_spend instance from: {spend_file_path:?}"); if !spend_file_path.is_file() { return Ok(None); } @@ -131,7 +132,7 @@ where fs::create_dir_all(&created_cash_notes_path)?; let cash_note_file_path = created_cash_notes_path.join(unique_pubkey_file_name); - debug!("Writing cash note to: {cash_note_file_path:?}"); + debug!("Writing cash_note file to: {cash_note_file_path:?}"); let hex = cash_note .to_hex() @@ -152,9 
+153,8 @@ where let unique_pubkey_name = *SpendAddress::from_unique_pubkey(cash_note_key).xorname(); let unique_pubkey_file_name = format!("{}.cash_note", hex::encode(unique_pubkey_name)); - debug!("Removing cash note from: {:?}", created_cash_notes_path); - let cash_note_file_path = created_cash_notes_path.join(unique_pubkey_file_name); + debug!("Removing cash_note file from: {:?}", cash_note_file_path); fs::remove_file(cash_note_file_path)?; } From 6413eb7db44ebc8cfdc602872b8f190d944e424b Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 31 Jul 2024 14:57:15 +0200 Subject: [PATCH 039/115] chore(cli): [#2012] show download time per file --- sn_cli/src/files/download.rs | 9 +++++++++ sn_cli/src/files/files_uploader.rs | 9 ++------- sn_cli/src/utils.rs | 25 +++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/sn_cli/src/files/download.rs b/sn_cli/src/files/download.rs index 2d200d8dd4..9289243cc5 100644 --- a/sn_cli/src/files/download.rs +++ b/sn_cli/src/files/download.rs @@ -20,6 +20,7 @@ use indicatif::ProgressBar; use walkdir::WalkDir; use xor_name::XorName; +use crate::utils::duration_to_minute_seconds_miliseconds_string; use sn_client::{ protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, FilesApi, FilesDownload, FilesDownloadEvent, @@ -97,6 +98,8 @@ pub async fn download_file( batch_size: usize, retry_strategy: RetryStrategy, ) { + let start_time = std::time::Instant::now(); + let mut files_download = FilesDownload::new(files_api.clone()) .set_batch_size(batch_size) .set_show_holders(show_holders) @@ -110,6 +113,7 @@ pub async fn download_file( let progress_handler = tokio::spawn(async move { let mut progress_bar: Option = None; + // The loop is guaranteed to end, as the channel will be closed when the download completes or errors out. while let Some(event) = download_events_rx.recv().await { match event { @@ -145,6 +149,7 @@ pub async fn download_file( } } } + if let Some(progress_bar) = progress_bar { progress_bar.finish_and_clear(); } @@ -158,6 +163,8 @@ pub async fn download_file( ) .await; + let duration = start_time.elapsed(); + // await on the progress handler first as we want to clear the progress bar before printing things. let _ = progress_handler.await; match download_result { @@ -170,6 +177,8 @@ pub async fn download_file( "Saved {file_name:?} at {}", downloaded_file_path.to_string_lossy() ); + let elapsed_time = duration_to_minute_seconds_miliseconds_string(duration); + println!("File downloaded in {elapsed_time}"); } Err(error) => { error!("Error downloading {file_name:?}: {error}"); diff --git a/sn_cli/src/files/files_uploader.rs b/sn_cli/src/files/files_uploader.rs index 59c332947c..1fcff4038f 100644 --- a/sn_cli/src/files/files_uploader.rs +++ b/sn_cli/src/files/files_uploader.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use super::get_progress_bar; +use crate::utils::duration_to_minute_seconds_string; use crate::ChunkManager; use bytes::Bytes; use color_eyre::{eyre::eyre, Report, Result}; @@ -424,13 +425,7 @@ impl FilesUploadStatusNotifier for StdOutPrinter { elapsed_time: Duration, chunks_to_upload_len: usize, ) { - let elapsed_minutes = elapsed_time.as_secs() / 60; - let elapsed_seconds = elapsed_time.as_secs() % 60; - let elapsed = if elapsed_minutes > 0 { - format!("{elapsed_minutes} minutes {elapsed_seconds} seconds") - } else { - format!("{elapsed_seconds} seconds") - }; + let elapsed = duration_to_minute_seconds_string(elapsed_time); println!( "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \ diff --git a/sn_cli/src/utils.rs b/sn_cli/src/utils.rs index 88cd3c1331..093b939960 100644 --- a/sn_cli/src/utils.rs +++ b/sn_cli/src/utils.rs @@ -6,7 +6,32 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use std::time::Duration; + /// Returns whether a hex string is a valid secret key in hex format. pub fn is_valid_key_hex(hex: &str) -> bool { hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit()) } + +pub fn duration_to_minute_seconds_string(duration: Duration) -> String { + let elapsed_minutes = duration.as_secs() / 60; + let elapsed_seconds = duration.as_secs() % 60; + if elapsed_minutes > 0 { + format!("{elapsed_minutes} minutes {elapsed_seconds} seconds") + } else { + format!("{elapsed_seconds} seconds") + } +} + +pub fn duration_to_minute_seconds_miliseconds_string(duration: Duration) -> String { + let elapsed_minutes = duration.as_secs() / 60; + let elapsed_seconds = duration.as_secs() % 60; + let elapsed_millis = duration.subsec_millis(); + if elapsed_minutes > 0 { + format!("{elapsed_minutes} minutes {elapsed_seconds} seconds {elapsed_millis} milliseconds") + } else if elapsed_seconds > 0 { + format!("{elapsed_seconds} seconds {elapsed_millis} milliseconds") + } else { + format!("{elapsed_millis} milliseconds") + } +} From 7729acba06ee7a25659e17cce11783cc0d1c6e09 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 31 Jul 2024 16:05:03 +0200 Subject: [PATCH 040/115] chore(cli): [#2014] explicitly show the uploaded file's address in the console output --- sn_cli/src/files/files_uploader.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sn_cli/src/files/files_uploader.rs b/sn_cli/src/files/files_uploader.rs index 59c332947c..4504dd485f 100644 --- a/sn_cli/src/files/files_uploader.rs +++ b/sn_cli/src/files/files_uploader.rs @@ -462,10 +462,10 @@ impl StdOutPrinter { for (_, file_name, addr) in completed_files { let hex_addr = addr.to_hex(); if let Some(file_name) = file_name.to_str() { - println!("\"{file_name}\" {hex_addr}"); + println!("Uploaded \"{file_name}\" to address {hex_addr}"); info!("Uploaded {file_name} to {hex_addr}"); } else { - println!("\"{file_name:?}\" {hex_addr}"); + println!("Uploaded \"{file_name:?}\" to address {hex_addr}"); info!("Uploaded {file_name:?} to {hex_addr}"); } } From 78bf4fae934467ac8a263f8f5914ca23852d04d2 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 31 Jul 2024 22:07:11 +0800 Subject: [PATCH 041/115] fix(node): check whether already received an incoming payment --- sn_node/src/put_validation.rs | 7 ++++++- sn_transfers/src/wallet/hot_wallet.rs | 7 +++++++ sn_transfers/src/wallet/wallet_file.rs | 11 ++++++----- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git 
a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 602312f443..7c4966131f 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -545,6 +545,11 @@ impl Node { // check for cash notes that we have already spent // this can happen in cases where the client retries a failed PUT after we have already used the cash note cash_notes.retain(|cash_note| { + let already_present = wallet.cash_note_presents(&cash_note.unique_pubkey()); + if already_present { + return !already_present; + } + let spend_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); let already_spent = matches!(wallet.get_confirmed_spend(spend_addr), Ok(Some(_spend))); if already_spent { warn!( "Double spend {} detected for record payment {pretty_key}", cash_note.unique_pubkey() ); @@ -557,7 +562,7 @@ impl Node { !already_spent }); if cash_notes.is_empty() { - info!("All incoming cash notes were already spent, no need to further process"); + info!("All incoming cash notes were already received, no need to further process"); return Err(Error::ReusedPayment); } diff --git a/sn_transfers/src/wallet/hot_wallet.rs b/sn_transfers/src/wallet/hot_wallet.rs index 79f94e7b1c..9438dee146 100644 --- a/sn_transfers/src/wallet/hot_wallet.rs +++ b/sn_transfers/src/wallet/hot_wallet.rs @@ -359,6 +359,13 @@ impl HotWallet { .collect() } + /// Checks whether the specified cash_note is already present + pub fn cash_note_presents(&mut self, id: &UniquePubkey) -> bool { + self.watchonly_wallet + .available_cash_notes() + .contains_key(id) + } + /// Returns all available cash_notes and an exclusive access to the wallet so no concurrent processes can /// get available cash_notes while we're modifying the wallet /// once the updated wallet is stored to disk it is safe to drop the WalletExclusiveAccess diff --git a/sn_transfers/src/wallet/wallet_file.rs b/sn_transfers/src/wallet/wallet_file.rs index 2ecc5fd482..cef85aab00 100644 --- a/sn_transfers/src/wallet/wallet_file.rs +++ b/sn_transfers/src/wallet/wallet_file.rs @@ -124,12 +124,13 @@ where { // The create cash_notes dir within the wallet dir.
let created_cash_notes_path = wallet_dir.join(CASHNOTES_DIR_NAME); - for cash_note in created_cash_notes { - let unique_pubkey_name = - *SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()).xorname(); - let unique_pubkey_file_name = format!("{}.cash_note", hex::encode(unique_pubkey_name)); + fs::create_dir_all(&created_cash_notes_path)?; - fs::create_dir_all(&created_cash_notes_path)?; + for cash_note in created_cash_notes { + let unique_pubkey_file_name = format!( + "{}.cash_note", + SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()).to_hex() + ); let cash_note_file_path = created_cash_notes_path.join(unique_pubkey_file_name); debug!("Writing cash_note file to: {cash_note_file_path:?}"); From 6d184b1156e80b10fdaea0fc5046517ba192ed85 Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 1 Aug 2024 22:26:46 +0800 Subject: [PATCH 042/115] chore(node): check confirmed_spend existence --- sn_node/src/put_validation.rs | 24 ++++++++++++++++-------- sn_transfers/src/wallet/wallet_file.rs | 2 +- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 7c4966131f..5948d69c6b 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -551,15 +551,23 @@ impl Node { } let spend_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - let already_spent = matches!(wallet.get_confirmed_spend(spend_addr), Ok(Some(_spend))); - if already_spent { - warn!( - "Double spend {} detected for record payment {pretty_key}", - cash_note.unique_pubkey() - ); + match wallet.get_confirmed_spend(spend_addr) { + Ok(None) => true, + Ok(Some(_spend)) => { + warn!( + "Burnt spend {} detected for record payment {pretty_key}", + cash_note.unique_pubkey() + ); + false + } + Err(err) => { + error!( + "When checking confirmed_spend {}, encountered error {err:?}", + cash_note.unique_pubkey() + ); + true + } } - // retain the `CashNote` if it's not already spent - !already_spent }); if cash_notes.is_empty() { info!("All incoming cash notes were already received, no need to further process"); return Err(Error::ReusedPayment); } diff --git a/sn_transfers/src/wallet/wallet_file.rs b/sn_transfers/src/wallet/wallet_file.rs index cef85aab00..e0d71a02ff 100644 --- a/sn_transfers/src/wallet/wallet_file.rs +++ b/sn_transfers/src/wallet/wallet_file.rs @@ -88,7 +88,7 @@ pub(super) fn get_confirmed_spend( let spend_hex_name = spend_addr.to_hex(); let spend_file_path = spends_dir.join(spend_hex_name); debug!("Try to getting a confirmed_spend instance from: {spend_file_path:?}"); - if !spend_file_path.is_file() { + if !spend_file_path.exists() { return Ok(None); } From 05f3feb2f68eac7d7495c6121877e6031f672f93 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 5 Aug 2024 13:03:58 +0530 Subject: [PATCH 043/115] chore(test): add more logs to the verify routing table test --- sn_node/tests/verify_routing_table.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs index 5451582c9a..da19270b69 100644 --- a/sn_node/tests/verify_routing_table.rs +++ b/sn_node/tests/verify_routing_table.rs @@ -22,7 +22,7 @@ use std::{ time::Duration, }; use tonic::Request; -use tracing::{error, info}; +use tracing::{error, info, trace}; /// Sleep for sometime for the nodes for discover each other before verification /// Also can be set through the env variable of the same name.
@@ -47,6 +47,7 @@ async fn verify_routing_table() -> Result<()> { let node_rpc_address = get_all_rpc_addresses(false)?; let all_peers = get_all_peer_ids(&node_rpc_address).await?; + trace!("All peers: {all_peers:?}"); let mut all_failed_list = BTreeMap::new(); for (node_index, rpc_address) in node_rpc_address.iter().enumerate() { @@ -71,6 +72,7 @@ async fn verify_routing_table() -> Result<()> { let current_peer = all_peers[node_index]; let current_peer_key = KBucketKey::from(current_peer); + trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}"); let mut failed_list = Vec::new(); for peer in all_peers.iter() { @@ -82,6 +84,7 @@ async fn verify_routing_table() -> Result<()> { match k_buckets.get(&ilog2_distance) { Some(bucket) => { if bucket.contains(peer) { + println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); continue; } else if bucket.len() == K_VALUE.get() { println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full"); From 197c58688b5da013b811a827c411a62032507904 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 5 Aug 2024 13:06:21 +0530 Subject: [PATCH 044/115] fix(ci): clean out target directory to save space --- .github/workflows/merge.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 7f9f79a5ab..22e72a280a 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -77,6 +77,9 @@ jobs: shell: bash run: if [[ ! $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[].features.default[]? | select(. == "local-discovery")') ]]; then echo "local-discovery is not a default feature in any package."; else echo "local-discovery is a default feature in at least one package." && exit 1; fi + - name: Clean out the target directory + run: cargo clean + # In a cargo workspace, feature unification can occur, allowing a crate to be built successfully even if it # doesn't explicitly specify a feature it uses, provided another crate in the workspace enables that feature. # To detect such cases, we must build each crate using `--package` flag, building all packages at once does not work. 
From 82a2d6bdb0cba8c5b51ec41df60b3fafbdfa5f2e Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 5 Aug 2024 13:52:36 +0530 Subject: [PATCH 045/115] feat: strip debug info from dev profile --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 49005c0be8..0bc1ef54af 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,9 @@ clone_on_ref_ptr = "warn" debug = 0 strip = "debuginfo" +[profile.dev] +debug = 0 +strip = "debuginfo" [workspace.metadata.release] pre-release-commit-message = "chore(release): release commit, tags, deps and changelog updates" From bd05f53dcd5c43648b9884229adf58b243dc3170 Mon Sep 17 00:00:00 2001 From: grumbach Date: Mon, 5 Aug 2024 14:11:36 +0200 Subject: [PATCH 046/115] docs: update readme about transfers --- README.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 3ca059fc27..022ad9c44d 100644 --- a/README.md +++ b/README.md @@ -210,22 +210,27 @@ cargo run --bin safe --features local-discovery -- folders download Date: Thu, 1 Aug 2024 22:18:21 +0530 Subject: [PATCH 047/115] feat(metrics): track the bad node count --- sn_node/src/log_markers.rs | 7 +++++-- sn_node/src/metrics.rs | 13 +++++++++++++ sn_node/src/node.rs | 6 ++++-- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs index 61bec97fec..b39b8a2f41 100644 --- a/sn_node/src/log_markers.rs +++ b/sn_node/src/log_markers.rs @@ -20,10 +20,10 @@ pub enum Marker<'a> { NodeConnectedToNetwork, /// Peer was added to the routing table - PeerAddedToRoutingTable(PeerId), + PeerAddedToRoutingTable(&'a PeerId), /// Peer was removed from the routing table - PeerRemovedFromRoutingTable(PeerId), + PeerRemovedFromRoutingTable(&'a PeerId), /// The number of peers in the routing table PeersInRoutingTable(usize), @@ -51,6 +51,9 @@ pub enum Marker<'a> { /// Valid spend stored ValidSpendPutFromClient(&'a PrettyPrintRecordKey<'a>), + /// The peer has been considered as bad + PeerConsideredAsBad(&'a PeerId), + /// Record rejected RecordRejected(&'a PrettyPrintRecordKey<'a>, &'a Error), diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index d7c3cbaa17..17ae699e12 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -32,6 +32,7 @@ pub(crate) struct NodeMetrics { // routing table peer_added_to_routing_table: Counter, peer_removed_from_routing_table: Counter, + bad_node_count: Counter, // wallet pub(crate) current_reward_wallet_balance: Gauge, @@ -100,6 +101,13 @@ impl NodeMetrics { peer_removed_from_routing_table.clone(), ); + let bad_node_count = Counter::default(); + sub_registry.register( + "bad_node_count", + "Number of bad nodes that have been detected and added to the blocklist", + bad_node_count.clone(), + ); + let current_reward_wallet_balance = Gauge::default(); sub_registry.register( "current_reward_wallet_balance", @@ -128,6 +136,7 @@ impl NodeMetrics { replication_keys_to_fetch, peer_added_to_routing_table, peer_removed_from_routing_table, + bad_node_count, current_reward_wallet_balance, total_forwarded_rewards, started_instant: Instant::now(), @@ -185,6 +194,10 @@ impl NodeMetrics { let _ = self.peer_removed_from_routing_table.inc(); } + Marker::PeerConsideredAsBad(_) => { + let _ = self.bad_node_count.inc(); + } + _ => {} } } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index b1d5e01936..64ccb407ea 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -440,7 +440,7 @@ impl Node { } 
self.record_metrics(Marker::PeersInRoutingTable(connected_peers)); - self.record_metrics(Marker::PeerAddedToRoutingTable(peer_id)); + self.record_metrics(Marker::PeerAddedToRoutingTable(&peer_id)); // try replication here let network = self.network().clone(); @@ -452,7 +452,7 @@ impl Node { NetworkEvent::PeerRemoved(peer_id, connected_peers) => { event_header = "PeerRemoved"; self.record_metrics(Marker::PeersInRoutingTable(connected_peers)); - self.record_metrics(Marker::PeerRemovedFromRoutingTable(peer_id)); + self.record_metrics(Marker::PeerRemovedFromRoutingTable(&peer_id)); let network = self.network().clone(); self.record_metrics(Marker::IntervalReplicationTriggered); @@ -469,6 +469,8 @@ impl Node { bad_behaviour, } => { event_header = "PeerConsideredAsBad"; + self.record_metrics(Marker::PeerConsideredAsBad(&bad_peer)); + let request = Request::Cmd(Cmd::PeerConsideredAsBad { detected_by: NetworkAddress::from_peer(detected_by), bad_peer: NetworkAddress::from_peer(bad_peer), From 4026edda6df34de0a50b671be23f813938162e4c Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 2 Aug 2024 00:22:42 +0530 Subject: [PATCH 048/115] feat(metrics): track the number of times we've been shunned --- sn_networking/src/event/mod.rs | 7 +++++- sn_networking/src/event/request_response.rs | 3 +++ sn_node/src/log_markers.rs | 5 +++- sn_node/src/metrics.rs | 27 +++++++++++++++------ sn_node/src/node.rs | 4 +++ 5 files changed, 37 insertions(+), 9 deletions(-) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 85e0d65400..e00ffc5e65 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -128,12 +128,14 @@ pub enum NetworkEvent { our_protocol: String, their_protocol: String, }, - /// The peer is now considered as a bad node, due to the detected bad behaviour + /// A peer from our RT is considered as bad due to the included behaviour PeerConsideredAsBad { detected_by: PeerId, bad_peer: PeerId, bad_behaviour: String, }, + /// We have been flagged as a bad node by a peer. + FlaggedAsBadNode { flagged_by: NetworkAddress }, /// The records bearing these keys are to be fetched from the holder or the network KeysToFetchForReplication(Vec<(PeerId, RecordKey)>), /// Started listening on a new address @@ -194,6 +196,9 @@ impl Debug for NetworkEvent { "NetworkEvent::PeerConsideredAsBad({bad_peer:?}, {bad_behaviour:?})" ) } + NetworkEvent::FlaggedAsBadNode { flagged_by } => { + write!(f, "NetworkEvent::FlaggedAsBadNode({flagged_by:?})") + } NetworkEvent::KeysToFetchForReplication(list) => { let keys_len = list.len(); write!(f, "NetworkEvent::KeysForReplication({keys_len:?})") diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 6d069e22e5..5acc581016 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -90,6 +90,9 @@ impl SwarmDriver { if bad_peer == NetworkAddress::from_peer(self.self_peer_id) { warn!("Peer {detected_by:?} consider us as BAD, due to {bad_behaviour:?}."); + self.send_event(NetworkEvent::FlaggedAsBadNode { + flagged_by: detected_by, + }) // TODO: shall we terminate self after received such notifications // from the majority close_group nodes around us? 
} else { diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs index b39b8a2f41..39b078463c 100644 --- a/sn_node/src/log_markers.rs +++ b/sn_node/src/log_markers.rs @@ -8,7 +8,7 @@ use crate::Error; use libp2p::{kad::RecordKey, PeerId}; -use sn_protocol::PrettyPrintRecordKey; +use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; use strum::Display; /// Public Markers for generating log output, @@ -54,6 +54,9 @@ pub enum Marker<'a> { /// The peer has been considered as bad PeerConsideredAsBad(&'a PeerId), + /// We have been flagged as a bad node by a peer. + FlaggedAsBadNode(&'a NetworkAddress), + /// Record rejected RecordRejected(&'a PrettyPrintRecordKey<'a>, &'a Error), diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 17ae699e12..0cf9b720ca 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -32,7 +32,8 @@ pub(crate) struct NodeMetrics { // routing table peer_added_to_routing_table: Counter, peer_removed_from_routing_table: Counter, - bad_node_count: Counter, + bad_peers_count: Counter, + shunned_count: Counter, // wallet pub(crate) current_reward_wallet_balance: Gauge, @@ -101,11 +102,18 @@ impl NodeMetrics { peer_removed_from_routing_table.clone(), ); - let bad_node_count = Counter::default(); + let shunned_count = Counter::default(); sub_registry.register( - "bad_node_count", - "Number of bad nodes that have been detected and added to the blocklist", - bad_node_count.clone(), + "shunned_count", + "Number of peers that have shunned our node", + shunned_count.clone(), + ); + + let bad_peers_count = Counter::default(); + sub_registry.register( + "bad_peers_count", + "Number of bad peers that have been detected by us and been added to the blocklist", + bad_peers_count.clone(), ); let current_reward_wallet_balance = Gauge::default(); @@ -136,7 +144,8 @@ impl NodeMetrics { replication_keys_to_fetch, peer_added_to_routing_table, peer_removed_from_routing_table, - bad_node_count, + bad_peers_count, + shunned_count, current_reward_wallet_balance, total_forwarded_rewards, started_instant: Instant::now(), @@ -195,7 +204,11 @@ impl NodeMetrics { } Marker::PeerConsideredAsBad(_) => { - let _ = self.bad_node_count.inc(); + let _ = self.bad_peers_count.inc(); + } + + Marker::FlaggedAsBadNode(_) => { + let _ = self.shunned_count.inc(); } _ => {} diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 64ccb407ea..e1fb9d10d3 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -482,6 +482,10 @@ impl Node { network.send_req_ignore_reply(request, bad_peer); }); } + NetworkEvent::FlaggedAsBadNode { flagged_by } => { + event_header = "FlaggedAsBadNode"; + self.record_metrics(Marker::FlaggedAsBadNode(&flagged_by)); + } NetworkEvent::NewListenAddr(_) => { event_header = "NewListenAddr"; if !cfg!(feature = "local-discovery") { From 6dc63b4da4d69465800c4b486600d14024d6dbe6 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 5 Aug 2024 12:34:56 +0530 Subject: [PATCH 049/115] chore(node): use PeerId for the reporting node --- sn_networking/src/event/mod.rs | 2 +- sn_networking/src/event/request_response.rs | 9 ++++++++- sn_node/src/log_markers.rs | 4 ++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index e00ffc5e65..0f6dbe7df1 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -135,7 +135,7 @@ pub enum NetworkEvent { bad_behaviour: String, }, /// We have been flagged as a bad node by a peer. 
- FlaggedAsBadNode { flagged_by: NetworkAddress }, + FlaggedAsBadNode { flagged_by: PeerId }, /// The records bearing these keys are to be fetched from the holder or the network KeysToFetchForReplication(Vec<(PeerId, RecordKey)>), /// Started listening on a new address diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 5acc581016..81beae7764 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -88,7 +88,14 @@ impl SwarmDriver { channel: MsgResponder::FromPeer(channel), }); - if bad_peer == NetworkAddress::from_peer(self.self_peer_id) { + let (Some(detected_by), Some(bad_peer)) = + (detected_by.as_peer_id(), bad_peer.as_peer_id()) + else { + error!("Could not get PeerId from detected_by or bad_peer NetworkAddress {detected_by:?}, {bad_peer:?}"); + return Ok(()); + }; + + if bad_peer == self.self_peer_id { warn!("Peer {detected_by:?} consider us as BAD, due to {bad_behaviour:?}."); self.send_event(NetworkEvent::FlaggedAsBadNode { flagged_by: detected_by, diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs index 39b078463c..76afe3040d 100644 --- a/sn_node/src/log_markers.rs +++ b/sn_node/src/log_markers.rs @@ -8,7 +8,7 @@ use crate::Error; use libp2p::{kad::RecordKey, PeerId}; -use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; +use sn_protocol::PrettyPrintRecordKey; use strum::Display; /// Public Markers for generating log output, @@ -55,7 +55,7 @@ pub enum Marker<'a> { PeerConsideredAsBad(&'a PeerId), /// We have been flagged as a bad node by a peer. - FlaggedAsBadNode(&'a NetworkAddress), + FlaggedAsBadNode(&'a PeerId), /// Record rejected RecordRejected(&'a PrettyPrintRecordKey<'a>, &'a Error), From 492f50de21f6d30acd7dec8b0b50f80d0b5a1e74 Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 5 Aug 2024 16:30:11 +0800 Subject: [PATCH 050/115] fix(node): terminate make_payment process during unrecoverable error --- sn_client/src/uploader/upload.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/sn_client/src/uploader/upload.rs b/sn_client/src/uploader/upload.rs index c3f88d2c80..0fdc4280de 100644 --- a/sn_client/src/uploader/upload.rs +++ b/sn_client/src/uploader/upload.rs @@ -864,6 +864,8 @@ impl InnerUploader { .resend_pending_transaction_blocking_loop() .await; + let mut terminate_process = false; + let result = match wallet_client.pay_for_records(&cost_map, verify_store).await { Ok((storage_cost, royalty_fees)) => { @@ -897,10 +899,13 @@ impl InnerUploader { WalletError::Transfer(TransferError::NotEnoughBalance( available, required, - )) => TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: Some((available, required)), - }, + )) => { + terminate_process = true; + TaskResult::MakePaymentsErr { + failed_xornames, + insufficient_balance: Some((available, required)), + } + } _ => TaskResult::MakePaymentsErr { failed_xornames, insufficient_balance: None, @@ -914,6 +919,16 @@ impl InnerUploader { }); cost_map = BTreeMap::new(); + + if terminate_process { + // The error will trigger the entire upload process to be terminated. + // Hence here we shall terminate the inner loop first, + // to avoid the wallet going further and potentially getting corrupted. + warn!( + "Terminating make payment processing loop due to unrecoverable error."
+ ); + break; + } } } debug!("Make payment processing loop terminated."); From 816ef88c14194e10e425983cadd65c35ec4c6381 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 24 Jul 2024 14:42:35 +0200 Subject: [PATCH 051/115] chore(node): [#1999] panic on upnp event `GatewayNotFound` --- sn_networking/src/event/mod.rs | 1 + sn_networking/src/event/swarm.rs | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 85e0d65400..22ba714993 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -157,6 +157,7 @@ pub enum NetworkEvent { #[derive(Debug, Clone)] pub enum TerminateNodeReason { HardDiskWriteError, + UpnpGatewayNotFound, } // Manually implement Debug as `#[debug(with = "unverified_record_fmt")]` not working as expected. diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 9db2195ece..78496bbae4 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::event::TerminateNodeReason; use crate::{ cmd::LocalSwarmCmd, event::NodeEvent, @@ -80,6 +81,12 @@ impl SwarmDriver { } event_string = "upnp_event"; info!(?upnp_event, "UPnP event"); + if let libp2p::upnp::Event::GatewayNotFound = upnp_event { + warn!("UPnP is not enabled/supported on the gateway. Please rerun without the `--upnp` flag"); + self.send_event(NetworkEvent::TerminateNode { + reason: TerminateNodeReason::UpnpGatewayNotFound, + }); + } } SwarmEvent::Behaviour(NodeEvent::RelayServer(event)) => { From 176f63ca27d5ec581cc798eb62d8fed32ada0387 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 30 Jul 2024 20:03:34 +0530 Subject: [PATCH 052/115] fix(peers): timeout if we cannot get the network contacts --- sn_peers_acquisition/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs index 0b8453d34f..98a1ddc2ed 100644 --- a/sn_peers_acquisition/src/lib.rs +++ b/sn_peers_acquisition/src/lib.rs @@ -14,8 +14,10 @@ use clap::Args; use lazy_static::lazy_static; use libp2p::{multiaddr::Protocol, Multiaddr}; use rand::{seq::SliceRandom, thread_rng}; +use reqwest::Client; #[cfg(feature = "network-contacts")] use sn_networking::version::get_network_version; +use std::time::Duration; use tracing::*; use url::Url; @@ -30,7 +32,7 @@ lazy_static! { } // The maximum number of retries to be performed while trying to get peers from a URL. -const MAX_RETRIES_ON_GET_PEERS_FROM_URL: usize = 3; +const MAX_RETRIES_ON_GET_PEERS_FROM_URL: usize = 7; /// The name of the environment variable that can be used to pass peers to the node. 
pub const SAFE_PEERS_ENV: &str = "SAFE_PEERS"; @@ -190,8 +192,9 @@ pub fn parse_peer_addr(addr: &str) -> Result { pub async fn get_peers_from_url(url: Url) -> Result> { let mut retries = 0; + let request_client = Client::builder().timeout(Duration::from_secs(10)).build()?; loop { - let response = reqwest::get(url.clone()).await; + let response = request_client.get(url.clone()).send().await; match response { Ok(response) => { @@ -227,7 +230,8 @@ pub async fn get_peers_from_url(url: Url) -> Result> { } } } - Err(_) => { + Err(err) => { + error!("Failed to get peers from URL {url}: {err:?}"); retries += 1; if retries >= MAX_RETRIES_ON_GET_PEERS_FROM_URL { return Err(Error::FailedToObtainPeersFromUrl( From 8222681239ff6557e6dde469621f9434ed444322 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 30 Jul 2024 20:04:52 +0530 Subject: [PATCH 053/115] fix(manager): set the node startup interval based on the time to connect to node RPC server --- sn_node_manager/src/cmd/node.rs | 24 +++++++++++++++++---- sn_node_manager/src/lib.rs | 20 +++++++++++------- sn_node_manager/src/local.rs | 1 + sn_service_management/src/auditor.rs | 10 ++++++--- sn_service_management/src/daemon.rs | 10 ++++++--- sn_service_management/src/faucet.rs | 10 ++++++--- sn_service_management/src/lib.rs | 4 +++- sn_service_management/src/node.rs | 11 ++++++---- sn_service_management/src/rpc.rs | 31 ++++++++++++++++++++++++++++ 9 files changed, 95 insertions(+), 26 deletions(-) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index ace08ef026..14d2181a9b 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -303,7 +303,7 @@ pub async fn start( print_banner("Start Safenode Services"); } info!( - "Starting safenode services with interval={interval} for: {peer_ids:?}, {service_names:?}" + "Starting safenode services with dynamic interval (starting at {interval}) for: {peer_ids:?}, {service_names:?}" ); let mut node_registry = NodeRegistry::load(&config::get_node_registry_path()?)?; @@ -326,6 +326,7 @@ pub async fn start( } let mut failed_services = Vec::new(); + let mut interval = interval; for &index in &service_indices { let node = &mut node_registry.nodes[index]; let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); @@ -337,12 +338,27 @@ pub async fn start( // continue without applying the delay. The reason for not doing so is because when // `start` is called below, the user will get a message to say the service was already // started, which I think is useful behaviour to retain. 
- debug!("Sleeping for {} milliseconds", interval); + debug!( + "Sleeping for {interval} milliseconds ({}sec)", + interval / 1000 + ); std::thread::sleep(std::time::Duration::from_millis(interval)); } match service_manager.start().await { - Ok(()) => { - debug!("Started service {}", node.service_name); + Ok(start_duration) => { + debug!( + "Started service {} in {start_duration:?}", + node.service_name + ); + if let Some(duration) = start_duration { + if duration.as_millis() as u64 > interval { + warn!( + "Service {} took longer to start than the interval, increasing interval to {} milliseconds", + node.service_name, duration.as_millis() + ); + interval = duration.as_millis() as u64; + } + } node_registry.save()?; } Err(err) => { diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 9b91db0408..9ed42963f4 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -45,6 +45,7 @@ use sn_service_management::{ UpgradeResult, }; use sn_transfers::HotWallet; +use std::time::Duration; use tracing::debug; pub const DAEMON_DEFAULT_PORT: u16 = 12500; @@ -71,7 +72,7 @@ impl ServiceManager { } } - pub async fn start(&mut self) -> Result<()> { + pub async fn start(&mut self) -> Result> { info!("Starting the {} service", self.service.name()); if ServiceStatus::Running == self.service.status() { // The last time we checked the service was running, but it doesn't mean it's actually @@ -88,7 +89,7 @@ impl ServiceManager { if self.verbosity != VerbosityLevel::Minimal { println!("The {} service is already running", self.service.name()); } - return Ok(()); + return Ok(None); } } @@ -106,7 +107,7 @@ impl ServiceManager { // // There might be many different `safenode` processes running, but since each service has // its own isolated binary, we use the binary path to uniquely identify it. 
- match self + let start_duration = match self .service_control .get_process_pid(&self.service.bin_path()) { @@ -116,11 +117,13 @@ impl ServiceManager { self.service.name(), pid ); - self.service.on_start(Some(pid), true).await?; + let start_duration = self.service.on_start(Some(pid), true).await?; + info!( - "Service {} has been started successfully", + "Service {} has been started successfully in {start_duration:?}", self.service.name() ); + start_duration } Err(sn_service_management::error::Error::ServiceProcessNotFound(_)) => { error!("The '{}' service has failed to start because ServiceProcessNotFound when fetching PID", self.service.name()); @@ -130,7 +133,7 @@ impl ServiceManager { error!("Failed to start service, because PID could not be obtained: {err}"); return Err(err.into()); } - } + }; if self.verbosity != VerbosityLevel::Minimal { println!("{} Started {} service", "✓".green(), self.service.name()); @@ -153,7 +156,7 @@ impl ServiceManager { self.service.log_dir_path().to_string_lossy() ); } - Ok(()) + Ok(start_duration) } pub async fn stop(&mut self) -> Result<()> { @@ -337,7 +340,7 @@ impl ServiceManager { if options.start_service { match self.start().await { - Ok(()) => {} + Ok(_) => {} Err(err) => { self.service .set_version(&options.target_version.to_string()); @@ -654,6 +657,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> ServiceControlResult<()>; async fn node_stop(&self, delay_millis: u64) -> ServiceControlResult<()>; async fn node_update(&self, delay_millis: u64) -> ServiceControlResult<()>; + async fn try_connect(&self, max_attempts: u8) -> ServiceControlResult; async fn update_log_level(&self, log_levels: String) -> ServiceControlResult<()>; } } diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 4fc4fbeb97..c5bdabf922 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -540,6 +540,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> RpcResult<()>; async fn node_stop(&self, delay_millis: u64) -> RpcResult<()>; async fn node_update(&self, delay_millis: u64) -> RpcResult<()>; + async fn try_connect(&self, max_attempts: u8) -> RpcResult; async fn update_log_level(&self, log_levels: String) -> RpcResult<()>; } } diff --git a/sn_service_management/src/auditor.rs b/sn_service_management/src/auditor.rs index 66f00a0eb5..eb3da2b5e2 100644 --- a/sn_service_management/src/auditor.rs +++ b/sn_service_management/src/auditor.rs @@ -12,7 +12,7 @@ use crate::{ use async_trait::async_trait; use serde::{Deserialize, Serialize}; use service_manager::ServiceInstallCtx; -use std::{ffi::OsString, path::PathBuf}; +use std::{ffi::OsString, path::PathBuf, time::Duration}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct AuditorServiceData { @@ -108,10 +108,14 @@ impl<'a> ServiceStateActions for AuditorService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start(&mut self, pid: Option, _full_refresh: bool) -> Result<()> { + async fn on_start( + &mut self, + pid: Option, + _full_refresh: bool, + ) -> Result> { self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(()) + Ok(None) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/daemon.rs b/sn_service_management/src/daemon.rs index c617515fe5..a7d6e02184 100644 --- a/sn_service_management/src/daemon.rs +++ b/sn_service_management/src/daemon.rs @@ -14,7 +14,7 @@ use crate::{ use async_trait::async_trait; use 
serde::{Deserialize, Serialize}; use service_manager::ServiceInstallCtx; -use std::{ffi::OsString, net::SocketAddr, path::PathBuf}; +use std::{ffi::OsString, net::SocketAddr, path::PathBuf, time::Duration}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DaemonServiceData { @@ -101,10 +101,14 @@ impl<'a> ServiceStateActions for DaemonService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start(&mut self, pid: Option, _full_refresh: bool) -> Result<()> { + async fn on_start( + &mut self, + pid: Option, + _full_refresh: bool, + ) -> Result> { self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(()) + Ok(None) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/faucet.rs b/sn_service_management/src/faucet.rs index f1c3d8f952..3ebf26e16f 100644 --- a/sn_service_management/src/faucet.rs +++ b/sn_service_management/src/faucet.rs @@ -12,7 +12,7 @@ use crate::{ use async_trait::async_trait; use serde::{Deserialize, Serialize}; use service_manager::ServiceInstallCtx; -use std::{ffi::OsString, path::PathBuf}; +use std::{ffi::OsString, path::PathBuf, time::Duration}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct FaucetServiceData { @@ -105,10 +105,14 @@ impl<'a> ServiceStateActions for FaucetService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start(&mut self, pid: Option, _full_refresh: bool) -> Result<()> { + async fn on_start( + &mut self, + pid: Option, + _full_refresh: bool, + ) -> Result> { self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(()) + Ok(None) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/lib.rs b/sn_service_management/src/lib.rs index db32f81c34..dd782da842 100644 --- a/sn_service_management/src/lib.rs +++ b/sn_service_management/src/lib.rs @@ -30,6 +30,7 @@ use service_manager::ServiceInstallCtx; use std::{ io::{Read, Write}, path::{Path, PathBuf}, + time::Duration, }; pub use daemon::{DaemonService, DaemonServiceData}; @@ -86,7 +87,8 @@ pub trait ServiceStateActions { fn name(&self) -> String; fn pid(&self) -> Option; fn on_remove(&mut self); - async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result<()>; + /// Optionally returns the duration it took to start the service + async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result>; async fn on_stop(&mut self) -> Result<()>; fn set_version(&mut self, version: &str); fn status(&self) -> ServiceStatus; diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index b1eada4486..18fe3d88c1 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -14,7 +14,7 @@ use service_manager::{ServiceInstallCtx, ServiceLabel}; use sn_logging::LogFormat; use sn_protocol::get_port_from_multiaddr; use sn_transfers::NanoTokens; -use std::{ffi::OsString, net::SocketAddr, path::PathBuf, str::FromStr}; +use std::{ffi::OsString, net::SocketAddr, path::PathBuf, str::FromStr, time::Duration}; pub struct NodeService<'a> { pub service_data: &'a mut NodeServiceData, @@ -133,12 +133,13 @@ impl<'a> ServiceStateActions for NodeService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result<()> { - let (connected_peers, pid, peer_id) = if full_refresh { + async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result> { + let (start_duration, connected_peers, pid, peer_id) = if 
full_refresh { debug!( "Performing full refresh for {}", self.service_data.service_name ); + let connection_duration = self.rpc_actions.try_connect(120).await?; let node_info = self .rpc_actions .node_info() @@ -175,6 +176,7 @@ impl<'a> ServiceStateActions for NodeService<'a> { } ( + Some(connection_duration), Some(network_info.connected_peers), pid, Some(node_info.peer_id), @@ -186,6 +188,7 @@ impl<'a> ServiceStateActions for NodeService<'a> { ); debug!("Previously assigned data will be used"); ( + None, self.service_data.connected_peers.clone(), pid, self.service_data.peer_id, @@ -196,7 +199,7 @@ impl<'a> ServiceStateActions for NodeService<'a> { self.service_data.peer_id = peer_id; self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(()) + Ok(start_duration) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/rpc.rs b/sn_service_management/src/rpc.rs index d1a15f08f3..41534eb91b 100644 --- a/sn_service_management/src/rpc.rs +++ b/sn_service_management/src/rpc.rs @@ -48,6 +48,8 @@ pub trait RpcActions: Sync { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> Result<()>; async fn node_stop(&self, delay_millis: u64) -> Result<()>; async fn node_update(&self, delay_millis: u64) -> Result<()>; + /// Try to connect to the RPC endpoint and return the Duration it took to connect. + async fn try_connect(&self, max_attempts: u8) -> Result; async fn update_log_level(&self, log_levels: String) -> Result<()>; } @@ -222,6 +224,35 @@ impl RpcActions for RpcClient { Ok(()) } + async fn try_connect(&self, max_attempts: u8) -> Result { + let mut attempts = 0; + loop { + debug!( + "Attempting connection to node RPC endpoint at {}...", + self.endpoint + ); + match SafeNodeClient::connect(self.endpoint.clone()).await { + Ok(_) => { + debug!("Connection successful"); + break Ok(Duration::from_secs( + attempts as u64 * self.retry_delay.as_secs(), + )); + } + Err(_) => { + attempts += 1; + tokio::time::sleep(self.retry_delay).await; + if attempts >= max_attempts { + return Err(Error::RpcConnectionError(self.endpoint.clone())); + } + error!( + "Could not connect to RPC endpoint {:?}. 
Retrying {attempts}/{}", + self.endpoint, max_attempts + ); + } + } + } + } + async fn update_log_level(&self, log_levels: String) -> Result<()> { let mut client = self.connect_with_retry().await?; let _response = client From 603c1a6fdb2cf40c67e42289bd92af77a4eb7e1b Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 30 Jul 2024 20:21:46 +0530 Subject: [PATCH 054/115] feat(manager): implement a moving average of sleep intervals --- sn_node_manager/src/cmd/node.rs | 23 +++++++++++------------ sn_node_manager/src/lib.rs | 30 +++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 14d2181a9b..3820b49025 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -16,7 +16,8 @@ use crate::{ }, config::{self, is_running_as_root}, helpers::{download_and_extract_release, get_bin_version}, - print_banner, refresh_node_registry, status_report, ServiceManager, VerbosityLevel, + print_banner, refresh_node_registry, status_report, DynamicInterval, ServiceManager, + VerbosityLevel, }; use color_eyre::{eyre::eyre, Help, Result}; use colored::Colorize; @@ -326,7 +327,8 @@ pub async fn start( } let mut failed_services = Vec::new(); - let mut interval = interval; + let mut dyn_interval = DynamicInterval::new(); + dyn_interval.add_interval_ms(interval); for &index in &service_indices { let node = &mut node_registry.nodes[index]; let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); @@ -339,10 +341,13 @@ pub async fn start( // `start` is called below, the user will get a message to say the service was already // started, which I think is useful behaviour to retain. debug!( - "Sleeping for {interval} milliseconds ({}sec)", - interval / 1000 + "Sleeping for {} milliseconds ({}sec)", + dyn_interval.get_interval_ms(), + dyn_interval.get_interval_ms() / 1000 ); - std::thread::sleep(std::time::Duration::from_millis(interval)); + std::thread::sleep(std::time::Duration::from_millis( + dyn_interval.get_interval_ms(), + )); } match service_manager.start().await { Ok(start_duration) => { @@ -351,13 +356,7 @@ pub async fn start( node.service_name ); if let Some(duration) = start_duration { - if duration.as_millis() as u64 > interval { - warn!( - "Service {} took longer to start than the interval, increasing interval to {} milliseconds", - node.service_name, duration.as_millis() - ); - interval = duration.as_millis() as u64; - } + dyn_interval.add_interval_ms(duration.as_millis() as u64); } node_registry.save()?; } diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 9ed42963f4..7f50d350f6 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -45,7 +45,7 @@ use sn_service_management::{ UpgradeResult, }; use sn_transfers::HotWallet; -use std::time::Duration; +use std::{collections::VecDeque, time::Duration}; use tracing::debug; pub const DAEMON_DEFAULT_PORT: u16 = 12500; @@ -621,6 +621,34 @@ fn format_status_without_colour(status: &ServiceStatus) -> String { } } +/// Moving average of intervals. +/// This is used to determine the average sleep between starting node services. 
+struct DynamicInterval { + last_reported_intervals_ms: VecDeque, + window_size: u8, +} + +impl DynamicInterval { + pub fn new() -> Self { + DynamicInterval { + last_reported_intervals_ms: VecDeque::new(), + window_size: 3, + } + } + + pub fn add_interval_ms(&mut self, interval_ms: u64) { + self.last_reported_intervals_ms.push_back(interval_ms); + if self.last_reported_intervals_ms.len() > self.window_size as usize { + self.last_reported_intervals_ms.pop_front(); + } + } + + pub fn get_interval_ms(&self) -> u64 { + self.last_reported_intervals_ms.iter().sum::() + / self.last_reported_intervals_ms.len() as u64 + } +} + #[cfg(test)] mod tests { use super::*; From 8a4155d986306a78166c6ebb6b1a321144d0653f Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 30 Jul 2024 20:35:28 +0530 Subject: [PATCH 055/115] feat(manager): implement dynamic interval during node upgrade --- sn_node_manager/src/cmd/auditor.rs | 2 +- sn_node_manager/src/cmd/faucet.rs | 2 +- sn_node_manager/src/cmd/node.rs | 16 ++++++-- sn_node_manager/src/lib.rs | 63 ++++++++++++++++++------------ 4 files changed, 54 insertions(+), 29 deletions(-) diff --git a/sn_node_manager/src/cmd/auditor.rs b/sn_node_manager/src/cmd/auditor.rs index 081848083d..91a04071b5 100644 --- a/sn_node_manager/src/cmd/auditor.rs +++ b/sn_node_manager/src/cmd/auditor.rs @@ -210,7 +210,7 @@ pub async fn upgrade( ServiceManager::new(service, Box::new(ServiceController {}), verbosity); match service_manager.upgrade(options).await { - Ok(upgrade_result) => { + Ok((upgrade_result, _)) => { info!("Upgrade the auditor service successfully"); print_upgrade_summary(vec![("auditor".to_string(), upgrade_result)]); node_registry.save()?; diff --git a/sn_node_manager/src/cmd/faucet.rs b/sn_node_manager/src/cmd/faucet.rs index 6645d9b6f0..86869ade8c 100644 --- a/sn_node_manager/src/cmd/faucet.rs +++ b/sn_node_manager/src/cmd/faucet.rs @@ -206,7 +206,7 @@ pub async fn upgrade( ServiceManager::new(service, Box::new(ServiceController {}), verbosity); match service_manager.upgrade(options).await { - Ok(upgrade_result) => { + Ok((upgrade_result, _)) => { print_upgrade_summary(vec![("faucet".to_string(), upgrade_result)]); node_registry.save()?; Ok(()) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 3820b49025..7b1d736e16 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -508,6 +508,8 @@ pub async fn upgrade( trace!("service_indices len: {}", service_indices.len()); let mut upgrade_summary = Vec::new(); + let mut dyn_interval = DynamicInterval::new(); + dyn_interval.add_interval_ms(interval); for &index in &service_indices { let node = &mut node_registry.nodes[index]; let env_variables = if provided_env_variables.is_some() { @@ -532,13 +534,21 @@ pub async fn upgrade( ServiceManager::new(service, Box::new(ServiceController {}), verbosity); match service_manager.upgrade(options).await { - Ok(upgrade_result) => { + Ok((upgrade_result, start_duration)) => { + if let Some(start_duration) = start_duration { + dyn_interval.add_interval_ms(start_duration.as_millis() as u64); + } info!("Service: {service_name} has been upgraded, result: {upgrade_result:?}",); if upgrade_result != UpgradeResult::NotRequired { // It doesn't seem useful to apply the interval if there was no upgrade // required for the previous service. 
- debug!("Sleeping for {} milliseconds", interval); - std::thread::sleep(std::time::Duration::from_millis(interval)); + debug!( + "Sleeping for {} milliseconds", + dyn_interval.get_interval_ms() + ); + std::thread::sleep(std::time::Duration::from_millis( + dyn_interval.get_interval_ms(), + )); } upgrade_summary.push(( service_manager.service.service_data.service_name.clone(), diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 7f50d350f6..1c1f2b9b08 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -313,7 +313,10 @@ impl ServiceManager { Ok(()) } - pub async fn upgrade(&mut self, options: UpgradeOptions) -> Result { + pub async fn upgrade( + &mut self, + options: UpgradeOptions, + ) -> Result<(UpgradeResult, Option)> { let current_version = Version::parse(&self.service.version())?; if !options.force && (current_version == options.target_version @@ -323,7 +326,7 @@ impl ServiceManager { "The service {} is already at the latest version. No upgrade is required.", self.service.name() ); - return Ok(UpgradeResult::NotRequired); + return Ok((UpgradeResult::NotRequired, None)); } debug!("Stopping the service and copying the binary"); @@ -338,33 +341,45 @@ impl ServiceManager { self.service.is_user_mode(), )?; - if options.start_service { + let start_duration = if options.start_service { match self.start().await { - Ok(_) => {} + Ok(start_duration) => start_duration, Err(err) => { self.service .set_version(&options.target_version.to_string()); info!("The service has been upgraded but could not be started: {err}"); - return Ok(UpgradeResult::UpgradedButNotStarted( - current_version.to_string(), - options.target_version.to_string(), - err.to_string(), + return Ok(( + UpgradeResult::UpgradedButNotStarted( + current_version.to_string(), + options.target_version.to_string(), + err.to_string(), + ), + None, )); } } - } + } else { + None + }; self.service .set_version(&options.target_version.to_string()); - match options.force { - true => Ok(UpgradeResult::Forced( - current_version.to_string(), - options.target_version.to_string(), - )), - false => Ok(UpgradeResult::Upgraded( - current_version.to_string(), - options.target_version.to_string(), - )), + if options.force { + Ok(( + UpgradeResult::Forced( + current_version.to_string(), + options.target_version.to_string(), + ), + start_duration, + )) + } else { + Ok(( + UpgradeResult::Upgraded( + current_version.to_string(), + options.target_version.to_string(), + ), + start_duration, + )) } } } @@ -1605,7 +1620,7 @@ mod tests { VerbosityLevel::Normal, ); - let upgrade_result = service_manager + let (upgrade_result, _) = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -1692,7 +1707,7 @@ mod tests { VerbosityLevel::Normal, ); - let upgrade_result = service_manager + let (upgrade_result, _) = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -1822,7 +1837,7 @@ mod tests { VerbosityLevel::Normal, ); - let upgrade_result = service_manager + let (upgrade_result, _) = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -1965,7 +1980,7 @@ mod tests { VerbosityLevel::Normal, ); - let upgrade_result = service_manager + let (upgrade_result, _) = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -2103,7 +2118,7 @@ mod tests { VerbosityLevel::Normal, ); - let upgrade_result = service_manager + let (upgrade_result, _) = service_manager 
.upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -2242,7 +2257,7 @@ mod tests { VerbosityLevel::Normal, ); - let upgrade_result = service_manager + let (upgrade_result, _) = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), From 0b7496f19acdb8bb64989a7081c83b60a97901d7 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 30 Jul 2024 20:56:32 +0530 Subject: [PATCH 056/115] feat(manager): provide rpc connection timeout during start and upgrade --- node-launchpad/src/components/home.rs | 1 + sn_node_manager/src/bin/cli/main.rs | 24 ++++++++++++++++- sn_node_manager/src/cmd/node.rs | 37 ++++++++++++++++++++++----- sn_node_manager/src/lib.rs | 2 +- sn_node_manager/src/local.rs | 2 +- sn_service_management/src/node.rs | 15 ++++++++++- sn_service_management/src/rpc.rs | 13 ++++++---- 7 files changed, 78 insertions(+), 16 deletions(-) diff --git a/node-launchpad/src/components/home.rs b/node-launchpad/src/components/home.rs index 8b878da1a9..a87fadf072 100644 --- a/node-launchpad/src/components/home.rs +++ b/node-launchpad/src/components/home.rs @@ -660,6 +660,7 @@ fn maintain_n_running_nodes( if let Err(err) = sn_node_manager::cmd::node::maintain_n_running_nodes( false, true, + 120000, count, None, true, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index e57c820d0a..95d85bc9f3 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -284,6 +284,11 @@ pub enum SubCmd { /// sudo if you defined system-wide services; otherwise, do not run the command elevated. #[clap(name = "start")] Start { + /// The max time in milliseconds to wait for the RPC connection to the node to be established. + /// + /// The node is marked as failed if the connection is not established within this time. + #[clap(long, default_value = "120000")] + connection_timeout: u64, /// An interval applied between launching each service. /// /// Units are milliseconds. @@ -343,6 +348,11 @@ pub enum SubCmd { /// sudo if you defined system-wide services; otherwise, do not run the command elevated. #[clap(name = "upgrade")] Upgrade { + /// The max time in milliseconds to wait for the RPC connection to the node to be established. + /// + /// The upgrade will fail if the connection is not established within this time. + #[clap(long, default_value = "120000")] + connection_timeout: u64, /// Set this flag to upgrade the nodes without automatically starting them. /// /// Can be useful for testing scenarios. 
@@ -1221,10 +1231,20 @@ async fn main() -> Result<()> { } => cmd::node::remove(keep_directories, peer_ids, service_names, verbosity).await, SubCmd::Reset { force } => cmd::node::reset(force, verbosity).await, SubCmd::Start { + connection_timeout, interval, peer_id: peer_ids, service_name: service_names, - } => cmd::node::start(interval, peer_ids, service_names, verbosity).await, + } => { + cmd::node::start( + connection_timeout, + interval, + peer_ids, + service_names, + verbosity, + ) + .await + } SubCmd::Status { details, fail, @@ -1235,6 +1255,7 @@ async fn main() -> Result<()> { service_name: service_names, } => cmd::node::stop(peer_ids, service_names, verbosity).await, SubCmd::Upgrade { + connection_timeout, do_not_start, force, interval, @@ -1246,6 +1267,7 @@ async fn main() -> Result<()> { version, } => { cmd::node::upgrade( + connection_timeout, do_not_start, path, force, diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 7b1d736e16..4b0f8c5ebc 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -32,7 +32,7 @@ use sn_service_management::{ NodeRegistry, NodeService, ServiceStateActions, ServiceStatus, UpgradeOptions, UpgradeResult, }; use sn_transfers::HotWallet; -use std::{cmp::Ordering, io::Write, net::Ipv4Addr, path::PathBuf, str::FromStr}; +use std::{cmp::Ordering, io::Write, net::Ipv4Addr, path::PathBuf, str::FromStr, time::Duration}; use tracing::debug; /// Returns the added service names @@ -295,6 +295,7 @@ pub async fn reset(force: bool, verbosity: VerbosityLevel) -> Result<()> { } pub async fn start( + connection_timeout_ms: u64, interval: u64, peer_ids: Vec, service_names: Vec, @@ -332,7 +333,8 @@ pub async fn start( for &index in &service_indices { let node = &mut node_registry.nodes[index]; let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); - let service = NodeService::new(node, Box::new(rpc_client)); + let service = NodeService::new(node, Box::new(rpc_client)) + .with_connection_timeout(Duration::from_millis(connection_timeout_ms)); let mut service_manager = ServiceManager::new(service, Box::new(ServiceController {}), verbosity); if service_manager.service.status() != ServiceStatus::Running { @@ -355,13 +357,15 @@ pub async fn start( "Started service {} in {start_duration:?}", node.service_name ); - if let Some(duration) = start_duration { - dyn_interval.add_interval_ms(duration.as_millis() as u64); + if let Some(start_duration) = start_duration { + dyn_interval.add_interval_ms(start_duration.as_millis() as u64); } node_registry.save()?; } Err(err) => { error!("Failed to start service {}: {err}", node.service_name); + // increment the interval by 2x incase of a failure + dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); failed_services.push((node.service_name.clone(), err.to_string())) } } @@ -441,6 +445,7 @@ pub async fn stop( } pub async fn upgrade( + connection_timeout_ms: u64, do_not_start: bool, custom_bin_path: Option, force: bool, @@ -529,7 +534,8 @@ pub async fn upgrade( let service_name = node.service_name.clone(); let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); - let service = NodeService::new(node, Box::new(rpc_client)); + let service = NodeService::new(node, Box::new(rpc_client)) + .with_connection_timeout(Duration::from_millis(connection_timeout_ms)); let mut service_manager = ServiceManager::new(service, Box::new(ServiceController {}), verbosity); @@ -557,6 +563,8 @@ pub async fn upgrade( } Err(err) => { error!("Error upgrading service 
{service_name}: {err}"); + // increment the interval by 2x incase of a failure + dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); upgrade_summary.push(( node.service_name.clone(), UpgradeResult::Error(format!("Error: {}", err)), @@ -586,6 +594,7 @@ pub async fn upgrade( pub async fn maintain_n_running_nodes( auto_restart: bool, auto_set_nat_flags: bool, + connection_timeout_ms: u64, max_nodes_to_run: u16, data_dir_path: Option, enable_metrics_server: bool, @@ -665,7 +674,14 @@ pub async fn maintain_n_running_nodes( ?to_start_count, "We are starting these pre-existing services: {nodes_to_start:?}" ); - start(start_node_interval, vec![], nodes_to_start, verbosity).await?; + start( + connection_timeout_ms, + start_node_interval, + vec![], + nodes_to_start, + verbosity, + ) + .await?; } else { // add + start nodes let to_add_count = to_start_count - inactive_nodes.len(); @@ -703,7 +719,14 @@ pub async fn maintain_n_running_nodes( .await?; inactive_nodes.extend(added_service_list); - start(start_node_interval, vec![], inactive_nodes, verbosity).await?; + start( + connection_timeout_ms, + start_node_interval, + vec![], + inactive_nodes, + verbosity, + ) + .await?; } } Ordering::Equal => { diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 1c1f2b9b08..ed3fb31c80 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -700,7 +700,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> ServiceControlResult<()>; async fn node_stop(&self, delay_millis: u64) -> ServiceControlResult<()>; async fn node_update(&self, delay_millis: u64) -> ServiceControlResult<()>; - async fn try_connect(&self, max_attempts: u8) -> ServiceControlResult; + async fn try_connect(&self, timeout: std::time::Duration) -> ServiceControlResult; async fn update_log_level(&self, log_levels: String) -> ServiceControlResult<()>; } } diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index c5bdabf922..6314407eb8 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -540,7 +540,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> RpcResult<()>; async fn node_stop(&self, delay_millis: u64) -> RpcResult<()>; async fn node_update(&self, delay_millis: u64) -> RpcResult<()>; - async fn try_connect(&self, max_attempts: u8) -> RpcResult; + async fn try_connect(&self, timeout: std::time::Duration) -> RpcResult; async fn update_log_level(&self, log_levels: String) -> RpcResult<()>; } } diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index 18fe3d88c1..fc8cdb9ca5 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -16,9 +16,13 @@ use sn_protocol::get_port_from_multiaddr; use sn_transfers::NanoTokens; use std::{ffi::OsString, net::SocketAddr, path::PathBuf, str::FromStr, time::Duration}; +const DEFAULT_RPC_CONNECTION_TIMEOUT: Duration = Duration::from_secs(120); + pub struct NodeService<'a> { pub service_data: &'a mut NodeServiceData, pub rpc_actions: Box, + /// Timeout for connecting to the node via RPC + pub connection_timeout: Duration, } impl<'a> NodeService<'a> { @@ -29,8 +33,14 @@ impl<'a> NodeService<'a> { NodeService { rpc_actions, service_data, + connection_timeout: DEFAULT_RPC_CONNECTION_TIMEOUT, } } + + pub fn with_connection_timeout(mut self, connection_timeout: Duration) -> NodeService<'a> { + self.connection_timeout = connection_timeout; + self + } } #[async_trait] @@ -139,7 +149,10 @@ 
impl<'a> ServiceStateActions for NodeService<'a> {
             "Performing full refresh for {}",
             self.service_data.service_name
         );
-        let connection_duration = self.rpc_actions.try_connect(120).await?;
+        let connection_duration = self
+            .rpc_actions
+            .try_connect(self.connection_timeout)
+            .await?;
         let node_info = self
             .rpc_actions
             .node_info()
diff --git a/sn_service_management/src/rpc.rs b/sn_service_management/src/rpc.rs
index 41534eb91b..cf83e3173c 100644
--- a/sn_service_management/src/rpc.rs
+++ b/sn_service_management/src/rpc.rs
@@ -49,7 +49,7 @@ pub trait RpcActions: Sync {
     async fn node_stop(&self, delay_millis: u64) -> Result<()>;
     async fn node_update(&self, delay_millis: u64) -> Result<()>;
     /// Try to connect to the RPC endpoint and return the Duration it took to connect.
-    async fn try_connect(&self, max_attempts: u8) -> Result<Duration>;
+    async fn try_connect(&self, timeout: Duration) -> Result<Duration>;
     async fn update_log_level(&self, log_levels: String) -> Result<()>;
 }
 
@@ -224,7 +224,12 @@ impl RpcActions for RpcClient {
         Ok(())
     }
 
-    async fn try_connect(&self, max_attempts: u8) -> Result<Duration> {
+    async fn try_connect(&self, timeout: Duration) -> Result<Duration> {
+        let max_attempts = std::cmp::max(1, timeout.as_secs() / self.retry_delay.as_secs());
+        trace!(
+            "RPC connection max attempts set to: {max_attempts} with retry_delay of {:?}",
+            self.retry_delay
+        );
         let mut attempts = 0;
         loop {
             debug!(
@@ -234,9 +239,7 @@ impl RpcActions for RpcClient {
             match SafeNodeClient::connect(self.endpoint.clone()).await {
                 Ok(_) => {
                     debug!("Connection successful");
-                    break Ok(Duration::from_secs(
-                        attempts as u64 * self.retry_delay.as_secs(),
-                    ));
+                    break Ok(Duration::from_secs(attempts * self.retry_delay.as_secs()));
                 }
                 Err(_) => {
                     attempts += 1;

From bc73a094330deb03b5de9db67ad21d1fa769be36 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Wed, 31 Jul 2024 18:21:48 +0530
Subject: [PATCH 057/115] fix(peers): remove timeout from wasm32

---
 sn_peers_acquisition/src/lib.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs
index 98a1ddc2ed..bd013c4485 100644
--- a/sn_peers_acquisition/src/lib.rs
+++ b/sn_peers_acquisition/src/lib.rs
@@ -192,7 +192,12 @@ pub fn parse_peer_addr(addr: &str) -> Result<Multiaddr> {
 pub async fn get_peers_from_url(url: Url) -> Result<Vec<Multiaddr>> {
     let mut retries = 0;
 
+    #[cfg(not(target_arch = "wasm32"))]
     let request_client = Client::builder().timeout(Duration::from_secs(10)).build()?;
+    // Wasm does not have the timeout method yet.
+ #[cfg(target_arch = "wasm32")] + let request_client = Client::builder().build()?; + loop { let response = request_client.get(url.clone()).send().await; From 10bf30c44966f8e1b4aa51763d07c816af63227b Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 31 Jul 2024 18:40:39 +0530 Subject: [PATCH 058/115] fix(test): update the node manager tests to expect the new rpc call --- sn_node_manager/src/lib.rs | 60 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index ed3fb31c80..d15e7cf34a 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -741,6 +741,10 @@ mod tests { ))) .times(1) .returning(|_| Ok(1000)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -842,6 +846,10 @@ mod tests { ))) .times(1) .returning(|_| Ok(1000)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -1014,6 +1022,10 @@ mod tests { ))) .times(1) .returning(|_| Ok(1000)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -1180,6 +1192,10 @@ mod tests { ))) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -1564,6 +1580,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(2000)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -1781,6 +1801,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(2000)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2201,6 +2225,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(2000)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2369,6 +2397,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2520,6 +2552,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2674,6 +2710,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2825,6 +2865,10 @@ mod tests { 
.with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2976,6 +3020,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3130,6 +3178,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3284,6 +3336,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3438,6 +3494,10 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); + mock_rpc_client + .expect_try_connect() + .times(1) + .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, From 2eda8689a32f481fa39d5ff969fc12cce8ce2b4d Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Thu, 1 Aug 2024 12:46:48 +0530 Subject: [PATCH 059/115] fix(manager): make 'interval' and 'connection-timeout' args mutually exclusive --- node-launchpad/src/components/home.rs | 3 +-- sn_node_manager/src/bin/cli/main.rs | 26 ++++++++++++++++------- sn_node_manager/src/cmd/node.rs | 30 ++++++++++++++------------- sn_node_manager/src/lib.rs | 11 ++++++++-- 4 files changed, 45 insertions(+), 25 deletions(-) diff --git a/node-launchpad/src/components/home.rs b/node-launchpad/src/components/home.rs index a87fadf072..49c6c9c47e 100644 --- a/node-launchpad/src/components/home.rs +++ b/node-launchpad/src/components/home.rs @@ -31,7 +31,6 @@ use std::{ }; use tokio::sync::mpsc::UnboundedSender; -const NODE_START_INTERVAL: usize = 10; const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// If nat detection fails for more than 3 times, we don't want to waste time running during every node start. const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; @@ -681,7 +680,7 @@ fn maintain_n_running_nodes( None, None, VerbosityLevel::Minimal, - NODE_START_INTERVAL as u64, + None, ) .await { diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 95d85bc9f3..d1293a1767 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -286,14 +286,20 @@ pub enum SubCmd { Start { /// The max time in milliseconds to wait for the RPC connection to the node to be established. /// - /// The node is marked as failed if the connection is not established within this time. - #[clap(long, default_value = "120000")] + /// Setting this will automatically scale the interval between each service launch. This argument + /// is mutually exclusive with the 'interval' argument. + /// + /// The upgrade will fail if the connection is not established within this time. 
+ #[clap(long, default_value = "120000", conflicts_with = "interval")] connection_timeout: u64, /// An interval applied between launching each service. /// + /// Use connection-timeout to scale the interval automatically. This argument is mutually exclusive with the + /// 'connection-timeout' argument. + /// /// Units are milliseconds. - #[clap(long, default_value_t = 200)] - interval: u64, + #[clap(long, conflicts_with = "connection-timeout")] + interval: Option, /// The peer ID of the service to start. /// /// The argument can be used multiple times to start many services. @@ -350,8 +356,11 @@ pub enum SubCmd { Upgrade { /// The max time in milliseconds to wait for the RPC connection to the node to be established. /// + /// Setting this will automatically scale the interval between each service upgrade. This argument + /// is mutually exclusive with the 'interval' argument. + /// /// The upgrade will fail if the connection is not established within this time. - #[clap(long, default_value = "120000")] + #[clap(long, default_value = "120000", conflicts_with = "interval")] connection_timeout: u64, /// Set this flag to upgrade the nodes without automatically starting them. /// @@ -376,9 +385,12 @@ pub enum SubCmd { force: bool, /// An interval applied between upgrading each service. /// + /// Use connection-timeout to scale the interval automatically. This argument is mutually exclusive with the + /// 'connection-timeout' argument. + /// /// Units are milliseconds. - #[clap(long, default_value_t = 200)] - interval: u64, + #[clap(long, conflicts_with = "connection-timeout")] + interval: Option, /// Provide a path for the safenode binary to be used by the service. /// /// Useful for upgrading the service using a custom built binary. diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 4b0f8c5ebc..b4049f2445 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -296,7 +296,7 @@ pub async fn reset(force: bool, verbosity: VerbosityLevel) -> Result<()> { pub async fn start( connection_timeout_ms: u64, - interval: u64, + fixed_interval: Option, peer_ids: Vec, service_names: Vec, verbosity: VerbosityLevel, @@ -304,9 +304,7 @@ pub async fn start( if verbosity != VerbosityLevel::Minimal { print_banner("Start Safenode Services"); } - info!( - "Starting safenode services with dynamic interval (starting at {interval}) for: {peer_ids:?}, {service_names:?}" - ); + info!("Starting safenode services for: {peer_ids:?}, {service_names:?}"); let mut node_registry = NodeRegistry::load(&config::get_node_registry_path()?)?; refresh_node_registry( @@ -328,8 +326,7 @@ pub async fn start( } let mut failed_services = Vec::new(); - let mut dyn_interval = DynamicInterval::new(); - dyn_interval.add_interval_ms(interval); + let mut dyn_interval = DynamicInterval::new(fixed_interval.unwrap_or(200)); for &index in &service_indices { let node = &mut node_registry.nodes[index]; let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); @@ -357,15 +354,18 @@ pub async fn start( "Started service {} in {start_duration:?}", node.service_name ); - if let Some(start_duration) = start_duration { + if let (Some(start_duration), None) = (start_duration, fixed_interval) { dyn_interval.add_interval_ms(start_duration.as_millis() as u64); } + node_registry.save()?; } Err(err) => { error!("Failed to start service {}: {err}", node.service_name); // increment the interval by 2x incase of a failure - dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); + if 
fixed_interval.is_none() { + dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); + } failed_services.push((node.service_name.clone(), err.to_string())) } } @@ -449,7 +449,7 @@ pub async fn upgrade( do_not_start: bool, custom_bin_path: Option, force: bool, - interval: u64, + fixed_interval: Option, peer_ids: Vec, provided_env_variables: Option>, service_names: Vec, @@ -513,8 +513,7 @@ pub async fn upgrade( trace!("service_indices len: {}", service_indices.len()); let mut upgrade_summary = Vec::new(); - let mut dyn_interval = DynamicInterval::new(); - dyn_interval.add_interval_ms(interval); + let mut dyn_interval = DynamicInterval::new(fixed_interval.unwrap_or(200)); for &index in &service_indices { let node = &mut node_registry.nodes[index]; let env_variables = if provided_env_variables.is_some() { @@ -541,9 +540,10 @@ pub async fn upgrade( match service_manager.upgrade(options).await { Ok((upgrade_result, start_duration)) => { - if let Some(start_duration) = start_duration { + if let (Some(start_duration), None) = (start_duration, fixed_interval) { dyn_interval.add_interval_ms(start_duration.as_millis() as u64); } + info!("Service: {service_name} has been upgraded, result: {upgrade_result:?}",); if upgrade_result != UpgradeResult::NotRequired { // It doesn't seem useful to apply the interval if there was no upgrade @@ -564,7 +564,9 @@ pub async fn upgrade( Err(err) => { error!("Error upgrading service {service_name}: {err}"); // increment the interval by 2x incase of a failure - dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); + if fixed_interval.is_none() { + dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); + } upgrade_summary.push(( node.service_name.clone(), UpgradeResult::Error(format!("Error: {}", err)), @@ -615,7 +617,7 @@ pub async fn maintain_n_running_nodes( user: Option, version: Option, verbosity: VerbosityLevel, - start_node_interval: u64, + start_node_interval: Option, ) -> Result<()> { let node_registry = NodeRegistry::load(&config::get_node_registry_path()?)?; let running_nodes = node_registry diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index d15e7cf34a..0a6dbe1a07 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -644,11 +644,18 @@ struct DynamicInterval { } impl DynamicInterval { - pub fn new() -> Self { - DynamicInterval { + /// Create a new `DynamicInterval` with a starting average interval. 
+ pub fn new(starting_average_ms: u64) -> Self { + let mut dyn_interval = DynamicInterval { last_reported_intervals_ms: VecDeque::new(), window_size: 3, + }; + + for _ in 0..dyn_interval.window_size { + dyn_interval.add_interval_ms(starting_average_ms); } + + dyn_interval } pub fn add_interval_ms(&mut self, interval_ms: u64) { From 60700bdf1716704868ba9b9d8b17e71708f2aaca Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Thu, 1 Aug 2024 20:40:55 +0530 Subject: [PATCH 060/115] fix(manager): check if node has been connected to the network --- sn_client/src/api.rs | 4 +- sn_networking/src/lib.rs | 9 +---- sn_node/src/node.rs | 4 +- sn_node/tests/verify_data_location.rs | 4 +- sn_node_manager/src/lib.rs | 32 ++++++++-------- sn_node_manager/src/local.rs | 2 +- sn_protocol/src/lib.rs | 7 ++++ sn_service_management/src/node.rs | 2 +- sn_service_management/src/rpc.rs | 55 +++++++++++++++++---------- 9 files changed, 67 insertions(+), 52 deletions(-) diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index 11057f0201..a58112f079 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -24,7 +24,7 @@ use sn_networking::{ get_signed_spend_from_record, multiaddr_is_global, target_arch::{interval, spawn, timeout, Instant}, GetRecordCfg, GetRecordError, NetworkBuilder, NetworkError, NetworkEvent, PutRecordCfg, - VerificationKind, CLOSE_GROUP_SIZE, + VerificationKind, }; use sn_protocol::{ error::Error as ProtocolError, @@ -33,7 +33,7 @@ use sn_protocol::{ try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, RegisterAddress, RetryStrategy, SpendAddress, }, - NetworkAddress, PrettyPrintRecordKey, + NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use sn_registers::{Permissions, SignedRegister}; use sn_transfers::{ diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 1c92230546..6492cec4b1 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -61,7 +61,7 @@ use sn_protocol::{ error::Error as ProtocolError, messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response}, storage::{RecordType, RetryStrategy}, - NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, + NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ @@ -78,13 +78,6 @@ use tokio::time::Duration; /// The type of quote for a selected payee. pub type PayeeQuote = (PeerId, MainPubkey, PaymentQuote); -/// The maximum number of peers to return in a `GetClosestPeers` response. -/// This is the group size used in safe network protocol to be responsible for -/// an item in the network. -/// The peer should be present among the CLOSE_GROUP_SIZE if we're fetching the close_group(peer) -/// The size has been set to 5 for improved performance. -pub const CLOSE_GROUP_SIZE: usize = 5; - /// The count of peers that will be considered as close to a record target, /// that a replication of the record shall be sent/accepted to/by the peer. 
pub const REPLICATION_PEERS_COUNT: usize = CLOSE_GROUP_SIZE + 2; diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index e1fb9d10d3..bff7b5164c 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -24,12 +24,12 @@ use prometheus_client::registry::Registry; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; use sn_networking::{ close_group_majority, Instant, Network, NetworkBuilder, NetworkError, NetworkEvent, NodeIssue, - SwarmDriver, CLOSE_GROUP_SIZE, + SwarmDriver, }; use sn_protocol::{ error::Error as ProtocolError, messages::{ChunkProof, Cmd, CmdResponse, Query, QueryResponse, Request, Response}, - NetworkAddress, PrettyPrintRecordKey, + NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use sn_transfers::{HotWallet, MainPubkey, MainSecretKey, NanoTokens, PAYMENT_FORWARD_PK}; use std::{ diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index d11ab60da0..3abf477b18 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -23,10 +23,10 @@ use libp2p::{ use rand::{rngs::OsRng, Rng}; use sn_client::{Client, FilesApi, Uploader, WalletClient}; use sn_logging::LogBuilder; -use sn_networking::{sort_peers_by_key, CLOSE_GROUP_SIZE}; +use sn_networking::sort_peers_by_key; use sn_protocol::{ safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, - NetworkAddress, PrettyPrintRecordKey, + NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use sn_registers::{Permissions, RegisterAddress}; use std::{ diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 0a6dbe1a07..60d5ee0da8 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -707,7 +707,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> ServiceControlResult<()>; async fn node_stop(&self, delay_millis: u64) -> ServiceControlResult<()>; async fn node_update(&self, delay_millis: u64) -> ServiceControlResult<()>; - async fn try_connect(&self, timeout: std::time::Duration) -> ServiceControlResult; + async fn is_node_connected_to_network(&self, timeout: std::time::Duration) -> ServiceControlResult; async fn update_log_level(&self, log_levels: String) -> ServiceControlResult<()>; } } @@ -749,7 +749,7 @@ mod tests { .times(1) .returning(|_| Ok(1000)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -854,7 +854,7 @@ mod tests { .times(1) .returning(|_| Ok(1000)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -1030,7 +1030,7 @@ mod tests { .times(1) .returning(|_| Ok(1000)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -1200,7 +1200,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -1588,7 +1588,7 @@ mod tests { .times(1) .returning(|_| Ok(2000)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); 
mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -1809,7 +1809,7 @@ mod tests { .times(1) .returning(|_| Ok(2000)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -2233,7 +2233,7 @@ mod tests { .times(1) .returning(|_| Ok(2000)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -2405,7 +2405,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -2560,7 +2560,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -2718,7 +2718,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -2873,7 +2873,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -3028,7 +3028,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -3186,7 +3186,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -3344,7 +3344,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { @@ -3502,7 +3502,7 @@ mod tests { .times(1) .returning(|_| Ok(100)); mock_rpc_client - .expect_try_connect() + .expect_is_node_connected_to_network() .times(1) .returning(|_| Ok(Duration::from_secs(0))); mock_rpc_client.expect_node_info().times(1).returning(|| { diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 6314407eb8..d071f70dd5 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -540,7 +540,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> RpcResult<()>; async fn node_stop(&self, delay_millis: u64) -> RpcResult<()>; async fn node_update(&self, delay_millis: u64) -> RpcResult<()>; - async fn try_connect(&self, timeout: std::time::Duration) -> RpcResult; + async fn is_node_connected_to_network(&self, timeout: std::time::Duration) -> RpcResult; async fn update_log_level(&self, log_levels: String) -> RpcResult<()>; } } diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs index 08297a9347..f0a67d0984 100644 --- a/sn_protocol/src/lib.rs +++ b/sn_protocol/src/lib.rs @@ -42,6 +42,13 @@ use std::{ }; use 
xor_name::XorName; +/// The maximum number of peers to return in a `GetClosestPeers` response. +/// This is the group size used in safe network protocol to be responsible for +/// an item in the network. +/// The peer should be present among the CLOSE_GROUP_SIZE if we're fetching the close_group(peer) +/// The size has been set to 5 for improved performance. +pub const CLOSE_GROUP_SIZE: usize = 5; + /// Returns the UDP port from the provided MultiAddr. pub fn get_port_from_multiaddr(multi_addr: &Multiaddr) -> Option { // assuming the listening addr contains /ip4/127.0.0.1/udp/56215/quic-v1/p2p/ diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index fc8cdb9ca5..d4393156e5 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -151,7 +151,7 @@ impl<'a> ServiceStateActions for NodeService<'a> { ); let connection_duration = self .rpc_actions - .try_connect(self.connection_timeout) + .is_node_connected_to_network(self.connection_timeout) .await?; let node_info = self .rpc_actions diff --git a/sn_service_management/src/rpc.rs b/sn_service_management/src/rpc.rs index cf83e3173c..44fe11d136 100644 --- a/sn_service_management/src/rpc.rs +++ b/sn_service_management/src/rpc.rs @@ -9,9 +9,12 @@ use crate::error::{Error, Result}; use async_trait::async_trait; use libp2p::{kad::RecordKey, Multiaddr, PeerId}; -use sn_protocol::safenode_proto::{ - safe_node_client::SafeNodeClient, NetworkInfoRequest, NodeInfoRequest, RecordAddressesRequest, - RestartRequest, StopRequest, UpdateLogLevelRequest, UpdateRequest, +use sn_protocol::{ + safenode_proto::{ + safe_node_client::SafeNodeClient, NetworkInfoRequest, NodeInfoRequest, + RecordAddressesRequest, RestartRequest, StopRequest, UpdateLogLevelRequest, UpdateRequest, + }, + CLOSE_GROUP_SIZE, }; use std::{net::SocketAddr, path::PathBuf, str::FromStr}; use tokio::time::Duration; @@ -48,8 +51,9 @@ pub trait RpcActions: Sync { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> Result<()>; async fn node_stop(&self, delay_millis: u64) -> Result<()>; async fn node_update(&self, delay_millis: u64) -> Result<()>; - /// Try to connect to the RPC endpoint and return the Duration it took to connect. - async fn try_connect(&self, timeout: Duration) -> Result; + /// Returns Ok(duration) if the node has been successfully connected to the network and the duration + /// it took to connect to the node via RPC. 
+ async fn is_node_connected_to_network(&self, timeout: Duration) -> Result; async fn update_log_level(&self, log_levels: String) -> Result<()>; } @@ -224,7 +228,7 @@ impl RpcActions for RpcClient { Ok(()) } - async fn try_connect(&self, timeout: Duration) -> Result { + async fn is_node_connected_to_network(&self, timeout: Duration) -> Result { let max_attempts = std::cmp::max(1, timeout.as_secs() / self.retry_delay.as_secs()); trace!( "RPC conneciton max attempts set to: {max_attempts} with retry_delay of {:?}", @@ -236,22 +240,33 @@ impl RpcActions for RpcClient { "Attempting connection to node RPC endpoint at {}...", self.endpoint ); - match SafeNodeClient::connect(self.endpoint.clone()).await { - Ok(_) => { - debug!("Connection successful"); - break Ok(Duration::from_secs(attempts * self.retry_delay.as_secs())); - } - Err(_) => { - attempts += 1; - tokio::time::sleep(self.retry_delay).await; - if attempts >= max_attempts { - return Err(Error::RpcConnectionError(self.endpoint.clone())); + if let Ok(mut client) = SafeNodeClient::connect(self.endpoint.clone()).await { + debug!("Connection to RPC successful"); + if let Ok(response) = client + .network_info(Request::new(NetworkInfoRequest {})) + .await + { + if response.get_ref().connected_peers.len() > CLOSE_GROUP_SIZE { + return Ok(Duration::from_secs(attempts * self.retry_delay.as_secs())); + } else { + error!( + "Node does not have enough peers connected yet. Retrying {attempts}/{max_attempts}", + ); } - error!( - "Could not connect to RPC endpoint {:?}. Retrying {attempts}/{}", - self.endpoint, max_attempts - ); + } else { + error!("Could not obtain NetworkInfo through RPC. Retrying {attempts}/{max_attempts}"); } + } else { + error!( + "Could not connect to RPC endpoint {:?}. Retrying {attempts}/{max_attempts}", + self.endpoint + ); + } + + attempts += 1; + tokio::time::sleep(self.retry_delay).await; + if attempts >= max_attempts { + return Err(Error::RpcConnectionError(self.endpoint.clone())); } } } From 42a13686d8dd4adb70e1a661c02cdd1833268b55 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 2 Aug 2024 00:10:29 +0530 Subject: [PATCH 061/115] chore(manager): update docs based on comment --- node-launchpad/src/components/home.rs | 2 +- sn_node_manager/src/bin/cli/main.rs | 28 ++++++++++++++++----------- sn_node_manager/src/cmd/node.rs | 14 +++++++------- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/node-launchpad/src/components/home.rs b/node-launchpad/src/components/home.rs index 49c6c9c47e..a122698971 100644 --- a/node-launchpad/src/components/home.rs +++ b/node-launchpad/src/components/home.rs @@ -659,7 +659,7 @@ fn maintain_n_running_nodes( if let Err(err) = sn_node_manager::cmd::node::maintain_n_running_nodes( false, true, - 120000, + 120, count, None, true, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index d1293a1767..a5d15d75d8 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -278,19 +278,23 @@ pub enum SubCmd { }, /// Start safenode service(s). /// + /// By default, the interval between starting each service is scaled automatically. The 'connection-timeout' + /// controls the maximum time to wait between starting each service. Use the 'interval' argument to set a fixed + /// interval and override the dynamic interval. + /// /// If no peer ID(s) or service name(s) are supplied, all services will be started. /// /// On Windows, this command must run as the administrative user. 
On Linux/macOS, run using /// sudo if you defined system-wide services; otherwise, do not run the command elevated. #[clap(name = "start")] Start { - /// The max time in milliseconds to wait for the RPC connection to the node to be established. + /// The max time in seconds to wait between starting each node service. If the connection is not established + /// within this time, the node is considered failed and the dynamic interval is increased drastically. /// - /// Setting this will automatically scale the interval between each service launch. This argument - /// is mutually exclusive with the 'interval' argument. + /// This argument is mutually exclusive with the 'interval' argument. /// - /// The upgrade will fail if the connection is not established within this time. - #[clap(long, default_value = "120000", conflicts_with = "interval")] + /// Defaults to 120s. + #[clap(long, default_value = "120", conflicts_with = "interval")] connection_timeout: u64, /// An interval applied between launching each service. /// @@ -346,7 +350,9 @@ pub enum SubCmd { /// Upgrade safenode services. /// /// The running node will be stopped, its binary will be replaced, then it will be started - /// again. + /// again. By default, the interval between starting each service is scaled automatically. The 'connection-timeout' + /// controls the maximum time to wait between starting each service. Use the 'interval' argument to set a fixed + /// interval and override the dynamic interval. /// /// If no peer ID(s) or service name(s) are supplied, all services will be upgraded. /// @@ -354,13 +360,13 @@ pub enum SubCmd { /// sudo if you defined system-wide services; otherwise, do not run the command elevated. #[clap(name = "upgrade")] Upgrade { - /// The max time in milliseconds to wait for the RPC connection to the node to be established. + /// The max time in seconds to wait between starting each node service. If the connection is not established + /// within this time, the node is considered failed and the dynamic interval is increased drastically. /// - /// Setting this will automatically scale the interval between each service upgrade. This argument - /// is mutually exclusive with the 'interval' argument. + /// This argument is mutually exclusive with the 'interval' argument. /// - /// The upgrade will fail if the connection is not established within this time. - #[clap(long, default_value = "120000", conflicts_with = "interval")] + /// Defaults to 120s. + #[clap(long, default_value = "120", conflicts_with = "interval")] connection_timeout: u64, /// Set this flag to upgrade the nodes without automatically starting them. 
/// diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index b4049f2445..a0d4e39cbb 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -295,7 +295,7 @@ pub async fn reset(force: bool, verbosity: VerbosityLevel) -> Result<()> { } pub async fn start( - connection_timeout_ms: u64, + connection_timeout_s: u64, fixed_interval: Option, peer_ids: Vec, service_names: Vec, @@ -331,7 +331,7 @@ pub async fn start( let node = &mut node_registry.nodes[index]; let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); let service = NodeService::new(node, Box::new(rpc_client)) - .with_connection_timeout(Duration::from_millis(connection_timeout_ms)); + .with_connection_timeout(Duration::from_secs(connection_timeout_s)); let mut service_manager = ServiceManager::new(service, Box::new(ServiceController {}), verbosity); if service_manager.service.status() != ServiceStatus::Running { @@ -445,7 +445,7 @@ pub async fn stop( } pub async fn upgrade( - connection_timeout_ms: u64, + connection_timeout_s: u64, do_not_start: bool, custom_bin_path: Option, force: bool, @@ -534,7 +534,7 @@ pub async fn upgrade( let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); let service = NodeService::new(node, Box::new(rpc_client)) - .with_connection_timeout(Duration::from_millis(connection_timeout_ms)); + .with_connection_timeout(Duration::from_secs(connection_timeout_s)); let mut service_manager = ServiceManager::new(service, Box::new(ServiceController {}), verbosity); @@ -596,7 +596,7 @@ pub async fn upgrade( pub async fn maintain_n_running_nodes( auto_restart: bool, auto_set_nat_flags: bool, - connection_timeout_ms: u64, + connection_timeout_s: u64, max_nodes_to_run: u16, data_dir_path: Option, enable_metrics_server: bool, @@ -677,7 +677,7 @@ pub async fn maintain_n_running_nodes( "We are starting these pre-existing services: {nodes_to_start:?}" ); start( - connection_timeout_ms, + connection_timeout_s, start_node_interval, vec![], nodes_to_start, @@ -722,7 +722,7 @@ pub async fn maintain_n_running_nodes( inactive_nodes.extend(added_service_list); start( - connection_timeout_ms, + connection_timeout_s, start_node_interval, vec![], inactive_nodes, From b0fa120bc680b5c1660accb760e63190decedf55 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 6 Aug 2024 13:25:47 +0530 Subject: [PATCH 062/115] fix(manager): do not sleep if we're already waiting for the node to connect to the network --- sn_node_manager/src/bin/cli/main.rs | 33 +- sn_node_manager/src/cmd/auditor.rs | 2 +- sn_node_manager/src/cmd/faucet.rs | 2 +- sn_node_manager/src/cmd/node.rs | 69 ++--- sn_node_manager/src/lib.rs | 446 ++++++++++++++++++--------- sn_node_manager/src/local.rs | 2 +- sn_service_management/src/auditor.rs | 10 +- sn_service_management/src/daemon.rs | 10 +- sn_service_management/src/faucet.rs | 10 +- sn_service_management/src/lib.rs | 4 +- sn_service_management/src/node.rs | 34 +- sn_service_management/src/rpc.rs | 8 +- 12 files changed, 389 insertions(+), 241 deletions(-) diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index a5d15d75d8..5a54769448 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -13,7 +13,7 @@ use sn_logging::{LogBuilder, LogFormat}; use sn_node_manager::{ add_services::config::PortRange, cmd::{self}, - VerbosityLevel, + VerbosityLevel, DEFAULT_NODE_STARTUP_CONNECTION_TIMEOUT_S, }; use sn_peers_acquisition::PeersArgs; use std::{net::Ipv4Addr, path::PathBuf}; 
@@ -278,9 +278,10 @@ pub enum SubCmd { }, /// Start safenode service(s). /// - /// By default, the interval between starting each service is scaled automatically. The 'connection-timeout' - /// controls the maximum time to wait between starting each service. Use the 'interval' argument to set a fixed - /// interval and override the dynamic interval. + /// By default, each node service is started after the previous node has successfully connected to the network or + /// after the 'connection-timeout' period has been reached for that node. The timeout is 300 seconds by default. + /// The above behaviour can be overridden by setting a fixed interval between starting each node service using the + /// 'interval' argument. /// /// If no peer ID(s) or service name(s) are supplied, all services will be started. /// @@ -288,13 +289,13 @@ pub enum SubCmd { /// sudo if you defined system-wide services; otherwise, do not run the command elevated. #[clap(name = "start")] Start { - /// The max time in seconds to wait between starting each node service. If the connection is not established - /// within this time, the node is considered failed and the dynamic interval is increased drastically. + /// The max time in seconds to wait for a node to connect to the network. If the node does not connect to the + /// network within this time, the node is considered failed. /// /// This argument is mutually exclusive with the 'interval' argument. /// - /// Defaults to 120s. - #[clap(long, default_value = "120", conflicts_with = "interval")] + /// Defaults to 300s. + #[clap(long, default_value_t = DEFAULT_NODE_STARTUP_CONNECTION_TIMEOUT_S, conflicts_with = "interval")] connection_timeout: u64, /// An interval applied between launching each service. /// @@ -349,10 +350,10 @@ pub enum SubCmd { }, /// Upgrade safenode services. /// - /// The running node will be stopped, its binary will be replaced, then it will be started - /// again. By default, the interval between starting each service is scaled automatically. The 'connection-timeout' - /// controls the maximum time to wait between starting each service. Use the 'interval' argument to set a fixed - /// interval and override the dynamic interval. + /// By default, each node service is started after the previous node has successfully connected to the network or + /// after the 'connection-timeout' period has been reached for that node. The timeout is 300 seconds by default. + /// The above behaviour can be overridden by setting a fixed interval between starting each node service using the + /// 'interval' argument. /// /// If no peer ID(s) or service name(s) are supplied, all services will be upgraded. /// @@ -360,13 +361,13 @@ pub enum SubCmd { /// sudo if you defined system-wide services; otherwise, do not run the command elevated. #[clap(name = "upgrade")] Upgrade { - /// The max time in seconds to wait between starting each node service. If the connection is not established - /// within this time, the node is considered failed and the dynamic interval is increased drastically. + /// The max time in seconds to wait for a node to connect to the network. If the node does not connect to the + /// network within this time, the node is considered failed. /// /// This argument is mutually exclusive with the 'interval' argument. /// - /// Defaults to 120s. - #[clap(long, default_value = "120", conflicts_with = "interval")] + /// Defaults to 300s. 
+ #[clap(long, default_value_t = DEFAULT_NODE_STARTUP_CONNECTION_TIMEOUT_S, conflicts_with = "interval")] connection_timeout: u64, /// Set this flag to upgrade the nodes without automatically starting them. /// diff --git a/sn_node_manager/src/cmd/auditor.rs b/sn_node_manager/src/cmd/auditor.rs index 91a04071b5..081848083d 100644 --- a/sn_node_manager/src/cmd/auditor.rs +++ b/sn_node_manager/src/cmd/auditor.rs @@ -210,7 +210,7 @@ pub async fn upgrade( ServiceManager::new(service, Box::new(ServiceController {}), verbosity); match service_manager.upgrade(options).await { - Ok((upgrade_result, _)) => { + Ok(upgrade_result) => { info!("Upgrade the auditor service successfully"); print_upgrade_summary(vec![("auditor".to_string(), upgrade_result)]); node_registry.save()?; diff --git a/sn_node_manager/src/cmd/faucet.rs b/sn_node_manager/src/cmd/faucet.rs index 86869ade8c..6645d9b6f0 100644 --- a/sn_node_manager/src/cmd/faucet.rs +++ b/sn_node_manager/src/cmd/faucet.rs @@ -206,7 +206,7 @@ pub async fn upgrade( ServiceManager::new(service, Box::new(ServiceController {}), verbosity); match service_manager.upgrade(options).await { - Ok((upgrade_result, _)) => { + Ok(upgrade_result) => { print_upgrade_summary(vec![("faucet".to_string(), upgrade_result)]); node_registry.save()?; Ok(()) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index a0d4e39cbb..ca903ae8f5 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -16,8 +16,7 @@ use crate::{ }, config::{self, is_running_as_root}, helpers::{download_and_extract_release, get_bin_version}, - print_banner, refresh_node_registry, status_report, DynamicInterval, ServiceManager, - VerbosityLevel, + print_banner, refresh_node_registry, status_report, ServiceManager, VerbosityLevel, }; use color_eyre::{eyre::eyre, Help, Result}; use colored::Colorize; @@ -326,12 +325,19 @@ pub async fn start( } let mut failed_services = Vec::new(); - let mut dyn_interval = DynamicInterval::new(fixed_interval.unwrap_or(200)); for &index in &service_indices { let node = &mut node_registry.nodes[index]; let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); - let service = NodeService::new(node, Box::new(rpc_client)) - .with_connection_timeout(Duration::from_secs(connection_timeout_s)); + + let service = NodeService::new(node, Box::new(rpc_client)); + + // set dynamic startup delay if fixed_interval is not set + let service = if fixed_interval.is_none() { + service.with_connection_timeout(Duration::from_secs(connection_timeout_s)) + } else { + service + }; + let mut service_manager = ServiceManager::new(service, Box::new(ServiceController {}), verbosity); if service_manager.service.status() != ServiceStatus::Running { @@ -339,14 +345,10 @@ pub async fn start( // continue without applying the delay. The reason for not doing so is because when // `start` is called below, the user will get a message to say the service was already // started, which I think is useful behaviour to retain. 
- debug!( - "Sleeping for {} milliseconds ({}sec)", - dyn_interval.get_interval_ms(), - dyn_interval.get_interval_ms() / 1000 - ); - std::thread::sleep(std::time::Duration::from_millis( - dyn_interval.get_interval_ms(), - )); + if let Some(interval) = fixed_interval { + debug!("Sleeping for {} milliseconds", interval); + std::thread::sleep(std::time::Duration::from_millis(interval)); + } } match service_manager.start().await { Ok(start_duration) => { @@ -354,18 +356,11 @@ pub async fn start( "Started service {} in {start_duration:?}", node.service_name ); - if let (Some(start_duration), None) = (start_duration, fixed_interval) { - dyn_interval.add_interval_ms(start_duration.as_millis() as u64); - } node_registry.save()?; } Err(err) => { error!("Failed to start service {}: {err}", node.service_name); - // increment the interval by 2x incase of a failure - if fixed_interval.is_none() { - dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); - } failed_services.push((node.service_name.clone(), err.to_string())) } } @@ -513,7 +508,6 @@ pub async fn upgrade( trace!("service_indices len: {}", service_indices.len()); let mut upgrade_summary = Vec::new(); - let mut dyn_interval = DynamicInterval::new(fixed_interval.unwrap_or(200)); for &index in &service_indices { let node = &mut node_registry.nodes[index]; let env_variables = if provided_env_variables.is_some() { @@ -533,28 +527,27 @@ pub async fn upgrade( let service_name = node.service_name.clone(); let rpc_client = RpcClient::from_socket_addr(node.rpc_socket_addr); - let service = NodeService::new(node, Box::new(rpc_client)) - .with_connection_timeout(Duration::from_secs(connection_timeout_s)); + let service = NodeService::new(node, Box::new(rpc_client)); + // set dynamic startup delay if fixed_interval is not set + let service = if fixed_interval.is_none() { + service.with_connection_timeout(Duration::from_secs(connection_timeout_s)) + } else { + service + }; + let mut service_manager = ServiceManager::new(service, Box::new(ServiceController {}), verbosity); match service_manager.upgrade(options).await { - Ok((upgrade_result, start_duration)) => { - if let (Some(start_duration), None) = (start_duration, fixed_interval) { - dyn_interval.add_interval_ms(start_duration.as_millis() as u64); - } - + Ok(upgrade_result) => { info!("Service: {service_name} has been upgraded, result: {upgrade_result:?}",); if upgrade_result != UpgradeResult::NotRequired { // It doesn't seem useful to apply the interval if there was no upgrade // required for the previous service. 
- debug!( - "Sleeping for {} milliseconds", - dyn_interval.get_interval_ms() - ); - std::thread::sleep(std::time::Duration::from_millis( - dyn_interval.get_interval_ms(), - )); + if let Some(interval) = fixed_interval { + debug!("Sleeping for {interval} milliseconds",); + std::thread::sleep(std::time::Duration::from_millis(interval)); + } } upgrade_summary.push(( service_manager.service.service_data.service_name.clone(), @@ -563,13 +556,9 @@ pub async fn upgrade( } Err(err) => { error!("Error upgrading service {service_name}: {err}"); - // increment the interval by 2x incase of a failure - if fixed_interval.is_none() { - dyn_interval.add_interval_ms(dyn_interval.get_interval_ms() * 2); - } upgrade_summary.push(( node.service_name.clone(), - UpgradeResult::Error(format!("Error: {}", err)), + UpgradeResult::Error(format!("Error: {err}")), )); } } diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 60d5ee0da8..6389c0ac34 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -18,6 +18,8 @@ pub mod local; pub mod rpc; pub mod rpc_client; +pub const DEFAULT_NODE_STARTUP_CONNECTION_TIMEOUT_S: u64 = 300; + #[derive(Clone, Copy, PartialEq, Debug)] pub enum VerbosityLevel { Minimal, @@ -45,7 +47,6 @@ use sn_service_management::{ UpgradeResult, }; use sn_transfers::HotWallet; -use std::{collections::VecDeque, time::Duration}; use tracing::debug; pub const DAEMON_DEFAULT_PORT: u16 = 12500; @@ -72,7 +73,7 @@ impl ServiceManager { } } - pub async fn start(&mut self) -> Result> { + pub async fn start(&mut self) -> Result<()> { info!("Starting the {} service", self.service.name()); if ServiceStatus::Running == self.service.status() { // The last time we checked the service was running, but it doesn't mean it's actually @@ -89,7 +90,7 @@ impl ServiceManager { if self.verbosity != VerbosityLevel::Minimal { println!("The {} service is already running", self.service.name()); } - return Ok(None); + return Ok(()); } } @@ -107,7 +108,7 @@ impl ServiceManager { // // There might be many different `safenode` processes running, but since each service has // its own isolated binary, we use the binary path to uniquely identify it. - let start_duration = match self + match self .service_control .get_process_pid(&self.service.bin_path()) { @@ -117,13 +118,12 @@ impl ServiceManager { self.service.name(), pid ); - let start_duration = self.service.on_start(Some(pid), true).await?; + self.service.on_start(Some(pid), true).await?; info!( - "Service {} has been started successfully in {start_duration:?}", + "Service {} has been started successfully", self.service.name() ); - start_duration } Err(sn_service_management::error::Error::ServiceProcessNotFound(_)) => { error!("The '{}' service has failed to start because ServiceProcessNotFound when fetching PID", self.service.name()); @@ -156,7 +156,7 @@ impl ServiceManager { self.service.log_dir_path().to_string_lossy() ); } - Ok(start_duration) + Ok(()) } pub async fn stop(&mut self) -> Result<()> { @@ -313,10 +313,7 @@ impl ServiceManager { Ok(()) } - pub async fn upgrade( - &mut self, - options: UpgradeOptions, - ) -> Result<(UpgradeResult, Option)> { + pub async fn upgrade(&mut self, options: UpgradeOptions) -> Result { let current_version = Version::parse(&self.service.version())?; if !options.force && (current_version == options.target_version @@ -326,7 +323,7 @@ impl ServiceManager { "The service {} is already at the latest version. 
No upgrade is required.", self.service.name() ); - return Ok((UpgradeResult::NotRequired, None)); + return Ok(UpgradeResult::NotRequired); } debug!("Stopping the service and copying the binary"); @@ -341,44 +338,33 @@ impl ServiceManager { self.service.is_user_mode(), )?; - let start_duration = if options.start_service { + if options.start_service { match self.start().await { Ok(start_duration) => start_duration, Err(err) => { self.service .set_version(&options.target_version.to_string()); info!("The service has been upgraded but could not be started: {err}"); - return Ok(( - UpgradeResult::UpgradedButNotStarted( - current_version.to_string(), - options.target_version.to_string(), - err.to_string(), - ), - None, + return Ok(UpgradeResult::UpgradedButNotStarted( + current_version.to_string(), + options.target_version.to_string(), + err.to_string(), )); } } - } else { - None - }; + } self.service .set_version(&options.target_version.to_string()); if options.force { - Ok(( - UpgradeResult::Forced( - current_version.to_string(), - options.target_version.to_string(), - ), - start_duration, + Ok(UpgradeResult::Forced( + current_version.to_string(), + options.target_version.to_string(), )) } else { - Ok(( - UpgradeResult::Upgraded( - current_version.to_string(), - options.target_version.to_string(), - ), - start_duration, + Ok(UpgradeResult::Upgraded( + current_version.to_string(), + options.target_version.to_string(), )) } } @@ -636,41 +622,6 @@ fn format_status_without_colour(status: &ServiceStatus) -> String { } } -/// Moving average of intervals. -/// This is used to determine the average sleep between starting node services. -struct DynamicInterval { - last_reported_intervals_ms: VecDeque, - window_size: u8, -} - -impl DynamicInterval { - /// Create a new `DynamicInterval` with a starting average interval. - pub fn new(starting_average_ms: u64) -> Self { - let mut dyn_interval = DynamicInterval { - last_reported_intervals_ms: VecDeque::new(), - window_size: 3, - }; - - for _ in 0..dyn_interval.window_size { - dyn_interval.add_interval_ms(starting_average_ms); - } - - dyn_interval - } - - pub fn add_interval_ms(&mut self, interval_ms: u64) { - self.last_reported_intervals_ms.push_back(interval_ms); - if self.last_reported_intervals_ms.len() > self.window_size as usize { - self.last_reported_intervals_ms.pop_front(); - } - } - - pub fn get_interval_ms(&self) -> u64 { - self.last_reported_intervals_ms.iter().sum::() - / self.last_reported_intervals_ms.len() as u64 - } -} - #[cfg(test)] mod tests { use super::*; @@ -695,6 +646,7 @@ mod tests { net::{IpAddr, Ipv4Addr, SocketAddr}, path::{Path, PathBuf}, str::FromStr, + time::Duration, }; mock! 
{ @@ -707,7 +659,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> ServiceControlResult<()>; async fn node_stop(&self, delay_millis: u64) -> ServiceControlResult<()>; async fn node_update(&self, delay_millis: u64) -> ServiceControlResult<()>; - async fn is_node_connected_to_network(&self, timeout: std::time::Duration) -> ServiceControlResult; + async fn is_node_connected_to_network(&self, timeout: std::time::Duration) -> ServiceControlResult<()>; async fn update_log_level(&self, log_levels: String) -> ServiceControlResult<()>; } } @@ -748,10 +700,7 @@ mod tests { ))) .times(1) .returning(|_| Ok(1000)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -802,6 +751,7 @@ mod tests { version: "0.98.1".to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -853,10 +803,7 @@ mod tests { ))) .times(1) .returning(|_| Ok(1000)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -907,6 +854,7 @@ mod tests { version: "0.98.1".to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -972,6 +920,7 @@ mod tests { version: "0.98.1".to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -1029,10 +978,7 @@ mod tests { ))) .times(1) .returning(|_| Ok(1000)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -1083,6 +1029,7 @@ mod tests { version: "0.98.1".to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -1199,10 +1146,7 @@ mod tests { ))) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 1000, @@ -1251,6 +1195,97 @@ mod tests { version: "0.98.1".to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager.start().await?; + + Ok(()) + } + + #[tokio::test] + async fn start_should_use_dynamic_startup_delay_if_set() -> Result<()> { + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + .with(eq(PathBuf::from( + "/var/safenode-manager/services/safenode1/safenode", + ))) + .times(1) + .returning(|_| Ok(1000)); + 
mock_rpc_client + .expect_is_node_connected_to_network() + .times(1) + .returning(|_| Ok(())); + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 1000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: "0.98.1".to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + .returning(|| { + Ok(NetworkInfo { + connected_peers: vec![PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?], + listeners: Vec::new(), + }) + }); + + let mut service_data = NodeServiceData { + auto_restart: false, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + metrics_port: None, + node_port: None, + number: 1, + owner: None, + peer_id: None, + pid: None, + reward_balance: Some(NanoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), + service_name: "safenode1".to_string(), + status: ServiceStatus::Added, + upnp: false, + user: Some("safe".to_string()), + user_mode: false, + version: "0.98.1".to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)) + .with_connection_timeout(Duration::from_secs( + DEFAULT_NODE_STARTUP_CONNECTION_TIMEOUT_S, + )); let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -1587,10 +1622,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(2000)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -1647,7 +1679,7 @@ mod tests { VerbosityLevel::Normal, ); - let (upgrade_result, _) = service_manager + let upgrade_result = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -1728,13 +1760,14 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), VerbosityLevel::Normal, ); - let (upgrade_result, _) = service_manager + let upgrade_result = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -1808,10 +1841,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(2000)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -1862,13 +1892,14 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), VerbosityLevel::Normal, ); - let (upgrade_result, _) = service_manager + let upgrade_result = service_manager .upgrade(UpgradeOptions { 
auto_restart: false, bootstrap_peers: Vec::new(), @@ -2005,13 +2036,14 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), VerbosityLevel::Normal, ); - let (upgrade_result, _) = service_manager + let upgrade_result = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -2149,7 +2181,7 @@ mod tests { VerbosityLevel::Normal, ); - let (upgrade_result, _) = service_manager + let upgrade_result = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -2232,10 +2264,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(2000)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2286,13 +2315,14 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), VerbosityLevel::Normal, ); - let (upgrade_result, _) = service_manager + let upgrade_result = service_manager .upgrade(UpgradeOptions { auto_restart: false, bootstrap_peers: Vec::new(), @@ -2404,10 +2434,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2458,6 +2485,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -2559,10 +2587,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2613,6 +2638,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -2717,10 +2743,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2771,6 +2794,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -2872,10 +2896,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -2926,6 +2947,7 @@ mod tests { version: current_version.to_string(), }; 
let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -3027,10 +3049,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3081,6 +3100,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -3185,10 +3205,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3239,6 +3256,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -3343,10 +3361,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3397,6 +3412,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -3501,10 +3517,7 @@ mod tests { .with(eq(current_node_bin.to_path_buf().clone())) .times(1) .returning(|_| Ok(100)); - mock_rpc_client - .expect_is_node_connected_to_network() - .times(1) - .returning(|_| Ok(Duration::from_secs(0))); + mock_rpc_client.expect_node_info().times(1).returning(|| { Ok(NodeInfo { pid: 2000, @@ -3555,6 +3568,7 @@ mod tests { version: current_version.to_string(), }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + let mut service_manager = ServiceManager::new( service, Box::new(mock_service_control), @@ -3578,6 +3592,162 @@ mod tests { Ok(()) } + #[tokio::test] + async fn upgrade_should_use_dynamic_startup_delay_if_set() -> Result<()> { + let current_version = "0.1.0"; + let target_version = "0.2.0"; + + let tmp_data_dir = assert_fs::TempDir::new()?; + let current_install_dir = tmp_data_dir.child("safenode_install"); + current_install_dir.create_dir_all()?; + + let current_node_bin = current_install_dir.child("safenode"); + current_node_bin.write_binary(b"fake safenode binary")?; + let target_node_bin = tmp_data_dir.child("safenode"); + target_node_bin.write_binary(b"fake safenode binary")?; + + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + // before binary upgrade + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(1000)); + mock_service_control + .expect_stop() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + + // after binary upgrade + mock_service_control + .expect_uninstall() + .with(eq("safenode1"), eq(false)) + 
.times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_install() + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from("/var/safenode-manager/services/safenode1"), + OsString::from("--log-output-dest"), + OsString::from("/var/log/safenode/safenode1"), + OsString::from("--upnp"), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: current_node_bin.to_path_buf(), + username: Some("safe".to_string()), + working_directory: None, + }), + eq(false), + ) + .times(1) + .returning(|_, _| Ok(())); + + // after service restart + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(100)); + mock_rpc_client + .expect_is_node_connected_to_network() + .times(1) + .returning(|_| Ok(())); + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 2000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: target_version.to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + .returning(|| { + Ok(NetworkInfo { + connected_peers: Vec::new(), + listeners: Vec::new(), + }) + }); + + let mut service_data = NodeServiceData { + auto_restart: false, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + metrics_port: None, + node_port: None, + number: 1, + owner: None, + peer_id: Some(PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?), + pid: Some(1000), + reward_balance: Some(NanoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: current_node_bin.to_path_buf(), + service_name: "safenode1".to_string(), + status: ServiceStatus::Running, + upnp: true, + user: Some("safe".to_string()), + user_mode: false, + version: current_version.to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)) + .with_connection_timeout(Duration::from_secs( + DEFAULT_NODE_STARTUP_CONNECTION_TIMEOUT_S, + )); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager + .upgrade(UpgradeOptions { + auto_restart: false, + bootstrap_peers: Vec::new(), + env_variables: None, + force: false, + start_service: true, + target_bin_path: target_node_bin.to_path_buf(), + target_version: Version::parse(target_version).unwrap(), + }) + .await?; + + Ok(()) + } + #[tokio::test] async fn remove_should_remove_an_added_node() -> Result<()> { let temp_dir = assert_fs::TempDir::new()?; diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index d071f70dd5..aa468a5179 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ 
-540,7 +540,7 @@ mod tests { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> RpcResult<()>; async fn node_stop(&self, delay_millis: u64) -> RpcResult<()>; async fn node_update(&self, delay_millis: u64) -> RpcResult<()>; - async fn is_node_connected_to_network(&self, timeout: std::time::Duration) -> RpcResult; + async fn is_node_connected_to_network(&self, timeout: std::time::Duration) -> RpcResult<()>; async fn update_log_level(&self, log_levels: String) -> RpcResult<()>; } } diff --git a/sn_service_management/src/auditor.rs b/sn_service_management/src/auditor.rs index eb3da2b5e2..66f00a0eb5 100644 --- a/sn_service_management/src/auditor.rs +++ b/sn_service_management/src/auditor.rs @@ -12,7 +12,7 @@ use crate::{ use async_trait::async_trait; use serde::{Deserialize, Serialize}; use service_manager::ServiceInstallCtx; -use std::{ffi::OsString, path::PathBuf, time::Duration}; +use std::{ffi::OsString, path::PathBuf}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct AuditorServiceData { @@ -108,14 +108,10 @@ impl<'a> ServiceStateActions for AuditorService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start( - &mut self, - pid: Option, - _full_refresh: bool, - ) -> Result> { + async fn on_start(&mut self, pid: Option, _full_refresh: bool) -> Result<()> { self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(None) + Ok(()) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/daemon.rs b/sn_service_management/src/daemon.rs index a7d6e02184..c617515fe5 100644 --- a/sn_service_management/src/daemon.rs +++ b/sn_service_management/src/daemon.rs @@ -14,7 +14,7 @@ use crate::{ use async_trait::async_trait; use serde::{Deserialize, Serialize}; use service_manager::ServiceInstallCtx; -use std::{ffi::OsString, net::SocketAddr, path::PathBuf, time::Duration}; +use std::{ffi::OsString, net::SocketAddr, path::PathBuf}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct DaemonServiceData { @@ -101,14 +101,10 @@ impl<'a> ServiceStateActions for DaemonService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start( - &mut self, - pid: Option, - _full_refresh: bool, - ) -> Result> { + async fn on_start(&mut self, pid: Option, _full_refresh: bool) -> Result<()> { self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(None) + Ok(()) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/faucet.rs b/sn_service_management/src/faucet.rs index 3ebf26e16f..f1c3d8f952 100644 --- a/sn_service_management/src/faucet.rs +++ b/sn_service_management/src/faucet.rs @@ -12,7 +12,7 @@ use crate::{ use async_trait::async_trait; use serde::{Deserialize, Serialize}; use service_manager::ServiceInstallCtx; -use std::{ffi::OsString, path::PathBuf, time::Duration}; +use std::{ffi::OsString, path::PathBuf}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct FaucetServiceData { @@ -105,14 +105,10 @@ impl<'a> ServiceStateActions for FaucetService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start( - &mut self, - pid: Option, - _full_refresh: bool, - ) -> Result> { + async fn on_start(&mut self, pid: Option, _full_refresh: bool) -> Result<()> { self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(None) + Ok(()) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/lib.rs b/sn_service_management/src/lib.rs index 
dd782da842..db32f81c34 100644 --- a/sn_service_management/src/lib.rs +++ b/sn_service_management/src/lib.rs @@ -30,7 +30,6 @@ use service_manager::ServiceInstallCtx; use std::{ io::{Read, Write}, path::{Path, PathBuf}, - time::Duration, }; pub use daemon::{DaemonService, DaemonServiceData}; @@ -87,8 +86,7 @@ pub trait ServiceStateActions { fn name(&self) -> String; fn pid(&self) -> Option; fn on_remove(&mut self); - /// Optionally returns the duration it took to start the service - async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result>; + async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result<()>; async fn on_stop(&mut self) -> Result<()>; fn set_version(&mut self, version: &str); fn status(&self) -> ServiceStatus; diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index d4393156e5..dcf18ee059 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -16,13 +16,11 @@ use sn_protocol::get_port_from_multiaddr; use sn_transfers::NanoTokens; use std::{ffi::OsString, net::SocketAddr, path::PathBuf, str::FromStr, time::Duration}; -const DEFAULT_RPC_CONNECTION_TIMEOUT: Duration = Duration::from_secs(120); - pub struct NodeService<'a> { pub service_data: &'a mut NodeServiceData, pub rpc_actions: Box, - /// Timeout for connecting to the node via RPC - pub connection_timeout: Duration, + /// Used to enable dynamic startup delay based on the time it takes for a node to connect to the network. + pub connection_timeout: Option, } impl<'a> NodeService<'a> { @@ -33,12 +31,14 @@ impl<'a> NodeService<'a> { NodeService { rpc_actions, service_data, - connection_timeout: DEFAULT_RPC_CONNECTION_TIMEOUT, + connection_timeout: None, } } + /// Set the max time to wait for the node to connect to the network. + /// If not set, we do not perform a dynamic startup delay. 
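+    ///
+    /// Illustrative usage (a sketch; variable names are assumed):
+    /// ```ignore
+    /// let service = NodeService::new(&mut service_data, Box::new(rpc_client))
+    ///     .with_connection_timeout(Duration::from_secs(120));
+    /// ```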
pub fn with_connection_timeout(mut self, connection_timeout: Duration) -> NodeService<'a> { - self.connection_timeout = connection_timeout; + self.connection_timeout = Some(connection_timeout); self } } @@ -143,16 +143,22 @@ impl<'a> ServiceStateActions for NodeService<'a> { self.service_data.status = ServiceStatus::Removed; } - async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result> { - let (start_duration, connected_peers, pid, peer_id) = if full_refresh { + async fn on_start(&mut self, pid: Option, full_refresh: bool) -> Result<()> { + let (connected_peers, pid, peer_id) = if full_refresh { debug!( "Performing full refresh for {}", self.service_data.service_name ); - let connection_duration = self - .rpc_actions - .is_node_connected_to_network(self.connection_timeout) - .await?; + if let Some(connection_timeout) = self.connection_timeout { + debug!( + "Performing dynamic startup delay for {}", + self.service_data.service_name + ); + self.rpc_actions + .is_node_connected_to_network(connection_timeout) + .await?; + } + let node_info = self .rpc_actions .node_info() @@ -189,7 +195,6 @@ impl<'a> ServiceStateActions for NodeService<'a> { } ( - Some(connection_duration), Some(network_info.connected_peers), pid, Some(node_info.peer_id), @@ -201,7 +206,6 @@ impl<'a> ServiceStateActions for NodeService<'a> { ); debug!("Previously assigned data will be used"); ( - None, self.service_data.connected_peers.clone(), pid, self.service_data.peer_id, @@ -212,7 +216,7 @@ impl<'a> ServiceStateActions for NodeService<'a> { self.service_data.peer_id = peer_id; self.service_data.pid = pid; self.service_data.status = ServiceStatus::Running; - Ok(start_duration) + Ok(()) } async fn on_stop(&mut self) -> Result<()> { diff --git a/sn_service_management/src/rpc.rs b/sn_service_management/src/rpc.rs index 44fe11d136..1aae055dc9 100644 --- a/sn_service_management/src/rpc.rs +++ b/sn_service_management/src/rpc.rs @@ -51,9 +51,7 @@ pub trait RpcActions: Sync { async fn node_restart(&self, delay_millis: u64, retain_peer_id: bool) -> Result<()>; async fn node_stop(&self, delay_millis: u64) -> Result<()>; async fn node_update(&self, delay_millis: u64) -> Result<()>; - /// Returns Ok(duration) if the node has been successfully connected to the network and the duration - /// it took to connect to the node via RPC. - async fn is_node_connected_to_network(&self, timeout: Duration) -> Result; + async fn is_node_connected_to_network(&self, timeout: Duration) -> Result<()>; async fn update_log_level(&self, log_levels: String) -> Result<()>; } @@ -228,7 +226,7 @@ impl RpcActions for RpcClient { Ok(()) } - async fn is_node_connected_to_network(&self, timeout: Duration) -> Result { + async fn is_node_connected_to_network(&self, timeout: Duration) -> Result<()> { let max_attempts = std::cmp::max(1, timeout.as_secs() / self.retry_delay.as_secs()); trace!( "RPC conneciton max attempts set to: {max_attempts} with retry_delay of {:?}", @@ -247,7 +245,7 @@ impl RpcActions for RpcClient { .await { if response.get_ref().connected_peers.len() > CLOSE_GROUP_SIZE { - return Ok(Duration::from_secs(attempts * self.retry_delay.as_secs())); + return Ok(()); } else { error!( "Node does not have enough peers connected yet. 
Retrying {attempts}/{max_attempts}", From 807e2ca2456efa06ea403f0c85c245577441d9dc Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 7 Aug 2024 18:07:21 +0530 Subject: [PATCH 063/115] fix(network): add peer to blocklist only after informing it --- sn_networking/src/cmd.rs | 51 ++++++++++++++++++++++++++++++++++--- sn_networking/src/driver.rs | 2 ++ sn_node/src/node.rs | 19 ++------------ 3 files changed, 51 insertions(+), 21 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index ce926d20bc..47742d689c 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -109,6 +109,10 @@ pub enum LocalSwarmCmd { key: RecordKey, record_type: RecordType, }, + /// Add a peer to the blocklist + AddPeerToBlockList { + peer_id: PeerId, + }, /// Notify whether peer is in trouble RecordNodeIssue { peer_id: PeerId, @@ -247,7 +251,9 @@ impl Debug for LocalSwarmCmd { PrettyPrintRecordKey::from(key) ) } - + LocalSwarmCmd::AddPeerToBlockList { peer_id } => { + write!(f, "LocalSwarmCmd::AddPeerToBlockList {peer_id:?}") + } LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { write!( f, @@ -763,7 +769,10 @@ impl SwarmDriver { .send(current_state) .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } - + LocalSwarmCmd::AddPeerToBlockList { peer_id } => { + cmd_string = "AddPeerToBlockList"; + self.swarm.behaviour_mut().blocklist.block_peer(peer_id); + } LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { cmd_string = "RecordNodeIssues"; self.record_node_issue(peer_id, issue); @@ -862,18 +871,52 @@ impl SwarmDriver { } if *is_bad { - warn!("Cleaning out bad_peer {peer_id:?} and adding it to the blocklist"); + warn!("Cleaning out bad_peer {peer_id:?}. Will be added to the blocklist after informing that peer."); if let Some(dead_peer) = self.swarm.behaviour_mut().kademlia.remove_peer(&peer_id) { self.update_on_peer_removal(*dead_peer.node.key.preimage()); } - self.swarm.behaviour_mut().blocklist.block_peer(peer_id); if is_new_bad { + // bubble up the event to be handled if needed. self.send_event(NetworkEvent::PeerConsideredAsBad { detected_by: self.self_peer_id, bad_peer: peer_id, + bad_behaviour: bad_behaviour.clone(), + }); + + // inform the bad node about it and add to the blocklist after that. 
+ + // response handling + let (tx, rx) = oneshot::channel(); + let local_swarm_cmd_sender = self.local_cmd_sender.clone(); + tokio::spawn(async move { + match rx.await { + Ok(result) => { + debug!("Got response for Cmd::PeerConsideredAsBad from {peer_id:?} {result:?}"); + if let Err(err) = local_swarm_cmd_sender + .send(LocalSwarmCmd::AddPeerToBlockList { peer_id }) + .await + { + error!("SwarmDriver failed to send LocalSwarmCmd: {err}"); + } + } + Err(err) => { + error!("Failed to get response from one shot channel for Cmd::PeerConsideredAsBad : {err:?}"); + } + } + }); + + // request + let request = Request::Cmd(Cmd::PeerConsideredAsBad { + detected_by: NetworkAddress::from_peer(self.self_peer_id), + bad_peer: NetworkAddress::from_peer(peer_id), bad_behaviour, }); + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest { + req: request, + peer: peer_id, + sender: Some(tx), + }); } } } diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 9ad9deefbd..4d97a5b252 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -612,6 +612,7 @@ impl NetworkBuilder { // and not block the processing thread unintentionally network_cmd_sender: network_swarm_cmd_sender.clone(), network_cmd_receiver: network_swarm_cmd_receiver, + local_cmd_sender: local_swarm_cmd_sender.clone(), local_cmd_receiver: local_swarm_cmd_receiver, event_sender: network_event_sender, pending_get_closest_peers: Default::default(), @@ -661,6 +662,7 @@ pub struct SwarmDriver { pub(crate) network_metrics: Option, network_cmd_sender: mpsc::Sender, + pub(crate) local_cmd_sender: mpsc::Sender, local_cmd_receiver: mpsc::Receiver, network_cmd_receiver: mpsc::Receiver, event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index bff7b5164c..90e6bab514 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -28,7 +28,7 @@ use sn_networking::{ }; use sn_protocol::{ error::Error as ProtocolError, - messages::{ChunkProof, Cmd, CmdResponse, Query, QueryResponse, Request, Response}, + messages::{ChunkProof, CmdResponse, Query, QueryResponse, Request, Response}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use sn_transfers::{HotWallet, MainPubkey, MainSecretKey, NanoTokens, PAYMENT_FORWARD_PK}; @@ -463,24 +463,9 @@ impl Node { NetworkEvent::PeerWithUnsupportedProtocol { .. } => { event_header = "PeerWithUnsupportedProtocol"; } - NetworkEvent::PeerConsideredAsBad { - detected_by, - bad_peer, - bad_behaviour, - } => { + NetworkEvent::PeerConsideredAsBad { bad_peer, .. 
} => { event_header = "PeerConsideredAsBad"; self.record_metrics(Marker::PeerConsideredAsBad(&bad_peer)); - - let request = Request::Cmd(Cmd::PeerConsideredAsBad { - detected_by: NetworkAddress::from_peer(detected_by), - bad_peer: NetworkAddress::from_peer(bad_peer), - bad_behaviour, - }); - - let network = self.network().clone(); - let _handle = spawn(async move { - network.send_req_ignore_reply(request, bad_peer); - }); } NetworkEvent::FlaggedAsBadNode { flagged_by } => { event_header = "FlaggedAsBadNode"; From cf8e671fa0f74ecd1a7a58528e85a6f1928c5316 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 7 Aug 2024 21:29:16 +0800 Subject: [PATCH 064/115] fix(node): avoid deserialise error cause a confirmed_spend to be non-exist --- sn_node/src/put_validation.rs | 18 +----------------- sn_transfers/src/wallet/hot_wallet.rs | 11 ++++++++--- sn_transfers/src/wallet/wallet_file.rs | 12 ++++++++++++ 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 5948d69c6b..ef2adccf3a 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -551,23 +551,7 @@ impl Node { } let spend_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - match wallet.get_confirmed_spend(spend_addr) { - Ok(None) => true, - Ok(Some(_spend)) => { - warn!( - "Burnt spend {} detected for record payment {pretty_key}", - cash_note.unique_pubkey() - ); - false - } - Err(err) => { - error!( - "When checking confirmed_spend {}, enountered error {err:?}", - cash_note.unique_pubkey() - ); - true - } - } + !wallet.has_confirmed_spend(spend_addr) }); if cash_notes.is_empty() { info!("All incoming cash notes were already received, no need to further process"); diff --git a/sn_transfers/src/wallet/hot_wallet.rs b/sn_transfers/src/wallet/hot_wallet.rs index 9438dee146..af1d5b1e6a 100644 --- a/sn_transfers/src/wallet/hot_wallet.rs +++ b/sn_transfers/src/wallet/hot_wallet.rs @@ -11,9 +11,9 @@ use super::{ data_payments::{PaymentDetails, PaymentQuote}, keys::{get_main_key_from_disk, store_new_keypair}, wallet_file::{ - get_confirmed_spend, get_unconfirmed_spend_requests, load_created_cash_note, - remove_cash_notes, remove_unconfirmed_spend_requests, store_created_cash_notes, - store_unconfirmed_spend_requests, + get_confirmed_spend, get_unconfirmed_spend_requests, has_confirmed_spend, + load_created_cash_note, remove_cash_notes, remove_unconfirmed_spend_requests, + store_created_cash_notes, store_unconfirmed_spend_requests, }, watch_only::WatchOnlyWallet, Error, Result, @@ -181,6 +181,11 @@ impl HotWallet { get_confirmed_spend(self.watchonly_wallet.wallet_dir(), spend_addr) } + /// Check whether have the specific confirmed spend. + pub fn has_confirmed_spend(&mut self, spend_addr: SpendAddress) -> bool { + has_confirmed_spend(self.watchonly_wallet.wallet_dir(), spend_addr) + } + /// Remove unconfirmed_spend_requests from disk. fn remove_unconfirmed_spend_requests(&mut self) -> Result<()> { remove_unconfirmed_spend_requests( diff --git a/sn_transfers/src/wallet/wallet_file.rs b/sn_transfers/src/wallet/wallet_file.rs index e0d71a02ff..d09109821c 100644 --- a/sn_transfers/src/wallet/wallet_file.rs +++ b/sn_transfers/src/wallet/wallet_file.rs @@ -98,6 +98,18 @@ pub(super) fn get_confirmed_spend( Ok(Some(confirmed_spend)) } +/// Returns whether a spend is put as `confirmed`. +/// +/// Note: due to the disk operations' async behaviour. +/// reading a `exist` spend file, could end with a deserialization error. 
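+/// Checking only for the file's existence avoids that, at the cost of not
+/// validating the file's contents.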
+pub(super) fn has_confirmed_spend(wallet_dir: &Path, spend_addr: SpendAddress) -> bool { + let spends_dir = wallet_dir.join(CONFIRMED_SPENDS_DIR_NAME); + let spend_hex_name = spend_addr.to_hex(); + let spend_file_path = spends_dir.join(spend_hex_name); + debug!("Try to getting a confirmed_spend instance from: {spend_file_path:?}"); + spend_file_path.exists() +} + /// Returns `Some(Vec)` or None if file doesn't exist. pub(super) fn get_unconfirmed_spend_requests( wallet_dir: &Path, From 7eb71d3a27744ef57847b059b96a549643b0a113 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 7 Aug 2024 20:25:46 +0530 Subject: [PATCH 065/115] fix(metrics): update current wallet balance after forwarding rewards --- sn_node/src/node.rs | 57 ++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index bff7b5164c..395e37e65a 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -385,13 +385,19 @@ impl Node { #[cfg(feature = "open-metrics")] let total_forwarded_rewards = self.node_metrics().map(|metrics|metrics.total_forwarded_rewards.clone()); + #[cfg(feature = "open-metrics")] + let current_reward_wallet_balance = self.node_metrics().map(|metrics|metrics.current_reward_wallet_balance.clone()); let _handle = spawn(async move { #[cfg(feature = "open-metrics")] - let _ = Self::try_forward_balance(network, forwarding_reason, total_forwarded_rewards); + if let Err(err) = Self::try_forward_balance(network, forwarding_reason, total_forwarded_rewards,current_reward_wallet_balance) { + error!("Error while trying to forward balance: {err:?}"); + } #[cfg(not(feature = "open-metrics"))] - let _ = Self::try_forward_balance(network, forwarding_reason); + if let Err(err) = Self::try_forward_balance(network, forwarding_reason) { + error!("Error while trying to forward balance: {err:?}"); + } info!("Periodic balance forward took {:?}", start.elapsed()); }); } @@ -834,39 +840,13 @@ impl Node { } } - #[cfg(not(feature = "open-metrics"))] - fn try_forward_balance(network: Network, forward_reason: String) -> Result<()> { - if let Err(err) = Self::try_forward_balance_inner(network, forward_reason) { - error!("Error while trying to forward balance: {err:?}"); - return Err(err); - } - Ok(()) - } - - #[cfg(feature = "open-metrics")] + /// Forward received rewards to another address fn try_forward_balance( network: Network, forward_reason: String, - forwarded_balance_metric: Option, + #[cfg(feature = "open-metrics")] forwarded_balance_metric: Option, + #[cfg(feature = "open-metrics")] current_reward_wallet_balance: Option, ) -> Result<()> { - match Self::try_forward_balance_inner(network, forward_reason) { - Ok(cumulative_forwarded_amount) => { - if let Some(forwarded_balance_metric) = forwarded_balance_metric { - let _ = forwarded_balance_metric.set(cumulative_forwarded_amount as i64); - } - } - Err(err) => { - error!("Error while trying to forward balance: {err:?}"); - return Err(err); - } - }; - - Ok(()) - } - - /// Forward received rewards to another address - /// Returns the cumulative amount forwarded - fn try_forward_balance_inner(network: Network, forward_reason: String) -> Result { let mut spend_requests = vec![]; { // load wallet @@ -945,7 +925,20 @@ impl Node { debug!("Updating forwarded balance to {updated_balance}"); write_forwarded_balance_value(&balance_file_path, updated_balance)?; - Ok(updated_balance) + #[cfg(feature = "open-metrics")] + { + if let Some(forwarded_balance_metric) = forwarded_balance_metric { 
+ let _ = forwarded_balance_metric.set(updated_balance as i64); + } + + let wallet = HotWallet::load_from(network.root_dir_path())?; + let balance = wallet.balance(); + if let Some(current_reward_wallet_balance) = current_reward_wallet_balance { + let _ = current_reward_wallet_balance.set(balance.as_nano() as i64); + } + } + + Ok(()) } } From 5b800d05c38cdd75004d8ca6293c74b275fcbb4f Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 7 Aug 2024 19:37:17 +0530 Subject: [PATCH 066/115] refactor(network): remove some unused network events --- sn_networking/src/cmd.rs | 22 ++++------ sn_networking/src/driver.rs | 11 +++++ sn_networking/src/event/mod.rs | 21 --------- sn_networking/src/event/request_response.rs | 11 ++--- sn_networking/src/log_markers.rs | 5 +++ sn_networking/src/metrics/mod.rs | 48 ++++++++++++++++++--- sn_networking/src/record_store.rs | 6 --- sn_node/src/log_markers.rs | 6 --- sn_node/src/metrics.rs | 26 ----------- sn_node/src/node.rs | 8 ---- 10 files changed, 73 insertions(+), 91 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 47742d689c..0201760d45 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -10,6 +10,7 @@ use crate::{ driver::{PendingGetClosestType, SwarmDriver}, error::{NetworkError, Result}, event::TerminateNodeReason, + log_markers::Marker, multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE, REPLICATION_PEERS_COUNT, }; @@ -547,18 +548,19 @@ impl SwarmDriver { } LocalSwarmCmd::GetLocalStoreCost { key, sender } => { cmd_string = "GetLocalStoreCost"; - let cost = self + let (cost, quoting_metrics) = self .swarm .behaviour_mut() .kademlia .store_mut() .store_cost(&key); - #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - let _ = metrics.store_cost.set(cost.0.as_nano() as i64); - } - let _res = sender.send(cost); + self.record_metrics(Marker::StoreCost { + cost: cost.as_nano(), + quoting_metrics: "ing_metrics, + }); + + let _res = sender.send((cost, quoting_metrics)); } LocalSwarmCmd::PaymentReceived => { cmd_string = "PaymentReceived"; @@ -877,13 +879,7 @@ impl SwarmDriver { } if is_new_bad { - // bubble up the event to be handled if needed. - self.send_event(NetworkEvent::PeerConsideredAsBad { - detected_by: self.self_peer_id, - bad_peer: peer_id, - bad_behaviour: bad_behaviour.clone(), - }); - + self.record_metrics(Marker::PeerConsideredAsBad { bad_peer: &peer_id }); // inform the bad node about it and add to the blocklist after that. // response handling diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 4d97a5b252..a87397b042 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -16,6 +16,7 @@ use crate::{ cmd::{LocalSwarmCmd, NetworkSwarmCmd}, error::{NetworkError, Result}, event::{NetworkEvent, NodeEvent}, + log_markers::Marker, multiaddr_pop_p2p, network_discovery::NetworkDiscovery, record_store::{ClientRecordStore, NodeRecordStore, NodeRecordStoreConfig}, @@ -917,6 +918,16 @@ impl SwarmDriver { } } + /// Calls Marker::log() to insert the marker into the log files. + /// Also calls NodeMetrics::record() to record the metric if the `open-metrics` feature flag is enabled. + pub(crate) fn record_metrics(&self, marker: Marker) { + marker.log(); + #[cfg(feature = "open-metrics")] + if let Some(network_metrics) = self.network_metrics.as_ref() { + network_metrics.record_from_marker(marker) + } + } + /// Listen on the provided address. 
Also records it within RelayManager pub(crate) fn listen_on(&mut self, addr: Multiaddr) -> Result<()> { let id = self.swarm.listen_on(addr.clone())?; diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 2ac4d75a98..5e82742d6a 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -128,14 +128,6 @@ pub enum NetworkEvent { our_protocol: String, their_protocol: String, }, - /// A peer from our RT is considered as bad due to the included behaviour - PeerConsideredAsBad { - detected_by: PeerId, - bad_peer: PeerId, - bad_behaviour: String, - }, - /// We have been flagged as a bad node by a peer. - FlaggedAsBadNode { flagged_by: PeerId }, /// The records bearing these keys are to be fetched from the holder or the network KeysToFetchForReplication(Vec<(PeerId, RecordKey)>), /// Started listening on a new address @@ -187,19 +179,6 @@ impl Debug for NetworkEvent { } => { write!(f, "NetworkEvent::PeerWithUnsupportedProtocol({our_protocol:?}, {their_protocol:?})") } - NetworkEvent::PeerConsideredAsBad { - bad_peer, - bad_behaviour, - .. - } => { - write!( - f, - "NetworkEvent::PeerConsideredAsBad({bad_peer:?}, {bad_behaviour:?})" - ) - } - NetworkEvent::FlaggedAsBadNode { flagged_by } => { - write!(f, "NetworkEvent::FlaggedAsBadNode({flagged_by:?})") - } NetworkEvent::KeysToFetchForReplication(list) => { let keys_len = list.len(); write!(f, "NetworkEvent::KeysForReplication({keys_len:?})") diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 81beae7764..c197050db3 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, sort_peers_by_address, MsgResponder, NetworkError, NetworkEvent, - SwarmDriver, CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address, MsgResponder, NetworkError, + NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, }; use itertools::Itertools; use libp2p::request_response::{self, Message}; @@ -97,9 +97,10 @@ impl SwarmDriver { if bad_peer == self.self_peer_id { warn!("Peer {detected_by:?} consider us as BAD, due to {bad_behaviour:?}."); - self.send_event(NetworkEvent::FlaggedAsBadNode { - flagged_by: detected_by, - }) + self.record_metrics(Marker::FlaggedAsBadNode { + flagged_by: &detected_by, + }); + // TODO: shall we terminate self after received such notifications // from the majority close_group nodes around us? } else { diff --git a/sn_networking/src/log_markers.rs b/sn_networking/src/log_markers.rs index 2e6582e424..7ce21a3c1e 100644 --- a/sn_networking/src/log_markers.rs +++ b/sn_networking/src/log_markers.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use libp2p::PeerId; use sn_transfers::QuotingMetrics; // this gets us to_string easily enough use strum::Display; @@ -26,6 +27,10 @@ pub enum Marker<'a> { cost: u64, quoting_metrics: &'a QuotingMetrics, }, + /// The peer has been considered as bad + PeerConsideredAsBad { bad_peer: &'a PeerId }, + /// We have been flagged as a bad node by a peer. 
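+    /// Recorded so the metrics recorder can increment its shunned count.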
+ FlaggedAsBadNode { flagged_by: &'a PeerId }, } impl<'a> Marker<'a> { diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index ba8cdebad0..d69eb1df42 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -6,11 +6,14 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::target_arch::sleep; +use crate::{log_markers::Marker, target_arch::sleep}; use libp2p::metrics::{Metrics as Libp2pMetrics, Recorder}; #[cfg(feature = "upnp")] -use prometheus_client::metrics::{counter::Counter, family::Family}; -use prometheus_client::{metrics::gauge::Gauge, registry::Registry}; +use prometheus_client::metrics::family::Family; +use prometheus_client::{ + metrics::{counter::Counter, gauge::Gauge}, + registry::Registry, +}; use sysinfo::{Pid, ProcessRefreshKind, System}; use tokio::time::Duration; @@ -33,9 +36,11 @@ pub(crate) struct NetworkMetrics { pub(crate) open_connections: Gauge, pub(crate) peers_in_routing_table: Gauge, pub(crate) records_stored: Gauge, - pub(crate) store_cost: Gauge, + store_cost: Gauge, + bad_peers_count: Counter, + shunned_count: Counter, #[cfg(feature = "upnp")] - pub(crate) upnp_events: Family, + upnp_events: Family, // system info process_memory_used_mb: Gauge, @@ -86,6 +91,20 @@ impl NetworkMetrics { store_cost.clone(), ); + let shunned_count = Counter::default(); + sub_registry.register( + "shunned_count", + "Number of peers that have shunned our node", + shunned_count.clone(), + ); + + let bad_peers_count = Counter::default(); + sub_registry.register( + "bad_peers_count", + "Number of bad peers that have been detected by us and been added to the blocklist", + bad_peers_count.clone(), + ); + #[cfg(feature = "upnp")] let upnp_events = Family::default(); #[cfg(feature = "upnp")] @@ -117,6 +136,8 @@ impl NetworkMetrics { open_connections, peers_in_routing_table, store_cost, + bad_peers_count, + shunned_count, #[cfg(feature = "upnp")] upnp_events, process_memory_used_mb, @@ -155,10 +176,25 @@ impl NetworkMetrics { } }); } + + // Records the metric + pub(crate) fn record_from_marker(&self, log_marker: Marker) { + match log_marker { + Marker::PeerConsideredAsBad { .. } => { + let _ = self.bad_peers_count.inc(); + } + Marker::FlaggedAsBadNode { .. } => { + let _ = self.shunned_count.inc(); + } + Marker::StoreCost { cost, .. } => { + let _ = self.store_cost.set(cost as i64); + } + _ => {} + } + } } /// Impl the Recorder traits again for our struct. 
- impl Recorder for NetworkMetrics { fn record(&self, event: &libp2p::kad::Event) { self.libp2p_metrics.record(event) diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index a32c8c4b2a..412d8da649 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -639,12 +639,6 @@ impl NodeRecordStore { // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): info!("Cost is now {cost:?} for quoting_metrics {quoting_metrics:?}"); - Marker::StoreCost { - cost, - quoting_metrics: "ing_metrics, - } - .log(); - (NanoTokens::from(cost), quoting_metrics) } diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs index 76afe3040d..ab1aacf325 100644 --- a/sn_node/src/log_markers.rs +++ b/sn_node/src/log_markers.rs @@ -51,12 +51,6 @@ pub enum Marker<'a> { /// Valid spend stored ValidSpendPutFromClient(&'a PrettyPrintRecordKey<'a>), - /// The peer has been considered as bad - PeerConsideredAsBad(&'a PeerId), - - /// We have been flagged as a bad node by a peer. - FlaggedAsBadNode(&'a PeerId), - /// Record rejected RecordRejected(&'a PrettyPrintRecordKey<'a>, &'a Error), diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 0cf9b720ca..d7c3cbaa17 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -32,8 +32,6 @@ pub(crate) struct NodeMetrics { // routing table peer_added_to_routing_table: Counter, peer_removed_from_routing_table: Counter, - bad_peers_count: Counter, - shunned_count: Counter, // wallet pub(crate) current_reward_wallet_balance: Gauge, @@ -102,20 +100,6 @@ impl NodeMetrics { peer_removed_from_routing_table.clone(), ); - let shunned_count = Counter::default(); - sub_registry.register( - "shunned_count", - "Number of peers that have shunned our node", - shunned_count.clone(), - ); - - let bad_peers_count = Counter::default(); - sub_registry.register( - "bad_peers_count", - "Number of bad peers that have been detected by us and been added to the blocklist", - bad_peers_count.clone(), - ); - let current_reward_wallet_balance = Gauge::default(); sub_registry.register( "current_reward_wallet_balance", @@ -144,8 +128,6 @@ impl NodeMetrics { replication_keys_to_fetch, peer_added_to_routing_table, peer_removed_from_routing_table, - bad_peers_count, - shunned_count, current_reward_wallet_balance, total_forwarded_rewards, started_instant: Instant::now(), @@ -203,14 +185,6 @@ impl NodeMetrics { let _ = self.peer_removed_from_routing_table.inc(); } - Marker::PeerConsideredAsBad(_) => { - let _ = self.bad_peers_count.inc(); - } - - Marker::FlaggedAsBadNode(_) => { - let _ = self.shunned_count.inc(); - } - _ => {} } } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 90e6bab514..54a52c0dd9 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -463,14 +463,6 @@ impl Node { NetworkEvent::PeerWithUnsupportedProtocol { .. } => { event_header = "PeerWithUnsupportedProtocol"; } - NetworkEvent::PeerConsideredAsBad { bad_peer, .. 
} => { - event_header = "PeerConsideredAsBad"; - self.record_metrics(Marker::PeerConsideredAsBad(&bad_peer)); - } - NetworkEvent::FlaggedAsBadNode { flagged_by } => { - event_header = "FlaggedAsBadNode"; - self.record_metrics(Marker::FlaggedAsBadNode(&flagged_by)); - } NetworkEvent::NewListenAddr(_) => { event_header = "NewListenAddr"; if !cfg!(feature = "local-discovery") { From d527e0bdd0e4eeb80a1184f5f5572616119c7c46 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 7 Aug 2024 22:04:33 +0530 Subject: [PATCH 067/115] feat(network): expose bandwidth per transport protocol --- sn_networking/src/driver.rs | 63 +++++++++++++++------------- sn_networking/src/transport/other.rs | 9 +++- 2 files changed, 42 insertions(+), 30 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 9ad9deefbd..5d0ea036cd 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -429,9 +429,42 @@ impl NetworkBuilder { PrettyPrintKBucketKey(NetworkAddress::from_peer(peer_id).as_kbucket_key()) ); + #[cfg(feature = "open-metrics")] + let mut metrics_registry = self.metrics_registry.unwrap_or_default(); + + // ==== Transport ==== + #[cfg(feature = "open-metrics")] + let main_transport = transport::build_transport(&self.keypair, &mut metrics_registry); + #[cfg(not(feature = "open-metrics"))] + let main_transport = transport::build_transport(&self.keypair); + let transport = if !self.local { + debug!("Preventing non-global dials"); + // Wrap upper in a transport that prevents dialing local addresses. + libp2p::core::transport::global_only::Transport::new(main_transport).boxed() + } else { + main_transport + }; + + let (relay_transport, relay_behaviour) = + libp2p::relay::client::new(self.keypair.public().to_peer_id()); + let relay_transport = relay_transport + .upgrade(libp2p::core::upgrade::Version::V1Lazy) + .authenticate( + libp2p::noise::Config::new(&self.keypair) + .expect("Signing libp2p-noise static DH keypair failed."), + ) + .multiplex(libp2p::yamux::Config::default()) + .or_transport(transport); + + let transport = relay_transport + .map(|either_output, _| match either_output { + Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), + Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), + }) + .boxed(); + #[cfg(feature = "open-metrics")] let network_metrics = if let Some(port) = self.metrics_server_port { - let mut metrics_registry = self.metrics_registry.unwrap_or_default(); let metrics = NetworkMetrics::new(&mut metrics_registry); run_metrics_server(metrics_registry, port); Some(metrics) @@ -518,16 +551,6 @@ impl NetworkBuilder { libp2p::identify::Behaviour::new(cfg) }; - let main_transport = transport::build_transport(&self.keypair); - - let transport = if !self.local { - debug!("Preventing non-global dials"); - // Wrap upper in a transport that prevents dialing local addresses. 
- libp2p::core::transport::global_only::Transport::new(main_transport).boxed() - } else { - main_transport - }; - #[cfg(feature = "upnp")] let upnp = if !self.local && !is_client && upnp { debug!("Enabling UPnP port opening behavior"); @@ -537,24 +560,6 @@ impl NetworkBuilder { } .into(); // Into `Toggle` - let (relay_transport, relay_behaviour) = - libp2p::relay::client::new(self.keypair.public().to_peer_id()); - let relay_transport = relay_transport - .upgrade(libp2p::core::upgrade::Version::V1Lazy) - .authenticate( - libp2p::noise::Config::new(&self.keypair) - .expect("Signing libp2p-noise static DH keypair failed."), - ) - .multiplex(libp2p::yamux::Config::default()) - .or_transport(transport); - - let transport = relay_transport - .map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed(); - let relay_server = { let relay_server_cfg = relay::Config { max_reservations: 128, // Amount of peers we are relaying for diff --git a/sn_networking/src/transport/other.rs b/sn_networking/src/transport/other.rs index 215fad3e9c..78683ca15d 100644 --- a/sn_networking/src/transport/other.rs +++ b/sn_networking/src/transport/other.rs @@ -1,5 +1,7 @@ #[cfg(feature = "websockets")] use futures::future::Either; +#[cfg(feature = "open-metrics")] +use libp2p::metrics::Registry; #[cfg(feature = "websockets")] use libp2p::{core::upgrade, noise, yamux}; use libp2p::{ @@ -8,8 +10,13 @@ use libp2p::{ PeerId, Transport as _, }; -pub(crate) fn build_transport(keypair: &Keypair) -> transport::Boxed<(PeerId, StreamMuxerBox)> { +pub(crate) fn build_transport( + keypair: &Keypair, + #[cfg(feature = "open-metrics")] registry: &mut Registry, +) -> transport::Boxed<(PeerId, StreamMuxerBox)> { let trans = generate_quic_transport(keypair); + #[cfg(feature = "open-metrics")] + let trans = libp2p::metrics::BandwidthTransport::new(trans, registry); #[cfg(feature = "websockets")] // Using a closure here due to the complex return type From c4af66c4466d6f0a6eea9fc3a9c4f471c107b2c8 Mon Sep 17 00:00:00 2001 From: qima Date: Fri, 9 Aug 2024 16:15:43 +0800 Subject: [PATCH 068/115] test: verification involving child key --- sn_transfers/src/cashnotes/unique_keys.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/sn_transfers/src/cashnotes/unique_keys.rs b/sn_transfers/src/cashnotes/unique_keys.rs index b8e1f90d57..1bbfc38056 100644 --- a/sn_transfers/src/cashnotes/unique_keys.rs +++ b/sn_transfers/src/cashnotes/unique_keys.rs @@ -370,4 +370,23 @@ mod tests { Ok(()) } + + #[test] + fn verification_using_child_key() -> eyre::Result<()> { + let msg = "just a test string".as_bytes(); + let main_sk = MainSecretKey::random(); + let derived_sk = main_sk.random_derived_key(&mut rand::thread_rng()); + + // Signature signed by parent key can not be verified by the child key. + let signature = main_sk.sign(msg); + assert!(main_sk.main_pubkey().verify(&signature, msg)); + assert!(!derived_sk.unique_pubkey().verify(&signature, msg)); + + // Signature signed by child key can not be verified by the parent key. 
+ let signature = derived_sk.sign(msg); + assert!(derived_sk.unique_pubkey().verify(&signature, msg)); + assert!(!main_sk.main_pubkey().verify(&signature, msg)); + + Ok(()) + } } From 52feda4cd0ba31bdc2ee8c66b5d1ff7d462c3cd2 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 13 Aug 2024 18:20:10 +0530 Subject: [PATCH 069/115] feat(metrics): export metadata via the metrics port --- sn_client/src/api.rs | 8 --- sn_networking/src/driver.rs | 49 ++++++++++++++---- sn_networking/src/metrics/mod.rs | 13 ++--- sn_networking/src/metrics/upnp.rs | 2 +- sn_networking/src/metrics_service.rs | 77 +++++++++++++++++++++++----- sn_node/src/metrics.rs | 5 +- sn_node/src/node.rs | 38 +++++++++----- 7 files changed, 141 insertions(+), 51 deletions(-) diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index a58112f079..e38e22c4c3 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -17,8 +17,6 @@ use libp2p::{ kad::{Quorum, Record}, Multiaddr, PeerId, }; -#[cfg(feature = "open-metrics")] -use prometheus_client::registry::Registry; use rand::{thread_rng, Rng}; use sn_networking::{ get_signed_spend_from_record, multiaddr_is_global, @@ -106,14 +104,8 @@ impl Client { let root_dir = std::env::temp_dir(); trace!("Starting Kad swarm in client mode..{root_dir:?}."); - #[cfg(not(feature = "open-metrics"))] let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir); - #[cfg(feature = "open-metrics")] - let mut network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir); - #[cfg(feature = "open-metrics")] - network_builder.metrics_registry(Some(Registry::default())); - let (network, mut network_event_receiver, swarm_driver) = network_builder.build_client()?; info!("Client constructed network and swarm_driver"); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 3c544af5f6..b18b532074 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. #[cfg(feature = "open-metrics")] -use crate::metrics::NetworkMetrics; +use crate::metrics::NetworkMetricsRecorder; #[cfg(feature = "open-metrics")] use crate::metrics_service::run_metrics_server; use crate::{ @@ -49,7 +49,7 @@ use libp2p::{ Multiaddr, PeerId, }; #[cfg(feature = "open-metrics")] -use prometheus_client::registry::Registry; +use prometheus_client::{metrics::info::Info, registry::Registry}; use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, storage::RetryStrategy, @@ -216,9 +216,10 @@ pub struct NetworkBuilder { concurrency_limit: Option, initial_peers: Vec, #[cfg(feature = "open-metrics")] + metrics_metadata_registry: Option, + #[cfg(feature = "open-metrics")] metrics_registry: Option, #[cfg(feature = "open-metrics")] - /// Set to Some to enable the metrics server metrics_server_port: Option, #[cfg(feature = "upnp")] upnp: bool, @@ -236,6 +237,8 @@ impl NetworkBuilder { concurrency_limit: None, initial_peers: Default::default(), #[cfg(feature = "open-metrics")] + metrics_metadata_registry: None, + #[cfg(feature = "open-metrics")] metrics_registry: None, #[cfg(feature = "open-metrics")] metrics_server_port: None, @@ -264,12 +267,22 @@ impl NetworkBuilder { self.initial_peers = initial_peers; } + /// Set the Registry that will be served at the `/metadata` endpoint. This Registry should contain only the static + /// info about the peer. Configure the `metrics_server_port` to enable the metrics server. 
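+    ///
+    /// Illustrative example (a sketch; the surrounding builder and peer id are assumed):
+    /// ```ignore
+    /// let mut metadata = Registry::default();
+    /// metadata.sub_registry_with_prefix("sn_networking").register(
+    ///     "peer_id",
+    ///     "Identifier of a peer of the network",
+    ///     Info::new(vec![("peer_id".to_string(), peer_id.to_string())]),
+    /// );
+    /// network_builder.metrics_metadata_registry(metadata);
+    /// ```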
+ #[cfg(feature = "open-metrics")] + pub fn metrics_metadata_registry(&mut self, metrics_metadata_registry: Registry) { + self.metrics_metadata_registry = Some(metrics_metadata_registry); + } + + /// Set the Registry that will be served at the `/metrics` endpoint. + /// Configure the `metrics_server_port` to enable the metrics server. #[cfg(feature = "open-metrics")] - pub fn metrics_registry(&mut self, metrics_registry: Option) { - self.metrics_registry = metrics_registry; + pub fn metrics_registry(&mut self, metrics_registry: Registry) { + self.metrics_registry = Some(metrics_registry); } #[cfg(feature = "open-metrics")] + /// The metrics server is enabled only if the port is provided. pub fn metrics_server_port(&mut self, port: Option) { self.metrics_server_port = port; } @@ -466,9 +479,27 @@ impl NetworkBuilder { #[cfg(feature = "open-metrics")] let network_metrics = if let Some(port) = self.metrics_server_port { - let metrics = NetworkMetrics::new(&mut metrics_registry); - run_metrics_server(metrics_registry, port); - Some(metrics) + let network_metrics = NetworkMetricsRecorder::new(&mut metrics_registry); + let mut metadata_registry = self.metrics_metadata_registry.unwrap_or_default(); + let network_metadata_sub_registry = + metadata_registry.sub_registry_with_prefix("sn_networking"); + + network_metadata_sub_registry.register( + "peer_id", + "Identifier of a peer of the network", + Info::new(vec![("peer_id".to_string(), peer_id.to_string())]), + ); + network_metadata_sub_registry.register( + "identify_protocol_str", + "The protocol version string that is used to connect to the correct network", + Info::new(vec![( + "identify_protocol_str".to_string(), + IDENTIFY_PROTOCOL_STR.to_string(), + )]), + ); + + run_metrics_server(metrics_registry, metadata_registry, port); + Some(network_metrics) } else { None }; @@ -665,7 +696,7 @@ pub struct SwarmDriver { /// The peers that are closer to our PeerId. Includes self. pub(crate) replication_fetcher: ReplicationFetcher, #[cfg(feature = "open-metrics")] - pub(crate) network_metrics: Option, + pub(crate) network_metrics: Option, network_cmd_sender: mpsc::Sender, pub(crate) local_cmd_sender: mpsc::Sender, diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index d69eb1df42..a76afeeb8d 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -24,7 +24,8 @@ mod upnp; const UPDATE_INTERVAL: Duration = Duration::from_secs(15); const TO_MB: u64 = 1_000_000; -pub(crate) struct NetworkMetrics { +/// The shared recorders that are used to record metrics. +pub(crate) struct NetworkMetricsRecorder { // Records libp2p related metrics // Must directly call self.libp2p_metrics.record(libp2p_event) with Recorder trait in scope. But since we have // re-implemented the trait for the wrapper struct, we can instead call self.record(libp2p_event) @@ -47,7 +48,7 @@ pub(crate) struct NetworkMetrics { process_cpu_usage_percentage: Gauge, } -impl NetworkMetrics { +impl NetworkMetricsRecorder { pub fn new(registry: &mut Registry) -> Self { let libp2p_metrics = Libp2pMetrics::new(registry); let sub_registry = registry.sub_registry_with_prefix("sn_networking"); @@ -195,25 +196,25 @@ impl NetworkMetrics { } /// Impl the Recorder traits again for our struct. 
-impl Recorder for NetworkMetrics { +impl Recorder for NetworkMetricsRecorder { fn record(&self, event: &libp2p::kad::Event) { self.libp2p_metrics.record(event) } } -impl Recorder for NetworkMetrics { +impl Recorder for NetworkMetricsRecorder { fn record(&self, event: &libp2p::relay::Event) { self.libp2p_metrics.record(event) } } -impl Recorder for NetworkMetrics { +impl Recorder for NetworkMetricsRecorder { fn record(&self, event: &libp2p::identify::Event) { self.libp2p_metrics.record(event) } } -impl Recorder> for NetworkMetrics { +impl Recorder> for NetworkMetricsRecorder { fn record(&self, event: &libp2p::swarm::SwarmEvent) { self.libp2p_metrics.record(event); } diff --git a/sn_networking/src/metrics/upnp.rs b/sn_networking/src/metrics/upnp.rs index e49d44c885..9dd3b923b7 100644 --- a/sn_networking/src/metrics/upnp.rs +++ b/sn_networking/src/metrics/upnp.rs @@ -24,7 +24,7 @@ impl From<&libp2p::upnp::Event> for EventType { } } -impl super::Recorder for super::NetworkMetrics { +impl super::Recorder for super::NetworkMetricsRecorder { fn record(&self, event: &libp2p::upnp::Event) { self.upnp_events .get_or_create(&UpnpEventLabels { diff --git a/sn_networking/src/metrics_service.rs b/sn_networking/src/metrics_service.rs index 0b75fa9261..4d8e0a165f 100644 --- a/sn_networking/src/metrics_service.rs +++ b/sn_networking/src/metrics_service.rs @@ -18,13 +18,19 @@ use std::{ const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; -pub(crate) fn run_metrics_server(registry: Registry, port: u16) { +pub(crate) fn run_metrics_server( + metrics_registry: Registry, + metadata_registry: Registry, + port: u16, +) { // todo: containers don't work with localhost. let addr = ([127, 0, 0, 1], port).into(); tokio::spawn(async move { - let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); + let server = + Server::bind(&addr).serve(MakeMetricService::new(metrics_registry, metadata_registry)); info!("Metrics server on http://{}/metrics", server.local_addr()); + info!("Metadata server on http://{}/metadata", server.local_addr()); println!("Metrics server on http://{}/metrics", server.local_addr()); // run the server forever if let Err(e) = server.await { @@ -33,16 +39,22 @@ pub(crate) fn run_metrics_server(registry: Registry, port: u16) { }); } +type SharedRegistry = Arc>; + pub(crate) struct MetricService { - reg: Arc>, + metrics_registry: SharedRegistry, + metadata_registry: SharedRegistry, } -type SharedRegistry = Arc>; - impl MetricService { - fn get_reg(&mut self) -> SharedRegistry { - Arc::clone(&self.reg) + fn get_metrics_registry(&mut self) -> SharedRegistry { + Arc::clone(&self.metrics_registry) } + + fn get_metadata_registry(&mut self) -> SharedRegistry { + Arc::clone(&self.metadata_registry) + } + fn respond_with_metrics(&mut self) -> Result> { let mut response: Response = Response::default(); @@ -53,7 +65,30 @@ impl MetricService { .map_err(|_| NetworkError::NetworkMetricError)?, ); - let reg = self.get_reg(); + let reg = self.get_metrics_registry(); + let reg = reg.lock().map_err(|_| NetworkError::NetworkMetricError)?; + encode(&mut response.body_mut(), ®).map_err(|err| { + error!("Failed to encode the metrics Registry {err:?}"); + NetworkError::NetworkMetricError + })?; + + *response.status_mut() = StatusCode::OK; + + Ok(response) + } + + // send a json response of the metadata key, value + fn respond_with_metadata(&mut self) -> Result> { + let mut response: Response = Response::default(); + + response.headers_mut().insert( + 
hyper::header::CONTENT_TYPE, + METRICS_CONTENT_TYPE + .try_into() + .map_err(|_| NetworkError::NetworkMetricError)?, + ); + + let reg = self.get_metadata_registry(); let reg = reg.lock().map_err(|_| NetworkError::NetworkMetricError)?; encode(&mut response.body_mut(), ®).map_err(|err| { error!("Failed to encode the metrics Registry {err:?}"); @@ -98,6 +133,11 @@ impl Service> for MetricService { Ok(resp) => resp, Err(_) => self.respond_with_500_server_error(), } + } else if req_method == Method::GET && req_path == "/metadata" { + match self.respond_with_metadata() { + Ok(resp) => resp, + Err(_) => self.respond_with_500_server_error(), + } } else { self.respond_with_404_not_found() }; @@ -106,13 +146,18 @@ impl Service> for MetricService { } pub(crate) struct MakeMetricService { - reg: SharedRegistry, + metrics_registry: SharedRegistry, + metadata_registry: SharedRegistry, } impl MakeMetricService { - pub(crate) fn new(registry: Registry) -> MakeMetricService { + pub(crate) fn new( + metrics_registry: Registry, + metadata_registry: Registry, + ) -> MakeMetricService { MakeMetricService { - reg: Arc::new(Mutex::new(registry)), + metrics_registry: Arc::new(Mutex::new(metrics_registry)), + metadata_registry: Arc::new(Mutex::new(metadata_registry)), } } } @@ -127,8 +172,14 @@ impl Service for MakeMetricService { } fn call(&mut self, _: T) -> Self::Future { - let reg = Arc::clone(&self.reg); - let fut = async move { Ok(MetricService { reg }) }; + let metrics_registry = Arc::clone(&self.metrics_registry); + let metadata_registry = Arc::clone(&self.metadata_registry); + let fut = async move { + Ok(MetricService { + metrics_registry, + metadata_registry, + }) + }; Box::pin(fut) } } diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index d7c3cbaa17..4ba458448e 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -20,7 +20,8 @@ use prometheus_client::{ use sn_networking::Instant; #[derive(Clone)] -pub(crate) struct NodeMetrics { +/// The shared recorders that are used to record metrics. 
+pub(crate) struct NodeMetricsRecorder { /// put record put_record_ok: Family, put_record_err: Counter, @@ -54,7 +55,7 @@ enum RecordType { Spend, } -impl NodeMetrics { +impl NodeMetricsRecorder { pub(crate) fn new(registry: &mut Registry) -> Self { let sub_registry = registry.sub_registry_with_prefix("sn_node"); diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index c99e476d69..ff52954716 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -13,12 +13,12 @@ use super::{ Marker, NodeEvent, }; #[cfg(feature = "open-metrics")] -use crate::metrics::NodeMetrics; +use crate::metrics::NodeMetricsRecorder; use crate::RunningNode; use bytes::Bytes; use libp2p::{identity::Keypair, Multiaddr, PeerId}; #[cfg(feature = "open-metrics")] -use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::{gauge::Gauge, info::Info}; #[cfg(feature = "open-metrics")] use prometheus_client::registry::Registry; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; @@ -155,21 +155,35 @@ impl NodeBuilder { // store in case it's a fresh wallet created if none was found wallet.deposit_and_store_to_disk(&vec![])?; + let mut network_builder = NetworkBuilder::new(self.keypair, self.local, self.root_dir); + #[cfg(feature = "open-metrics")] - let (metrics_registry, node_metrics) = if self.metrics_server_port.is_some() { + let node_metrics = if self.metrics_server_port.is_some() { + // metadata registry + let mut metadata_registry = Registry::default(); + let node_metadata_sub_registry = metadata_registry.sub_registry_with_prefix("sn_node"); + node_metadata_sub_registry.register( + "safenode_version", + "The version of the safe node", + Info::new(vec![( + "safenode_version".to_string(), + env!("CARGO_PKG_VERSION").to_string(), + )]), + ); + network_builder.metrics_metadata_registry(metadata_registry); + + // metrics registry let mut metrics_registry = Registry::default(); - let node_metrics = NodeMetrics::new(&mut metrics_registry); - (Some(metrics_registry), Some(node_metrics)) + let node_metrics = NodeMetricsRecorder::new(&mut metrics_registry); + network_builder.metrics_registry(metrics_registry); + + Some(node_metrics) } else { - (None, None) + None }; - let mut network_builder = NetworkBuilder::new(self.keypair, self.local, self.root_dir); - network_builder.listen_addr(self.addr); #[cfg(feature = "open-metrics")] - network_builder.metrics_registry(metrics_registry); - #[cfg(feature = "open-metrics")] network_builder.metrics_server_port(self.metrics_server_port); network_builder.initial_peers(self.initial_peers.clone()); network_builder.is_behind_home_network(self.is_behind_home_network); @@ -220,7 +234,7 @@ struct NodeInner { initial_peers: Vec, network: Network, #[cfg(feature = "open-metrics")] - node_metrics: Option, + node_metrics: Option, /// Node owner's discord username, in readable format /// If not set, there will be no payment forward to be undertaken owner: Option, @@ -245,7 +259,7 @@ impl Node { #[cfg(feature = "open-metrics")] /// Returns a reference to the NodeMetrics if the `open-metrics` feature flag is enabled - pub(crate) fn node_metrics(&self) -> Option<&NodeMetrics> { + pub(crate) fn node_metrics(&self) -> Option<&NodeMetricsRecorder> { self.inner.node_metrics.as_ref() } From 474d46691114ddfc47e98ec7293b79a4242b346d Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 15 Aug 2024 19:10:03 +0800 Subject: [PATCH 070/115] feat(node): prune unrelevant records if accumulated too many --- sn_networking/src/cmd.rs | 13 +++++++++ sn_networking/src/lib.rs | 4 +++ 
 sn_networking/src/record_store.rs     | 42 +++++++++++++++++++++++++++
 sn_networking/src/record_store_api.rs |  9 ++++++
 sn_node/src/node.rs                   | 14 +++++++++
 sn_node/src/replication.rs            |  5 ++++
 6 files changed, 87 insertions(+)

diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index 0201760d45..41e91a9e37 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -133,6 +133,8 @@ pub enum LocalSwarmCmd {
     /// Triggers interval repliation
     /// NOTE: This does result in outgoing messages, but is produced locally
     TriggerIntervalReplication,
+    /// Triggers unrelevant record cleanup
+    TriggerUnrelevantRecordCleanup,
 }
 
 /// Commands to send to the Swarm
@@ -281,6 +283,9 @@ impl Debug for LocalSwarmCmd {
             LocalSwarmCmd::TriggerIntervalReplication => {
                 write!(f, "LocalSwarmCmd::TriggerIntervalReplication")
             }
+            LocalSwarmCmd::TriggerUnrelevantRecordCleanup => {
+                write!(f, "LocalSwarmCmd::TriggerUnrelevantRecordCleanup")
+            }
         }
     }
 }
@@ -817,6 +822,14 @@ impl SwarmDriver {
                     self.send_event(NetworkEvent::KeysToFetchForReplication(new_keys_to_fetch));
                 }
             }
+            LocalSwarmCmd::TriggerUnrelevantRecordCleanup => {
+                cmd_string = "TriggerUnrelevantRecordCleanup";
+                self.swarm
+                    .behaviour_mut()
+                    .kademlia
+                    .store_mut()
+                    .cleanup_unrelevant_records();
+            }
         }
 
         self.log_handling(cmd_string.to_string(), start.elapsed());
diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs
index 6492cec4b1..66bc0af974 100644
--- a/sn_networking/src/lib.rs
+++ b/sn_networking/src/lib.rs
@@ -801,6 +801,10 @@ impl Network {
         self.send_local_swarm_cmd(LocalSwarmCmd::QuoteVerification { quotes });
     }
 
+    pub fn trigger_unrelevant_record_cleanup(&self) {
+        self.send_local_swarm_cmd(LocalSwarmCmd::TriggerUnrelevantRecordCleanup)
+    }
+
     /// Helper to send NetworkSwarmCmd
     fn send_network_swarm_cmd(&self, cmd: NetworkSwarmCmd) {
         send_network_swarm_cmd(self.network_swarm_cmd_sender().clone(), cmd);
diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs
index 412d8da649..2be44a2adc 100644
--- a/sn_networking/src/record_store.rs
+++ b/sn_networking/src/record_store.rs
@@ -448,6 +448,48 @@ impl NodeRecordStore {
 
         Ok(())
     }
+
+    // When the accumulated record copies exceed the `exponential pricing point` (max_records * 0.6),
+    // those `out of range` records shall be cleaned up.
+    // This is to avoid `over-quoting` during restart, when RT is not fully populated,
+    // resulting in mis-calculation of relevant records.
+    pub fn cleanup_unrelevant_records(&mut self) {
+        let accumulated_records = self.records.len();
+        if accumulated_records < 6 * MAX_RECORDS_COUNT / 10 {
+            return;
+        }
+
+        let responsible_range = if let Some(range) = self.responsible_distance_range {
+            range
+        } else {
+            return;
+        };
+
+        let mut removed_keys = Vec::new();
+        self.records.retain(|key, _val| {
+            let kbucket_key = KBucketKey::new(key.to_vec());
+            let is_in_range =
+                responsible_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0);
+            if !is_in_range {
+                removed_keys.push(key.clone());
+            }
+            is_in_range
+        });
+
+        // Each `remove` function call will try to re-calculate furthest
+        // when the key to be removed is the current furthest.
+        // To avoid duplicated calculation, reset `furthest` first here.
+        self.farthest_record = self.calculate_farthest();
+
+        for key in removed_keys.iter() {
+            // Deletion from disk will be undertaken as a spawned task,
+            // hence safe to call this function repeatedly here.
+ self.remove(key); + } + + info!("Cleaned up {} unrelevant records, among the original {accumulated_records} accumulated_records", + removed_keys.len()); + } } impl NodeRecordStore { diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index 570720f9ff..3ab93c9d42 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -169,4 +169,13 @@ impl UnifiedRecordStore { Self::Node(store) => store.mark_as_stored(k, record_type), }; } + + pub(crate) fn cleanup_unrelevant_records(&mut self) { + match self { + Self::Client(_store) => { + warn!("Calling cleanup_unrelevant_records at Client. This should not happen"); + } + Self::Node(store) => store.cleanup_unrelevant_records(), + } + } } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index c99e476d69..0d8b60f314 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -79,6 +79,9 @@ const FORWARDED_BALANCE_FILE_NAME: &str = "forwarded_balance"; /// Interval to update the nodes uptime metric const UPTIME_METRICS_UPDATE_INTERVAL: Duration = Duration::from_secs(10); +/// Interval to clean up unrelevant records +const UNRELEVANT_RECORDS_CLEANUP_INTERVAL: Duration = Duration::from_secs(3600); + /// Helper to build and run a Node pub struct NodeBuilder { keypair: Keypair, @@ -323,6 +326,10 @@ impl Node { tokio::time::interval(UPTIME_METRICS_UPDATE_INTERVAL); let _ = uptime_metrics_update_interval.tick().await; // first tick completes immediately + let mut unrelevant_records_cleanup_interval = + tokio::time::interval(UNRELEVANT_RECORDS_CLEANUP_INTERVAL); + let _ = unrelevant_records_cleanup_interval.tick().await; // first tick completes immediately + loop { let peers_connected = &peers_connected; @@ -410,6 +417,13 @@ impl Node { let _ = node_metrics.uptime.set(node_metrics.started_instant.elapsed().as_secs() as i64); } } + _ = unrelevant_records_cleanup_interval.tick() => { + let network = self.network().clone(); + + let _handle = spawn(async move { + Self::trigger_unrelevant_record_cleanup(network); + }); + } } } }); diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 070a858228..65c4accf7d 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -25,6 +25,11 @@ impl Node { network.trigger_interval_replication() } + /// Cleanup unrelevant records if accumulated too many. + pub(crate) fn trigger_unrelevant_record_cleanup(network: Network) { + network.trigger_unrelevant_record_cleanup() + } + /// Get the Record from a peer or from the network without waiting. pub(crate) fn fetch_replication_keys_without_wait( &self, From dc85fbd06783d683b88c677ed8b051d1b525aaaa Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 7 Aug 2024 22:33:50 +0100 Subject: [PATCH 071/115] chore: script to list merged pull requests Give the script a file with a list of PR numbers and it will print various things that can be used in the release candidate threads on Discourse. Now that we're using merge commits, the list of PR numbers is easily obtainable from the comparison of the `main` and `stable` branches, using `git log stable..main --oneline`. 
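
One way to turn that `git log` output into the input file this script expects is
sketched below. This is only an illustrative helper, not part of the patch itself: it
assumes the merge commits use GitHub's default "Merge pull request #1234 from ..."
subject line, and `pr_numbers.txt` is just an example filename.

    #!/usr/bin/env python
    # Hypothetical helper: build the PR-number input file for list-numbered-prs.py.
    import re
    import subprocess

    # Compare the branches exactly as described above.
    log = subprocess.run(
        ["git", "log", "stable..main", "--oneline"],
        capture_output=True, text=True, check=True,
    ).stdout

    # Assumes GitHub's default merge-commit subject format.
    pr_numbers = re.findall(r"Merge pull request #(\d+)", log)
    with open("pr_numbers.txt", "w") as f:
        f.write("\n".join(pr_numbers) + "\n")

The resulting file (one PR number per line) is what the script's `read_pr_numbers`
helper parses; export `GITHUB_PAT_SAFE_NETWORK_PR_LIST` before running the script on it.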
--- resources/scripts/list-numbered-prs.py | 99 ++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100755 resources/scripts/list-numbered-prs.py diff --git a/resources/scripts/list-numbered-prs.py b/resources/scripts/list-numbered-prs.py new file mode 100755 index 0000000000..f25a3c241f --- /dev/null +++ b/resources/scripts/list-numbered-prs.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python + +import os +import sys +from collections import defaultdict +from github import Github + +def has_breaking_change(commits): + for commit in commits: + commit_message = commit.commit.message + if '!' in commit_message.split('\n')[0] or 'BREAKING CHANGE' in commit_message: + return True + return False + + +def main(pr_numbers): + token = os.getenv("GITHUB_PAT_SAFE_NETWORK_PR_LIST") + if not token: + raise Exception("The GITHUB_PAT_SAFE_NETWORK_PR_LIST environment variable must be set") + + g = Github(token) + repo = g.get_repo("maidsafe/safe_network") + + filtered_pulls = [] + for pr_num in pr_numbers: + print(f"Processing #{pr_num}...") + pull = repo.get_pull(pr_num) + if not pull.closed_at and not pull.merged_at: + raise Exception(f"PR {pr_num} has not been closed yet") + commits = pull.get_commits() + breaking = has_breaking_change(commits) + filtered_pulls.append({ + "number": pull.number, + "title": pull.title, + "author": pull.user.login, + "closed_at": pull.closed_at, + "breaking": breaking, + "commits": commits + }) + filtered_pulls.sort(key=lambda pr: pr["closed_at"]) + + print("Flat list:") + for pr in filtered_pulls: + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f"{closed_date} #{pr['number']} -- {pr['title']} [@{pr['author']}] {breaking_text}") + print("Flat list markdown:") + for pr in filtered_pulls: + pr_number = pr["number"] + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f"{closed_date} [#{pr_number}](https://github.com/maidsafe/safe_network/pull/{pr_number}) -- {pr['title']} [@{pr['author']}] {breaking_text}") + + print() + grouped_pulls = defaultdict(list) + for pr in filtered_pulls: + grouped_pulls[pr["author"]].append(pr) + + print("Grouped by author:") + for author, prs in grouped_pulls.items(): + print(f"@{author}") + for pr in prs: + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f" {closed_date} #{pr['number']} -- {pr['title']} {breaking_text}") + print() + + print("Grouped by author with commits:") + for author, prs in grouped_pulls.items(): + print(f"@{author}") + for pr in prs: + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f" {closed_date} #{pr['number']} -- {pr['title']} {breaking_text}") + for commit in pr["commits"]: + print(f" {commit.commit.message.split('\n')[0]}") + print() + + print("Grouped by author markdown:") + for author, prs in grouped_pulls.items(): + print(f"@{author}") + for pr in prs: + pr_number = pr["number"] + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f" {closed_date} [#{pr_number}](https://github.com/maidsafe/safe_network/pull/{pr_number}) -- {pr['title']} {breaking_text}") + print() + +def read_pr_numbers(file_path): + with open(file_path, 'r') as file: + return [int(line.strip()) for line in file] + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python script.py ") + sys.exit(1) + + file_path = sys.argv[1] + 
main(read_pr_numbers(file_path)) From ae287b7fa746ea4c36d25052b82f9488b70819b2 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 5 Aug 2024 09:03:13 +0200 Subject: [PATCH 072/115] feat(launchpad): help page redesign --- Cargo.lock | 20 +++ node-launchpad/Cargo.toml | 1 + node-launchpad/src/components/help.rs | 225 ++++++++++++++++++++++++ node-launchpad/src/widgets/hyperlink.rs | 15 ++ 4 files changed, 261 insertions(+) create mode 100644 node-launchpad/src/components/help.rs diff --git a/Cargo.lock b/Cargo.lock index 03451177cc..787dc0e381 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -143,6 +143,19 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +[[package]] +name = "ansi-to-tui" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3bf628a79452df9614d933012dc500f8cb6ddad8c897ff8122ea1c0b187ff7" +dependencies = [ + "nom", + "ratatui", + "simdutf8", + "smallvec", + "thiserror", +] + [[package]] name = "anstream" version = "0.6.14" @@ -4642,6 +4655,7 @@ dependencies = [ name = "node-launchpad" version = "0.3.11" dependencies = [ + "ansi-to-tui", "atty", "better-panic", "chrono", @@ -6895,6 +6909,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "slab" version = "0.4.9" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 813f973d53..d91df41c34 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -62,6 +62,7 @@ tracing-error = "0.2.0" tracing-subscriber = { version = "0.3.17", features = ["env-filter", "serde"] } tui-input = "0.8.0" which = "6.0.1" +ansi-to-tui = "4.1.0" [build-dependencies] vergen = { version = "8.2.6", features = ["build", "git", "gitoxide", "cargo"] } diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/help.rs new file mode 100644 index 0000000000..9f981b988c --- /dev/null +++ b/node-launchpad/src/components/help.rs @@ -0,0 +1,225 @@ +use color_eyre::eyre::Result; +use ratatui::{ + layout::{Constraint, Direction, Layout, Rect}, + style::{Style, Stylize}, + text::{Line, Span}, + widgets::{Block, Borders, Cell, Padding, Row, Table}, + Frame, +}; +use tokio::sync::mpsc::UnboundedSender; + +use super::Component; +use crate::{ + action::Action, + style::{EUCALYPTUS, GHOST_WHITE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, + widgets::hyperlink::Hyperlink, +}; +use ansi_to_tui::IntoText; + +#[derive(Clone)] +pub struct Help { + pub active: bool, + pub action_tx: Option>, +} + +impl Help { + pub async fn new() -> Result { + Ok(Self { + active: true, + action_tx: None, + }) + } +} + +impl Component for Help { + fn init(&mut self, _area: Rect) -> Result<()> { + Ok(()) + } + + fn draw(&mut self, f: &mut Frame<'_>, area: Rect) -> Result<()> { + // We define a layout, top and down box. 
+ let layout = Layout::default() + .direction(Direction::Vertical) + .constraints(vec![Constraint::Percentage(55), Constraint::Percentage(45)]) + .split(area); + + // ---- Get Help & Support ---- + // Links + + let quickstart_guide_link = Hyperlink::new( + "docs.autonomi.com/getstarted", + "https://docs.autonomi.com/getstarted", + ); + let beta_rewards_link = Hyperlink::new("autonomi.com/beta", "https://autonomi.com/beta"); + let get_direct_support_link = + Hyperlink::new("autonomi.com/support", "https://autonomi.com/support"); + let download_latest_link = + Hyperlink::new("autonomi.com/downloads", "https://autonomi.com/downloads"); + + // Content + let rows_help_and_support = vec![ + Row::new(vec![ + Cell::from(Line::from(vec![Span::styled( + "See the quick start guides:", + Style::default().fg(GHOST_WHITE), + )])), + Cell::from(Line::from(vec![Span::styled( + "To join the Beta Rewards Program:", + Style::default().fg(GHOST_WHITE), + )])), + ]), + Row::new(vec![ + Cell::from( + quickstart_guide_link + .to_string() + .into_text() + .unwrap() + .clone(), + ), + Cell::from(beta_rewards_link.to_string().into_text().unwrap().clone()), + ]), + Row::new(vec![ + Cell::from(Span::raw(" ")), // Empty row for padding + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from(Line::from(vec![Span::styled( + "Get Direct Support:", + Style::default().fg(GHOST_WHITE), + )])), + Cell::from(Line::from(vec![Span::styled( + "Download the latest launchpad:", + Style::default().fg(GHOST_WHITE), + )])), + ]), + Row::new(vec![ + Cell::from( + get_direct_support_link + .to_string() + .into_text() + .unwrap() + .clone(), + ), + Cell::from( + download_latest_link + .to_string() + .into_text() + .unwrap() + .clone(), + ), + ]), + ]; + + let table_help_and_support = Table::new( + rows_help_and_support, + vec![Constraint::Percentage(50), Constraint::Percentage(50)], + ) + .block( + Block::new() + .borders(Borders::ALL) + .padding(Padding::uniform(1)) + .title(" Get Help & Support ") + .title_style(Style::default().bold()), + ); + + f.render_widget(table_help_and_support, layout[0]); + + // ---- Keyboard shortcuts ---- + let rows_keyboard_shortcuts = vec![ + Row::new(vec![ + Cell::from(Line::from(vec![ + Span::styled("[S] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Status", Style::default().fg(VIVID_SKY_BLUE)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Manage Nodes", Style::default().fg(EUCALYPTUS)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+D] ", Style::default().fg(GHOST_WHITE)), + Span::styled( + "Change Storage Drive", + Style::default().fg(VERY_LIGHT_AZURE), + ), + ])), + ]), + Row::new(vec![ + Cell::from(Span::raw(" ")), // Empty row for padding + Cell::from(Span::raw(" ")), + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from(Line::from(vec![ + Span::styled("[O] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Options", Style::default().fg(VIVID_SKY_BLUE)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+S] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Start Nodes", Style::default().fg(EUCALYPTUS)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+B] ", Style::default().fg(GHOST_WHITE)), + Span::styled( + "Edit Discord Username", + Style::default().fg(VERY_LIGHT_AZURE), + ), + ])), + ]), + Row::new(vec![ + Cell::from(Span::raw(" ")), // Empty row for padding + Cell::from(Span::raw(" ")), + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + 
Cell::from(Line::from(vec![ + Span::styled("[H] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Help", Style::default().fg(VIVID_SKY_BLUE)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+X] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Stop Nodes", Style::default().fg(EUCALYPTUS)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+L] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Open Logs Folder", Style::default().fg(VERY_LIGHT_AZURE)), + ])), + ]), + Row::new(vec![ + Cell::from(Span::raw(" ")), // Empty row for padding + Cell::from(Span::raw(" ")), + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from(Line::from(vec![ + Span::styled("[Q] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Quit", Style::default().fg(VIVID_SKY_BLUE)), + ])), + Cell::from(Line::from(vec![ + Span::styled("[Ctrl+R] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Reset All Nodes", Style::default().fg(EUCALYPTUS)), + ])), + Cell::from(""), + ]), + ]; + + let table_keyboard_shortcuts = Table::new( + rows_keyboard_shortcuts, + vec![ + Constraint::Percentage(33), + Constraint::Percentage(33), + Constraint::Percentage(33), + ], + ) + .block( + Block::new() + .borders(Borders::ALL) + .padding(Padding::uniform(1)) + .title(" Keyboard Shortcuts ") + .title_style(Style::default().bold()), + ); + + f.render_widget(table_keyboard_shortcuts, layout[1]); + + Ok(()) + } +} diff --git a/node-launchpad/src/widgets/hyperlink.rs b/node-launchpad/src/widgets/hyperlink.rs index 591b8f88f9..149603fdb0 100644 --- a/node-launchpad/src/widgets/hyperlink.rs +++ b/node-launchpad/src/widgets/hyperlink.rs @@ -8,6 +8,7 @@ use itertools::Itertools; use ratatui::{prelude::*, widgets::WidgetRef}; +use std::fmt; /// A hyperlink widget that renders a hyperlink in the terminal using [OSC 8]. /// @@ -26,6 +27,20 @@ impl<'content> Hyperlink<'content> { } } +// Displays the hyperlink in the terminal using OSC 8. +// Underline solid \x1b[4m +// Foreground color 45 \x1b[38;5;45m +impl fmt::Display for Hyperlink<'_> { + //TODO: Parameterize the color, underline, bold, etc. 
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "\x1b[4m\x1b[38;5;45m\x1B]8;;{}\x07{}\x1B]8;;\x07\x1b[0m", + self.url, self.text + ) + } +} + impl WidgetRef for Hyperlink<'_> { fn render_ref(&self, area: Rect, buffer: &mut Buffer) { self.text.render_ref(area, buffer); From 17c86affd608503351cda713ebacc0411d14ea95 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:04:33 +0200 Subject: [PATCH 073/115] chore(manager): verbosity level when resetting nodes --- sn_node_manager/src/cmd/node.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index ca903ae8f5..7a06ee7c63 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -263,7 +263,9 @@ pub async fn remove( } pub async fn reset(force: bool, verbosity: VerbosityLevel) -> Result<()> { - print_banner("Reset Safenode Services"); + if verbosity != VerbosityLevel::Minimal { + print_banner("Reset Safenode Services"); + } info!("Resetting all safenode services, with force={force}"); if !force { From 5ae0599b644cc3f5541bb0c992ec593dd0111ff6 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:10:02 +0200 Subject: [PATCH 074/115] chore(launchpad): caps on todo comments --- node-launchpad/src/bin/tui/main.rs | 2 +- node-launchpad/src/bin/tui/terminal.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node-launchpad/src/bin/tui/main.rs b/node-launchpad/src/bin/tui/main.rs index 842cabbb6b..965eefb142 100644 --- a/node-launchpad/src/bin/tui/main.rs +++ b/node-launchpad/src/bin/tui/main.rs @@ -95,7 +95,7 @@ async fn main() -> Result<()> { #[cfg(target_os = "windows")] if !is_running_as_root() { { - // todo: There is no terminal to show this error message when double clicking on the exe. + // TODO: There is no terminal to show this error message when double clicking on the exe. error!("Admin privileges required to run on Windows. Exiting."); color_eyre::eyre::bail!("Admin privileges required to run on Windows. Exiting."); } diff --git a/node-launchpad/src/bin/tui/terminal.rs b/node-launchpad/src/bin/tui/terminal.rs index e02040460d..0f5da9f6fb 100644 --- a/node-launchpad/src/bin/tui/terminal.rs +++ b/node-launchpad/src/bin/tui/terminal.rs @@ -30,7 +30,7 @@ pub(crate) fn detect_and_setup_terminal() -> Result { #[cfg(target_os = "windows")] if !is_running_as_root() { { - // todo: There is no terminal to show this error message when double clicking on the exe. + // TODO: There is no terminal to show this error message when double clicking on the exe. error!("Admin privileges required to run on Windows. 
Exiting."); color_eyre::eyre::bail!("Admin privileges required to run"); } From 5ae28d6a236cead1c6f80c55455c715149748141 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:13:51 +0200 Subject: [PATCH 075/115] chore(launchpad): new scenes and actions --- node-launchpad/src/action.rs | 34 ++++++++++++++++++++++++---------- node-launchpad/src/mode.rs | 12 +++++++----- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/node-launchpad/src/action.rs b/node-launchpad/src/action.rs index 6ddb8c5ed9..d6cbe92f4f 100644 --- a/node-launchpad/src/action.rs +++ b/node-launchpad/src/action.rs @@ -15,13 +15,16 @@ use strum::Display; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] pub enum Action { - HomeActions(HomeActions), - TabActions(TabActions), + MenuActions(MenuActions), + StatusActions(StatusActions), + OptionsActions(OptionsActions), + SwitchScene(Scene), SwitchInputMode(InputMode), StoreDiscordUserName(String), StoreNodesToStart(usize), + StoreStorageDrive(String, String), Tick, Render, @@ -32,11 +35,11 @@ pub enum Action { Refresh, Error(String), Help, + Noop, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] -pub enum HomeActions { - ResetNodes, +pub enum StatusActions { StartNodes, StopNodes, StartNodesCompleted, @@ -47,17 +50,28 @@ pub enum HomeActions { NodesStatsObtained(NodeStats), - TriggerBetaProgramme, TriggerManageNodes, - TriggerHelp, - TriggerResetNodesPopUp, PreviousTableItem, NextTableItem, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] -pub enum TabActions { - NextTab, - PreviousTab, +pub enum OptionsActions { + ResetNodes, + + TriggerChangeDrive, + TriggerChangeDriveConfirm(String, String), + TriggerBetaProgramme, + TriggerResetNodes, + TriggerAccessLogs, + UpdateBetaProgrammeUsername(String), + UpdateStorageDrive(String, String), +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] +pub enum MenuActions { + StatusTab, + OptionsTab, + HelpTab, } diff --git a/node-launchpad/src/mode.rs b/node-launchpad/src/mode.rs index 3968c059a0..a0b8421d3f 100644 --- a/node-launchpad/src/mode.rs +++ b/node-launchpad/src/mode.rs @@ -11,12 +11,14 @@ use serde::{Deserialize, Serialize}; #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum Scene { #[default] - Home, + Status, Options, - BetaProgramme, - ManageNodes, - HelpPopUp, - ResetPopUp, + Help, + ChangeDrivePopUp, + ChangeDriveConfirmPopup, + BetaProgrammePopUp, + ManageNodesPopUp, + ResetNodesPopUp, } #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] From a7c091deae49276cf9d36d18fbc5da14c8d678ec Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:14:28 +0200 Subject: [PATCH 076/115] chore(launchpad): cargo dependencies --- Cargo.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 787dc0e381..ae4f01442e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7600,11 +7600,11 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "rustversion", @@ -8463,9 +8463,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = 
"0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unicode-xid" From 7b5eb407b77a0d444af76945fb5ea594a1153f4a Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:16:08 +0200 Subject: [PATCH 077/115] chore(launchpad): new keybidings for new sections --- node-launchpad/.config/config.json5 | 89 ++++++++++++++++++----------- 1 file changed, 57 insertions(+), 32 deletions(-) diff --git a/node-launchpad/.config/config.json5 b/node-launchpad/.config/config.json5 index bc1857fbfc..79912612a5 100644 --- a/node-launchpad/.config/config.json5 +++ b/node-launchpad/.config/config.json5 @@ -1,48 +1,73 @@ { "keybindings": { - "Home": { - "": {"HomeActions":"StartNodes"}, - "": {"HomeActions":"StartNodes"}, - "": {"HomeActions":"StartNodes"}, - "": {"HomeActions":"StopNodes"}, - "": {"HomeActions":"StopNodes"}, - "": {"HomeActions":"StopNodes"}, - - "up" : {"HomeActions":"PreviousTableItem"}, - "down": {"HomeActions":"NextTableItem"}, - - "tab": {"TabActions":"NextTab"}, - "": {"TabActions":"PreviousTab"}, - - "": {"HomeActions":"TriggerBetaProgramme"}, - "": {"HomeActions":"TriggerBetaProgramme"}, - "": {"HomeActions":"TriggerBetaProgramme"}, - "": {"HomeActions":"TriggerManageNodes"}, - "": {"HomeActions":"TriggerManageNodes"}, - "": {"HomeActions":"TriggerManageNodes"}, - "": {"HomeActions":"TriggerHelp"}, - "": {"HomeActions":"TriggerHelp"}, - "": {"HomeActions":"TriggerHelp"}, - "": {"HomeActions":"TriggerResetNodesPopUp"}, - "": {"HomeActions":"TriggerResetNodesPopUp"}, - "": {"HomeActions":"TriggerResetNodesPopUp"}, + "Status" : { + "": {"MenuActions":"StatusTab"}, + "": {"MenuActions":"StatusTab"}, + "": {"MenuActions":"OptionsTab"}, + "": {"MenuActions":"OptionsTab"}, + "": {"MenuActions":"HelpTab"}, + "": {"MenuActions":"HelpTab"}, + + "": {"StatusActions":"StartNodes"}, + "": {"StatusActions":"StartNodes"}, + "": {"StatusActions":"StartNodes"}, + "": {"StatusActions":"StopNodes"}, + "": {"StatusActions":"StopNodes"}, + "": {"StatusActions":"StopNodes"}, + + "up" : {"StatusActions":"PreviousTableItem"}, + "down": {"StatusActions":"NextTableItem"}, + + "": {"StatusActions":"TriggerManageNodes"}, + "": {"StatusActions":"TriggerManageNodes"}, + "": {"StatusActions":"TriggerManageNodes"}, "": "Quit", "": "Quit", "": "Quit", - "": "Quit", "": "Quit", "": "Suspend" // Suspend the application }, "Options": { - "tab": {"TabActions":"NextTab"}, - "": {"TabActions":"PreviousTab"}, - "enter": {"SwitchInputMode":"Entry"}, - + "": {"MenuActions":"StatusTab"}, + "": {"MenuActions":"StatusTab"}, + "": {"MenuActions":"OptionsTab"}, + "": {"MenuActions":"OptionsTab"}, + "": {"MenuActions":"HelpTab"}, + "": {"MenuActions":"HelpTab"}, + + "": {"OptionsActions":"TriggerChangeDrive"}, + "": {"OptionsActions":"TriggerChangeDrive"}, + "": {"OptionsActions":"TriggerChangeDrive"}, + "": {"OptionsActions":"TriggerBetaProgramme"}, + "": {"OptionsActions":"TriggerBetaProgramme"}, + "": {"OptionsActions":"TriggerBetaProgramme"}, + "": {"OptionsActions":"TriggerAccessLogs"}, + "": {"OptionsActions":"TriggerAccessLogs"}, + "": {"OptionsActions":"TriggerAccessLogs"}, + "": {"OptionsActions":"TriggerResetNodes"}, + "": {"OptionsActions":"TriggerResetNodes"}, + "": {"OptionsActions":"TriggerResetNodes"}, + + + "": "Quit", + "": "Quit", + "": "Quit", + "": "Quit", + "": "Suspend" // 
Suspend the application + }, + "Help": { + "": {"MenuActions":"StatusTab"}, + "": {"MenuActions":"StatusTab"}, + "": {"MenuActions":"OptionsTab"}, + "": {"MenuActions":"OptionsTab"}, + "": {"MenuActions":"HelpTab"}, + "": {"MenuActions":"HelpTab"}, + + "": "Quit", "": "Quit", "": "Quit", - "": "Quit", "": "Quit", "": "Suspend" // Suspend the application } From 217af40323fa706e96b0a3ca6bbff5f85e6c58b4 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:19:13 +0200 Subject: [PATCH 078/115] feat(launchpad): popups reorg and new one change drive --- node-launchpad/src/components/popup.rs | 5 +- .../src/components/popup/beta_programme.rs | 35 +- .../src/components/popup/change_drive.rs | 398 ++++++++++++++++++ .../components/popup/change_drive_confirm.rs | 216 ++++++++++ node-launchpad/src/components/popup/help.rs | 155 ------- .../src/components/popup/manage_nodes.rs | 26 +- .../popup/{reset.rs => reset_nodes.rs} | 53 ++- 7 files changed, 690 insertions(+), 198 deletions(-) create mode 100644 node-launchpad/src/components/popup/change_drive.rs create mode 100644 node-launchpad/src/components/popup/change_drive_confirm.rs delete mode 100644 node-launchpad/src/components/popup/help.rs rename node-launchpad/src/components/popup/{reset.rs => reset_nodes.rs} (79%) diff --git a/node-launchpad/src/components/popup.rs b/node-launchpad/src/components/popup.rs index 48d2044a04..925c6573d0 100644 --- a/node-launchpad/src/components/popup.rs +++ b/node-launchpad/src/components/popup.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. pub mod beta_programme; -pub mod help; +pub mod change_drive; +pub mod change_drive_confirm; pub mod manage_nodes; -pub mod reset; +pub mod reset_nodes; diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 5d253a4a58..18c8192faa 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -9,7 +9,7 @@ use super::super::utils::centered_rect_fixed; use super::super::Component; use crate::{ - action::Action, + action::{Action, OptionsActions}, mode::{InputMode, Scene}, style::{clear_area, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, widgets::hyperlink::Hyperlink, @@ -65,12 +65,13 @@ impl BetaProgramme { self.state = BetaProgrammeState::DiscordIdAlreadySet; vec![ Action::StoreDiscordUserName(self.discord_input_filed.value().to_string()), - Action::SwitchScene(Scene::Home), + Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), + Action::SwitchScene(Scene::Options), ] } KeyCode::Esc => { debug!( - "Got Esc, restoring the old value {} and switching to home", + "Got Esc, restoring the old value {} and switching to actual screen", self.old_value ); // reset to old value @@ -78,7 +79,7 @@ impl BetaProgramme { .discord_input_filed .clone() .with_value(self.old_value.clone()); - vec![Action::SwitchScene(Scene::Home)] + vec![Action::SwitchScene(Scene::Options)] } KeyCode::Char(' ') => vec![], KeyCode::Backspace => { @@ -128,10 +129,10 @@ impl Component for BetaProgramme { } BetaProgrammeState::RejectTCs => { if let KeyCode::Esc = key.code { - debug!("RejectTCs msg closed. Switching to home scene."); + debug!("RejectTCs msg closed. 
Switching to Status scene."); self.state = BetaProgrammeState::ShowTCs; } - vec![Action::SwitchScene(Scene::Home)] + vec![Action::SwitchScene(Scene::Status)] } BetaProgrammeState::AcceptTCsAndEnterDiscordId => self.capture_inputs(key), }; @@ -141,10 +142,10 @@ impl Component for BetaProgramme { fn update(&mut self, action: Action) -> Result> { let send_back = match action { Action::SwitchScene(scene) => match scene { - Scene::BetaProgramme => { + Scene::BetaProgrammePopUp => { self.active = true; self.old_value = self.discord_input_filed.value().to_string(); - // set to entry input mode as we want to handle everything within our handle_key_events + // Set to InputMode::Entry as we want to handle everything within our handle_key_events // so by default if this scene is active, we capture inputs. Some(Action::SwitchInputMode(InputMode::Entry)) } @@ -198,22 +199,22 @@ impl Component for BetaProgramme { // for the prompt text Constraint::Length(3), // for the input - Constraint::Length(2), - // for the text Constraint::Length(3), + // for the text + Constraint::Length(4), // gap - Constraint::Length(3), + Constraint::Length(1), // for the buttons Constraint::Length(1), ], ) .split(layer_one[1]); - let prompt = Paragraph::new("Discord Username associated with this device:") + let prompt_text = Paragraph::new("Discord Username associated with this device:") .alignment(Alignment::Center) .fg(GHOST_WHITE); - f.render_widget(prompt, layer_two[0]); + f.render_widget(prompt_text, layer_two[0]); let input = Paragraph::new(self.discord_input_filed.value()) .alignment(Alignment::Center) @@ -232,7 +233,13 @@ impl Component for BetaProgramme { ); f.render_widget(input, layer_two[1]); - let text = Paragraph::new(" Changing your Username will reset all nodes,\n and any Nanos left on this device will be\n lost."); + let text = Paragraph::new(Text::from(vec![ + Line::raw("Changing your Username will reset all nodes,"), + Line::raw("and any Nanos left on this device will be lost."), + ])) + .alignment(Alignment::Center) + .block(Block::default().padding(Padding::horizontal(2))); + f.render_widget(text.fg(GHOST_WHITE), layer_two[2]); let dash = Block::new() diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs new file mode 100644 index 0000000000..d2c6bba2b9 --- /dev/null +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -0,0 +1,398 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use std::default::Default; + +use super::super::utils::centered_rect_fixed; + +use color_eyre::Result; +use crossterm::event::{KeyCode, KeyEvent}; +use ratatui::{ + layout::{Alignment, Constraint, Direction, Layout, Rect}, + style::{Modifier, Style, Stylize}, + text::{Line, Span}, + widgets::{Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph}, +}; + +use crate::{ + action::{Action, OptionsActions}, + components::Component, + mode::{InputMode, Scene}, + style::{ + clear_area, COOL_GREY, DARK_GUNMETAL, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, + SPACE_CADET, VIVID_SKY_BLUE, + }, + system, +}; + +#[derive(Default)] +pub struct ChangeDrivePopup { + active: bool, + items: StatefulList, + drive_selection: DriveItem, + user_moved: bool, // This is used to check if the user has moved the selection and style it accordingly +} + +impl ChangeDrivePopup { + pub fn new(storage_mountpoint: String) -> Self { + let drives_and_space = system::get_list_of_drives_and_available_space(); + + let mut selected_drive: DriveItem = DriveItem::default(); + // Create a vector of DriveItem from drives_and_space + let drives_items: Vec = drives_and_space + .iter() + .map(|(drive_name, mountpoint, space)| { + let size_str = format!("{:.2} GB", *space as f64 / 1e9); + let size_str_cloned = size_str.clone(); + DriveItem { + name: drive_name.to_string(), + mountpoint: mountpoint.to_string(), + size: size_str, + status: if mountpoint == &storage_mountpoint { + selected_drive = DriveItem { + name: drive_name.to_string(), + mountpoint: mountpoint.to_string(), + size: size_str_cloned, + status: DriveStatus::Selected, + }; + DriveStatus::Selected + } else { + DriveStatus::NotSelected + }, + } + }) + .collect::>(); + debug!("Drive Mountpoint in Config: {}", storage_mountpoint); + debug!("Drives and space: {:?}", drives_and_space); + let items = StatefulList::with_items(drives_items); + Self { + active: false, + items, + drive_selection: selected_drive, + user_moved: false, + } + } + // Deselect all drives + fn deselect_all(&mut self) { + for item in &mut self.items.items { + item.status = DriveStatus::NotSelected; + } + } + // Change the status of the selected drive to Selected + #[allow(dead_code)] + fn change_status(&mut self) { + self.deselect_all(); + if let Some(i) = self.items.state.selected() { + self.items.items[i].status = DriveStatus::Selected; + self.drive_selection = self.items.items[i].clone(); + } + } + // Highlight the drive that is currently selected in this component members + fn select_drive(&mut self) { + self.deselect_all(); + for (index, item) in self.items.items.iter_mut().enumerate() { + if item.mountpoint == self.drive_selection.mountpoint { + item.status = DriveStatus::Selected; + self.items.state.select(Some(index)); + break; + } + } + } + // return the selected drive + fn return_selection(&mut self) -> DriveItem { + if let Some(i) = self.items.state.selected() { + return self.items.items[i].clone(); + } + DriveItem::default() + } +} + +impl Component for ChangeDrivePopup { + fn handle_key_events(&mut self, key: KeyEvent) -> Result> { + if !self.active { + return Ok(vec![]); + } + let send_back = match key.code { + KeyCode::Enter => { + // We allow action if we have more than one drive and the action is not + // over the drive already selected + let drive = self.return_selection(); + if self.items.items.len() > 1 + && (drive.name != self.drive_selection.name + && drive.mountpoint != self.drive_selection.mountpoint) + { + debug!( + "Got Enter and there's a new 
selection, storing value and switching to Options" + ); + // self.change_status(); + debug!("Drive selected: {:?}", self.drive_selection.name); + vec![ + Action::OptionsActions(OptionsActions::TriggerChangeDriveConfirm( + drive.mountpoint.clone(), + drive.name.clone(), + )), + Action::SwitchScene(Scene::ChangeDriveConfirmPopup), + ] + } else { + debug!("Got Enter, but no new selection. We should not do anything"); + vec![Action::SwitchScene(Scene::ChangeDrivePopUp)] + } + } + KeyCode::Esc => { + debug!("Got Esc, switching to Options"); + vec![Action::SwitchScene(Scene::Options)] + } + KeyCode::Up => { + let drive = self.return_selection(); + if self.items.items.len() > 1 { + self.user_moved = drive.name == self.drive_selection.name + && drive.mountpoint == self.drive_selection.mountpoint; + self.items.previous(); + } + vec![] + } + KeyCode::Down => { + let drive = self.return_selection(); + if self.items.items.len() > 1 { + self.user_moved = drive.name == self.drive_selection.name + && drive.mountpoint == self.drive_selection.mountpoint; + self.items.next(); + } + vec![] + } + _ => { + vec![] + } + }; + Ok(send_back) + } + + fn update(&mut self, action: Action) -> Result> { + let send_back = match action { + Action::SwitchScene(scene) => match scene { + Scene::ChangeDrivePopUp => { + self.active = true; + self.user_moved = false; + self.select_drive(); + Some(Action::SwitchInputMode(InputMode::Entry)) + } + _ => { + self.active = false; + None + } + }, + // Useful when the user has selected a drive but didn't confirm it + Action::OptionsActions(OptionsActions::UpdateStorageDrive(mountpoint, drive_name)) => { + self.drive_selection.mountpoint = mountpoint; + self.drive_selection.name = drive_name; + self.select_drive(); + None + } + _ => None, + }; + Ok(send_back) + } + + fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { + if !self.active { + return Ok(()); + } + + let layer_zero = centered_rect_fixed(52, 15, area); + + let layer_one = Layout::new( + Direction::Vertical, + [ + // Padding from title to the table + Constraint::Length(1), + // Table + Constraint::Min(1), + // for the pop_up_border + Constraint::Length(1), + ], + ) + .split(layer_zero); + + let pop_up_border = Paragraph::new("").block( + Block::default() + .borders(Borders::ALL) + .title(" Select a Drive ") + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) + .border_style(Style::new().fg(VIVID_SKY_BLUE)) + .bg(DARK_GUNMETAL), + ); + clear_area(f, layer_zero); + + let layer_two = Layout::new( + Direction::Vertical, + [ + // for the table + Constraint::Length(10), + // gap + Constraint::Length(3), + // for the buttons + Constraint::Length(1), + ], + ) + .split(layer_one[1]); + + // Drive selector + let items: Vec = self + .items + .items + .iter() + .enumerate() + .map(|(i, drive_item)| drive_item.to_list_item(i, layer_two[0].width as usize)) + .collect(); + + let items = List::new(items) + .block(Block::default().padding(Padding::uniform(1))) + .highlight_style( + Style::default() + .add_modifier(Modifier::BOLD) + .add_modifier(Modifier::REVERSED) + .fg(INDIGO), + ) + .highlight_spacing(HighlightSpacing::Always); + + f.render_stateful_widget(items, layer_two[0], &mut self.items.state); + + // Dash + let dash = Block::new() + .borders(Borders::BOTTOM) + .border_style(Style::new().fg(GHOST_WHITE)); + f.render_widget(dash, layer_two[1]); + + // Buttons + let buttons_layer = + Layout::horizontal(vec![Constraint::Percentage(50), Constraint::Percentage(50)]) + 
.split(layer_two[2]); + + let button_no = Line::from(vec![Span::styled( + "Cancel [Esc]", + Style::default().fg(LIGHT_PERIWINKLE), + )]); + + f.render_widget( + Paragraph::new(button_no) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Left), + buttons_layer[0], + ); + + let button_yes = Line::from(if self.user_moved { + vec![ + Span::styled("Change Drive ", Style::default().fg(EUCALYPTUS)), + Span::styled("[Enter]", Style::default().fg(LIGHT_PERIWINKLE).bold()), + ] + } else { + vec![ + Span::styled("Change Drive ", Style::default().fg(COOL_GREY)), + Span::styled("[Enter]", Style::default().fg(LIGHT_PERIWINKLE)), + ] + }) + .alignment(Alignment::Right); + + f.render_widget( + Paragraph::new(button_yes) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Right), + buttons_layer[1], + ); + + // We render now so the borders are on top of the other widgets + f.render_widget(pop_up_border, layer_zero); + + Ok(()) + } +} + +#[derive(Default)] +struct StatefulList { + state: ListState, + items: Vec, + last_selected: Option, +} + +impl StatefulList { + fn with_items(items: Vec) -> Self { + StatefulList { + state: ListState::default(), + items, + last_selected: None, + } + } + + fn next(&mut self) { + let i = match self.state.selected() { + Some(i) => { + if i >= self.items.len() - 1 { + 0 + } else { + i + 1 + } + } + None => self.last_selected.unwrap_or(0), + }; + self.state.select(Some(i)); + } + + fn previous(&mut self) { + let i = match self.state.selected() { + Some(i) => { + if i == 0 { + self.items.len() - 1 + } else { + i - 1 + } + } + None => self.last_selected.unwrap_or(0), + }; + self.state.select(Some(i)); + } +} + +#[derive(Default, Debug, Copy, Clone)] +enum DriveStatus { + Selected, + #[default] + NotSelected, +} + +#[derive(Default, Debug, Clone)] +pub struct DriveItem { + name: String, + mountpoint: String, + size: String, + status: DriveStatus, +} + +impl DriveItem { + fn to_list_item(&self, _index: usize, width: usize) -> ListItem { + let spaces = width - self.name.len() - self.size.len() - " ".len() - 4; + + let line = match self.status { + DriveStatus::NotSelected => Line::from(vec![ + Span::raw(" "), + Span::styled(self.name.clone(), Style::default().fg(VIVID_SKY_BLUE)), + Span::raw(" ".repeat(spaces)), + Span::styled(self.size.clone(), Style::default().fg(LIGHT_PERIWINKLE)), + ]), + DriveStatus::Selected => Line::from(vec![ + Span::styled(" â–º", Style::default().fg(EUCALYPTUS)), + Span::raw(" "), + Span::styled(self.name.clone(), Style::default().fg(VIVID_SKY_BLUE)), + Span::raw(" ".repeat(spaces)), + Span::styled(self.size.clone(), Style::default().fg(GHOST_WHITE)), + ]), + }; + + ListItem::new(line).style(Style::default().bg(SPACE_CADET)) + } +} diff --git a/node-launchpad/src/components/popup/change_drive_confirm.rs b/node-launchpad/src/components/popup/change_drive_confirm.rs new file mode 100644 index 0000000000..fb242ded87 --- /dev/null +++ b/node-launchpad/src/components/popup/change_drive_confirm.rs @@ -0,0 +1,216 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use super::super::utils::centered_rect_fixed; + +use color_eyre::Result; +use crossterm::event::{KeyCode, KeyEvent}; +use ratatui::{ + layout::{Alignment, Constraint, Direction, Layout, Rect}, + style::{Style, Stylize}, + text::{Line, Span}, + widgets::{Block, Borders, Padding, Paragraph, Wrap}, +}; + +use crate::{ + action::{Action, OptionsActions}, + components::Component, + mode::{InputMode, Scene}, + style::{clear_area, DARK_GUNMETAL, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, +}; + +#[derive(Default)] +pub struct ChangeDriveConfirmPopup { + active: bool, + drive_selection_mountpoint: String, + drive_selection_name: String, +} + +impl Component for ChangeDriveConfirmPopup { + fn handle_key_events(&mut self, key: KeyEvent) -> Result> { + if !self.active { + return Ok(vec![]); + } + let send_back = match key.code { + KeyCode::Enter => { + debug!("Got Enter, storing value and switching to Options"); + vec![ + Action::StoreStorageDrive( + self.drive_selection_mountpoint.clone(), + self.drive_selection_name.clone(), + ), + Action::OptionsActions(OptionsActions::UpdateStorageDrive( + self.drive_selection_mountpoint.clone(), + self.drive_selection_name.clone(), + )), + Action::SwitchScene(Scene::Options), + ] + } + KeyCode::Esc => { + debug!("Got Esc, switching to Options"); + vec![Action::SwitchScene(Scene::Options)] + } + _ => { + vec![] + } + }; + Ok(send_back) + } + + fn update(&mut self, action: Action) -> Result> { + let send_back = match action { + Action::SwitchScene(scene) => match scene { + Scene::ChangeDriveConfirmPopup => { + self.active = true; + Some(Action::SwitchInputMode(InputMode::Entry)) + } + _ => { + self.active = false; + None + } + }, + Action::OptionsActions(OptionsActions::TriggerChangeDriveConfirm(mountpoint, name)) => { + self.drive_selection_mountpoint = mountpoint; + self.drive_selection_name = name; + None + } + _ => None, + }; + Ok(send_back) + } + + fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { + if !self.active { + return Ok(()); + } + + let layer_zero = centered_rect_fixed(52, 15, area); + + let layer_one = Layout::new( + Direction::Vertical, + [ + // Padding from title to the table + Constraint::Length(1), + // Text + Constraint::Min(1), + // for the pop_up_border + Constraint::Length(1), + ], + ) + .split(layer_zero); + + let pop_up_border = Paragraph::new("").block( + Block::default() + .borders(Borders::ALL) + .title(" Confirm & Reset ") + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) + .border_style(Style::new().fg(VIVID_SKY_BLUE)) + .bg(DARK_GUNMETAL), + ); + clear_area(f, layer_zero); + + let layer_two = Layout::new( + Direction::Vertical, + [ + // for the table + Constraint::Length(10), + // gap + Constraint::Length(3), + // for the buttons + Constraint::Length(1), + ], + ) + .split(layer_one[1]); + + // Text + let text = vec![ + Line::from(vec![]), // Empty line + Line::from(vec![]), // Empty line + Line::from(vec![ + Span::styled("Changing storage to ", Style::default().fg(GHOST_WHITE)), + Span::styled( + format!("{} ", self.drive_selection_name), + Style::default().fg(VIVID_SKY_BLUE), + ), + Span::styled("will ", Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Center), + Line::from(vec![Span::styled( + "reset all nodes.", + Style::default().fg(GHOST_WHITE), + )]) + .alignment(Alignment::Center), + 
Line::from(vec![]), // Empty line + Line::from(vec![]), // Empty line + Line::from(vec![ + Span::styled("You’ll need to ", Style::default().fg(GHOST_WHITE)), + Span::styled("Add ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled("and ", Style::default().fg(GHOST_WHITE)), + Span::styled("Start ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled( + "them again afterwards. Are you sure you want to continue?", + Style::default().fg(GHOST_WHITE), + ), + ]) + .alignment(Alignment::Center), + ]; + let paragraph = Paragraph::new(text) + .wrap(Wrap { trim: false }) + .block( + Block::default() + .borders(Borders::NONE) + .padding(Padding::horizontal(2)), + ) + .alignment(Alignment::Center) + .style(Style::default().fg(GHOST_WHITE).bg(DARK_GUNMETAL)); + + f.render_widget(paragraph, layer_two[0]); + + // Dash + let dash = Block::new() + .borders(Borders::BOTTOM) + .border_style(Style::new().fg(GHOST_WHITE)); + f.render_widget(dash, layer_two[1]); + + // Buttons + let buttons_layer = + Layout::horizontal(vec![Constraint::Percentage(30), Constraint::Percentage(70)]) + .split(layer_two[2]); + + let button_no = Line::from(vec![Span::styled( + "Back [Esc]", + Style::default().fg(LIGHT_PERIWINKLE), + )]); + + f.render_widget( + Paragraph::new(button_no) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Left), + buttons_layer[0], + ); + + let button_yes = Line::from(vec![ + Span::styled("Yes, change drive ", Style::default().fg(EUCALYPTUS)), + Span::styled("[Enter]", Style::default().fg(LIGHT_PERIWINKLE).bold()), + ]) + .alignment(Alignment::Right); + + f.render_widget( + Paragraph::new(button_yes) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Right), + buttons_layer[1], + ); + + // We render now so the borders are on top of the other widgets + f.render_widget(pop_up_border, layer_zero); + + Ok(()) + } +} diff --git a/node-launchpad/src/components/popup/help.rs b/node-launchpad/src/components/popup/help.rs deleted file mode 100644 index 8d9150f8d5..0000000000 --- a/node-launchpad/src/components/popup/help.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::super::{utils::centered_rect_fixed, Component}; -use crate::{ - action::Action, - mode::{InputMode, Scene}, - style::{clear_area, EUCALYPTUS, GHOST_WHITE, VIVID_SKY_BLUE}, - tui::Frame, - widgets::hyperlink::Hyperlink, -}; -use color_eyre::eyre::Result; -use crossterm::event::{KeyCode, KeyEvent}; -use ratatui::{ - prelude::{Rect, *}, - widgets::*, -}; - -#[derive(Default)] -pub struct HelpPopUp { - /// Whether the component is active right now, capturing keystrokes + drawing things. 
- active: bool, -} - -impl Component for HelpPopUp { - fn handle_key_events(&mut self, key: KeyEvent) -> Result> { - if !self.active { - return Ok(vec![]); - } - - let send_back = match key.code { - KeyCode::Esc => { - debug!("Got Esc, exiting HelpPopUp"); - vec![Action::SwitchScene(Scene::Home)] - } - _ => { - vec![] - } - }; - - Ok(send_back) - } - - fn update(&mut self, action: Action) -> Result> { - let send_back = match action { - Action::SwitchScene(scene) => match scene { - Scene::HelpPopUp => { - self.active = true; - Some(Action::SwitchInputMode(InputMode::Entry)) - } - _ => { - self.active = false; - None - } - }, - _ => None, - }; - Ok(send_back) - } - - fn draw(&mut self, f: &mut Frame<'_>, area: Rect) -> Result<()> { - if !self.active { - return Ok(()); - } - - let layer_zero = centered_rect_fixed(50, 12, area); - - let layer_one = Layout::new( - Direction::Vertical, - [ - // for the layer 0 border - Constraint::Length(2), - // lines - Constraint::Length(1), - Constraint::Length(1), - Constraint::Length(1), - Constraint::Length(1), - Constraint::Length(1), - Constraint::Length(1), - // dash - Constraint::Min(1), - // button - Constraint::Length(1), - Constraint::Length(1), - ], - ) - .split(layer_zero); - clear_area(f, layer_zero); - - let pop_up_border = Paragraph::new("").block( - Block::default() - .borders(Borders::ALL) - .title(" Get Help ") - .title_style(style::Style::default().fg(EUCALYPTUS)) - .border_style(Style::new().fg(EUCALYPTUS)), - ); - - let line1 = Paragraph::new(" See the quick start guides:") - .block(Block::default().padding(Padding::horizontal(1))); - f.render_widget(line1.fg(GHOST_WHITE), layer_one[1]); - - let link1 = Hyperlink::new( - Span::styled( - " https://autonomi.com/getting-started", - Style::default().fg(VIVID_SKY_BLUE), - ), - "https://autonomi.com/getting-started", - ); - f.render_widget_ref(link1, layer_one[2]); - - let line2 = Paragraph::new(" Get direct help via Discord:") - .fg(GHOST_WHITE) - .block(Block::default().padding(Padding::horizontal(1))); - f.render_widget(line2, layer_one[3]); - - let link2 = Hyperlink::new( - Span::styled( - " https://discord.gg/autonomi", - Style::default().fg(VIVID_SKY_BLUE), - ), - "https://discord.gg/autonomi", - ); - f.render_widget_ref(link2, layer_one[4]); - - let line3 = Paragraph::new(" To join the Beta Rewards Program:") - .fg(GHOST_WHITE) - .block(Block::default().padding(Padding::horizontal(1))); - f.render_widget(line3, layer_one[5]); - let link3 = Hyperlink::new( - Span::styled( - " https://autonomi.com/beta", - Style::default().fg(VIVID_SKY_BLUE), - ), - "https://autonomi.com/beta", - ); - f.render_widget_ref(link3, layer_one[6]); - - let dash = Block::new() - .borders(Borders::BOTTOM) - .border_style(Style::new().fg(GHOST_WHITE)); - f.render_widget(dash, layer_one[7]); - - let button = Paragraph::new(" Close [Esc]").style(Style::default().fg(GHOST_WHITE)); - f.render_widget(button, layer_one[8]); - - f.render_widget(pop_up_border, layer_zero); - - Ok(()) - } -} diff --git a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index 9d3959647e..5b9abb59e5 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -36,7 +36,7 @@ pub struct ManageNodes { } impl ManageNodes { - pub fn new(nodes_to_start: usize) -> Result { + pub async fn new(nodes_to_start: usize) -> Result { let nodes_to_start = std::cmp::min(nodes_to_start, MAX_NODE_COUNT); let new = Self { active: false, @@ 
-113,7 +113,7 @@ impl Component for ManageNodes { ); vec![ Action::StoreNodesToStart(nodes_to_start), - Action::SwitchScene(Scene::Home), + Action::SwitchScene(Scene::Status), ] } KeyCode::Esc => { @@ -126,7 +126,7 @@ impl Component for ManageNodes { .nodes_to_start_input .clone() .with_value(self.old_value.clone()); - vec![Action::SwitchScene(Scene::Home)] + vec![Action::SwitchScene(Scene::Status)] } KeyCode::Char(c) if c.is_numeric() => { // don't allow leading zeros @@ -192,7 +192,7 @@ impl Component for ManageNodes { fn update(&mut self, action: Action) -> Result> { let send_back = match action { Action::SwitchScene(scene) => match scene { - Scene::ManageNodes => { + Scene::ManageNodesPopUp => { self.active = true; self.old_value = self.nodes_to_start_input.value().to_string(); // set to entry input mode as we want to handle everything within our handle_key_events @@ -227,7 +227,7 @@ impl Component for ManageNodes { // gap before help Constraint::Length(1), // for the help - Constraint::Length(3), + Constraint::Length(7), // for the dash Constraint::Min(1), // for the buttons @@ -299,10 +299,18 @@ impl Component for ManageNodes { f.render_widget(info, layer_one[2]); // ==== help ==== - let help = Paragraph::new( - format!(" Note: Each node will use a small amount of CPU\n Memory and Network Bandwidth. We recommend\n starting no more than 5 at a time (max {MAX_NODE_COUNT} nodes).") - ) - .fg(GHOST_WHITE); + let help = Paragraph::new(vec![ + Line::raw(format!( + "Note: Each node will use a small amount of CPU Memory and Network Bandwidth. \ + We recommend starting no more than 5 at a time (max {MAX_NODE_COUNT} nodes)." + )), + Line::raw(""), + Line::raw("▲▼ to change the number of nodes to start."), + ]) + .wrap(Wrap { trim: false }) + .block(Block::default().padding(Padding::horizontal(4))) + .alignment(Alignment::Center) + .fg(GHOST_WHITE); f.render_widget(help, layer_one[4]); // ==== dash ==== diff --git a/node-launchpad/src/components/popup/reset.rs b/node-launchpad/src/components/popup/reset_nodes.rs similarity index 79% rename from node-launchpad/src/components/popup/reset.rs rename to node-launchpad/src/components/popup/reset_nodes.rs index 2545ed3bf6..7d83ef17bf 100644 --- a/node-launchpad/src/components/popup/reset.rs +++ b/node-launchpad/src/components/popup/reset_nodes.rs @@ -8,9 +8,9 @@ use super::super::{utils::centered_rect_fixed, Component}; use crate::{ - action::{Action, HomeActions}, + action::{Action, OptionsActions}, mode::{InputMode, Scene}, - style::{clear_area, GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, + style::{clear_area, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, }; use color_eyre::Result; use crossterm::event::{Event, KeyCode, KeyEvent}; @@ -34,19 +34,19 @@ impl Component for ResetNodesPopup { let input = self.confirmation_input_field.value().to_string(); if input.to_lowercase() == "reset" { - debug!("Got reset, sending Reset action and switching to home"); + debug!("Got reset, sending Reset action and switching to Options"); vec![ - Action::SwitchScene(Scene::Home), - Action::HomeActions(HomeActions::ResetNodes), + Action::OptionsActions(OptionsActions::ResetNodes), + Action::SwitchScene(Scene::Options), ] } else { - debug!("Got Enter, but RESET is not typed. Switching to home"); - vec![Action::SwitchScene(Scene::Home)] + debug!("Got Enter, but RESET is not typed. 
Switching to Options"); + vec![Action::SwitchScene(Scene::Options)] } } KeyCode::Esc => { - debug!("Got Esc, switching to home"); - vec![Action::SwitchScene(Scene::Home)] + debug!("Got Esc, switching to Options"); + vec![Action::SwitchScene(Scene::Options)] } KeyCode::Char(' ') => vec![], KeyCode::Backspace => { @@ -68,7 +68,7 @@ impl Component for ResetNodesPopup { fn update(&mut self, action: Action) -> Result> { let send_back = match action { Action::SwitchScene(scene) => match scene { - Scene::ResetPopUp => { + Scene::ResetNodesPopUp => { self.active = true; self.confirmation_input_field = self .confirmation_input_field @@ -112,7 +112,7 @@ impl Component for ResetNodesPopup { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) - .title("Reset Nodes") + .title(" Reset Nodes ") .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)), @@ -139,6 +139,7 @@ impl Component for ResetNodesPopup { let prompt = Paragraph::new("Type in 'reset' and press Enter to Reset all your nodes") .wrap(Wrap { trim: false }) + .block(Block::new().padding(Padding::horizontal(2))) .alignment(Alignment::Center) .fg(GHOST_WHITE); @@ -161,8 +162,12 @@ impl Component for ResetNodesPopup { ); f.render_widget(input, layer_two[1]); - let text = Paragraph::new(" This will clear out all the nodes and all \n the stored data. You should still keep all\n your earned rewards."); - f.render_widget(text.fg(GHOST_WHITE), layer_two[2]); + let text = Paragraph::new("This will clear out all the nodes and all the stored data. You should still keep all your earned rewards.") + .wrap(Wrap { trim: false }) + .block(Block::new().padding(Padding::horizontal(2))) + .alignment(Alignment::Center) + .fg(GHOST_WHITE); + f.render_widget(text, layer_two[2]); let dash = Block::new() .borders(Borders::BOTTOM) @@ -174,17 +179,29 @@ impl Component for ResetNodesPopup { .split(layer_two[4]); let button_no = Line::from(vec![Span::styled( - " No, Cancel [Esc]", + "No, Cancel [Esc]", Style::default().fg(LIGHT_PERIWINKLE), )]); - f.render_widget(button_no, buttons_layer[0]); + f.render_widget( + Paragraph::new(button_no) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Left), + buttons_layer[0], + ); let button_yes = Line::from(vec![Span::styled( "Reset Nodes [Enter]", - Style::default().fg(LIGHT_PERIWINKLE), - )]); - f.render_widget(button_yes, buttons_layer[1]); + Style::default().fg(EUCALYPTUS), + )]) + .alignment(Alignment::Right); + + f.render_widget( + Paragraph::new(button_yes) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Right), + buttons_layer[1], + ); f.render_widget(pop_up_border, layer_zero); From 34655e45b601d89d905133552b026420d73388ee Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:20:00 +0200 Subject: [PATCH 079/115] feat(launchpad): hyperlink widget --- node-launchpad/src/widgets/hyperlink.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/widgets/hyperlink.rs b/node-launchpad/src/widgets/hyperlink.rs index 149603fdb0..0798811ae0 100644 --- a/node-launchpad/src/widgets/hyperlink.rs +++ b/node-launchpad/src/widgets/hyperlink.rs @@ -31,7 +31,7 @@ impl<'content> Hyperlink<'content> { // Underline solid \x1b[4m // Foreground color 45 \x1b[38;5;45m impl fmt::Display for Hyperlink<'_> { - //TODO: Parameterize the color, underline, bold, etc. + //TODO: Parameterize the color, underline, bold, etc. 
Use ratatui::Style. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, From 8c37e783c89fb6e5ae7098a60534d414a67ed1d9 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:22:11 +0200 Subject: [PATCH 080/115] feat(launchpad): footer, header, new screens and home rename --- node-launchpad/src/components.rs | 5 +- node-launchpad/src/components/footer.rs | 155 ++---- node-launchpad/src/components/header.rs | 98 ++++ node-launchpad/src/components/help.rs | 54 +- node-launchpad/src/components/options.rs | 275 ++++++++++ .../src/components/{home.rs => status.rs} | 499 ++++++++---------- 6 files changed, 694 insertions(+), 392 deletions(-) create mode 100644 node-launchpad/src/components/header.rs create mode 100644 node-launchpad/src/components/options.rs rename node-launchpad/src/components/{home.rs => status.rs} (58%) diff --git a/node-launchpad/src/components.rs b/node-launchpad/src/components.rs index 9353fb5bc7..413a0628dc 100644 --- a/node-launchpad/src/components.rs +++ b/node-launchpad/src/components.rs @@ -18,8 +18,11 @@ use crate::{ }; pub mod footer; -pub mod home; +pub mod header; +pub mod help; +pub mod options; pub mod popup; +pub mod status; pub mod utils; /// `Component` is a trait that represents a visual and interactive element of the user interface. diff --git a/node-launchpad/src/components/footer.rs b/node-launchpad/src/components/footer.rs index 31296c8d82..045e91f8ff 100644 --- a/node-launchpad/src/components/footer.rs +++ b/node-launchpad/src/components/footer.rs @@ -6,125 +6,64 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::Component; -use crate::{ - action::Action, - mode::Scene, - style::{COOL_GREY, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE}, -}; -use color_eyre::eyre::Result; +use crate::style::{EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE}; use ratatui::{prelude::*, widgets::*}; -pub struct Footer { - current_scene: Scene, - nodes_to_start_configured: bool, +pub enum NodesToStart { + Configured, + NotConfigured, } -impl Footer { - pub fn new(nodes_to_start_configured: bool) -> Self { - Self { - current_scene: Scene::Home, - nodes_to_start_configured, - } - } -} - -impl Component for Footer { - fn update(&mut self, action: Action) -> Result> { - match action { - Action::SwitchScene(scene) => { - self.current_scene = scene; - } - Action::StoreNodesToStart(count) => { - self.nodes_to_start_configured = count > 0; - } - _ => {} - } - Ok(None) - } - - fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { - let layer_zero = Layout::new( - Direction::Vertical, - [ - // for the rest of the home scene - Constraint::Min(1), - // our footer - Constraint::Max(4), - ], - ) - .split(area); - let border = Paragraph::new("").block( - Block::default() - .title(" Key Commands ") - .borders(Borders::ALL) - .border_style(Style::default().fg(COOL_GREY)), - ); +#[derive(Default)] +pub struct Footer {} - let layer_one = Layout::new( - Direction::Vertical, - [ - // border - Constraint::Length(1), - // line1 - Constraint::Length(2), - // line2 - Constraint::Length(1), - // border - Constraint::Length(1), - ], - ) - .split(layer_zero[1]); +impl StatefulWidget for Footer { + type State = NodesToStart; - let text_style = if self.nodes_to_start_configured { - Style::default().fg(EUCALYPTUS) + fn render(self, area: Rect, buf: &mut Buffer, state: &mut Self::State) { + let 
(text_style, command_style) = if matches!(state, NodesToStart::Configured) { + ( + Style::default().fg(EUCALYPTUS), + Style::default().fg(GHOST_WHITE), + ) } else { - Style::default().fg(LIGHT_PERIWINKLE) + ( + Style::default().fg(LIGHT_PERIWINKLE), + Style::default().fg(LIGHT_PERIWINKLE), + ) }; - let command_style = if self.nodes_to_start_configured { - Style::default().fg(GHOST_WHITE) - } else { - Style::default().fg(LIGHT_PERIWINKLE) - }; - - let (line1, line2) = match self.current_scene { - Scene::Home - | Scene::BetaProgramme - | Scene::HelpPopUp - | Scene::ManageNodes - | Scene::ResetPopUp => { - let line1 = Line::from(vec![ - Span::styled(" [Ctrl+S] ", command_style), - Span::styled("Start all Nodes ", text_style), - Span::styled("[Ctrl+X] ", command_style), - Span::styled("Stop all Nodes ", text_style), - Span::styled("[H] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Help", Style::default().fg(EUCALYPTUS)), - ]); + let command1 = vec![ + Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE)), + Span::styled("Manage Nodes", Style::default().fg(EUCALYPTUS)), + ]; + let command2 = vec![ + Span::styled("[Ctrl+S] ", command_style), + Span::styled("Start Nodes", text_style), + ]; + let command3 = vec![ + Span::styled("[Ctrl+X] ", command_style), + Span::styled("Stop Nodes", text_style), + ]; - let line2 = Line::from(vec![ - Span::styled(" [Ctrl+G] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Manage Nodes ", Style::default().fg(EUCALYPTUS)), - Span::styled("[Ctrl+B] ", Style::default().fg(GHOST_WHITE)), - Span::styled( - "Beta Rewards Program ", - Style::default().fg(VERY_LIGHT_AZURE), - ), - Span::styled("[Q] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Quit", Style::default().fg(EUCALYPTUS)), - ]); - - (line1, line2) - } - Scene::Options => (Line::from("none"), Line::from("none")), - }; + let cell1 = Cell::from(Line::from(command1)); + let cell2 = Cell::from(Line::from(command2)); + let cell3 = Cell::from(Line::from(command3)); + let row = Row::new(vec![cell1, cell2, cell3]); - f.render_widget(Paragraph::new(line1), layer_one[1]); - f.render_widget(Paragraph::new(line2), layer_one[2]); - // render the border after the text so we don't get white spaces at the left border - f.render_widget(border, layer_zero[1]); + let table = Table::new(vec![row], vec![Constraint::Max(1)]) + .block( + Block::default() + .borders(Borders::ALL) + .padding(Padding::horizontal(1)), + ) + .widths(vec![ + Constraint::Percentage(25), + Constraint::Percentage(25), + Constraint::Percentage(25), + Constraint::Percentage(25), + ]); - Ok(()) + StatefulWidget::render(table, area, buf, &mut TableState::default()); } } diff --git a/node-launchpad/src/components/header.rs b/node-launchpad/src/components/header.rs new file mode 100644 index 0000000000..030dbcc6c0 --- /dev/null +++ b/node-launchpad/src/components/header.rs @@ -0,0 +1,98 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use crate::style::{GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}; +use ratatui::{prelude::*, widgets::*}; + +pub enum SelectedMenuItem { + Status, + Options, + Help, +} + +pub struct Header { + launchpad_version_str: String, +} + +impl Default for Header { + fn default() -> Self { + let version_str = env!("CARGO_PKG_VERSION"); + Self { + launchpad_version_str: version_str.to_string(), + } + } +} + +impl Header { + pub fn new() -> Self { + Self::default() + } +} + +impl StatefulWidget for Header { + type State = SelectedMenuItem; + + fn render(self, area: Rect, buf: &mut Buffer, state: &mut Self::State) { + let layout = Layout::default() + .direction(Direction::Vertical) + .constraints(vec![Constraint::Length(1)]) + .split(area); + + // Define content of the header + let application_text = Span::styled( + format!(" Autonomi Node Launchpad (v{})", self.launchpad_version_str), + Style::default().fg(LIGHT_PERIWINKLE), + ); + + // Determine the color for each part of the menu based on the state + let status_color = if matches!(state, SelectedMenuItem::Status) { + VIVID_SKY_BLUE + } else { + GHOST_WHITE + }; + + let options_color = if matches!(state, SelectedMenuItem::Options) { + VIVID_SKY_BLUE + } else { + GHOST_WHITE + }; + + let help_color = if matches!(state, SelectedMenuItem::Help) { + VIVID_SKY_BLUE + } else { + GHOST_WHITE + }; + + // Create styled spans for each part of the menu + let status = Span::styled("[S]tatus", Style::default().fg(status_color)); + let options = Span::styled("[O]ptions", Style::default().fg(options_color)); + let help = Span::styled("[H]elp", Style::default().fg(help_color)); + + // Combine the menu parts with separators + let menu = vec![status, Span::raw(" | "), options, Span::raw(" | "), help]; + + // Calculate spacing between title and menu items + let total_width = (layout[0].width - 1) as usize; + let spaces = " ".repeat(total_width.saturating_sub( + application_text.content.len() + menu.iter().map(|s| s.width()).sum::(), + )); + + // Create a line with left and right text + let line = Line::from( + vec![application_text, Span::raw(spaces)] + .into_iter() + .chain(menu) + .collect::>(), + ); + + // Create a Paragraph widget to display the line + let paragraph = Paragraph::new(line).block(Block::default().borders(Borders::NONE)); + + paragraph.render(layout[0], buf); + } +} diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/help.rs index 9f981b988c..5f64da0748 100644 --- a/node-launchpad/src/components/help.rs +++ b/node-launchpad/src/components/help.rs @@ -1,3 +1,4 @@ +use super::header::SelectedMenuItem; use color_eyre::eyre::Result; use ratatui::{ layout::{Constraint, Direction, Layout, Rect}, @@ -10,7 +11,9 @@ use tokio::sync::mpsc::UnboundedSender; use super::Component; use crate::{ - action::Action, + action::{Action, MenuActions}, + components::header::Header, + mode::{InputMode, Scene}, style::{EUCALYPTUS, GHOST_WHITE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, widgets::hyperlink::Hyperlink, }; @@ -25,7 +28,7 @@ pub struct Help { impl Help { pub async fn new() -> Result { Ok(Self { - active: true, + active: false, action_tx: None, }) } @@ -37,12 +40,23 @@ impl Component for Help { } fn draw(&mut self, f: &mut Frame<'_>, area: Rect) -> Result<()> { + if !self.active { + return Ok(()); + } // We define a layout, top and down box. 
let layout = Layout::default() .direction(Direction::Vertical) - .constraints(vec![Constraint::Percentage(55), Constraint::Percentage(45)]) + .constraints(vec![ + Constraint::Length(1), + Constraint::Min(7), + Constraint::Max(9), + ]) .split(area); + // ==== Header ===== + let header = Header::new(); + f.render_stateful_widget(header, layout[0], &mut SelectedMenuItem::Help); + // ---- Get Help & Support ---- // Links @@ -79,7 +93,8 @@ impl Component for Help { Cell::from(beta_rewards_link.to_string().into_text().unwrap().clone()), ]), Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding + // Empty row for padding + Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), ]), Row::new(vec![ @@ -122,7 +137,7 @@ impl Component for Help { .title_style(Style::default().bold()), ); - f.render_widget(table_help_and_support, layout[0]); + f.render_widget(table_help_and_support, layout[1]); // ---- Keyboard shortcuts ---- let rows_keyboard_shortcuts = vec![ @@ -144,7 +159,8 @@ impl Component for Help { ])), ]), Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding + // Empty row for padding + Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), ]), @@ -166,7 +182,8 @@ impl Component for Help { ])), ]), Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding + // Empty row for padding + Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), ]), @@ -185,7 +202,8 @@ impl Component for Help { ])), ]), Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding + // Empty row for padding + Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), Cell::from(Span::raw(" ")), ]), @@ -218,8 +236,26 @@ impl Component for Help { .title_style(Style::default().bold()), ); - f.render_widget(table_keyboard_shortcuts, layout[1]); + f.render_widget(table_keyboard_shortcuts, layout[2]); Ok(()) } + + fn update(&mut self, action: Action) -> Result> { + match action { + Action::SwitchScene(scene) => match scene { + Scene::Help => { + self.active = true; + // make sure we're in navigation mode + return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); + } + _ => self.active = false, + }, + Action::MenuActions(MenuActions::HelpTab) => { + return Ok(Some(Action::SwitchScene(Scene::Help))); + } + _ => {} + } + Ok(None) + } } diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs new file mode 100644 index 0000000000..7d2280ddc5 --- /dev/null +++ b/node-launchpad/src/components/options.rs @@ -0,0 +1,275 @@ +use color_eyre::eyre::Result; +use ratatui::{ + layout::{Alignment, Constraint, Direction, Layout, Rect}, + style::{Style, Stylize}, + text::{Line, Span}, + widgets::{Block, Borders, Cell, Row, Table}, + Frame, +}; +use tokio::sync::mpsc::UnboundedSender; + +use super::{header::SelectedMenuItem, Component}; +use crate::{ + action::{Action, MenuActions, OptionsActions}, + components::header::Header, + mode::{InputMode, Scene}, + style::{EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, + system, +}; +use sn_node_manager::config::get_user_safenode_data_dir; + +#[derive(Clone)] +pub struct Options { + pub storage_mountpoint: String, + pub storage_drive: String, + pub discord_username: String, + pub active: bool, + pub action_tx: Option>, +} + +impl Options { + pub async fn new( + storage_mountpoint: String, + storage_drive: String, + discord_username: String, + ) -> Result { + Ok(Self { + storage_mountpoint, + storage_drive, + 
discord_username, + active: false, + action_tx: None, + }) + } +} + +impl Component for Options { + fn init(&mut self, _area: Rect) -> Result<()> { + Ok(()) + } + + fn draw(&mut self, f: &mut Frame<'_>, area: Rect) -> Result<()> { + if !self.active { + return Ok(()); + } + // Define the layout to split the area into four sections + let layout = Layout::default() + .direction(Direction::Vertical) + .constraints( + [ + Constraint::Length(1), + Constraint::Length(5), + Constraint::Length(5), + Constraint::Length(5), + Constraint::Length(5), + ] + .as_ref(), + ) + .split(area); + + // ==== Header ===== + let header = Header::new(); + f.render_stateful_widget(header, layout[0], &mut SelectedMenuItem::Options); + + // Storage Drive + let block1 = Block::default() + .title(" Storage Drive ") + .title_style(Style::default().bold().fg(GHOST_WHITE)) + .style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(VIVID_SKY_BLUE)); + let storage_drivename = Table::new( + vec![ + Row::new(vec![ + Cell::from(Span::raw(" ")), // Empty row for padding + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + format!(" {} ", self.storage_drive), + Style::default().fg(VIVID_SKY_BLUE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(" Change Drive ", Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled(" [Ctrl+D] ", Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ]), + ], + &[Constraint::Percentage(50), Constraint::Percentage(50)], + ) + .block(block1) + .style(Style::default().fg(GHOST_WHITE)); + + // Beta Rewards Program — Discord Username + let block2 = Block::default() + .title(" Beta Rewards Program — Discord Username ") + .title_style(Style::default().bold().fg(GHOST_WHITE)) + .style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(VIVID_SKY_BLUE)); + let beta_rewards = Table::new( + vec![ + Row::new(vec![ + // Empty row for padding + Cell::from(Span::raw(" ")), + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + format!(" {} ", self.discord_username), + Style::default().fg(VIVID_SKY_BLUE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled( + " Edit Discord Username ", + Style::default().fg(VERY_LIGHT_AZURE), + ), + Span::styled(" [Ctrl+B] ", Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ]), + ], + &[Constraint::Percentage(50), Constraint::Percentage(50)], + ) + .block(block2) + .style(Style::default().fg(GHOST_WHITE)); + + // Access Logs + let block3 = Block::default() + .title(" Access Logs ") + .title_style(Style::default().bold().fg(GHOST_WHITE)) + .style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(VIVID_SKY_BLUE)); + let logs_folder = Table::new( + vec![ + Row::new(vec![ + // Empty row for padding + Cell::from(Span::raw(" ")), + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Open the Logs folder on this device ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(" Access Logs ", Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled(" [Ctrl+L] ", Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ]), + ], + &[Constraint::Percentage(50), Constraint::Percentage(50)], + 
) + .block(block3) + .style(Style::default().fg(GHOST_WHITE)); + + // Reset All Nodes + let block4 = Block::default() + .title(" Reset All Nodes ") + .title_style(Style::default().bold().fg(GHOST_WHITE)) + .style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(EUCALYPTUS)); + let reset_nodes = Table::new( + vec![ + Row::new(vec![ + Cell::from(Span::raw(" ")), // Empty row for padding + Cell::from(Span::raw(" ")), + ]), + Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Remove and Reset all Nodes on this device ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(" Begin Reset ", Style::default().fg(EUCALYPTUS)), + Span::styled(" [Ctrl+R] ", Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ]), + ], + &[Constraint::Percentage(50), Constraint::Percentage(50)], + ) + .block(block4) + .style(Style::default().fg(GHOST_WHITE)); + + // Render the tables in their respective sections + f.render_widget(storage_drivename, layout[1]); + f.render_widget(beta_rewards, layout[2]); + f.render_widget(logs_folder, layout[3]); + f.render_widget(reset_nodes, layout[4]); + + Ok(()) + } + + fn update(&mut self, action: Action) -> Result> { + match action { + Action::SwitchScene(scene) => match scene { + Scene::Options + | Scene::BetaProgrammePopUp + | Scene::ResetNodesPopUp + | Scene::ChangeDrivePopUp + | Scene::ChangeDriveConfirmPopup => { + self.active = true; + // make sure we're in navigation mode + return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); + } + _ => self.active = false, + }, + Action::MenuActions(MenuActions::OptionsTab) => { + return Ok(Some(Action::SwitchScene(Scene::Options))); + } + Action::OptionsActions(action) => match action { + OptionsActions::TriggerChangeDrive => { + return Ok(Some(Action::SwitchScene(Scene::ChangeDrivePopUp))); + } + OptionsActions::UpdateStorageDrive(mountpoint, drive) => { + self.storage_mountpoint = mountpoint; + self.storage_drive = drive; + } + OptionsActions::TriggerBetaProgramme => { + return Ok(Some(Action::SwitchScene(Scene::BetaProgrammePopUp))); + } + OptionsActions::UpdateBetaProgrammeUsername(username) => { + self.discord_username = username; + } + OptionsActions::TriggerAccessLogs => { + if let Err(e) = + system::open_folder(get_user_safenode_data_dir()?.to_str().unwrap()) + { + error!("Failed to open folder: {}", e); + } + } + OptionsActions::TriggerResetNodes => { + return Ok(Some(Action::SwitchScene(Scene::ResetNodesPopUp))) + } + _ => {} + }, + _ => {} + } + Ok(None) + } +} diff --git a/node-launchpad/src/components/home.rs b/node-launchpad/src/components/status.rs similarity index 58% rename from node-launchpad/src/components/home.rs rename to node-launchpad/src/components/status.rs index a122698971..4bea5ebd95 100644 --- a/node-launchpad/src/components/home.rs +++ b/node-launchpad/src/components/status.rs @@ -6,9 +6,16 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use super::{popup::manage_nodes::GB_PER_NODE, utils::centered_rect_fixed, Component, Frame}; +use super::footer::NodesToStart; +use super::header::SelectedMenuItem; +use super::{ + footer::Footer, header::Header, popup::manage_nodes::GB_PER_NODE, utils::centered_rect_fixed, + Component, Frame, +}; +use crate::action::OptionsActions; +use crate::config::get_launchpad_nodes_data_dir_path; use crate::{ - action::{Action, HomeActions}, + action::{Action, MenuActions, StatusActions}, config::Config, mode::{InputMode, Scene}, node_stats::NodeStats, @@ -18,8 +25,10 @@ use crate::{ }, }; use color_eyre::eyre::{OptionExt, Result}; +use crossterm::event::KeyEvent; +use ratatui::text::Span; use ratatui::{prelude::*, widgets::*}; -use sn_node_manager::{config::get_node_registry_path, VerbosityLevel}; +use sn_node_manager::config::get_node_registry_path; use sn_peers_acquisition::PeersArgs; use sn_service_management::{ control::ServiceController, NodeRegistry, NodeServiceData, ServiceStatus, @@ -31,17 +40,19 @@ use std::{ }; use tokio::sync::mpsc::UnboundedSender; +use super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes}; + const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// If nat detection fails for more than 3 times, we don't want to waste time running during every node start. const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; -pub struct Home { +#[derive(Clone)] +pub struct Status { /// Whether the component is active right now, capturing keystrokes + drawing things. active: bool, action_sender: Option>, config: Config, // state - launchpad_version_str: String, node_services: Vec, is_nat_status_determined: bool, error_while_running_nat_detection: usize, @@ -57,28 +68,30 @@ pub struct Home { peers_args: PeersArgs, // If path is provided, we don't fetch the binary from the network safenode_path: Option, + // Path where the node data is stored + data_dir_path: PathBuf, } +#[derive(Clone)] pub enum LockRegistryState { StartingNodes, StoppingNodes, ResettingNodes, } -impl Home { +impl Status { pub async fn new( allocated_disk_space: usize, discord_username: &str, peers_args: PeersArgs, safenode_path: Option, + data_dir_path: PathBuf, ) -> Result { - let version_str = env!("CARGO_PKG_VERSION"); - let mut home = Self { + let mut status = Self { peers_args, action_sender: Default::default(), config: Default::default(), active: true, - launchpad_version_str: version_str.to_string(), node_services: Default::default(), is_nat_status_determined: false, error_while_running_nat_detection: 0, @@ -89,6 +102,7 @@ impl Home { lock_registry: None, discord_username: discord_username.to_string(), safenode_path, + data_dir_path, }; let now = Instant::now(); @@ -103,13 +117,13 @@ impl Home { .await?; node_registry.save()?; debug!("Node registry states refreshed in {:?}", now.elapsed()); - home.load_node_registry_and_update_states()?; + status.load_node_registry_and_update_states()?; - Ok(home) + Ok(status) } /// Tries to trigger the update of node stats if the last update was more than `NODE_STAT_UPDATE_INTERVAL` ago. - /// The result is sent via the HomeActions::NodesStatsObtained action. + /// The result is sent via the StatusActions::NodesStatsObtained action. 
fn try_update_node_stats(&mut self, force_update: bool) -> Result<()> { if self.node_stats_last_update.elapsed() > NODE_STAT_UPDATE_INTERVAL || force_update { self.node_stats_last_update = Instant::now(); @@ -208,7 +222,7 @@ impl Home { } } -impl Component for Home { +impl Component for Status { fn register_action_handler(&mut self, tx: UnboundedSender) -> Result<()> { self.action_sender = Some(tx); @@ -226,29 +240,33 @@ impl Component for Home { #[allow(clippy::comparison_chain)] fn update(&mut self, action: Action) -> Result> { match action { + Action::Tick => { + self.try_update_node_stats(false)?; + } Action::SwitchScene(scene) => match scene { - Scene::Home => { + Scene::Status => { self.active = true; // make sure we're in navigation mode return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); } - Scene::BetaProgramme - | Scene::ManageNodes - | Scene::HelpPopUp - | Scene::ResetPopUp => self.active = true, + Scene::ManageNodesPopUp => self.active = true, _ => self.active = false, }, + Action::MenuActions(MenuActions::StatusTab) => { + return Ok(Some(Action::SwitchScene(Scene::Status))); + } Action::StoreNodesToStart(count) => { self.nodes_to_start = count; if self.nodes_to_start == 0 { info!("Nodes to start set to 0. Sending command to stop all nodes."); - return Ok(Some(Action::HomeActions(HomeActions::StopNodes))); + return Ok(Some(Action::StatusActions(StatusActions::StopNodes))); } else { info!("Nodes to start set to: {count}. Sending command to start nodes"); - return Ok(Some(Action::HomeActions(HomeActions::StartNodes))); + return Ok(Some(Action::StatusActions(StatusActions::StartNodes))); } } Action::StoreDiscordUserName(username) => { + debug!("Storing discord username: {username:?}"); let has_changed = self.discord_username != username; let we_have_nodes = !self.node_services.is_empty(); @@ -261,44 +279,97 @@ impl Component for Home { reset_nodes(action_sender, true); } } - Action::HomeActions(HomeActions::StartNodes) => { - if self.lock_registry.is_some() { - error!("Registry is locked. Cannot start node now."); - return Ok(None); - } - - if self.nodes_to_start == 0 { - info!("Nodes to start not set. Ask for input."); - return Ok(Some(Action::HomeActions(HomeActions::TriggerManageNodes))); - } - - self.lock_registry = Some(LockRegistryState::StartingNodes); + Action::StoreStorageDrive(ref drive_mountpoint, ref _drive_name) => { let action_sender = self.get_actions_sender()?; - info!("Running maintain node count: {:?}", self.nodes_to_start); - - maintain_n_running_nodes( - self.nodes_to_start as u16, - self.discord_username.clone(), - self.peers_args.clone(), - self.should_we_run_nat_detection(), - self.safenode_path.clone(), - action_sender, - ); + reset_nodes(action_sender, false); + self.data_dir_path = + get_launchpad_nodes_data_dir_path(PathBuf::from(drive_mountpoint.clone()))?; } - Action::HomeActions(HomeActions::StopNodes) => { - if self.lock_registry.is_some() { - error!("Registry is locked. Cannot stop node now."); - return Ok(None); + Action::StatusActions(status_action) => { + match status_action { + StatusActions::NodesStatsObtained(stats) => { + self.node_stats = stats; + } + StatusActions::StartNodesCompleted | StatusActions::StopNodesCompleted => { + self.lock_registry = None; + self.load_node_registry_and_update_states()?; + } + StatusActions::ResetNodesCompleted { trigger_start_node } => { + self.lock_registry = None; + self.load_node_registry_and_update_states()?; + + if trigger_start_node { + debug!("Reset nodes completed. 
Triggering start nodes."); + return Ok(Some(Action::StatusActions(StatusActions::StartNodes))); + } + debug!("Reset nodes completed"); + } + StatusActions::SuccessfullyDetectedNatStatus => { + debug!("Successfully detected nat status, is_nat_status_determined set to true"); + self.is_nat_status_determined = true; + } + StatusActions::ErrorWhileRunningNatDetection => { + self.error_while_running_nat_detection += 1; + debug!( + "Error while running nat detection. Error count: {}", + self.error_while_running_nat_detection + ); + } + StatusActions::TriggerManageNodes => { + return Ok(Some(Action::SwitchScene(Scene::ManageNodesPopUp))); + } + StatusActions::PreviousTableItem => { + self.select_previous_table_item(); + } + StatusActions::NextTableItem => { + self.select_next_table_item(); + } + StatusActions::StartNodes => { + debug!("Got action to start nodes"); + if self.lock_registry.is_some() { + error!("Registry is locked. Cannot start node now."); + return Ok(None); + } + + if self.nodes_to_start == 0 { + info!("Nodes to start not set. Ask for input."); + return Ok(Some(Action::StatusActions( + StatusActions::TriggerManageNodes, + ))); + } + + self.lock_registry = Some(LockRegistryState::StartingNodes); + let action_sender = self.get_actions_sender()?; + info!("Running maintain node count: {:?}", self.nodes_to_start); + + maintain_n_running_nodes( + self.nodes_to_start as u16, + self.discord_username.clone(), + self.peers_args.clone(), + self.should_we_run_nat_detection(), + self.safenode_path.clone(), + Some(self.data_dir_path.clone()), + action_sender, + ); + } + StatusActions::StopNodes => { + debug!("Got action to stop nodes"); + if self.lock_registry.is_some() { + error!("Registry is locked. Cannot stop node now."); + return Ok(None); + } + + let running_nodes = self.get_running_nodes(); + self.lock_registry = Some(LockRegistryState::StoppingNodes); + let action_sender = self.get_actions_sender()?; + info!("Stopping node service: {running_nodes:?}"); + + stop_nodes(running_nodes, action_sender); + } } - - let running_nodes = self.get_running_nodes(); - self.lock_registry = Some(LockRegistryState::StoppingNodes); - let action_sender = self.get_actions_sender()?; - info!("Stopping node service: {running_nodes:?}"); - - stop_nodes(running_nodes, action_sender); } - Action::HomeActions(HomeActions::ResetNodes) => { + Action::OptionsActions(OptionsActions::ResetNodes) => { + debug!("Got action to reset nodes"); if self.lock_registry.is_some() { error!("Registry is locked. Cannot reset nodes now."); return Ok(None); @@ -309,58 +380,6 @@ impl Component for Home { info!("Got action to reset nodes"); reset_nodes(action_sender, false); } - - Action::Tick => { - self.try_update_node_stats(false)?; - } - Action::HomeActions(HomeActions::NodesStatsObtained(stats)) => { - self.node_stats = stats; - } - Action::HomeActions(HomeActions::StartNodesCompleted) - | Action::HomeActions(HomeActions::StopNodesCompleted) => { - self.lock_registry = None; - self.load_node_registry_and_update_states()?; - } - Action::HomeActions(HomeActions::ResetNodesCompleted { trigger_start_node }) => { - self.lock_registry = None; - self.load_node_registry_and_update_states()?; - - if trigger_start_node { - debug!("Reset nodes completed. 
Triggering start nodes."); - return Ok(Some(Action::HomeActions(HomeActions::StartNodes))); - } - debug!("Reset nodes completed"); - } - Action::HomeActions(HomeActions::SuccessfullyDetectedNatStatus) => { - debug!("Successfully detected nat status, is_nat_status_determined set to true"); - self.is_nat_status_determined = true; - } - Action::HomeActions(HomeActions::ErrorWhileRunningNatDetection) => { - self.error_while_running_nat_detection += 1; - debug!( - "Error while running nat detection. Error count: {}", - self.error_while_running_nat_detection - ); - } - // todo: should triggers go here? Make distinction between a component + a scene and how they interact. - Action::HomeActions(HomeActions::TriggerBetaProgramme) => { - return Ok(Some(Action::SwitchScene(Scene::BetaProgramme))); - } - Action::HomeActions(HomeActions::TriggerManageNodes) => { - return Ok(Some(Action::SwitchScene(Scene::ManageNodes))); - } - Action::HomeActions(HomeActions::TriggerHelp) => { - return Ok(Some(Action::SwitchScene(Scene::HelpPopUp))); - } - Action::HomeActions(HomeActions::TriggerResetNodesPopUp) => { - return Ok(Some(Action::SwitchScene(Scene::ResetPopUp))); - } - Action::HomeActions(HomeActions::PreviousTableItem) => { - self.select_previous_table_item(); - } - Action::HomeActions(HomeActions::NextTableItem) => { - self.select_next_table_item(); - } _ => {} } Ok(None) @@ -371,67 +390,55 @@ impl Component for Home { return Ok(()); } - let layer_zero = Layout::new( + let layout = Layout::new( Direction::Vertical, [ - // header - Constraint::Max(1), - // device status + // Header + Constraint::Length(1), + // Device status Constraint::Max(6), - // node status + // Node status Constraint::Min(3), - // footer - Constraint::Max(4), + // Footer + Constraint::Length(3), ], ) .split(area); - // ==== Header ==== + // ==== Header ===== - let layer_one_header = Layout::new( - Direction::Horizontal, - [Constraint::Percentage(100), Constraint::Percentage(0)], // We leave space for future tabs - ) - .split(layer_zero[0]); - - f.render_widget( - Paragraph::new(format!( - " Autonomi Node Launchpad (v{})", - self.launchpad_version_str - )) - .alignment(Alignment::Left) - .fg(LIGHT_PERIWINKLE), - layer_one_header[0], - ); + let header = Header::new(); + f.render_stateful_widget(header, layout[0], &mut SelectedMenuItem::Status); // ==== Device Status ===== - if self.node_services.is_empty() { + if self.discord_username.is_empty() { let line1 = Line::from(vec![Span::styled( - "No Nodes on this device", - Style::default().fg(GHOST_WHITE), + "Add this device to the Beta Rewards Program", + Style::default().fg(VERY_LIGHT_AZURE), )]); let line2 = Line::from(vec![ - Span::styled("Press ", Style::default().fg(GHOST_WHITE)), - Span::styled("Ctrl+G", Style::default().fg(EUCALYPTUS)), + Span::styled("Press ", Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled("[Ctrl+B]", Style::default().fg(GHOST_WHITE)), + Span::styled(" to add your ", Style::default().fg(VERY_LIGHT_AZURE)), Span::styled( - " to Add Nodes and get started.", - Style::default().fg(GHOST_WHITE), + "Discord Username", + Style::default().fg(VERY_LIGHT_AZURE).bold(), ), ]); f.render_widget( - Paragraph::new(vec![line1, line2]).block( + Paragraph::new(vec![Line::raw(""), Line::raw(""), line1, line2]).block( Block::default() .title(" Device Status ") .title_style(Style::new().fg(GHOST_WHITE)) .borders(Borders::ALL) - .padding(Padding::uniform(1)) + .padding(Padding::horizontal(1)) .border_style(Style::new().fg(VERY_LIGHT_AZURE)), ), - layer_zero[1], + layout[1], ); } 
else { - // display stats as a table + // Device Status as a table let storage_allocated_row = Row::new(vec![ Cell::new("Storage Allocated".to_string()).fg(GHOST_WHITE), @@ -451,23 +458,34 @@ impl Component for Home { Cell::new(memory_use_val).fg(GHOST_WHITE), ]); - // Combine "Total Nanos Earned" and "Discord Username" into a single row - let mut username_color = GHOST_WHITE; + // Combine "Nanos Earned" and "Discord Username" into a single row + let discord_username_title = Span::styled( + "Discord Username: ".to_string(), + Style::default().fg(VIVID_SKY_BLUE), + ); + + let discord_username = if !self.discord_username.is_empty() { + Span::styled( + self.discord_username.clone(), + Style::default().fg(VIVID_SKY_BLUE), + ) + .bold() + } else { + Span::styled( + "[Ctrl+B] to set".to_string(), + Style::default().fg(GHOST_WHITE), + ) + }; + let total_nanos_earned_and_discord = Row::new(vec![ - Cell::new("Total Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), + Cell::new("Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), Cell::new(self.node_stats.forwarded_rewards.to_string()) .fg(VIVID_SKY_BLUE) .bold(), - Cell::new("".to_string()), - Cell::new("Discord Username:".to_string()).fg(VIVID_SKY_BLUE), - Cell::new(if self.discord_username.is_empty() { - "[Ctrl+B] to set".to_string() - } else { - username_color = VIVID_SKY_BLUE; - self.discord_username.clone() - }) - .fg(username_color) - .bold(), + Cell::new( + Line::from(vec![discord_username_title, discord_username]) + .alignment(Alignment::Right), + ), ]); let stats_rows = vec![ @@ -479,9 +497,7 @@ impl Component for Home { let column_constraints = [ Constraint::Percentage(25), Constraint::Percentage(5), - Constraint::Percentage(35), // empty cell to avoid alignment left <> right - Constraint::Percentage(20), - Constraint::Percentage(15), + Constraint::Percentage(70), ]; let stats_table = Table::new(stats_rows, stats_width) .block( @@ -493,7 +509,7 @@ impl Component for Home { .style(Style::default().fg(VERY_LIGHT_AZURE)), ) .widths(column_constraints); - f.render_widget(stats_table, layer_zero[1]); + f.render_widget(stats_table, layout[1]); }; // ==== Node Status ===== @@ -521,18 +537,34 @@ impl Component for Home { .collect(); if node_rows.is_empty() { + let line1 = Line::from(vec![ + Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE)), + Span::styled("to Add and ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE)), + Span::styled("on this device", Style::default().fg(LIGHT_PERIWINKLE)), + ]); + + let line2 = Line::from(vec![Span::styled( + "Each node will use 5GB of storage and a small amount of memory, \ + CPU, and Network bandwidth. 
Most computers can run many nodes at once, \ + but we recommend you add them gradually", + Style::default().fg(LIGHT_PERIWINKLE), + )]); + f.render_widget( - Paragraph::new("Nodes will appear here when added") + Paragraph::new(vec![Line::raw(""), line1, Line::raw(""), line2]) + .wrap(Wrap { trim: false }) .fg(LIGHT_PERIWINKLE) .block( Block::default() - .title(format!(" Nodes ({}) ", self.nodes_to_start)) + .title(" Nodes (0) ".to_string()) .title_style(Style::default().fg(LIGHT_PERIWINKLE)) .borders(Borders::ALL) .border_style(style::Style::default().fg(COOL_GREY)) - .padding(Padding::uniform(1)), + .padding(Padding::horizontal(1)), ), - layer_zero[2], + layout[2], ); } else { let node_widths = [ @@ -541,7 +573,7 @@ impl Component for Home { Constraint::Max(10), Constraint::Max(10), ]; - let table = Table::new(node_rows, node_widths) + let table = Table::new(node_rows.clone(), node_widths) .column_spacing(2) .highlight_style(Style::new().reversed()) .block( @@ -553,31 +585,50 @@ impl Component for Home { .border_style(Style::default().fg(EUCALYPTUS)), ) .highlight_symbol("*"); - f.render_stateful_widget(table, layer_zero[2], &mut self.node_table_state); + f.render_stateful_widget(table, layout[2], &mut self.node_table_state); } + // ==== Footer ===== + + let footer = Footer::default(); + let footer_state = if !node_rows.is_empty() { + &mut NodesToStart::Configured + } else { + &mut NodesToStart::NotConfigured + }; + f.render_stateful_widget(footer, layout[3], footer_state); + // ===== Popup ===== if let Some(registry_state) = &self.lock_registry { let popup_area = centered_rect_fixed(50, 12, area); clear_area(f, popup_area); - let popup_border = Paragraph::new("Manage Nodes").block( + let popup_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) + .title(" Manage Nodes ") + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) .border_style(Style::new().fg(GHOST_WHITE)), ); let popup_text = match registry_state { LockRegistryState::StartingNodes => { if self.should_we_run_nat_detection() { - "Starting nodes...\nPlease wait, performing initial NAT detection\nThis may take a couple minutes." + vec![ + Line::raw("Starting nodes..."), + Line::raw(""), + Line::raw(""), + Line::raw("Please wait, performing initial NAT detection"), + Line::raw("This may take a couple minutes."), + ] } else { - "Starting nodes..." 
+ vec![Line::raw("Starting nodes...")] } } - LockRegistryState::StoppingNodes => "Stopping nodes...", - LockRegistryState::ResettingNodes => "Resetting nodes...", + LockRegistryState::StoppingNodes => vec![Line::raw("Stopping nodes...")], + LockRegistryState::ResettingNodes => vec![Line::raw("Resetting nodes...")], }; let centred_area = Layout::new( Direction::Vertical, @@ -594,6 +645,8 @@ impl Component for Home { ) .split(popup_area)[2]; let text = Paragraph::new(popup_text) + .block(Block::default().padding(Padding::horizontal(2))) + .wrap(Wrap { trim: false }) .alignment(Alignment::Center) .fg(EUCALYPTUS); f.render_widget(text, centred_area); @@ -603,111 +656,9 @@ impl Component for Home { Ok(()) } -} -fn stop_nodes(services: Vec, action_sender: UnboundedSender) { - tokio::task::spawn_local(async move { - if let Err(err) = - sn_node_manager::cmd::node::stop(vec![], services, VerbosityLevel::Minimal).await - { - error!("Error while stopping services {err:?}"); - } else { - info!("Successfully stopped services"); - } - if let Err(err) = action_sender.send(Action::HomeActions(HomeActions::StopNodesCompleted)) { - error!("Error while sending action: {err:?}"); - } - }); -} - -async fn run_nat_detection_process() -> Result<()> { - sn_node_manager::cmd::nat_detection::run_nat_detection( - None, - true, - None, - None, - Some("0.1.0".to_string()), - VerbosityLevel::Minimal, - ) - .await?; - Ok(()) -} - -fn maintain_n_running_nodes( - count: u16, - owner: String, - peers_args: PeersArgs, - run_nat_detection: bool, - safenode_path: Option, - action_sender: UnboundedSender, -) { - tokio::task::spawn_local(async move { - if run_nat_detection { - if let Err(err) = run_nat_detection_process().await { - error!("Error while running nat detection {err:?}. Registering the error."); - if let Err(err) = action_sender.send(Action::HomeActions( - HomeActions::ErrorWhileRunningNatDetection, - )) { - error!("Error while sending action: {err:?}"); - } - } else { - info!("Successfully ran nat detection."); - } - } - - let owner = if owner.is_empty() { None } else { Some(owner) }; - if let Err(err) = sn_node_manager::cmd::node::maintain_n_running_nodes( - false, - true, - 120, - count, - None, - true, - None, - false, - false, - None, - None, - None, - None, - owner, - peers_args, - None, - None, - safenode_path, - None, - false, - None, - None, - VerbosityLevel::Minimal, - None, - ) - .await - { - error!("Error while maintaining {count:?} running nodes {err:?}"); - } else { - info!("Maintained {count} running nodes successfully."); - } - if let Err(err) = action_sender.send(Action::HomeActions(HomeActions::StartNodesCompleted)) - { - error!("Error while sending action: {err:?}"); - } - }); -} - -fn reset_nodes(action_sender: UnboundedSender, start_nodes_after_reset: bool) { - tokio::task::spawn_local(async move { - if let Err(err) = sn_node_manager::cmd::node::reset(true, VerbosityLevel::Minimal).await { - error!("Error while resetting services {err:?}"); - } else { - info!("Successfully reset services"); - } - if let Err(err) = - action_sender.send(Action::HomeActions(HomeActions::ResetNodesCompleted { - trigger_start_node: start_nodes_after_reset, - })) - { - error!("Error while sending action: {err:?}"); - } - }); + fn handle_key_events(&mut self, key: KeyEvent) -> Result> { + debug!("Key received in Home: {:?}", key); + Ok(vec![]) + } } From 1c1af7814f9384cfa756d7cd849860daadf5612b Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 14:24:09 +0200 Subject: [PATCH 081/115] 
feat(launchpad): new mods, config and application overall --- node-launchpad/src/app.rs | 70 +++++++++++++----- node-launchpad/src/config.rs | 39 +++++++++- node-launchpad/src/lib.rs | 2 + node-launchpad/src/node_mgmt.rs | 121 +++++++++++++++++++++++++++++++ node-launchpad/src/node_stats.rs | 8 +- node-launchpad/src/system.rs | 113 +++++++++++++++++++++++++++++ node-launchpad/src/utils.rs | 10 ++- 7 files changed, 338 insertions(+), 25 deletions(-) create mode 100644 node-launchpad/src/node_mgmt.rs create mode 100644 node-launchpad/src/system.rs diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index 130cbc664c..5eb7c7a3aa 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -10,17 +10,23 @@ use std::path::PathBuf; use crate::{ action::Action, - components::popup::{ - beta_programme::BetaProgramme, help::HelpPopUp, manage_nodes::ManageNodes, - reset::ResetNodesPopup, + components::{ + help::Help, + options::Options, + popup::{ + beta_programme::BetaProgramme, change_drive::ChangeDrivePopup, + change_drive_confirm::ChangeDriveConfirmPopup, manage_nodes::ManageNodes, + reset_nodes::ResetNodesPopup, + }, + status::Status, + Component, }, - components::{footer::Footer, home::Home, Component}, - config::{AppData, Config}, + config::{get_launchpad_nodes_data_dir_path, AppData, Config}, mode::{InputMode, Scene}, style::SPACE_CADET, tui, }; -use color_eyre::eyre::Result; +use color_eyre::eyre::{eyre, Result}; use crossterm::event::KeyEvent; use ratatui::{prelude::Rect, style::Style, widgets::Block}; use sn_peers_acquisition::PeersArgs; @@ -46,21 +52,39 @@ impl App { peers_args: PeersArgs, safenode_path: Option, ) -> Result { + // Configurations let app_data = AppData::load()?; + let config = Config::new()?; + + let data_dir_path = match &app_data.storage_mountpoint { + Some(path) => get_launchpad_nodes_data_dir_path(PathBuf::from(path))?, + None => return Err(eyre!("Storage mountpoint for node data is not set")), + }; + debug!("Data dir path for nodes: {data_dir_path:?}"); - let home = Home::new( + // Main Screens + let status = Status::new( app_data.nodes_to_start, &app_data.discord_username, peers_args, safenode_path, + data_dir_path, ) .await?; - let config = Config::new()?; - let discord_username_input = BetaProgramme::new(app_data.discord_username.clone()); - let manage_nodes = ManageNodes::new(app_data.nodes_to_start)?; - let footer = Footer::new(app_data.nodes_to_start > 0); - let help = HelpPopUp::default(); + let options = Options::new( + app_data.storage_mountpoint.clone().unwrap(), + app_data.storage_drive.clone().unwrap(), + app_data.discord_username.clone(), + ) + .await?; + let help = Help::new().await?; + + // Popups let reset_nodes = ResetNodesPopup::default(); + let discord_username_input = BetaProgramme::new(app_data.discord_username.clone()); + let manage_nodes = ManageNodes::new(app_data.nodes_to_start).await?; + let change_drive = ChangeDrivePopup::new(app_data.storage_mountpoint.clone().unwrap()); + let change_drive_confirm = ChangeDriveConfirmPopup::default(); Ok(Self { config, @@ -68,17 +92,21 @@ impl App { tick_rate, frame_rate, components: vec![ - Box::new(footer), - Box::new(home), - Box::new(discord_username_input), - Box::new(manage_nodes), + // Sections + Box::new(status), + Box::new(options), Box::new(help), + // Popups + Box::new(change_drive), + Box::new(change_drive_confirm), Box::new(reset_nodes), + Box::new(manage_nodes), + Box::new(discord_username_input), ], should_quit: false, should_suspend: false, input_mode: 
InputMode::Navigation, - scene: Scene::Home, + scene: Scene::Status, last_tick_key_events: Vec::new(), }) } @@ -184,6 +212,7 @@ impl App { info!("Input mode switched to: {mode:?}"); self.input_mode = mode; } + // Storing Application Data Action::StoreDiscordUserName(ref username) => { debug!("Storing discord username: {username:?}"); self.app_data.discord_username.clone_from(username); @@ -194,6 +223,13 @@ impl App { self.app_data.nodes_to_start = count; self.app_data.save()?; } + Action::StoreStorageDrive(ref drive_mountpoint, ref drive_name) => { + debug!("Storing storage drive: {drive_mountpoint:?}, {drive_name:?}"); + self.app_data.storage_mountpoint = + Some(drive_mountpoint.as_str().to_string()); + self.app_data.storage_drive = Some(drive_name.as_str().to_string()); + self.app_data.save()?; + } _ => {} } for component in self.components.iter_mut() { diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 825128466c..0cbede766b 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::system; use crate::{action::Action, mode::Scene}; use color_eyre::eyre::Result; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; @@ -17,6 +18,26 @@ use std::path::PathBuf; const CONFIG: &str = include_str!("../.config/config.json5"); +// Where to store the Nodes data +pub fn get_launchpad_nodes_data_dir_path(base_dir: PathBuf) -> Result { + // We get the data dir and we remove the first character which is the / character + let mut data_directory = dirs_next::data_dir() + .expect("Data directory is obtainable") + .to_str() + .unwrap() + .to_string(); + data_directory.remove(0); + + let mut mount_point = base_dir.clone(); + mount_point.push(data_directory); + mount_point.push("safe"); + mount_point.push("node"); + debug!("Creating nodes data dir: {:?}", mount_point.as_path()); + std::fs::create_dir_all(mount_point.as_path())?; + Ok(mount_point) +} + +// Where to store the Launchpad config & logs pub fn get_launchpad_data_dir_path() -> Result { let mut home_dirs = dirs_next::data_dir().expect("Data directory is obtainable"); home_dirs.push("safe"); @@ -52,6 +73,8 @@ pub async fn configure_winsw() -> Result<()> { pub struct AppData { pub discord_username: String, pub nodes_to_start: usize, + pub storage_mountpoint: Option, + pub storage_drive: Option, } impl AppData { @@ -66,10 +89,22 @@ impl AppData { let data = std::fs::read_to_string(config_path) .map_err(|_| color_eyre::eyre::eyre!("Failed to read app data file"))?; + let app_data: AppData = serde_json::from_str(&data) .map_err(|_| color_eyre::eyre::eyre!("Failed to parse app data"))?; - Ok(app_data) + let mut app_data_cloned = app_data.clone(); + if app_data.storage_mountpoint.is_none() || app_data.storage_drive.is_none() { + // If the storage drive is not set, set it to the default mount point + let drive_info = system::get_default_mount_point(); + app_data_cloned.storage_drive = Some(drive_info.0); + app_data_cloned.storage_mountpoint = Some(drive_info.1); + debug!( + "Setting storage drive to {:?}", + app_data_cloned.storage_mountpoint + ); + } + Ok(app_data_cloned) } pub fn save(&self) -> Result<()> { @@ -531,7 +566,7 @@ mod tests { let c = Config::new()?; assert_eq!( c.keybindings - .get(&Scene::Home) + .get(&Scene::Status) .unwrap() .get(&parse_key_sequence("").unwrap_or_default()) .unwrap(), diff 
--git a/node-launchpad/src/lib.rs b/node-launchpad/src/lib.rs index 8e9ba7b8d4..dc0ecc2e73 100644 --- a/node-launchpad/src/lib.rs +++ b/node-launchpad/src/lib.rs @@ -11,8 +11,10 @@ pub mod app; pub mod components; pub mod config; pub mod mode; +pub mod node_mgmt; pub mod node_stats; pub mod style; +pub mod system; pub mod tui; pub mod utils; pub mod widgets; diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs new file mode 100644 index 0000000000..cf9910beb0 --- /dev/null +++ b/node-launchpad/src/node_mgmt.rs @@ -0,0 +1,121 @@ +use std::path::PathBuf; + +use sn_node_manager::VerbosityLevel; +use sn_peers_acquisition::PeersArgs; +use tokio::sync::mpsc::UnboundedSender; + +use crate::action::{Action, StatusActions}; +use color_eyre::eyre::Result; + +const NODE_START_INTERVAL: usize = 10; + +pub fn stop_nodes(services: Vec, action_sender: UnboundedSender) { + tokio::task::spawn_local(async move { + if let Err(err) = + sn_node_manager::cmd::node::stop(vec![], services, VerbosityLevel::Minimal).await + { + error!("Error while stopping services {err:?}"); + } else { + info!("Successfully stopped services"); + } + if let Err(err) = + action_sender.send(Action::StatusActions(StatusActions::StopNodesCompleted)) + { + error!("Error while sending action: {err:?}"); + } + }); +} + +pub fn maintain_n_running_nodes( + count: u16, + owner: String, + peers_args: PeersArgs, + run_nat_detection: bool, + safenode_path: Option, + data_dir_path: Option, + action_sender: UnboundedSender, +) { + tokio::task::spawn_local(async move { + if run_nat_detection { + if let Err(err) = run_nat_detection_process().await { + error!("Error while running nat detection {err:?}. Registering the error."); + if let Err(err) = action_sender.send(Action::StatusActions( + StatusActions::ErrorWhileRunningNatDetection, + )) { + error!("Error while sending action: {err:?}"); + } + } else { + info!("Successfully ran nat detection."); + } + } + + let owner = if owner.is_empty() { None } else { Some(owner) }; + debug!("Maintaining {count} running nodes in data_dir_path: {data_dir_path:?}"); + if let Err(err) = sn_node_manager::cmd::node::maintain_n_running_nodes( + false, + true, + count, + data_dir_path, + true, + None, + false, + false, + None, + None, + None, + None, + owner, + peers_args, + None, + None, + safenode_path, + None, + false, + None, + None, + VerbosityLevel::Minimal, + NODE_START_INTERVAL as u64, + ) + .await + { + error!("Error while maintaining {count:?} running nodes {err:?}"); + } else { + info!("Maintained {count} running nodes successfully."); + } + if let Err(err) = + action_sender.send(Action::StatusActions(StatusActions::StartNodesCompleted)) + { + error!("Error while sending action: {err:?}"); + } + }); +} + +pub fn reset_nodes(action_sender: UnboundedSender, start_nodes_after_reset: bool) { + tokio::task::spawn_local(async move { + if let Err(err) = sn_node_manager::cmd::node::reset(true, VerbosityLevel::Minimal).await { + error!("Error while resetting services {err:?}"); + } else { + info!("Successfully reset services"); + } + if let Err(err) = + action_sender.send(Action::StatusActions(StatusActions::ResetNodesCompleted { + trigger_start_node: start_nodes_after_reset, + })) + { + error!("Error while sending action: {err:?}"); + } + }); +} + +async fn run_nat_detection_process() -> Result<()> { + sn_node_manager::cmd::nat_detection::run_nat_detection( + None, + true, + None, + None, + Some("0.1.0".to_string()), + VerbosityLevel::Minimal, + ) + .await?; + Ok(()) +} diff --git 
a/node-launchpad/src/node_stats.rs b/node-launchpad/src/node_stats.rs index fec3eb9bf3..c43d868eef 100644 --- a/node-launchpad/src/node_stats.rs +++ b/node-launchpad/src/node_stats.rs @@ -13,7 +13,7 @@ use sn_service_management::{NodeServiceData, ServiceStatus}; use std::{path::PathBuf, time::Instant}; use tokio::sync::mpsc::UnboundedSender; -use crate::action::{Action, HomeActions}; +use crate::action::{Action, StatusActions}; #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct NodeStats { @@ -86,9 +86,9 @@ impl NodeStats { } } - if let Err(err) = action_sender.send(Action::HomeActions(HomeActions::NodesStatsObtained( - all_node_stats, - ))) { + if let Err(err) = action_sender.send(Action::StatusActions( + StatusActions::NodesStatsObtained(all_node_stats), + )) { error!("Error while sending action: {err:?}"); } } diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs new file mode 100644 index 0000000000..199367687b --- /dev/null +++ b/node-launchpad/src/system.rs @@ -0,0 +1,113 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::env; +use std::fs::OpenOptions; +use std::io::Write; +use std::path::Path; +use std::process::Command; +use sysinfo::Disks; + +// Tries to get the default (drive name, mount point) of the current executable +// to be used as the default drive +pub fn get_default_mount_point() -> (String, String) { + // Create a new System instance + let disks = Disks::new_with_refreshed_list(); + + // Get the current executable path + let exe_path = env::current_exe().expect("Failed to get current executable path"); + + // Iterate over the disks and find the one that matches the executable path + for disk in disks.list() { + if exe_path.starts_with(disk.mount_point()) { + return ( + disk.name().to_string_lossy().into(), + disk.mount_point().to_string_lossy().into_owned(), + ); + } + } + // If no matching disk is found, return an empty string or handle the error as needed + (String::new(), String::new()) +} + +// Checks if the given path (a drive) is read-only +fn is_read_only>(path: P) -> bool { + let test_file_path = path.as_ref().join("lauchpad_test_write_permission.tmp"); + + // Try to create and write to a temporary file + let result = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&test_file_path) + .and_then(|mut file| file.write_all(b"test")); + + match result { + Ok(_) => { + // Clean up the test file if write was successful + let _ = std::fs::remove_file(test_file_path); + false + } + Err(err) => { + // Check if the error is due to a read-only file system + err.kind() == std::io::ErrorKind::PermissionDenied + } + } +} + +// Gets a list of drives and their available space +pub fn get_list_of_drives_and_available_space() -> Vec<(String, String, u64)> { + // Create a new System instance + let disks = Disks::new_with_refreshed_list(); + + // Get the list of disks + let mut drives: Vec<(String, String, u64)> = Vec::new(); + for disk in disks.list() { + // Check if the disk is already in the list + let disk_info = ( + 
disk.name() + .to_string_lossy() + .into_owned() + .trim() + .to_string(), + disk.mount_point() + .to_string_lossy() + .into_owned() + .trim() + .to_string(), + disk.available_space(), + ); + // We don't check for write permission on removable drives + if !disk.is_removable() { + // Check if the disk is read-only and skip it + if is_read_only(disk.mount_point()) { + continue; + } + } + if !drives.contains(&disk_info) { + drives.push(disk_info); + } + } + debug!("Drives detected: {:?}", drives); + drives +} + +// Opens a folder in the file explorer +pub fn open_folder(path: &str) -> std::io::Result<()> { + if Path::new(path).exists() { + #[cfg(target_os = "macos")] + Command::new("open").arg(path).spawn()?.wait()?; + #[cfg(target_os = "windows")] + Command::new("explorer").arg(path).spawn()?.wait()?; + #[cfg(target_os = "linux")] + Command::new("xdg-open").arg(path).spawn()?.wait()?; + } else { + error!("Path does not exist: {}", path); + } + Ok(()) +} diff --git a/node-launchpad/src/utils.rs b/node-launchpad/src/utils.rs index 3357fb41b7..ffb997246c 100644 --- a/node-launchpad/src/utils.rs +++ b/node-launchpad/src/utils.rs @@ -75,10 +75,16 @@ pub fn initialize_panic_handler() -> Result<()> { Ok(()) } -// todo: use sn_logging +// Gets the current logging path +pub fn get_logging_path() -> Result { + let log_path = get_launchpad_data_dir_path()?.join("logs"); + Ok(log_path) +} + +// TODO: use sn_logging pub fn initialize_logging() -> Result<()> { let timestamp = chrono::Local::now().format("%Y-%m-%d_%H-%M-%S").to_string(); - let log_path = get_launchpad_data_dir_path()?.join("logs"); + let log_path = get_logging_path()?; std::fs::create_dir_all(&log_path)?; let log_file = std::fs::File::create(log_path.join(format!("launchpad_{timestamp}.log"))) .context(format!("Failed to create file {log_path:?}"))?; From 058ff82b2b4dc9496fd9d635f6e494bbb412bf4a Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 12 Aug 2024 15:15:31 +0200 Subject: [PATCH 082/115] feat(launchpad): resolve merge conflict --- node-launchpad/src/node_mgmt.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index cf9910beb0..1bfe76588b 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -7,8 +7,6 @@ use tokio::sync::mpsc::UnboundedSender; use crate::action::{Action, StatusActions}; use color_eyre::eyre::Result; -const NODE_START_INTERVAL: usize = 10; - pub fn stop_nodes(services: Vec, action_sender: UnboundedSender) { tokio::task::spawn_local(async move { if let Err(err) = @@ -50,10 +48,10 @@ pub fn maintain_n_running_nodes( } let owner = if owner.is_empty() { None } else { Some(owner) }; - debug!("Maintaining {count} running nodes in data_dir_path: {data_dir_path:?}"); if let Err(err) = sn_node_manager::cmd::node::maintain_n_running_nodes( false, true, + 120, count, data_dir_path, true, @@ -74,7 +72,7 @@ pub fn maintain_n_running_nodes( None, None, VerbosityLevel::Minimal, - NODE_START_INTERVAL as u64, + None, ) .await { From f562b9a70120f5f7301227173f427a11d528fa60 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 13 Aug 2024 08:53:18 +0200 Subject: [PATCH 083/115] chore(launchpad): adressing pull request comments --- node-launchpad/.config/config.json5 | 36 +++++++++---------- node-launchpad/src/action.rs | 8 ----- node-launchpad/src/app.rs | 2 +- node-launchpad/src/components/help.rs | 5 +-- node-launchpad/src/components/options.rs | 5 +-- .../src/components/popup/manage_nodes.rs | 
2 +- node-launchpad/src/components/status.rs | 5 +-- node-launchpad/src/config.rs | 14 +++----- 8 files changed, 28 insertions(+), 49 deletions(-) diff --git a/node-launchpad/.config/config.json5 b/node-launchpad/.config/config.json5 index 79912612a5..8bbd2f356e 100644 --- a/node-launchpad/.config/config.json5 +++ b/node-launchpad/.config/config.json5 @@ -1,12 +1,12 @@ { "keybindings": { "Status" : { - "": {"MenuActions":"StatusTab"}, - "": {"MenuActions":"StatusTab"}, - "": {"MenuActions":"OptionsTab"}, - "": {"MenuActions":"OptionsTab"}, - "": {"MenuActions":"HelpTab"}, - "": {"MenuActions":"HelpTab"}, + "": {"SwitchScene":"Status"}, + "": {"SwitchScene":"Status"}, + "": {"SwitchScene":"Options"}, + "": {"SwitchScene":"Options"}, + "": {"SwitchScene":"Help"}, + "": {"SwitchScene":"Help"}, "": {"StatusActions":"StartNodes"}, "": {"StatusActions":"StartNodes"}, @@ -29,12 +29,12 @@ "": "Suspend" // Suspend the application }, "Options": { - "": {"MenuActions":"StatusTab"}, - "": {"MenuActions":"StatusTab"}, - "": {"MenuActions":"OptionsTab"}, - "": {"MenuActions":"OptionsTab"}, - "": {"MenuActions":"HelpTab"}, - "": {"MenuActions":"HelpTab"}, + "": {"SwitchScene":"Status"}, + "": {"SwitchScene":"Status"}, + "": {"SwitchScene":"Options"}, + "": {"SwitchScene":"Options"}, + "": {"SwitchScene":"Help"}, + "": {"SwitchScene":"Help"}, "": {"OptionsActions":"TriggerChangeDrive"}, "": {"OptionsActions":"TriggerChangeDrive"}, @@ -57,12 +57,12 @@ "": "Suspend" // Suspend the application }, "Help": { - "": {"MenuActions":"StatusTab"}, - "": {"MenuActions":"StatusTab"}, - "": {"MenuActions":"OptionsTab"}, - "": {"MenuActions":"OptionsTab"}, - "": {"MenuActions":"HelpTab"}, - "": {"MenuActions":"HelpTab"}, + "": {"SwitchScene":"Status"}, + "": {"SwitchScene":"Status"}, + "": {"SwitchScene":"Options"}, + "": {"SwitchScene":"Options"}, + "": {"SwitchScene":"Help"}, + "": {"SwitchScene":"Help"}, "": "Quit", diff --git a/node-launchpad/src/action.rs b/node-launchpad/src/action.rs index d6cbe92f4f..0af70338e5 100644 --- a/node-launchpad/src/action.rs +++ b/node-launchpad/src/action.rs @@ -15,7 +15,6 @@ use strum::Display; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] pub enum Action { - MenuActions(MenuActions), StatusActions(StatusActions), OptionsActions(OptionsActions), @@ -68,10 +67,3 @@ pub enum OptionsActions { UpdateBetaProgrammeUsername(String), UpdateStorageDrive(String, String), } - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] -pub enum MenuActions { - StatusTab, - OptionsTab, - HelpTab, -} diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index 5eb7c7a3aa..dc05821cfa 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -82,7 +82,7 @@ impl App { // Popups let reset_nodes = ResetNodesPopup::default(); let discord_username_input = BetaProgramme::new(app_data.discord_username.clone()); - let manage_nodes = ManageNodes::new(app_data.nodes_to_start).await?; + let manage_nodes = ManageNodes::new(app_data.nodes_to_start).unwrap(); let change_drive = ChangeDrivePopup::new(app_data.storage_mountpoint.clone().unwrap()); let change_drive_confirm = ChangeDriveConfirmPopup::default(); diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/help.rs index 5f64da0748..d761740245 100644 --- a/node-launchpad/src/components/help.rs +++ b/node-launchpad/src/components/help.rs @@ -11,7 +11,7 @@ use tokio::sync::mpsc::UnboundedSender; use super::Component; use crate::{ - action::{Action, MenuActions}, + 
action::Action, components::header::Header, mode::{InputMode, Scene}, style::{EUCALYPTUS, GHOST_WHITE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, @@ -251,9 +251,6 @@ impl Component for Help { } _ => self.active = false, }, - Action::MenuActions(MenuActions::HelpTab) => { - return Ok(Some(Action::SwitchScene(Scene::Help))); - } _ => {} } Ok(None) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 7d2280ddc5..82026a1ba5 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -10,7 +10,7 @@ use tokio::sync::mpsc::UnboundedSender; use super::{header::SelectedMenuItem, Component}; use crate::{ - action::{Action, MenuActions, OptionsActions}, + action::{Action, OptionsActions}, components::header::Header, mode::{InputMode, Scene}, style::{EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, @@ -239,9 +239,6 @@ impl Component for Options { } _ => self.active = false, }, - Action::MenuActions(MenuActions::OptionsTab) => { - return Ok(Some(Action::SwitchScene(Scene::Options))); - } Action::OptionsActions(action) => match action { OptionsActions::TriggerChangeDrive => { return Ok(Some(Action::SwitchScene(Scene::ChangeDrivePopUp))); diff --git a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index 5b9abb59e5..90f4ba34d9 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -36,7 +36,7 @@ pub struct ManageNodes { } impl ManageNodes { - pub async fn new(nodes_to_start: usize) -> Result { + pub fn new(nodes_to_start: usize) -> Result { let nodes_to_start = std::cmp::min(nodes_to_start, MAX_NODE_COUNT); let new = Self { active: false, diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 4bea5ebd95..68b40cc625 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -15,7 +15,7 @@ use super::{ use crate::action::OptionsActions; use crate::config::get_launchpad_nodes_data_dir_path; use crate::{ - action::{Action, MenuActions, StatusActions}, + action::{Action, StatusActions}, config::Config, mode::{InputMode, Scene}, node_stats::NodeStats, @@ -252,9 +252,6 @@ impl Component for Status { Scene::ManageNodesPopUp => self.active = true, _ => self.active = false, }, - Action::MenuActions(MenuActions::StatusTab) => { - return Ok(Some(Action::SwitchScene(Scene::Status))); - } Action::StoreNodesToStart(count) => { self.nodes_to_start = count; if self.nodes_to_start == 0 { diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 0cbede766b..3b67333204 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -90,21 +90,17 @@ impl AppData { let data = std::fs::read_to_string(config_path) .map_err(|_| color_eyre::eyre::eyre!("Failed to read app data file"))?; - let app_data: AppData = serde_json::from_str(&data) + let mut app_data: AppData = serde_json::from_str(&data) .map_err(|_| color_eyre::eyre::eyre!("Failed to parse app data"))?; - let mut app_data_cloned = app_data.clone(); if app_data.storage_mountpoint.is_none() || app_data.storage_drive.is_none() { // If the storage drive is not set, set it to the default mount point let drive_info = system::get_default_mount_point(); - app_data_cloned.storage_drive = Some(drive_info.0); - app_data_cloned.storage_mountpoint = Some(drive_info.1); - debug!( - "Setting storage drive to {:?}", - 
app_data_cloned.storage_mountpoint - ); + app_data.storage_drive = Some(drive_info.0); + app_data.storage_mountpoint = Some(drive_info.1); + debug!("Setting storage drive to {:?}", app_data.storage_mountpoint); } - Ok(app_data_cloned) + Ok(app_data) } pub fn save(&self) -> Result<()> { From 664a2bc04e4a0b2eeee86954e9d8e4b4c3fd81f7 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 13 Aug 2024 10:23:40 +0200 Subject: [PATCH 084/115] chore(launchpad): clippy warning fixed --- node-launchpad/src/components/help.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/help.rs index d761740245..5e9b286b42 100644 --- a/node-launchpad/src/components/help.rs +++ b/node-launchpad/src/components/help.rs @@ -242,16 +242,14 @@ impl Component for Help { } fn update(&mut self, action: Action) -> Result> { - match action { - Action::SwitchScene(scene) => match scene { - Scene::Help => { - self.active = true; - // make sure we're in navigation mode - return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); - } - _ => self.active = false, - }, - _ => {} + if let Action::SwitchScene(scene) = action { + if let Scene::Help = scene { + self.active = true; + // make sure we're in navigation mode + return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); + } else { + self.active = false; + } } Ok(None) } From cb2b62f5db548e9f161d3290d9ab3601e10f1d0d Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 13 Aug 2024 12:31:57 +0200 Subject: [PATCH 085/115] feat(launchpad): unify change_drive and change_drive_confirm --- node-launchpad/src/action.rs | 1 - node-launchpad/src/app.rs | 20 +- node-launchpad/src/components/options.rs | 3 +- node-launchpad/src/components/popup.rs | 1 - .../src/components/popup/change_drive.rs | 396 +++++++++++++----- .../components/popup/change_drive_confirm.rs | 216 ---------- node-launchpad/src/mode.rs | 1 - 7 files changed, 298 insertions(+), 340 deletions(-) delete mode 100644 node-launchpad/src/components/popup/change_drive_confirm.rs diff --git a/node-launchpad/src/action.rs b/node-launchpad/src/action.rs index 0af70338e5..744a11171e 100644 --- a/node-launchpad/src/action.rs +++ b/node-launchpad/src/action.rs @@ -60,7 +60,6 @@ pub enum OptionsActions { ResetNodes, TriggerChangeDrive, - TriggerChangeDriveConfirm(String, String), TriggerBetaProgramme, TriggerResetNodes, TriggerAccessLogs, diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index dc05821cfa..ccbb47cd47 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -15,8 +15,7 @@ use crate::{ options::Options, popup::{ beta_programme::BetaProgramme, change_drive::ChangeDrivePopup, - change_drive_confirm::ChangeDriveConfirmPopup, manage_nodes::ManageNodes, - reset_nodes::ResetNodesPopup, + manage_nodes::ManageNodes, reset_nodes::ResetNodesPopup, }, status::Status, Component, @@ -72,8 +71,14 @@ impl App { ) .await?; let options = Options::new( - app_data.storage_mountpoint.clone().unwrap(), - app_data.storage_drive.clone().unwrap(), + app_data + .storage_mountpoint + .clone() + .ok_or_else(|| eyre!("Creating Options screen, storage_mountpoint is None"))?, + app_data + .storage_drive + .clone() + .ok_or_else(|| eyre!("Creating Options screen, storage_drive is None"))?, app_data.discord_username.clone(), ) .await?; @@ -83,8 +88,10 @@ impl App { let reset_nodes = ResetNodesPopup::default(); let discord_username_input = 
BetaProgramme::new(app_data.discord_username.clone()); let manage_nodes = ManageNodes::new(app_data.nodes_to_start).unwrap(); - let change_drive = ChangeDrivePopup::new(app_data.storage_mountpoint.clone().unwrap()); - let change_drive_confirm = ChangeDriveConfirmPopup::default(); + let change_drive = + ChangeDrivePopup::new(app_data.storage_mountpoint.clone().ok_or_else(|| { + eyre!("Creating Change Drive screen, storage_mountpoint is None") + })?); Ok(Self { config, @@ -98,7 +105,6 @@ impl App { Box::new(help), // Popups Box::new(change_drive), - Box::new(change_drive_confirm), Box::new(reset_nodes), Box::new(manage_nodes), Box::new(discord_username_input), diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 82026a1ba5..7270166932 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -231,8 +231,7 @@ impl Component for Options { Scene::Options | Scene::BetaProgrammePopUp | Scene::ResetNodesPopUp - | Scene::ChangeDrivePopUp - | Scene::ChangeDriveConfirmPopup => { + | Scene::ChangeDrivePopUp => { self.active = true; // make sure we're in navigation mode return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); diff --git a/node-launchpad/src/components/popup.rs b/node-launchpad/src/components/popup.rs index 925c6573d0..11c2bf9a3d 100644 --- a/node-launchpad/src/components/popup.rs +++ b/node-launchpad/src/components/popup.rs @@ -8,6 +8,5 @@ pub mod beta_programme; pub mod change_drive; -pub mod change_drive_confirm; pub mod manage_nodes; pub mod reset_nodes; diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index d2c6bba2b9..2e6ae128b6 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use std::default::Default; +use std::{default::Default, rc::Rc}; use super::super::utils::centered_rect_fixed; @@ -16,7 +16,9 @@ use ratatui::{ layout::{Alignment, Constraint, Direction, Layout, Rect}, style::{Modifier, Style, Stylize}, text::{Line, Span}, - widgets::{Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph}, + widgets::{ + Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph, Wrap, + }, }; use crate::{ @@ -30,12 +32,21 @@ use crate::{ system, }; +#[derive(Default)] +enum ChangeDriveState { + #[default] + Selection, + ConfirmChange, +} + #[derive(Default)] pub struct ChangeDrivePopup { active: bool, + state: ChangeDriveState, items: StatefulList, drive_selection: DriveItem, - user_moved: bool, // This is used to check if the user has moved the selection and style it accordingly + drive_selection_initial_state: DriveItem, + user_moved: bool, // Used to check if the user has moved the selection and style it accordingly } impl ChangeDrivePopup { @@ -72,11 +83,15 @@ impl ChangeDrivePopup { let items = StatefulList::with_items(drives_items); Self { active: false, + state: ChangeDriveState::Selection, items, - drive_selection: selected_drive, + drive_selection: selected_drive.clone(), + drive_selection_initial_state: selected_drive.clone(), user_moved: false, } } + + /// Interacts with the List of drives // Deselect all drives fn deselect_all(&mut self) { for item in &mut self.items.items { @@ -85,7 +100,7 @@ impl ChangeDrivePopup { } // Change the status of the selected drive to Selected #[allow(dead_code)] - fn change_status(&mut self) { + fn assign_drive_selection(&mut self) { self.deselect_all(); if let Some(i) = self.items.state.selected() { self.items.items[i].status = DriveStatus::Selected; @@ -110,114 +125,15 @@ impl ChangeDrivePopup { } DriveItem::default() } -} - -impl Component for ChangeDrivePopup { - fn handle_key_events(&mut self, key: KeyEvent) -> Result> { - if !self.active { - return Ok(vec![]); - } - let send_back = match key.code { - KeyCode::Enter => { - // We allow action if we have more than one drive and the action is not - // over the drive already selected - let drive = self.return_selection(); - if self.items.items.len() > 1 - && (drive.name != self.drive_selection.name - && drive.mountpoint != self.drive_selection.mountpoint) - { - debug!( - "Got Enter and there's a new selection, storing value and switching to Options" - ); - // self.change_status(); - debug!("Drive selected: {:?}", self.drive_selection.name); - vec![ - Action::OptionsActions(OptionsActions::TriggerChangeDriveConfirm( - drive.mountpoint.clone(), - drive.name.clone(), - )), - Action::SwitchScene(Scene::ChangeDriveConfirmPopup), - ] - } else { - debug!("Got Enter, but no new selection. 
We should not do anything"); - vec![Action::SwitchScene(Scene::ChangeDrivePopUp)] - } - } - KeyCode::Esc => { - debug!("Got Esc, switching to Options"); - vec![Action::SwitchScene(Scene::Options)] - } - KeyCode::Up => { - let drive = self.return_selection(); - if self.items.items.len() > 1 { - self.user_moved = drive.name == self.drive_selection.name - && drive.mountpoint == self.drive_selection.mountpoint; - self.items.previous(); - } - vec![] - } - KeyCode::Down => { - let drive = self.return_selection(); - if self.items.items.len() > 1 { - self.user_moved = drive.name == self.drive_selection.name - && drive.mountpoint == self.drive_selection.mountpoint; - self.items.next(); - } - vec![] - } - _ => { - vec![] - } - }; - Ok(send_back) - } - - fn update(&mut self, action: Action) -> Result> { - let send_back = match action { - Action::SwitchScene(scene) => match scene { - Scene::ChangeDrivePopUp => { - self.active = true; - self.user_moved = false; - self.select_drive(); - Some(Action::SwitchInputMode(InputMode::Entry)) - } - _ => { - self.active = false; - None - } - }, - // Useful when the user has selected a drive but didn't confirm it - Action::OptionsActions(OptionsActions::UpdateStorageDrive(mountpoint, drive_name)) => { - self.drive_selection.mountpoint = mountpoint; - self.drive_selection.name = drive_name; - self.select_drive(); - None - } - _ => None, - }; - Ok(send_back) - } - - fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { - if !self.active { - return Ok(()); - } - - let layer_zero = centered_rect_fixed(52, 15, area); - - let layer_one = Layout::new( - Direction::Vertical, - [ - // Padding from title to the table - Constraint::Length(1), - // Table - Constraint::Min(1), - // for the pop_up_border - Constraint::Length(1), - ], - ) - .split(layer_zero); + /// Draw functions + // Draws the Drive Selection screen + fn draw_selection_state( + &mut self, + f: &mut crate::tui::Frame<'_>, + layer_zero: Rect, + layer_one: Rc<[Rect]>, + ) -> Paragraph { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) @@ -306,6 +222,262 @@ impl Component for ChangeDrivePopup { buttons_layer[1], ); + pop_up_border + } + + // Draws the Confirmation screen + fn draw_confirm_change_state( + &mut self, + f: &mut crate::tui::Frame<'_>, + layer_zero: Rect, + layer_one: Rc<[Rect]>, + ) -> Paragraph { + let pop_up_border = Paragraph::new("").block( + Block::default() + .borders(Borders::ALL) + .title(" Confirm & Reset ") + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) + .border_style(Style::new().fg(VIVID_SKY_BLUE)) + .bg(DARK_GUNMETAL), + ); + clear_area(f, layer_zero); + + let layer_two = Layout::new( + Direction::Vertical, + [ + // for the table + Constraint::Length(10), + // gap + Constraint::Length(3), + // for the buttons + Constraint::Length(1), + ], + ) + .split(layer_one[1]); + + // Text + let text = vec![ + Line::from(vec![]), // Empty line + Line::from(vec![]), // Empty line + Line::from(vec![ + Span::styled("Changing storage to ", Style::default().fg(GHOST_WHITE)), + Span::styled( + format!("{} ", self.drive_selection.name), + Style::default().fg(VIVID_SKY_BLUE), + ), + Span::styled("will ", Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Center), + Line::from(vec![Span::styled( + "reset all nodes.", + Style::default().fg(GHOST_WHITE), + )]) + .alignment(Alignment::Center), + Line::from(vec![]), // Empty line + Line::from(vec![]), // Empty line + Line::from(vec![ + Span::styled("You’ll 
need to ", Style::default().fg(GHOST_WHITE)), + Span::styled("Add ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled("and ", Style::default().fg(GHOST_WHITE)), + Span::styled("Start ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled( + "them again afterwards. Are you sure you want to continue?", + Style::default().fg(GHOST_WHITE), + ), + ]) + .alignment(Alignment::Center), + ]; + let paragraph = Paragraph::new(text) + .wrap(Wrap { trim: false }) + .block( + Block::default() + .borders(Borders::NONE) + .padding(Padding::horizontal(2)), + ) + .alignment(Alignment::Center) + .style(Style::default().fg(GHOST_WHITE).bg(DARK_GUNMETAL)); + + f.render_widget(paragraph, layer_two[0]); + + // Dash + let dash = Block::new() + .borders(Borders::BOTTOM) + .border_style(Style::new().fg(GHOST_WHITE)); + f.render_widget(dash, layer_two[1]); + + // Buttons + let buttons_layer = + Layout::horizontal(vec![Constraint::Percentage(30), Constraint::Percentage(70)]) + .split(layer_two[2]); + + let button_no = Line::from(vec![Span::styled( + "Back [Esc]", + Style::default().fg(LIGHT_PERIWINKLE), + )]); + + f.render_widget( + Paragraph::new(button_no) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Left), + buttons_layer[0], + ); + + let button_yes = Line::from(vec![ + Span::styled("Yes, change drive ", Style::default().fg(EUCALYPTUS)), + Span::styled("[Enter]", Style::default().fg(LIGHT_PERIWINKLE).bold()), + ]) + .alignment(Alignment::Right); + + f.render_widget( + Paragraph::new(button_yes) + .block(Block::default().padding(Padding::horizontal(2))) + .alignment(Alignment::Right), + buttons_layer[1], + ); + + pop_up_border + } +} + +impl Component for ChangeDrivePopup { + fn handle_key_events(&mut self, key: KeyEvent) -> Result> { + if !self.active { + return Ok(vec![]); + } + let send_back: Vec = match &self.state { + ChangeDriveState::Selection => { + match key.code { + KeyCode::Enter => { + // We allow action if we have more than one drive and the action is not + // over the drive already selected + let drive = self.return_selection(); + if self.items.items.len() > 1 + && (drive.name != self.drive_selection.name + && drive.mountpoint != self.drive_selection.mountpoint) + { + debug!( + "Got Enter and there's a new selection, storing value and switching to Options" + ); + debug!("Drive selected: {:?}", self.drive_selection.name); + self.drive_selection_initial_state = self.drive_selection.clone(); + self.assign_drive_selection(); + self.state = ChangeDriveState::ConfirmChange; + vec![] + } else { + debug!("Got Enter, but no new selection. 
We should not do anything"); + vec![Action::SwitchScene(Scene::ChangeDrivePopUp)] + } + } + KeyCode::Esc => { + debug!("Got Esc, switching to Options"); + vec![Action::SwitchScene(Scene::Options)] + } + KeyCode::Up => { + let drive = self.return_selection(); + if self.items.items.len() > 1 { + self.user_moved = drive.name == self.drive_selection.name + && drive.mountpoint == self.drive_selection.mountpoint; + self.items.previous(); + } + vec![] + } + KeyCode::Down => { + let drive = self.return_selection(); + if self.items.items.len() > 1 { + self.user_moved = drive.name == self.drive_selection.name + && drive.mountpoint == self.drive_selection.mountpoint; + self.items.next(); + } + vec![] + } + _ => { + vec![] + } + } + } + ChangeDriveState::ConfirmChange => match key.code { + KeyCode::Enter => { + debug!("Got Enter, storing value and switching to Options"); + vec![ + Action::StoreStorageDrive( + self.drive_selection.mountpoint.clone(), + self.drive_selection.name.clone(), + ), + Action::OptionsActions(OptionsActions::UpdateStorageDrive( + self.drive_selection.mountpoint.clone(), + self.drive_selection.name.clone(), + )), + Action::SwitchScene(Scene::Options), + ] + } + KeyCode::Esc => { + debug!("Got Esc, switching to Options"); + self.drive_selection = self.drive_selection_initial_state.clone(); + self.state = ChangeDriveState::Selection; + vec![Action::SwitchScene(Scene::Options)] + } + _ => { + vec![] + } + }, + }; + Ok(send_back) + } + + fn update(&mut self, action: Action) -> Result> { + let send_back = match action { + Action::SwitchScene(scene) => match scene { + Scene::ChangeDrivePopUp => { + self.active = true; + self.user_moved = false; + self.state = ChangeDriveState::Selection; + self.select_drive(); + Some(Action::SwitchInputMode(InputMode::Entry)) + } + _ => { + self.active = false; + None + } + }, + // Useful when the user has selected a drive but didn't confirm it + Action::OptionsActions(OptionsActions::UpdateStorageDrive(mountpoint, drive_name)) => { + self.drive_selection.mountpoint = mountpoint; + self.drive_selection.name = drive_name; + self.select_drive(); + None + } + _ => None, + }; + Ok(send_back) + } + + fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { + if !self.active { + return Ok(()); + } + + let layer_zero = centered_rect_fixed(52, 15, area); + + let layer_one = Layout::new( + Direction::Vertical, + [ + // Padding from title to the table + Constraint::Length(1), + // Table + Constraint::Min(1), + // for the pop_up_border + Constraint::Length(1), + ], + ) + .split(layer_zero); + + let pop_up_border: Paragraph = match self.state { + ChangeDriveState::Selection => self.draw_selection_state(f, layer_zero, layer_one), + ChangeDriveState::ConfirmChange => { + self.draw_confirm_change_state(f, layer_zero, layer_one) + } + }; // We render now so the borders are on top of the other widgets f.render_widget(pop_up_border, layer_zero); diff --git a/node-launchpad/src/components/popup/change_drive_confirm.rs b/node-launchpad/src/components/popup/change_drive_confirm.rs deleted file mode 100644 index fb242ded87..0000000000 --- a/node-launchpad/src/components/popup/change_drive_confirm.rs +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::super::utils::centered_rect_fixed; - -use color_eyre::Result; -use crossterm::event::{KeyCode, KeyEvent}; -use ratatui::{ - layout::{Alignment, Constraint, Direction, Layout, Rect}, - style::{Style, Stylize}, - text::{Line, Span}, - widgets::{Block, Borders, Padding, Paragraph, Wrap}, -}; - -use crate::{ - action::{Action, OptionsActions}, - components::Component, - mode::{InputMode, Scene}, - style::{clear_area, DARK_GUNMETAL, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, -}; - -#[derive(Default)] -pub struct ChangeDriveConfirmPopup { - active: bool, - drive_selection_mountpoint: String, - drive_selection_name: String, -} - -impl Component for ChangeDriveConfirmPopup { - fn handle_key_events(&mut self, key: KeyEvent) -> Result> { - if !self.active { - return Ok(vec![]); - } - let send_back = match key.code { - KeyCode::Enter => { - debug!("Got Enter, storing value and switching to Options"); - vec![ - Action::StoreStorageDrive( - self.drive_selection_mountpoint.clone(), - self.drive_selection_name.clone(), - ), - Action::OptionsActions(OptionsActions::UpdateStorageDrive( - self.drive_selection_mountpoint.clone(), - self.drive_selection_name.clone(), - )), - Action::SwitchScene(Scene::Options), - ] - } - KeyCode::Esc => { - debug!("Got Esc, switching to Options"); - vec![Action::SwitchScene(Scene::Options)] - } - _ => { - vec![] - } - }; - Ok(send_back) - } - - fn update(&mut self, action: Action) -> Result> { - let send_back = match action { - Action::SwitchScene(scene) => match scene { - Scene::ChangeDriveConfirmPopup => { - self.active = true; - Some(Action::SwitchInputMode(InputMode::Entry)) - } - _ => { - self.active = false; - None - } - }, - Action::OptionsActions(OptionsActions::TriggerChangeDriveConfirm(mountpoint, name)) => { - self.drive_selection_mountpoint = mountpoint; - self.drive_selection_name = name; - None - } - _ => None, - }; - Ok(send_back) - } - - fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { - if !self.active { - return Ok(()); - } - - let layer_zero = centered_rect_fixed(52, 15, area); - - let layer_one = Layout::new( - Direction::Vertical, - [ - // Padding from title to the table - Constraint::Length(1), - // Text - Constraint::Min(1), - // for the pop_up_border - Constraint::Length(1), - ], - ) - .split(layer_zero); - - let pop_up_border = Paragraph::new("").block( - Block::default() - .borders(Borders::ALL) - .title(" Confirm & Reset ") - .title_style(Style::new().fg(VIVID_SKY_BLUE)) - .padding(Padding::uniform(2)) - .border_style(Style::new().fg(VIVID_SKY_BLUE)) - .bg(DARK_GUNMETAL), - ); - clear_area(f, layer_zero); - - let layer_two = Layout::new( - Direction::Vertical, - [ - // for the table - Constraint::Length(10), - // gap - Constraint::Length(3), - // for the buttons - Constraint::Length(1), - ], - ) - .split(layer_one[1]); - - // Text - let text = vec![ - Line::from(vec![]), // Empty line - Line::from(vec![]), // Empty line - Line::from(vec![ - Span::styled("Changing storage to ", Style::default().fg(GHOST_WHITE)), - Span::styled( - format!("{} ", self.drive_selection_name), - Style::default().fg(VIVID_SKY_BLUE), - ), - 
Span::styled("will ", Style::default().fg(GHOST_WHITE)), - ]) - .alignment(Alignment::Center), - Line::from(vec![Span::styled( - "reset all nodes.", - Style::default().fg(GHOST_WHITE), - )]) - .alignment(Alignment::Center), - Line::from(vec![]), // Empty line - Line::from(vec![]), // Empty line - Line::from(vec![ - Span::styled("You’ll need to ", Style::default().fg(GHOST_WHITE)), - Span::styled("Add ", Style::default().fg(GHOST_WHITE).bold()), - Span::styled("and ", Style::default().fg(GHOST_WHITE)), - Span::styled("Start ", Style::default().fg(GHOST_WHITE).bold()), - Span::styled( - "them again afterwards. Are you sure you want to continue?", - Style::default().fg(GHOST_WHITE), - ), - ]) - .alignment(Alignment::Center), - ]; - let paragraph = Paragraph::new(text) - .wrap(Wrap { trim: false }) - .block( - Block::default() - .borders(Borders::NONE) - .padding(Padding::horizontal(2)), - ) - .alignment(Alignment::Center) - .style(Style::default().fg(GHOST_WHITE).bg(DARK_GUNMETAL)); - - f.render_widget(paragraph, layer_two[0]); - - // Dash - let dash = Block::new() - .borders(Borders::BOTTOM) - .border_style(Style::new().fg(GHOST_WHITE)); - f.render_widget(dash, layer_two[1]); - - // Buttons - let buttons_layer = - Layout::horizontal(vec![Constraint::Percentage(30), Constraint::Percentage(70)]) - .split(layer_two[2]); - - let button_no = Line::from(vec![Span::styled( - "Back [Esc]", - Style::default().fg(LIGHT_PERIWINKLE), - )]); - - f.render_widget( - Paragraph::new(button_no) - .block(Block::default().padding(Padding::horizontal(2))) - .alignment(Alignment::Left), - buttons_layer[0], - ); - - let button_yes = Line::from(vec![ - Span::styled("Yes, change drive ", Style::default().fg(EUCALYPTUS)), - Span::styled("[Enter]", Style::default().fg(LIGHT_PERIWINKLE).bold()), - ]) - .alignment(Alignment::Right); - - f.render_widget( - Paragraph::new(button_yes) - .block(Block::default().padding(Padding::horizontal(2))) - .alignment(Alignment::Right), - buttons_layer[1], - ); - - // We render now so the borders are on top of the other widgets - f.render_widget(pop_up_border, layer_zero); - - Ok(()) - } -} diff --git a/node-launchpad/src/mode.rs b/node-launchpad/src/mode.rs index a0b8421d3f..641c433dfc 100644 --- a/node-launchpad/src/mode.rs +++ b/node-launchpad/src/mode.rs @@ -15,7 +15,6 @@ pub enum Scene { Options, Help, ChangeDrivePopUp, - ChangeDriveConfirmPopup, BetaProgrammePopUp, ManageNodesPopUp, ResetNodesPopUp, From a5ea409ae225e959aaa0579a853a164de41c0cea Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 13 Aug 2024 13:34:05 +0200 Subject: [PATCH 086/115] chore(launchpad): using different paths for different drives --- .../src/components/popup/manage_nodes.rs | 17 ++++---------- node-launchpad/src/config.rs | 23 +++++++++++-------- node-launchpad/src/system.rs | 10 ++++++++ 3 files changed, 28 insertions(+), 22 deletions(-) diff --git a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index 90f4ba34d9..c4b62225ad 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -6,10 +6,10 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+use crate::system::get_mount_point; use color_eyre::{eyre::ContextCompat, Result}; use crossterm::event::{Event, KeyCode, KeyEvent}; use ratatui::{prelude::*, widgets::*}; -use std::path::PathBuf; use sysinfo::Disks; use tui_input::{backend::crossterm::EventHandler, Input}; @@ -55,10 +55,10 @@ impl ManageNodes { let disks = Disks::new_with_refreshed_list(); if tracing::level_enabled!(tracing::Level::DEBUG) { for disk in disks.list() { - let res = disk.mount_point().ends_with(Self::get_mount_point()); + let res = disk.mount_point().ends_with(get_mount_point()); debug!( "Disk: {disk:?} ends with '{:?}': {res:?}", - Self::get_mount_point() + get_mount_point() ); } } @@ -66,7 +66,7 @@ impl ManageNodes { let available_space_b = disks .list() .iter() - .find(|disk| disk.mount_point().ends_with(Self::get_mount_point())) + .find(|disk| disk.mount_point().ends_with(get_mount_point())) .context("Cannot find the primary disk")? .available_space() as usize; @@ -78,15 +78,6 @@ impl ManageNodes { fn max_nodes_to_start(&self) -> usize { std::cmp::min(self.available_disk_space_gb / GB_PER_NODE, MAX_NODE_COUNT) } - - #[cfg(unix)] - fn get_mount_point() -> PathBuf { - PathBuf::from("/") - } - #[cfg(windows)] - fn get_mount_point() -> PathBuf { - PathBuf::from("C:\\") - } } impl Component for ManageNodes { diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 3b67333204..d61101169b 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::system; +use crate::system::get_mount_point; use crate::{action::Action, mode::Scene}; use color_eyre::eyre::Result; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; @@ -19,16 +20,20 @@ use std::path::PathBuf; const CONFIG: &str = include_str!("../.config/config.json5"); // Where to store the Nodes data +// If the base dir is the default mount point, we store in "$HOME/user_data_dir/safe/node" +// if not we store in "$MOUNTPOINT/safe/node" pub fn get_launchpad_nodes_data_dir_path(base_dir: PathBuf) -> Result { - // We get the data dir and we remove the first character which is the / character - let mut data_directory = dirs_next::data_dir() - .expect("Data directory is obtainable") - .to_str() - .unwrap() - .to_string(); - data_directory.remove(0); - - let mut mount_point = base_dir.clone(); + let mut mount_point = PathBuf::new(); + + let data_directory = if base_dir == get_mount_point() { + dirs_next::data_dir() + .expect("Data directory is obtainable") + .to_str() + .unwrap() + .to_string() + } else { + base_dir.to_str().unwrap().to_string() + }; mount_point.push(data_directory); mount_point.push("safe"); mount_point.push("node"); diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 199367687b..6122a772c9 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -10,6 +10,7 @@ use std::env; use std::fs::OpenOptions; use std::io::Write; use std::path::Path; +use std::path::PathBuf; use std::process::Command; use sysinfo::Disks; @@ -111,3 +112,12 @@ pub fn open_folder(path: &str) -> std::io::Result<()> { } Ok(()) } + +#[cfg(unix)] +pub fn get_mount_point() -> PathBuf { + PathBuf::from("/") +} +#[cfg(windows)] +pub fn get_mount_point() -> PathBuf { + PathBuf::from("C:\\") +} From fa96641344a9ff71d16341ea6f6f91e0a21acbfb Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 13 Aug 2024 15:55:05 +0200 Subject: [PATCH 087/115] feat(lauchpad): update manage_nodes screen 
with new drive size --- node-launchpad/src/app.rs | 8 +++- .../src/components/popup/manage_nodes.rs | 43 ++++++------------- node-launchpad/src/components/status.rs | 2 +- node-launchpad/src/system.rs | 25 +++++++++++ 4 files changed, 47 insertions(+), 31 deletions(-) diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index ccbb47cd47..7b007f39da 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -87,7 +87,13 @@ impl App { // Popups let reset_nodes = ResetNodesPopup::default(); let discord_username_input = BetaProgramme::new(app_data.discord_username.clone()); - let manage_nodes = ManageNodes::new(app_data.nodes_to_start).unwrap(); + let manage_nodes = ManageNodes::new( + app_data.nodes_to_start, + app_data + .storage_mountpoint + .clone() + .ok_or_else(|| eyre!("Creating Manage Nodes screen, storage_drive is None"))?, + )?; let change_drive = ChangeDrivePopup::new(app_data.storage_mountpoint.clone().ok_or_else(|| { eyre!("Creating Change Drive screen, storage_mountpoint is None") diff --git a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index c4b62225ad..92c67ca5c9 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -6,11 +6,11 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::system::get_mount_point; -use color_eyre::{eyre::ContextCompat, Result}; +use crate::action::OptionsActions; +use crate::system::get_available_space_b; +use color_eyre::Result; use crossterm::event::{Event, KeyCode, KeyEvent}; use ratatui::{prelude::*, widgets::*}; -use sysinfo::Disks; use tui_input::{backend::crossterm::EventHandler, Input}; use crate::{ @@ -30,19 +30,21 @@ pub struct ManageNodes { /// Whether the component is active right now, capturing keystrokes + drawing things. active: bool, available_disk_space_gb: usize, + storage_mountpoint: String, nodes_to_start_input: Input, // cache the old value incase user presses Esc. old_value: String, } impl ManageNodes { - pub fn new(nodes_to_start: usize) -> Result { + pub fn new(nodes_to_start: usize, storage_mountpoint: String) -> Result { let nodes_to_start = std::cmp::min(nodes_to_start, MAX_NODE_COUNT); let new = Self { active: false, - available_disk_space_gb: Self::get_available_space_b()? / GB, + available_disk_space_gb: get_available_space_b(storage_mountpoint.clone())? / GB, nodes_to_start_input: Input::default().with_value(nodes_to_start.to_string()), old_value: Default::default(), + storage_mountpoint: storage_mountpoint.clone(), }; Ok(new) } @@ -51,28 +53,6 @@ impl ManageNodes { self.nodes_to_start_input.value().parse().unwrap_or(0) } - fn get_available_space_b() -> Result { - let disks = Disks::new_with_refreshed_list(); - if tracing::level_enabled!(tracing::Level::DEBUG) { - for disk in disks.list() { - let res = disk.mount_point().ends_with(get_mount_point()); - debug!( - "Disk: {disk:?} ends with '{:?}': {res:?}", - get_mount_point() - ); - } - } - - let available_space_b = disks - .list() - .iter() - .find(|disk| disk.mount_point().ends_with(get_mount_point())) - .context("Cannot find the primary disk")? 
- .available_space() as usize; - - Ok(available_space_b) - } - // Returns the max number of nodes to start // It is the minimum of the available disk space and the max nodes limit fn max_nodes_to_start(&self) -> usize { @@ -100,8 +80,8 @@ impl Component for ManageNodes { .with_value(nodes_to_start.to_string()); debug!( - "Got Enter, value found to be {nodes_to_start} derived from input: {nodes_to_start_str:?} and switching scene", - ); + "Got Enter, value found to be {nodes_to_start} derived from input: {nodes_to_start_str:?} and switching scene", + ); vec![ Action::StoreNodesToStart(nodes_to_start), Action::SwitchScene(Scene::Status), @@ -195,6 +175,11 @@ impl Component for ManageNodes { None } }, + Action::OptionsActions(OptionsActions::UpdateStorageDrive(mountpoint, _drive_name)) => { + self.storage_mountpoint.clone_from(&mountpoint); + self.available_disk_space_gb = get_available_space_b(mountpoint.clone())? / GB; + None + } _ => None, }; Ok(send_back) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 68b40cc625..11406e13dc 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -655,7 +655,7 @@ impl Component for Status { } fn handle_key_events(&mut self, key: KeyEvent) -> Result> { - debug!("Key received in Home: {:?}", key); + debug!("Key received in Status: {:?}", key); Ok(vec![]) } } diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 6122a772c9..9ed9d67c8f 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -6,6 +6,8 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use color_eyre::eyre::ContextCompat; +use color_eyre::Result; use std::env; use std::fs::OpenOptions; use std::io::Write; @@ -121,3 +123,26 @@ pub fn get_mount_point() -> PathBuf { pub fn get_mount_point() -> PathBuf { PathBuf::from("C:\\") } + +// Gets available disk space in bytes for the given mountpoint +pub fn get_available_space_b(storage_mountpoint: String) -> Result { + let disks = Disks::new_with_refreshed_list(); + if tracing::level_enabled!(tracing::Level::DEBUG) { + for disk in disks.list() { + let res = disk.mount_point().to_string_lossy() == storage_mountpoint.clone(); + debug!( + "Disk: {disk:?} is equal to '{:?}': {res:?}", + storage_mountpoint.clone(), + ); + } + } + + let available_space_b = disks + .list() + .iter() + .find(|disk| disk.mount_point().to_string_lossy() == storage_mountpoint.clone()) + .context("Cannot find the primary disk")? + .available_space() as usize; + + Ok(available_space_b) +} From 8ffbf2b99ea9173bdd33c6b9167efca1ef19ec9b Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 14 Aug 2024 16:24:30 +0200 Subject: [PATCH 088/115] chore(launchpad): rename function get_mount_point --- node-launchpad/src/config.rs | 4 ++-- node-launchpad/src/system.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index d61101169b..bb08be9def 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::system; -use crate::system::get_mount_point; +use crate::system::get_primary_mount_point; use crate::{action::Action, mode::Scene}; use color_eyre::eyre::Result; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; @@ -25,7 +25,7 @@ const CONFIG: &str = include_str!("../.config/config.json5"); pub fn get_launchpad_nodes_data_dir_path(base_dir: PathBuf) -> Result { let mut mount_point = PathBuf::new(); - let data_directory = if base_dir == get_mount_point() { + let data_directory = if base_dir == get_primary_mount_point() { dirs_next::data_dir() .expect("Data directory is obtainable") .to_str() diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 9ed9d67c8f..342605d344 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -116,11 +116,11 @@ pub fn open_folder(path: &str) -> std::io::Result<()> { } #[cfg(unix)] -pub fn get_mount_point() -> PathBuf { +pub fn get_primary_mount_point() -> PathBuf { PathBuf::from("/") } #[cfg(windows)] -pub fn get_mount_point() -> PathBuf { +pub fn get_primary_mount_point() -> PathBuf { PathBuf::from("C:\\") } From de3fc4d16d832b3bcd3c4fb7f5a9645a9012f8b3 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 14 Aug 2024 17:36:49 +0200 Subject: [PATCH 089/115] chore(launchpad): addressing pull request comments --- node-launchpad/src/action.rs | 5 +-- node-launchpad/src/app.rs | 3 +- node-launchpad/src/components/options.rs | 6 ++-- .../src/components/popup/change_drive.rs | 12 +++---- .../src/components/popup/manage_nodes.rs | 10 +++--- node-launchpad/src/components/status.rs | 2 +- node-launchpad/src/config.rs | 19 +++++------ node-launchpad/src/system.rs | 32 ++++++++----------- 8 files changed, 43 insertions(+), 46 deletions(-) diff --git a/node-launchpad/src/action.rs b/node-launchpad/src/action.rs index 744a11171e..e58402a462 100644 --- a/node-launchpad/src/action.rs +++ b/node-launchpad/src/action.rs @@ -11,6 +11,7 @@ use crate::{ node_stats::NodeStats, }; use serde::{Deserialize, Serialize}; +use std::path::PathBuf; use strum::Display; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, Deserialize)] @@ -23,7 +24,7 @@ pub enum Action { StoreDiscordUserName(String), StoreNodesToStart(usize), - StoreStorageDrive(String, String), + StoreStorageDrive(PathBuf, String), Tick, Render, @@ -64,5 +65,5 @@ pub enum OptionsActions { TriggerResetNodes, TriggerAccessLogs, UpdateBetaProgrammeUsername(String), - UpdateStorageDrive(String, String), + UpdateStorageDrive(PathBuf, String), } diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index 7b007f39da..eb189a11aa 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -237,8 +237,7 @@ impl App { } Action::StoreStorageDrive(ref drive_mountpoint, ref drive_name) => { debug!("Storing storage drive: {drive_mountpoint:?}, {drive_name:?}"); - self.app_data.storage_mountpoint = - Some(drive_mountpoint.as_str().to_string()); + self.app_data.storage_mountpoint = Some(drive_mountpoint.clone()); self.app_data.storage_drive = Some(drive_name.as_str().to_string()); self.app_data.save()?; } diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 7270166932..ca4801c8ff 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use color_eyre::eyre::Result; use ratatui::{ layout::{Alignment, Constraint, Direction, Layout, Rect}, @@ -20,7 +22,7 @@ use 
sn_node_manager::config::get_user_safenode_data_dir; #[derive(Clone)] pub struct Options { - pub storage_mountpoint: String, + pub storage_mountpoint: PathBuf, pub storage_drive: String, pub discord_username: String, pub active: bool, @@ -29,7 +31,7 @@ pub struct Options { impl Options { pub async fn new( - storage_mountpoint: String, + storage_mountpoint: PathBuf, storage_drive: String, discord_username: String, ) -> Result { diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 2e6ae128b6..2366f6ef2f 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use std::{default::Default, rc::Rc}; +use std::{default::Default, path::PathBuf, rc::Rc}; use super::super::utils::centered_rect_fixed; @@ -50,7 +50,7 @@ pub struct ChangeDrivePopup { } impl ChangeDrivePopup { - pub fn new(storage_mountpoint: String) -> Self { + pub fn new(storage_mountpoint: PathBuf) -> Self { let drives_and_space = system::get_list_of_drives_and_available_space(); let mut selected_drive: DriveItem = DriveItem::default(); @@ -62,12 +62,12 @@ impl ChangeDrivePopup { let size_str_cloned = size_str.clone(); DriveItem { name: drive_name.to_string(), - mountpoint: mountpoint.to_string(), + mountpoint: mountpoint.clone(), size: size_str, status: if mountpoint == &storage_mountpoint { selected_drive = DriveItem { name: drive_name.to_string(), - mountpoint: mountpoint.to_string(), + mountpoint: mountpoint.clone(), size: size_str_cloned, status: DriveStatus::Selected, }; @@ -78,7 +78,7 @@ impl ChangeDrivePopup { } }) .collect::>(); - debug!("Drive Mountpoint in Config: {}", storage_mountpoint); + debug!("Drive Mountpoint in Config: {:?}", storage_mountpoint); debug!("Drives and space: {:?}", drives_and_space); let items = StatefulList::with_items(drives_items); Self { @@ -540,7 +540,7 @@ enum DriveStatus { #[derive(Default, Debug, Clone)] pub struct DriveItem { name: String, - mountpoint: String, + mountpoint: PathBuf, size: String, status: DriveStatus, } diff --git a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index 92c67ca5c9..1810d02713 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -6,6 +6,8 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use std::path::PathBuf; + use crate::action::OptionsActions; use crate::system::get_available_space_b; use color_eyre::Result; @@ -30,18 +32,18 @@ pub struct ManageNodes { /// Whether the component is active right now, capturing keystrokes + drawing things. active: bool, available_disk_space_gb: usize, - storage_mountpoint: String, + storage_mountpoint: PathBuf, nodes_to_start_input: Input, // cache the old value incase user presses Esc. 
old_value: String, } impl ManageNodes { - pub fn new(nodes_to_start: usize, storage_mountpoint: String) -> Result { + pub fn new(nodes_to_start: usize, storage_mountpoint: PathBuf) -> Result { let nodes_to_start = std::cmp::min(nodes_to_start, MAX_NODE_COUNT); let new = Self { active: false, - available_disk_space_gb: get_available_space_b(storage_mountpoint.clone())? / GB, + available_disk_space_gb: get_available_space_b(&storage_mountpoint)? / GB, nodes_to_start_input: Input::default().with_value(nodes_to_start.to_string()), old_value: Default::default(), storage_mountpoint: storage_mountpoint.clone(), @@ -177,7 +179,7 @@ impl Component for ManageNodes { }, Action::OptionsActions(OptionsActions::UpdateStorageDrive(mountpoint, _drive_name)) => { self.storage_mountpoint.clone_from(&mountpoint); - self.available_disk_space_gb = get_available_space_b(mountpoint.clone())? / GB; + self.available_disk_space_gb = get_available_space_b(&mountpoint)? / GB; None } _ => None, diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 11406e13dc..f45de69abc 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -280,7 +280,7 @@ impl Component for Status { let action_sender = self.get_actions_sender()?; reset_nodes(action_sender, false); self.data_dir_path = - get_launchpad_nodes_data_dir_path(PathBuf::from(drive_mountpoint.clone()))?; + get_launchpad_nodes_data_dir_path(drive_mountpoint.to_path_buf())?; } Action::StatusActions(status_action) => { match status_action { diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index bb08be9def..c47341dda3 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -9,7 +9,7 @@ use crate::system; use crate::system::get_primary_mount_point; use crate::{action::Action, mode::Scene}; -use color_eyre::eyre::Result; +use color_eyre::eyre::{eyre, Result}; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; use derive_deref::{Deref, DerefMut}; use ratatui::style::{Color, Modifier, Style}; @@ -25,14 +25,10 @@ const CONFIG: &str = include_str!("../.config/config.json5"); pub fn get_launchpad_nodes_data_dir_path(base_dir: PathBuf) -> Result { let mut mount_point = PathBuf::new(); - let data_directory = if base_dir == get_primary_mount_point() { - dirs_next::data_dir() - .expect("Data directory is obtainable") - .to_str() - .unwrap() - .to_string() + let data_directory: PathBuf = if base_dir == get_primary_mount_point() { + dirs_next::data_dir().ok_or_else(|| eyre!("Data directory is not obtainable"))? 
} else { - base_dir.to_str().unwrap().to_string() + base_dir }; mount_point.push(data_directory); mount_point.push("safe"); @@ -44,7 +40,8 @@ pub fn get_launchpad_nodes_data_dir_path(base_dir: PathBuf) -> Result { // Where to store the Launchpad config & logs pub fn get_launchpad_data_dir_path() -> Result { - let mut home_dirs = dirs_next::data_dir().expect("Data directory is obtainable"); + let mut home_dirs = + dirs_next::data_dir().ok_or_else(|| eyre!("Data directory is not obtainable"))?; home_dirs.push("safe"); home_dirs.push("launchpad"); std::fs::create_dir_all(home_dirs.as_path())?; @@ -78,7 +75,7 @@ pub async fn configure_winsw() -> Result<()> { pub struct AppData { pub discord_username: String, pub nodes_to_start: usize, - pub storage_mountpoint: Option, + pub storage_mountpoint: Option, pub storage_drive: Option, } @@ -100,7 +97,7 @@ impl AppData { if app_data.storage_mountpoint.is_none() || app_data.storage_drive.is_none() { // If the storage drive is not set, set it to the default mount point - let drive_info = system::get_default_mount_point(); + let drive_info = system::get_default_mount_point()?; app_data.storage_drive = Some(drive_info.0); app_data.storage_mountpoint = Some(drive_info.1); debug!("Setting storage drive to {:?}", app_data.storage_mountpoint); diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 342605d344..f132de3acd 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use color_eyre::eyre::eyre; use color_eyre::eyre::ContextCompat; use color_eyre::Result; use std::env; @@ -18,24 +19,23 @@ use sysinfo::Disks; // Tries to get the default (drive name, mount point) of the current executable // to be used as the default drive -pub fn get_default_mount_point() -> (String, String) { +pub fn get_default_mount_point() -> Result<(String, PathBuf)> { // Create a new System instance let disks = Disks::new_with_refreshed_list(); // Get the current executable path - let exe_path = env::current_exe().expect("Failed to get current executable path"); + let exe_path = env::current_exe()?; // Iterate over the disks and find the one that matches the executable path for disk in disks.list() { if exe_path.starts_with(disk.mount_point()) { - return ( + return Ok(( disk.name().to_string_lossy().into(), - disk.mount_point().to_string_lossy().into_owned(), - ); + disk.mount_point().to_path_buf(), + )); } } - // If no matching disk is found, return an empty string or handle the error as needed - (String::new(), String::new()) + Err(eyre!("Cannot find the default mount point")) } // Checks if the given path (a drive) is read-only @@ -64,12 +64,12 @@ fn is_read_only>(path: P) -> bool { } // Gets a list of drives and their available space -pub fn get_list_of_drives_and_available_space() -> Vec<(String, String, u64)> { +pub fn get_list_of_drives_and_available_space() -> Vec<(String, PathBuf, u64)> { // Create a new System instance let disks = Disks::new_with_refreshed_list(); // Get the list of disks - let mut drives: Vec<(String, String, u64)> = Vec::new(); + let mut drives: Vec<(String, PathBuf, u64)> = Vec::new(); for disk in disks.list() { // Check if the disk is already in the list let disk_info = ( @@ -78,11 +78,7 @@ pub fn get_list_of_drives_and_available_space() -> Vec<(String, String, u64)> { .into_owned() .trim() .to_string(), - 
disk.mount_point() - .to_string_lossy() - .into_owned() - .trim() - .to_string(), + disk.mount_point().to_path_buf(), disk.available_space(), ); // We don't check for write permission on removable drives @@ -125,14 +121,14 @@ pub fn get_primary_mount_point() -> PathBuf { } // Gets available disk space in bytes for the given mountpoint -pub fn get_available_space_b(storage_mountpoint: String) -> Result { +pub fn get_available_space_b(storage_mountpoint: &PathBuf) -> Result { let disks = Disks::new_with_refreshed_list(); if tracing::level_enabled!(tracing::Level::DEBUG) { for disk in disks.list() { - let res = disk.mount_point().to_string_lossy() == storage_mountpoint.clone(); + let res = disk.mount_point() == storage_mountpoint; debug!( "Disk: {disk:?} is equal to '{:?}': {res:?}", - storage_mountpoint.clone(), + storage_mountpoint, ); } } @@ -140,7 +136,7 @@ pub fn get_available_space_b(storage_mountpoint: String) -> Result { let available_space_b = disks .list() .iter() - .find(|disk| disk.mount_point().to_string_lossy() == storage_mountpoint.clone()) + .find(|disk| disk.mount_point() == storage_mountpoint) .context("Cannot find the primary disk")? .available_space() as usize; From f983380f2e059692aeeac317c27ef87e4189da06 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 14 Aug 2024 23:56:58 +0200 Subject: [PATCH 090/115] fix(launchpad): selecting appropriate drives to choose from --- node-launchpad/src/app.rs | 4 +- .../src/components/popup/change_drive.rs | 87 ++++++++++++------- node-launchpad/src/components/status.rs | 2 +- node-launchpad/src/config.rs | 45 ++++++++-- node-launchpad/src/system.rs | 59 +++++++++++-- 5 files changed, 146 insertions(+), 51 deletions(-) diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index eb189a11aa..48804d6c79 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -56,7 +56,7 @@ impl App { let config = Config::new()?; let data_dir_path = match &app_data.storage_mountpoint { - Some(path) => get_launchpad_nodes_data_dir_path(PathBuf::from(path))?, + Some(path) => get_launchpad_nodes_data_dir_path(&PathBuf::from(path), true)?, None => return Err(eyre!("Storage mountpoint for node data is not set")), }; debug!("Data dir path for nodes: {data_dir_path:?}"); @@ -97,7 +97,7 @@ impl App { let change_drive = ChangeDrivePopup::new(app_data.storage_mountpoint.clone().ok_or_else(|| { eyre!("Creating Change Drive screen, storage_mountpoint is None") - })?); + })?)?; Ok(Self { config, diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 2366f6ef2f..33443800f4 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -24,6 +24,7 @@ use ratatui::{ use crate::{ action::{Action, OptionsActions}, components::Component, + config::get_launchpad_nodes_data_dir_path, mode::{InputMode, Scene}, style::{ clear_area, COOL_GREY, DARK_GUNMETAL, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, @@ -50,8 +51,8 @@ pub struct ChangeDrivePopup { } impl ChangeDrivePopup { - pub fn new(storage_mountpoint: PathBuf) -> Self { - let drives_and_space = system::get_list_of_drives_and_available_space(); + pub fn new(storage_mountpoint: PathBuf) -> Result { + let drives_and_space = system::get_list_of_available_drives_and_available_space()?; let mut selected_drive: DriveItem = DriveItem::default(); // Create a vector of DriveItem from drives_and_space @@ -81,24 +82,27 @@ impl ChangeDrivePopup { debug!("Drive 
Mountpoint in Config: {:?}", storage_mountpoint); debug!("Drives and space: {:?}", drives_and_space); let items = StatefulList::with_items(drives_items); - Self { + Ok(Self { active: false, state: ChangeDriveState::Selection, items, drive_selection: selected_drive.clone(), drive_selection_initial_state: selected_drive.clone(), user_moved: false, - } + }) } - /// Interacts with the List of drives - // Deselect all drives + // --- Interactions with the List of drives --- + + /// Deselects all drives in the list of items + /// fn deselect_all(&mut self) { for item in &mut self.items.items { item.status = DriveStatus::NotSelected; } } - // Change the status of the selected drive to Selected + /// Assigns to self.drive_selection the selected drive in the list + /// #[allow(dead_code)] fn assign_drive_selection(&mut self) { self.deselect_all(); @@ -107,7 +111,8 @@ impl ChangeDrivePopup { self.drive_selection = self.items.items[i].clone(); } } - // Highlight the drive that is currently selected in this component members + /// Highlights the drive that is currently selected in the list of items. + /// fn select_drive(&mut self) { self.deselect_all(); for (index, item) in self.items.items.iter_mut().enumerate() { @@ -118,7 +123,8 @@ impl ChangeDrivePopup { } } } - // return the selected drive + /// Returns the highlighted drive in the list of items. + /// fn return_selection(&mut self) -> DriveItem { if let Some(i) = self.items.state.selected() { return self.items.items[i].clone(); @@ -126,7 +132,8 @@ impl ChangeDrivePopup { DriveItem::default() } - /// Draw functions + // -- Draw functions -- + // Draws the Drive Selection screen fn draw_selection_state( &mut self, @@ -353,13 +360,12 @@ impl Component for ChangeDrivePopup { // over the drive already selected let drive = self.return_selection(); if self.items.items.len() > 1 - && (drive.name != self.drive_selection.name - && drive.mountpoint != self.drive_selection.mountpoint) + && (drive.mountpoint != self.drive_selection.mountpoint) { debug!( "Got Enter and there's a new selection, storing value and switching to Options" ); - debug!("Drive selected: {:?}", self.drive_selection.name); + debug!("Drive selected: {:?}", drive.name); self.drive_selection_initial_state = self.drive_selection.clone(); self.assign_drive_selection(); self.state = ChangeDriveState::ConfirmChange; @@ -374,20 +380,22 @@ impl Component for ChangeDrivePopup { vec![Action::SwitchScene(Scene::Options)] } KeyCode::Up => { - let drive = self.return_selection(); if self.items.items.len() > 1 { - self.user_moved = drive.name == self.drive_selection.name - && drive.mountpoint == self.drive_selection.mountpoint; self.items.previous(); + let drive = self.return_selection(); + self.user_moved = drive.mountpoint != self.drive_selection.mountpoint; } vec![] } KeyCode::Down => { - let drive = self.return_selection(); if self.items.items.len() > 1 { - self.user_moved = drive.name == self.drive_selection.name - && drive.mountpoint == self.drive_selection.mountpoint; self.items.next(); + let drive = self.return_selection(); + debug!( + "User moved: {:?} != {:?}", + drive.mountpoint, self.drive_selection.mountpoint + ); + self.user_moved = drive.mountpoint != self.drive_selection.mountpoint; } vec![] } @@ -399,17 +407,36 @@ impl Component for ChangeDrivePopup { ChangeDriveState::ConfirmChange => match key.code { KeyCode::Enter => { debug!("Got Enter, storing value and switching to Options"); - vec![ - Action::StoreStorageDrive( - self.drive_selection.mountpoint.clone(), - 
self.drive_selection.name.clone(), - ), - Action::OptionsActions(OptionsActions::UpdateStorageDrive( - self.drive_selection.mountpoint.clone(), - self.drive_selection.name.clone(), - )), - Action::SwitchScene(Scene::Options), - ] + // Let's create the data directory for the new drive + self.drive_selection = self.return_selection(); + match get_launchpad_nodes_data_dir_path(&self.drive_selection.mountpoint, true) + { + Ok(_path) => { + // TODO: probably delete the old data directory before switching + // Taking in account if it's the default mountpoint + // (were the executable is) + vec![ + Action::StoreStorageDrive( + self.drive_selection.mountpoint.clone(), + self.drive_selection.name.clone(), + ), + Action::OptionsActions(OptionsActions::UpdateStorageDrive( + self.drive_selection.mountpoint.clone(), + self.drive_selection.name.clone(), + )), + Action::SwitchScene(Scene::Options), + ] + } + Err(e) => { + self.drive_selection = self.drive_selection_initial_state.clone(); + self.state = ChangeDriveState::Selection; + error!( + "Error creating folder {:?}: {}", + self.drive_selection.mountpoint, e + ); + vec![Action::SwitchScene(Scene::Options)] + } + } } KeyCode::Esc => { debug!("Got Esc, switching to Options"); diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index f45de69abc..fbe03f2fc4 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -280,7 +280,7 @@ impl Component for Status { let action_sender = self.get_actions_sender()?; reset_nodes(action_sender, false); self.data_dir_path = - get_launchpad_nodes_data_dir_path(drive_mountpoint.to_path_buf())?; + get_launchpad_nodes_data_dir_path(&drive_mountpoint.to_path_buf(), false)?; } Action::StatusActions(status_action) => { match status_action { diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index c47341dda3..041827b034 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -19,26 +19,53 @@ use std::path::PathBuf; const CONFIG: &str = include_str!("../.config/config.json5"); -// Where to store the Nodes data -// If the base dir is the default mount point, we store in "$HOME/user_data_dir/safe/node" -// if not we store in "$MOUNTPOINT/safe/node" -pub fn get_launchpad_nodes_data_dir_path(base_dir: PathBuf) -> Result { +/// Where to store the Nodes data. +/// +/// If is the primary mount point, we store in "/$HOME/user_data_dir/safe/node". +/// +/// if not we store in "/safe/node". +/// +/// If should_create is true, the directory will be created if it doesn't exists. +pub fn get_launchpad_nodes_data_dir_path( + base_dir: &PathBuf, + should_create: bool, +) -> Result { let mut mount_point = PathBuf::new(); - let data_directory: PathBuf = if base_dir == get_primary_mount_point() { - dirs_next::data_dir().ok_or_else(|| eyre!("Data directory is not obtainable"))? + let data_directory: PathBuf = if *base_dir == get_primary_mount_point() { + dirs_next::data_dir().ok_or_else(|| { + eyre!( + "Data directory is not obtainable for base_dir {:?}", + base_dir + ) + })? 
} else { - base_dir + base_dir.clone() }; mount_point.push(data_directory); mount_point.push("safe"); mount_point.push("node"); debug!("Creating nodes data dir: {:?}", mount_point.as_path()); - std::fs::create_dir_all(mount_point.as_path())?; + if should_create { + match std::fs::create_dir_all(mount_point.as_path()) { + Ok(_) => debug!("Nodes {:?} data dir created successfully", mount_point), + Err(e) => { + error!( + "Failed to create nodes data dir in {:?}: {:?}", + mount_point, e + ); + return Err(eyre!( + "Failed to create nodes data dir in {:?}", + mount_point + )); + } + } + } Ok(mount_point) } -// Where to store the Launchpad config & logs +/// Where to store the Launchpad config & logs. +/// pub fn get_launchpad_data_dir_path() -> Result { let mut home_dirs = dirs_next::data_dir().ok_or_else(|| eyre!("Data directory is not obtainable"))?; diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index f132de3acd..c46781e02e 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -17,6 +17,8 @@ use std::path::PathBuf; use std::process::Command; use sysinfo::Disks; +use crate::config::get_launchpad_nodes_data_dir_path; + // Tries to get the default (drive name, mount point) of the current executable // to be used as the default drive pub fn get_default_mount_point() -> Result<(String, PathBuf)> { @@ -38,6 +40,20 @@ pub fn get_default_mount_point() -> Result<(String, PathBuf)> { Err(eyre!("Cannot find the default mount point")) } +/// Checks if the given is the default mount point of the current executable +/// +/// We return an error if we cannot find the default mount point or the current executable +pub fn is_default_mount_point(path: &Path) -> Result { + let disks = Disks::new_with_refreshed_list(); + let exe_path = env::current_exe()?; + for disk in disks.list() { + if exe_path.starts_with(disk.mount_point()) { + return Ok(disk.mount_point() == path); + } + } + Err(eyre!("Cannot find the default mount point")) +} + // Checks if the given path (a drive) is read-only fn is_read_only>(path: P) -> bool { let test_file_path = path.as_ref().join("lauchpad_test_write_permission.tmp"); @@ -63,8 +79,11 @@ fn is_read_only>(path: P) -> bool { } } -// Gets a list of drives and their available space -pub fn get_list_of_drives_and_available_space() -> Vec<(String, PathBuf, u64)> { +/// Gets a list of available drives and their available space. +/// +/// An available drive is a drive that is not read-only on the data directory. +/// +pub fn get_list_of_available_drives_and_available_space() -> Result> { // Create a new System instance let disks = Disks::new_with_refreshed_list(); @@ -81,19 +100,41 @@ pub fn get_list_of_drives_and_available_space() -> Vec<(String, PathBuf, u64)> { disk.mount_point().to_path_buf(), disk.available_space(), ); - // We don't check for write permission on removable drives - if !disk.is_removable() { - // Check if the disk is read-only and skip it - if is_read_only(disk.mount_point()) { - continue; + + if is_read_only(get_launchpad_nodes_data_dir_path( + &disk.mount_point().to_path_buf(), + false, + )?) { + debug!( + "Data dir path on {:?} is read-only. 
We skip this disk.", + disk_info + ); + continue; + } + + // To handle the case where the same disk is mounted multiple times + // We check names and free space to determine if it's the same disk + let mut skip_drive = false; + for drive in &drives { + if drive.0 == disk_info.0 && drive.2 == disk_info.2 { + debug!( + "Disk already in our list of available disks: {:?}", + disk_info + ); + skip_drive = true; + break; } } - if !drives.contains(&disk_info) { + if !skip_drive { + debug!( + "Adding disk to our list of available disks: {:?}", + disk_info + ); drives.push(disk_info); } } debug!("Drives detected: {:?}", drives); - drives + Ok(drives) } // Opens a folder in the file explorer From 5958270aadd5982b404530bc84046d3c3fccd9d4 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 15 Aug 2024 08:45:46 +0200 Subject: [PATCH 091/115] chore(launchpad): documentatinon styling --- node-launchpad/src/config.rs | 2 +- node-launchpad/src/system.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 041827b034..81f2f8223b 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -21,7 +21,7 @@ const CONFIG: &str = include_str!("../.config/config.json5"); /// Where to store the Nodes data. /// -/// If is the primary mount point, we store in "/$HOME/user_data_dir/safe/node". +/// If `base_dir` is the primary mount point, we store in "/$HOME/user_data_dir/safe/node". /// /// if not we store in "/safe/node". /// diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index c46781e02e..14a6287505 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -40,7 +40,7 @@ pub fn get_default_mount_point() -> Result<(String, PathBuf)> { Err(eyre!("Cannot find the default mount point")) } -/// Checks if the given is the default mount point of the current executable +/// Checks if the given `path` is the default mount point of the current executable /// /// We return an error if we cannot find the default mount point or the current executable pub fn is_default_mount_point(path: &Path) -> Result { From f430f26b205fb80df1ffb922ac41437919dfc564 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 15 Aug 2024 08:47:56 +0200 Subject: [PATCH 092/115] feat(launchpad): stying of node status section --- node-launchpad/src/components/footer.rs | 1 + node-launchpad/src/components/status.rs | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/node-launchpad/src/components/footer.rs b/node-launchpad/src/components/footer.rs index 045e91f8ff..6401766850 100644 --- a/node-launchpad/src/components/footer.rs +++ b/node-launchpad/src/components/footer.rs @@ -55,6 +55,7 @@ impl StatefulWidget for Footer { .block( Block::default() .borders(Borders::ALL) + .border_style(Style::default().fg(EUCALYPTUS)) .padding(Padding::horizontal(1)), ) .widths(vec![ diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index fbe03f2fc4..402d131476 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -20,8 +20,7 @@ use crate::{ mode::{InputMode, Scene}, node_stats::NodeStats, style::{ - clear_area, COOL_GREY, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, - VIVID_SKY_BLUE, + clear_area, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE, }, }; use color_eyre::eyre::{OptionExt, Result}; @@ -558,7 +557,7 @@ impl Component for Status { .title(" Nodes 
(0) ".to_string()) .title_style(Style::default().fg(LIGHT_PERIWINKLE)) .borders(Borders::ALL) - .border_style(style::Style::default().fg(COOL_GREY)) + .border_style(style::Style::default().fg(EUCALYPTUS)) .padding(Padding::horizontal(1)), ), layout[2], From 2d7d6c1ea2ea2ccb7292aa481775d71dd639c45d Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 15 Aug 2024 09:38:32 +0200 Subject: [PATCH 093/115] chore(launchpad): debug info deleted/moved --- node-launchpad/src/components/popup/change_drive.rs | 4 ---- node-launchpad/src/config.rs | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 33443800f4..4ece76a877 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -391,10 +391,6 @@ impl Component for ChangeDrivePopup { if self.items.items.len() > 1 { self.items.next(); let drive = self.return_selection(); - debug!( - "User moved: {:?} != {:?}", - drive.mountpoint, self.drive_selection.mountpoint - ); self.user_moved = drive.mountpoint != self.drive_selection.mountpoint; } vec![] diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 81f2f8223b..b2f192324f 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -45,8 +45,8 @@ pub fn get_launchpad_nodes_data_dir_path( mount_point.push(data_directory); mount_point.push("safe"); mount_point.push("node"); - debug!("Creating nodes data dir: {:?}", mount_point.as_path()); if should_create { + debug!("Creating nodes data dir: {:?}", mount_point.as_path()); match std::fs::create_dir_all(mount_point.as_path()) { Ok(_) => debug!("Nodes {:?} data dir created successfully", mount_point), Err(e) => { From f529a0b9d73802d4853a2432b7b6613f746446ee Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 15 Aug 2024 11:17:01 +0200 Subject: [PATCH 094/115] fix(launchpad): using faccess crate for rw detection --- Cargo.lock | 12 +++++++ node-launchpad/Cargo.toml | 1 + node-launchpad/src/system.rs | 67 ++++++++++++++---------------------- 3 files changed, 38 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae4f01442e..e28903626d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2024,6 +2024,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "faccess" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ae66425802d6a903e268ae1a08b8c38ba143520f227a205edf4e9c7e3e26d5" +dependencies = [ + "bitflags 1.3.2", + "libc", + "winapi", +] + [[package]] name = "fake-simd" version = "0.1.2" @@ -4666,6 +4677,7 @@ dependencies = [ "derive_deref", "directories", "dirs-next", + "faccess", "fs_extra", "futures", "human-panic", diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index d91df41c34..ad3e6e89a2 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -63,6 +63,7 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter", "serde"] } tui-input = "0.8.0" which = "6.0.1" ansi-to-tui = "4.1.0" +faccess = "0.2.4" [build-dependencies] vergen = { version = "8.2.6", features = ["build", "git", "gitoxide", "cargo"] } diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 14a6287505..4fb436bd79 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -9,16 +9,15 @@ use color_eyre::eyre::eyre; use color_eyre::eyre::ContextCompat; use color_eyre::Result; +use 
faccess::{AccessMode, PathExt}; + use std::env; -use std::fs::OpenOptions; -use std::io::Write; + use std::path::Path; use std::path::PathBuf; use std::process::Command; use sysinfo::Disks; -use crate::config::get_launchpad_nodes_data_dir_path; - // Tries to get the default (drive name, mount point) of the current executable // to be used as the default drive pub fn get_default_mount_point() -> Result<(String, PathBuf)> { @@ -40,43 +39,23 @@ pub fn get_default_mount_point() -> Result<(String, PathBuf)> { Err(eyre!("Cannot find the default mount point")) } -/// Checks if the given `path` is the default mount point of the current executable -/// -/// We return an error if we cannot find the default mount point or the current executable -pub fn is_default_mount_point(path: &Path) -> Result { - let disks = Disks::new_with_refreshed_list(); - let exe_path = env::current_exe()?; - for disk in disks.list() { - if exe_path.starts_with(disk.mount_point()) { - return Ok(disk.mount_point() == path); - } - } - Err(eyre!("Cannot find the default mount point")) -} - -// Checks if the given path (a drive) is read-only -fn is_read_only>(path: P) -> bool { - let test_file_path = path.as_ref().join("lauchpad_test_write_permission.tmp"); - - // Try to create and write to a temporary file - let result = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&test_file_path) - .and_then(|mut file| file.write_all(b"test")); - - match result { +// Checks if the given path has read and write access +fn has_read_write_access(path: PathBuf) -> bool { + let check_access = |mode, access_type| match path.access(mode) { Ok(_) => { - // Clean up the test file if write was successful - let _ = std::fs::remove_file(test_file_path); - false + debug!("{} access granted for {:?}", access_type, path); + true } - Err(err) => { - // Check if the error is due to a read-only file system - err.kind() == std::io::ErrorKind::PermissionDenied + Err(_) => { + debug!("{} access denied for {:?}", access_type, path); + false } - } + }; + + let read = check_access(AccessMode::READ, "Read"); + let write = check_access(AccessMode::WRITE, "Write"); + + read && write } /// Gets a list of available drives and their available space. @@ -101,10 +80,14 @@ pub fn get_list_of_available_drives_and_available_space() -> Result Date: Thu, 15 Aug 2024 17:56:30 +0200 Subject: [PATCH 095/115] chore(launchpad): fn to detect drives cleanup --- node-launchpad/src/system.rs | 54 ++++++++++++------------------------ 1 file changed, 17 insertions(+), 37 deletions(-) diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 4fb436bd79..936fb650a6 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -63,13 +63,10 @@ fn has_read_write_access(path: PathBuf) -> bool { /// An available drive is a drive that is not read-only on the data directory. 
/// pub fn get_list_of_available_drives_and_available_space() -> Result> { - // Create a new System instance let disks = Disks::new_with_refreshed_list(); - - // Get the list of disks let mut drives: Vec<(String, PathBuf, u64)> = Vec::new(); + for disk in disks.list() { - // Check if the disk is already in the list let disk_info = ( disk.name() .to_string_lossy() @@ -80,42 +77,25 @@ pub fn get_list_of_available_drives_and_available_space() -> Result Date: Mon, 19 Aug 2024 09:44:16 +0200 Subject: [PATCH 096/115] fix(launchpad): opening log path for windows --- sn_node_manager/src/config.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sn_node_manager/src/config.rs b/sn_node_manager/src/config.rs index 64cb732e0a..4e8ad9c47a 100644 --- a/sn_node_manager/src/config.rs +++ b/sn_node_manager/src/config.rs @@ -254,6 +254,7 @@ pub fn is_running_as_root() -> bool { std::fs::read_dir("C:\\Windows\\System32\\config").is_ok() } +#[cfg(not(windows))] pub fn get_user_safenode_data_dir() -> Result { Ok(dirs_next::data_dir() .ok_or_else(|| { @@ -263,3 +264,8 @@ pub fn get_user_safenode_data_dir() -> Result { .join("safe") .join("node")) } + +#[cfg(windows)] +pub fn get_user_safenode_data_dir() -> Result { + get_service_log_dir_path(ReleaseType::NodeLaunchpad, None, None); +} From 35e2aaca42068d9ff7d98db15496d65d2e3f111b Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 19 Aug 2024 11:02:31 +0200 Subject: [PATCH 097/115] fix(launchpad): use sn_node_manager::get_service_log_dir_path --- node-launchpad/src/components/options.rs | 11 +++++++---- sn_node_manager/src/config.rs | 6 ------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index ca4801c8ff..9158d89d1c 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -8,6 +8,7 @@ use ratatui::{ widgets::{Block, Borders, Cell, Row, Table}, Frame, }; +use sn_releases::ReleaseType; use tokio::sync::mpsc::UnboundedSender; use super::{header::SelectedMenuItem, Component}; @@ -18,7 +19,7 @@ use crate::{ style::{EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, system, }; -use sn_node_manager::config::get_user_safenode_data_dir; +use sn_node_manager::config::get_service_log_dir_path; #[derive(Clone)] pub struct Options { @@ -255,9 +256,11 @@ impl Component for Options { self.discord_username = username; } OptionsActions::TriggerAccessLogs => { - if let Err(e) = - system::open_folder(get_user_safenode_data_dir()?.to_str().unwrap()) - { + if let Err(e) = system::open_folder( + get_service_log_dir_path(ReleaseType::NodeLaunchpad, None, None)? 
+ .to_str() + .unwrap(), + ) { error!("Failed to open folder: {}", e); } } diff --git a/sn_node_manager/src/config.rs b/sn_node_manager/src/config.rs index 4e8ad9c47a..64cb732e0a 100644 --- a/sn_node_manager/src/config.rs +++ b/sn_node_manager/src/config.rs @@ -254,7 +254,6 @@ pub fn is_running_as_root() -> bool { std::fs::read_dir("C:\\Windows\\System32\\config").is_ok() } -#[cfg(not(windows))] pub fn get_user_safenode_data_dir() -> Result { Ok(dirs_next::data_dir() .ok_or_else(|| { @@ -264,8 +263,3 @@ pub fn get_user_safenode_data_dir() -> Result { .join("safe") .join("node")) } - -#[cfg(windows)] -pub fn get_user_safenode_data_dir() -> Result { - get_service_log_dir_path(ReleaseType::NodeLaunchpad, None, None); -} From 994922149996c635c396a9a79fb4bc823c5ea0f6 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 19 Aug 2024 11:11:46 +0200 Subject: [PATCH 098/115] fix(launchpad): unwrap vs eyre --- node-launchpad/src/components/options.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 9158d89d1c..e3dc97764a 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use color_eyre::eyre::Result; +use color_eyre::eyre::{eyre, Result}; use ratatui::{ layout::{Alignment, Constraint, Direction, Layout, Rect}, style::{Style, Stylize}, @@ -259,7 +259,9 @@ impl Component for Options { if let Err(e) = system::open_folder( get_service_log_dir_path(ReleaseType::NodeLaunchpad, None, None)? .to_str() - .unwrap(), + .ok_or_else(|| { + eyre!("We cannot get the log dir path for Node-Launchpad") + })?, ) { error!("Failed to open folder: {}", e); } From 709c75f03efd45acf2dd34d450c89a9b9ab3be7f Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Fri, 2 Aug 2024 20:27:17 +0100 Subject: [PATCH 099/115] chore: provide keys from environments Two environments are defined on the repository, and those now provide the compile-time keys that are used in the currency system and also form part of the network identity string. --- .github/workflows/release.yml | 12 +++++++----- Justfile | 8 ++++++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index edb068d2a2..30139d2f26 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,11 +9,6 @@ on: # also be updated. 
env: WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs - GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }} - GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }} - FOUNDATION_PK: ${{ secrets.STABLE_FOUNDATION_PK }} - NETWORK_ROYALTIES_PK: ${{ secrets.STABLE_NETWORK_ROYALTIES_PK }} - PAYMENT_FORWARD_PK: ${{ secrets.STABLE_REWARD_FORWARDING_PK }} jobs: build: @@ -22,6 +17,13 @@ jobs: (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc')) }} name: build + environment: ${{ github.ref == 'refs/heads/stable' && 'stable' || 'release-candidate' }} + env: + FOUNDATION_PK: ${{ vars.FOUNDATION_PK }} + GENESIS_PK: ${{ vars.GENESIS_PK }} + GENESIS_SK: ${{ secrets.GENESIS_SK }} + NETWORK_ROYALTIES_PK: ${{ vars.NETWORK_ROYALTIES_PK }} + PAYMENT_FORWARD_PK: ${{ vars.PAYMENT_FORWARD_PK }} runs-on: ${{ matrix.os }} strategy: matrix: diff --git a/Justfile b/Justfile index 450ea0ca71..d52041e73f 100644 --- a/Justfile +++ b/Justfile @@ -107,6 +107,14 @@ build-release-artifacts arch: mkdir artifacts cargo clean + echo "===============" + echo "= Public Keys =" + echo "===============" + echo "FOUNDATION_PK: $FOUNDATION_PK" + echo "GENESIS_PK: $GENESIS_PK" + echo "NETWORK_ROYALTIES_PK: $NETWORK_ROYALTIES_PK" + echo "PAYMENT_FORWARD_PK: $PAYMENT_FORWARD_PK" + cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\" --env \"FOUNDATION_PK=$FOUNDATION_PK\" --env \"NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK\" --env \"PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK\"" export CROSS_CONTAINER_OPTS=$cross_container_opts From c0eddf5cda07841c6a3ae04ba45a69df4a6ab954 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Fri, 2 Aug 2024 21:01:23 +0100 Subject: [PATCH 100/115] chore: use a prefix for release tag It's useful to differentiate the tag between the stable or the release candidate. --- .github/workflows/release.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 30139d2f26..bb738a9fd8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -141,7 +141,6 @@ jobs: name: github release runs-on: ubuntu-latest needs: [ build ] - steps: - uses: actions/checkout@v4 - uses: actions/download-artifact@master @@ -178,6 +177,16 @@ jobs: shell: bash run: cargo binstall --no-confirm just + - name: set stable release prefix + if: ${{ github.ref == 'refs/heads/stable' }} + run: | + echo "RELEASE_PREFIX=stable" >> $GITHUB_ENV + + - name: set rc release prefix + if: ${{ startsWith(github.ref, 'refs/heads/rc') }} + run: | + echo "RELEASE_PREFIX=rc" >> $GITHUB_ENV + - name: set package version shell: bash run: | @@ -198,7 +207,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} with: - tag_name: ${{ env.PACKAGE_VERSION }} + tag_name: ${{ env.RELEASE_PREFIX }}-${{ env.PACKAGE_VERSION }} release_name: ${{ env.PACKAGE_VERSION }} draft: false prerelease: ${{ startsWith(github.ref, 'refs/heads/rc') && true || false }} From c2c7f3ad06e90add3e83f2feb2d6a993ede17f4b Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Fri, 2 Aug 2024 21:33:05 +0100 Subject: [PATCH 101/115] chore: script for removing s3 bin archives This is useful if something goes wrong with an RC or stable build or you just need to run the build again and you want new binaries to be uploaded to S3. Obviously it should be used with care, but in some situations it is necessary. 
It is extremely tedious to remove the files manually, and often when trying to do that, one of them will be missed, which can cause the whole run to fail. --- .github/workflows/release.yml | 2 +- .../scripts/remove-s3-binary-archives.sh | 64 +++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) create mode 100755 resources/scripts/remove-s3-binary-archives.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bb738a9fd8..d76079329b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -219,7 +219,7 @@ jobs: run: | ( cd packaged_architectures - ls | xargs gh release upload ${{ env.PACKAGE_VERSION }} + ls | xargs gh release upload ${{ env.RELEASE_PREFIX }}-${{ env.PACKAGE_VERSION }} ) - name: post notification to slack on failure diff --git a/resources/scripts/remove-s3-binary-archives.sh b/resources/scripts/remove-s3-binary-archives.sh new file mode 100755 index 0000000000..71cdd0159b --- /dev/null +++ b/resources/scripts/remove-s3-binary-archives.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# This script can be useful in rare cases where you need to run the stable or rc build again. It +# will clear out all the binary archives from S3. The version numbers used are from the crates on +# the branch on which the script is running. That fact should make it pretty difficult to delete +# anything unintentionally, but obviously, just use care with the script. + +architectures=( + "aarch64-apple-darwin" + "aarch64-unknown-linux-musl" + "arm-unknown-linux-musleabi" + "armv7-unknown-linux-musleabihf" + "x86_64-apple-darwin" + "x86_64-pc-windows-msvc" + "x86_64-unknown-linux-musl" +) +declare -A binary_crate_dir_mappings=( + ["faucet"]="sn_faucet" + ["nat-detection"]="nat-detection" + ["node-launchpad"]="node-launchpad" + ["safe"]="sn_cli" + ["safenode"]="sn_node" + ["safenode-manager"]="sn_node_manager" + ["safenode_rpc_client"]="sn_node_rpc_client" + ["safenodemand"]="sn_node_manager" + ["sn_auditor"]="sn_auditor" +) +declare -A binary_s3_bucket_mappings=( + ["faucet"]="sn-faucet" + ["nat-detection"]="nat-detection" + ["node-launchpad"]="node-launchpad" + ["safe"]="sn-cli" + ["safenode"]="sn-node" + ["safenode-manager"]="sn-node-manager" + ["safenode_rpc_client"]="sn-node-rpc-client" + ["safenodemand"]="sn-node-manager" + ["sn_auditor"]="sn-auditor" +) + +for arch in "${architectures[@]}"; do + for binary in "${!binary_crate_dir_mappings[@]}"; do + crate_dir="${binary_crate_dir_mappings[$binary]}" + bucket_name="${binary_s3_bucket_mappings[$binary]}" + version=$(grep "^version" < $crate_dir/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g') + zip_filename="${binary}-${version}-${arch}.zip" + tar_filename="${binary}-${version}-${arch}.tar.gz" + + dest="s3://${bucket_name}/${zip_filename}" + if aws s3 ls "$dest" > /dev/null 2>&1; then + aws s3 rm $dest + echo "Removed $dest" + else + echo "$dest did not exist" + fi + + dest="s3://${bucket_name}/${tar_filename}" + if aws s3 ls "$dest" > /dev/null 2>&1; then + aws s3 rm $dest + echo "Removed $dest" + else + echo "$dest did not exist" + fi + done +done From e4305e28dcc6f06dcaaa8bda46a79d4c2a2de98a Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Fri, 16 Aug 2024 21:22:17 +0100 Subject: [PATCH 102/115] fix: save node registry on every upgrade Save the registry when each node is upgraded, whether there was an error or not. Previously, the registry was only being saved after the entire suite was processed. 
Waiting to the end was problematic during the deployment process when Ansible's SSH connection was lost. It resulted in the registry not being updated and hence no nodes were being reported as being upgraded. This led me on a bit of a wild goose chase when debugging the connection issues. It will be very useful to have this fix in for the upgrade process, because nodes should then be correctly reported as upgraded even if a connection is lost. --- sn_node_manager/src/cmd/node.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 7a06ee7c63..bca1e536ed 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -555,6 +555,7 @@ pub async fn upgrade( service_manager.service.service_data.service_name.clone(), upgrade_result, )); + node_registry.save()?; } Err(err) => { error!("Error upgrading service {service_name}: {err}"); @@ -562,11 +563,11 @@ pub async fn upgrade( node.service_name.clone(), UpgradeResult::Error(format!("Error: {err}")), )); + node_registry.save()?; } } } - node_registry.save()?; print_upgrade_summary(upgrade_summary.clone()); if upgrade_summary.iter().any(|(_, r)| { From 03c7fbb66598806afd8abf6fe019986229457812 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Fri, 16 Aug 2024 22:33:44 +0100 Subject: [PATCH 103/115] Merge pull request #2047 from jacderida/rc-2024.08.1 fix: save node registry on every upgrade From 4fdb6035ef3710869d282fbefa5c43b97551af73 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 19 Aug 2024 15:40:52 +0100 Subject: [PATCH 104/115] chore(release): release candidate 2024.08.2.1 ====================== New Crate Versions ====================== sn_auditor: 0.2.4-rc.1 sn_build_info: 0.1.11-rc.1 sn_cli: 0.94.1-rc.1 sn_client: 0.109.1-rc.1 sn_faucet: 0.4.32-rc.1 sn_logging: 0.2.32-rc.1 sn_metrics: 0.1.12-rc.1 nat-detection: 0.2.2-rc.1 sn_networking: 0.17.2-rc.1 sn_node: 0.110.1-rc.1 node-launchpad: 0.3.12-rc.1 sn_node_manager: 0.10.2-rc.1 sn_node_rpc_client: 0.6.27-rc.1 sn_peers_acquisition: 0.4.2-rc.1 sn_protocol: 0.17.7-rc.1 sn_registers: 0.3.17-rc.1 sn_service_management: 0.3.10-rc.1 sn_transfers: 0.18.10-rc.1 test_utils: 0.4.3-rc.1 token_supplies: 0.1.50-rc.1 ======================= New Binary Versions ======================= faucet: 0.4.32-rc.1 nat-detection: 0.2.2-rc.1 node-launchpad: 0.3.12-rc.1 safe: 0.94.1-rc.1 safenode: 0.110.1-rc.1 safenode-manager: 0.10.2-rc.1 safenode_rpc_client: 0.6.27-rc.1 safenodemand: 0.10.2-rc.1 sn_auditor: 0.2.4-rc.1 --- CHANGELOG.md | 14 +++++++++++ Cargo.lock | 40 ++++++++++++++++---------------- nat-detection/Cargo.toml | 4 ++-- node-launchpad/Cargo.toml | 8 +++---- release-cycle-info | 2 +- sn_auditor/Cargo.toml | 8 +++---- sn_build_info/Cargo.toml | 2 +- sn_cli/Cargo.toml | 14 +++++------ sn_client/Cargo.toml | 18 +++++++------- sn_faucet/Cargo.toml | 16 ++++++------- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 10 ++++---- sn_node/Cargo.toml | 24 +++++++++---------- sn_node_manager/Cargo.toml | 12 +++++----- sn_node_rpc_client/Cargo.toml | 16 ++++++------- sn_peers_acquisition/Cargo.toml | 4 ++-- sn_protocol/Cargo.toml | 6 ++--- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++---- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- token_supplies/Cargo.toml | 2 +- 23 files changed, 116 insertions(+), 102 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d2915d56e..eda6eec7e0 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -7,6 +7,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2024-08-27 + +### Binaries + +* `faucet` v0.4.32 +* `nat-detection` v0.2.2 +* `node-launchpad` v0.3.12 +* `safe` v0.94.1 +* `safenode` v0.110.1 +* `safenode-manager` v0.10.2 +* `safenodemand` v0.10.2 +* `safenode_rpc_client` v0.6.27 +* `sn_auditor` v0.2.4 + ## 2024-07-25 ### Binaries diff --git a/Cargo.lock b/Cargo.lock index e28903626d..d94effa9ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4549,7 +4549,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.1" +version = "0.2.2-rc.1" dependencies = [ "clap", "clap-verbosity-flag", @@ -4664,7 +4664,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.11" +version = "0.3.12-rc.1" dependencies = [ "ansi-to-tui", "atty", @@ -6944,7 +6944,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.1" +version = "0.10.2-rc.1" dependencies = [ "assert_cmd", "assert_fs", @@ -7006,7 +7006,7 @@ dependencies = [ [[package]] name = "sn_auditor" -version = "0.2.3" +version = "0.2.4-rc.1" dependencies = [ "blsttc", "clap", @@ -7040,14 +7040,14 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.10" +version = "0.1.11-rc.1" dependencies = [ "vergen", ] [[package]] name = "sn_cli" -version = "0.94.0" +version = "0.94.1-rc.1" dependencies = [ "aes 0.7.5", "base64 0.22.1", @@ -7089,7 +7089,7 @@ dependencies = [ [[package]] name = "sn_client" -version = "0.109.0" +version = "0.109.1-rc.1" dependencies = [ "assert_matches", "async-trait", @@ -7172,7 +7172,7 @@ dependencies = [ [[package]] name = "sn_faucet" -version = "0.4.31" +version = "0.4.32-rc.1" dependencies = [ "assert_fs", "base64 0.22.1", @@ -7204,7 +7204,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.31" +version = "0.2.32-rc.1" dependencies = [ "chrono", "color-eyre", @@ -7229,7 +7229,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.11" +version = "0.1.12-rc.1" dependencies = [ "clap", "color-eyre", @@ -7243,7 +7243,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.17.1" +version = "0.17.2-rc.1" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.110.0" +version = "0.110.1-rc.1" dependencies = [ "assert_fs", "assert_matches", @@ -7340,7 +7340,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.26" +version = "0.6.27-rc.1" dependencies = [ "assert_fs", "async-trait", @@ -7367,7 +7367,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.4.1" +version = "0.4.2-rc.1" dependencies = [ "clap", "lazy_static", @@ -7383,7 +7383,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.6" +version = "0.17.7-rc.1" dependencies = [ "blsttc", "bytes", @@ -7410,7 +7410,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.16" +version = "0.3.17-rc.1" dependencies = [ "blsttc", "crdts", @@ -7427,7 +7427,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.9" +version = "0.3.10-rc.1" dependencies = [ "async-trait", "dirs-next", @@ -7453,7 +7453,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.18.9" +version = "0.18.10-rc.1" dependencies = [ "assert_fs", "blsttc", @@ -7786,7 +7786,7 @@ checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.2" +version = "0.4.3-rc.1" dependencies = [ "color-eyre", "dirs-next", @@ -7918,7 +7918,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.49" +version = "0.1.50-rc.1" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 35c4cfab43..54dbdcaf6b 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.1" +version = "0.2.2-rc.1" [[bin]] name = "nat-detection" @@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.17.1" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index ad3e6e89a2..24431f40ad 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.11" +version = "0.3.12-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -48,10 +48,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.10.1", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.4.1", path = "../sn_peers_acquisition" } +sn-node-manager = { version = "0.10.2-rc.1", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.4.2-rc.1", path = "../sn_peers_acquisition" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.9", path = "../sn_service_management" } +sn_service_management = { version = "0.3.10-rc.1", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info index 14b23f7ad5..9d8bd27a42 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -6,5 +6,5 @@ # # Both of these numbers are used in the packaged version number, which is a collective version # number for all the released binaries. 
-release-cycle: 1 +release-cycle: 2 release-cycle-counter: 1 diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index 6919e4ae63..b80b2d7036 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.2.3" +version = "0.2.4-rc.1" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_client = { path = "../sn_client", version = "0.109.0" } -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 8f61fdff57..62b7431918 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.10" +version = "0.1.11-rc.1" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index ab809fc121..b10188f54a 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.94.0" +version = "0.94.1-rc.1" [[bin]] path = "src/bin/main.rs" @@ -58,11 +58,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ rmp-serde = "1.1.1" rpassword = "7.3.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.10" } -sn_client = { path = "../sn_client", version = "0.109.0" } -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.1" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -84,7 +84,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.109.0", features = [ +sn_client = { path = "../sn_client", version = "0.109.1-rc.1", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index e637c17321..55440f72c7 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.109.0" +version = 
"0.109.1-rc.1" [features] default = [] @@ -49,16 +49,16 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.17.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6" } -sn_registers = { path = "../sn_registers", version = "0.3.16" } -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = "~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1", optional = true } eyre = { version = "0.6.8", optional = true } [dev-dependencies] @@ -67,8 +67,8 @@ dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_registers = { path = "../sn_registers", version = "0.3.16", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1", features = [ "test-utils", ] } @@ -83,7 +83,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index 4fd88e3198..0a6602dc26 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.31" +version = "0.4.32-rc.1" [features] default = ["gifting"] @@ -37,13 +37,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.10" } -sn_cli = { path = "../sn_cli", version = "0.94.0" } -sn_client = { path = "../sn_client", version = "0.109.0" } -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6" } -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.1" } +sn_cli = { path = "../sn_cli", version = "0.94.1-rc.1" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } tokio = { version = "1.32.0", 
features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 66cc6870ce..08381c957b 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.31" +version = "0.2.32-rc.1" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index 00aef41f04..ffdd1a94d1 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.11" +version = "0.1.12-rc.1" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index d443f763e0..1d4e8738ee 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.1" +version = "0.17.2-rc.1" [features] default = ["libp2p/quic"] @@ -53,10 +53,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.10" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6" } -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } -sn_registers = { path = "../sn_registers", version = "0.3.16" } +sn_build_info = { path="../sn_build_info", version = "0.1.11-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 0d7d526f36..5dabe3a0bf 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.110.0" +version = "0.110.1-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -50,14 +50,14 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.10" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_networking = { path = "../sn_networking", version = "0.17.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6" } -sn_registers = { path = "../sn_registers", version = "0.3.16" } -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } -sn_service_management = { path = "../sn_service_management", version = "0.3.9" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } +sn_transfers = { path = 
"../sn_transfers", version = "0.18.10-rc.1" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.1" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -84,11 +84,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.109.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6", features = [ +sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.9", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index becade3053..b3d72afb75 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.1" +version = "0.10.2-rc.1" [[bin]] name = "safenode-manager" @@ -44,12 +44,12 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6" } -sn_service_management = { path = "../sn_service_management", version = "0.3.9" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.1" } sn-releases = "0.2.6" -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 229941c7ec..724c570c81 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.26" +version = "0.6.27-rc.1" [[bin]] name = "safenode_rpc_client" @@ -23,13 +23,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version="0.53", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", version = "0.109.0" } -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_node = { path = "../sn_node", version = "0.110.0" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.9" } -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_node = { path = "../sn_node", version = "0.110.1-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_protocol = { path 
= "../sn_protocol", version = "0.17.7-rc.1", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 5df30921a8..7a0554c756 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.1" +version = "0.4.2-rc.1" [features] local-discovery = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version="0.53", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_networking = { path = "../sn_networking", version = "0.17.1", optional = true} +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false} tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index c30e4081b5..6a29597b65 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.6" +version = "0.17.7-rc.1" [features] default = [] @@ -27,8 +27,8 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } -sn_registers = { path = "../sn_registers", version = "0.3.16" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index ea41bcd7c7..ccb3ceac62 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.16" +version = "0.3.17-rc.1" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index cb30be547f..da581f6f3a 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.9" +version = "0.3.10-rc.1" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.31" } -sn_protocol = { path = "../sn_protocol", version = "0.17.6", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = 
["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index c87b586660..6503cc4f04 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.9" +version = "0.18.10-rc.1" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 17e006d306..c0530bf767 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.2" +version = "0.4.3-rc.1" [dependencies] color-eyre = "~0.6.2" diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index 7f182cd035..dd185abd84 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.49" +version = "0.1.50-rc.1" [dependencies] From f29f134dcf710fc1a5cc9151b4128cd45c5a7e81 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 20 Aug 2024 08:46:19 +0200 Subject: [PATCH 105/115] feat(networking): increase circuit bytes limit --- sn_networking/src/driver.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index b18b532074..75bacf09f3 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -598,6 +598,8 @@ impl NetworkBuilder { max_circuits: 1024, // The total amount of relayed connections at any given moment. max_circuits_per_peer: 256, // Amount of relayed connections per peer (both dst and src) circuit_src_rate_limiters: vec![], // No extra rate limiting for now + // We should at least be able to relay packets with chunks etc. + max_circuit_bytes: MAX_PACKET_SIZE as u64, ..Default::default() }; libp2p::relay::Behaviour::new(peer_id, relay_server_cfg) From 296412e1a423acfdceefc1e6cbc2e05f11c98a55 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 20 Aug 2024 08:46:19 +0200 Subject: [PATCH 106/115] feat(networking): increase circuit bytes limit --- sn_networking/src/driver.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index b18b532074..75bacf09f3 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -598,6 +598,8 @@ impl NetworkBuilder { max_circuits: 1024, // The total amount of relayed connections at any given moment. max_circuits_per_peer: 256, // Amount of relayed connections per peer (both dst and src) circuit_src_rate_limiters: vec![], // No extra rate limiting for now + // We should at least be able to relay packets with chunks etc. + max_circuit_bytes: MAX_PACKET_SIZE as u64, ..Default::default() }; libp2p::relay::Behaviour::new(peer_id, relay_server_cfg) From 0c687b6c8a2f7042dfc4ac0184f5156961ccd606 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 20 Aug 2024 08:50:27 +0200 Subject: [PATCH 107/115] feat(networking): re-introduce DCUtR Revert "chore: remove dcutr" This reverts commit a48b8007f4d7325e1689d3ff9a19d6127faba390. 
--- Cargo.lock | 40 +++++++++++++++++++++++++++++--- sn_networking/Cargo.toml | 1 + sn_networking/src/driver.rs | 2 ++ sn_networking/src/event/mod.rs | 6 +++++ sn_networking/src/event/swarm.rs | 12 ++++++++++ sn_networking/src/metrics/mod.rs | 6 +++++ 6 files changed, 64 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d94effa9ea..9a692b0e57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3760,6 +3760,7 @@ dependencies = [ "libp2p-autonat", "libp2p-connection-limits", "libp2p-core", + "libp2p-dcutr", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3856,6 +3857,29 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-dcutr" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4f7bb7fa2b9e6cad9c30a6f67e3ff5c1e4b658c62b6375e35861a85f9c97bf3" +dependencies = [ + "asynchronous-codec 0.6.2", + "either", + "futures", + "futures-bounded", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "lru 0.11.1", + "quick-protobuf", + "quick-protobuf-codec 0.2.0", + "thiserror", + "tracing", + "void", +] + [[package]] name = "libp2p-dns" version = "0.41.1" @@ -3917,7 +3941,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru", + "lru 0.12.3", "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", @@ -4003,6 +4027,7 @@ dependencies = [ "futures", "instant", "libp2p-core", + "libp2p-dcutr", "libp2p-identify", "libp2p-identity", "libp2p-kad", @@ -4124,7 +4149,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru", + "lru 0.12.3", "multistream-select", "once_cell", "rand 0.8.5", @@ -4296,6 +4321,15 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +[[package]] +name = "lru" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "lru" version = "0.12.3" @@ -5989,7 +6023,7 @@ dependencies = [ "compact_str", "crossterm", "itertools 0.12.1", - "lru", + "lru 0.12.3", "paste", "serde", "stability", diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 1d4e8738ee..d4037b7b85 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -30,6 +30,7 @@ libp2p = { version = "0.53", features = [ "request-response", "cbor", "identify", + "dcutr", "tcp", "relay", "noise", diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 75bacf09f3..b598ae5e7b 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -201,6 +201,7 @@ pub(super) struct NodeBehaviour { pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle, pub(super) relay_client: libp2p::relay::client::Behaviour, pub(super) relay_server: libp2p::relay::Behaviour, + pub(super) dcutr: libp2p::dcutr::Behaviour, pub(super) kademlia: kad::Behaviour, pub(super) request_response: request_response::cbor::Behaviour, } @@ -616,6 +617,7 @@ impl NetworkBuilder { identify, #[cfg(feature = "local-discovery")] mdns, + dcutr: libp2p::dcutr::Behaviour::new(peer_id), }; #[cfg(not(target_arch = "wasm32"))] diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 5e82742d6a..ef34676d5c 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -42,6 +42,7 @@ pub(super) enum NodeEvent { #[cfg(feature = 
"local-discovery")] Mdns(Box), Identify(Box), + Dcutr(Box), RelayClient(Box), RelayServer(Box), Void(void::Void), @@ -78,6 +79,11 @@ impl From for NodeEvent { NodeEvent::Identify(Box::new(event)) } } +impl From for NodeEvent { + fn from(event: libp2p::dcutr::Event) -> Self { + NodeEvent::Dcutr(Box::new(event)) + } +} impl From for NodeEvent { fn from(event: libp2p::relay::client::Event) -> Self { NodeEvent::RelayClient(Box::new(event)) diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 78496bbae4..56889a9deb 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -60,6 +60,18 @@ impl SwarmDriver { event_string = "kad_event"; self.handle_kad_event(kad_event)?; } + SwarmEvent::Behaviour(NodeEvent::Dcutr(event)) => { + #[cfg(feature = "open-metrics")] + if let Some(metrics) = &self.network_metrics { + metrics.record(&(*event)); + } + + event_string = "dcutr_event"; + info!( + "Dcutr with remote peer: {:?} is: {:?}", + event.remote_peer_id, event.result + ); + } SwarmEvent::Behaviour(NodeEvent::RelayClient(event)) => { event_string = "relay_client_event"; diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index a76afeeb8d..34deee32f5 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -202,6 +202,12 @@ impl Recorder for NetworkMetricsRecorder { } } +impl Recorder for NetworkMetricsRecorder { + fn record(&self, event: &libp2p::dcutr::Event) { + self.libp2p_metrics.record(event) + } +} + impl Recorder for NetworkMetricsRecorder { fn record(&self, event: &libp2p::relay::Event) { self.libp2p_metrics.record(event) From 7ac63bbad089ad363792cede7c59154eb5b7a243 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 19 Aug 2024 14:03:44 +0530 Subject: [PATCH 108/115] feat(metrics): expose values from QuotingMetrics --- sn_networking/src/metrics/mod.rs | 75 +++++++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 11 deletions(-) diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index a76afeeb8d..ddaaf8a874 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -30,6 +30,8 @@ pub(crate) struct NetworkMetricsRecorder { // Must directly call self.libp2p_metrics.record(libp2p_event) with Recorder trait in scope. 
But since we have // re-implemented the trait for the wrapper struct, we can instead call self.record(libp2p_event) libp2p_metrics: Libp2pMetrics, + #[cfg(feature = "upnp")] + upnp_events: Family, // metrics from sn_networking pub(crate) connected_peers: Gauge, @@ -37,11 +39,17 @@ pub(crate) struct NetworkMetricsRecorder { pub(crate) open_connections: Gauge, pub(crate) peers_in_routing_table: Gauge, pub(crate) records_stored: Gauge, + + // store cost store_cost: Gauge, + relevant_records: Gauge, + max_records: Gauge, + received_payment_count: Gauge, + live_time: Gauge, + + // bad node metrics bad_peers_count: Counter, shunned_count: Counter, - #[cfg(feature = "upnp")] - upnp_events: Family, // system info process_memory_used_mb: Gauge, @@ -85,12 +93,6 @@ impl NetworkMetricsRecorder { "The total number of peers in our routing table", peers_in_routing_table.clone(), ); - let store_cost = Gauge::default(); - sub_registry.register( - "store_cost", - "The store cost of the node", - store_cost.clone(), - ); let shunned_count = Counter::default(); sub_registry.register( @@ -129,18 +131,58 @@ impl NetworkMetricsRecorder { process_cpu_usage_percentage.clone(), ); + // store cost + let store_cost_sub_registry = sub_registry.sub_registry_with_prefix("store_cost"); + let store_cost = Gauge::default(); + store_cost_sub_registry.register( + "store_cost", + "The store cost of the node", + store_cost.clone(), + ); + let relevant_records = Gauge::default(); + store_cost_sub_registry.register( + "relevant_records", + "The number of records that we're responsible for", + relevant_records.clone(), + ); + let max_records = Gauge::default(); + store_cost_sub_registry.register( + "max_records", + "The maximum number of records that we can store", + max_records.clone(), + ); + let received_payment_count = Gauge::default(); + store_cost_sub_registry.register( + "received_payment_count", + "The number of payments received by our node", + received_payment_count.clone(), + ); + let live_time = Gauge::default(); + store_cost_sub_registry.register( + "live_time", + "The time for which the node has been alive", + live_time.clone(), + ); + let network_metrics = Self { libp2p_metrics, + #[cfg(feature = "upnp")] + upnp_events, + records_stored, estimated_network_size, connected_peers, open_connections, peers_in_routing_table, store_cost, + relevant_records, + max_records, + received_payment_count, + live_time, + bad_peers_count, shunned_count, - #[cfg(feature = "upnp")] - upnp_events, + process_memory_used_mb, process_cpu_usage_percentage, }; @@ -187,8 +229,19 @@ impl NetworkMetricsRecorder { Marker::FlaggedAsBadNode { .. } => { let _ = self.shunned_count.inc(); } - Marker::StoreCost { cost, .. 
} => { + Marker::StoreCost { + cost, + quoting_metrics, + } => { let _ = self.store_cost.set(cost as i64); + let _ = self + .relevant_records + .set(quoting_metrics.close_records_stored as i64); + let _ = self.max_records.set(quoting_metrics.max_records as i64); + let _ = self + .received_payment_count + .set(quoting_metrics.received_payment_count as i64); + let _ = self.live_time.set(quoting_metrics.live_time as i64); } _ => {} } From 8011c83142112d158d0b205b55f22fee6f0d679a Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 21 Aug 2024 13:11:12 +0100 Subject: [PATCH 109/115] chore(release): release candidate 2024.08.2.2 ====================== New Crate Versions ====================== sn_auditor: 0.2.4-rc.2 sn_build_info: 0.1.11-rc.2 sn_cli: 0.94.1-rc.2 sn_client: 0.109.1-rc.2 sn_faucet: 0.4.32-rc.2 sn_logging: 0.2.32-rc.2 sn_metrics: 0.1.12-rc.2 nat-detection: 0.2.2-rc.2 sn_networking: 0.17.2-rc.2 sn_node: 0.110.1-rc.2 node-launchpad: 0.3.12-rc.2 sn_node_manager: 0.10.2-rc.2 sn_node_rpc_client: 0.6.27-rc.2 sn_peers_acquisition: 0.4.2-rc.2 sn_protocol: 0.17.7-rc.2 sn_registers: 0.3.17-rc.2 sn_service_management: 0.3.10-rc.2 sn_transfers: 0.18.10-rc.2 test_utils: 0.4.3-rc.2 token_supplies: 0.1.50-rc.2 ======================= New Binary Versions ======================= faucet: 0.4.32-rc.2 nat-detection: 0.2.2-rc.2 node-launchpad: 0.3.12-rc.2 safe: 0.94.1-rc.2 safenode: 0.110.1-rc.2 safenode-manager: 0.10.2-rc.2 safenode_rpc_client: 0.6.27-rc.2 safenodemand: 0.10.2-rc.2 sn_auditor: 0.2.4-rc.2 --- Cargo.lock | 40 ++++++++++++++++---------------- nat-detection/Cargo.toml | 4 ++-- node-launchpad/Cargo.toml | 8 +++---- release-cycle-info | 2 +- sn_auditor/Cargo.toml | 8 +++---- sn_build_info/Cargo.toml | 2 +- sn_cli/Cargo.toml | 14 +++++------ sn_client/Cargo.toml | 18 +++++++------- sn_faucet/Cargo.toml | 16 ++++++------- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 10 ++++---- sn_node/Cargo.toml | 24 +++++++++---------- sn_node_manager/Cargo.toml | 12 +++++----- sn_node_rpc_client/Cargo.toml | 16 ++++++------- sn_peers_acquisition/Cargo.toml | 4 ++-- sn_protocol/Cargo.toml | 6 ++--- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++---- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- token_supplies/Cargo.toml | 2 +- 22 files changed, 102 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d94effa9ea..5fe83a0b4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4549,7 +4549,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.2-rc.1" +version = "0.2.2-rc.2" dependencies = [ "clap", "clap-verbosity-flag", @@ -4664,7 +4664,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.12-rc.1" +version = "0.3.12-rc.2" dependencies = [ "ansi-to-tui", "atty", @@ -6944,7 +6944,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.2-rc.1" +version = "0.10.2-rc.2" dependencies = [ "assert_cmd", "assert_fs", @@ -7006,7 +7006,7 @@ dependencies = [ [[package]] name = "sn_auditor" -version = "0.2.4-rc.1" +version = "0.2.4-rc.2" dependencies = [ "blsttc", "clap", @@ -7040,14 +7040,14 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.11-rc.1" +version = "0.1.11-rc.2" dependencies = [ "vergen", ] [[package]] name = "sn_cli" -version = "0.94.1-rc.1" +version = "0.94.1-rc.2" dependencies = [ "aes 0.7.5", "base64 0.22.1", @@ -7089,7 +7089,7 @@ dependencies = [ [[package]] name = 
"sn_client" -version = "0.109.1-rc.1" +version = "0.109.1-rc.2" dependencies = [ "assert_matches", "async-trait", @@ -7172,7 +7172,7 @@ dependencies = [ [[package]] name = "sn_faucet" -version = "0.4.32-rc.1" +version = "0.4.32-rc.2" dependencies = [ "assert_fs", "base64 0.22.1", @@ -7204,7 +7204,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.32-rc.1" +version = "0.2.32-rc.2" dependencies = [ "chrono", "color-eyre", @@ -7229,7 +7229,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.12-rc.1" +version = "0.1.12-rc.2" dependencies = [ "clap", "color-eyre", @@ -7243,7 +7243,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.17.2-rc.1" +version = "0.17.2-rc.2" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.110.1-rc.1" +version = "0.110.1-rc.2" dependencies = [ "assert_fs", "assert_matches", @@ -7340,7 +7340,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.27-rc.1" +version = "0.6.27-rc.2" dependencies = [ "assert_fs", "async-trait", @@ -7367,7 +7367,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.4.2-rc.1" +version = "0.4.2-rc.2" dependencies = [ "clap", "lazy_static", @@ -7383,7 +7383,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.7-rc.1" +version = "0.17.7-rc.2" dependencies = [ "blsttc", "bytes", @@ -7410,7 +7410,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.17-rc.1" +version = "0.3.17-rc.2" dependencies = [ "blsttc", "crdts", @@ -7427,7 +7427,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.10-rc.1" +version = "0.3.10-rc.2" dependencies = [ "async-trait", "dirs-next", @@ -7453,7 +7453,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.18.10-rc.1" +version = "0.18.10-rc.2" dependencies = [ "assert_fs", "blsttc", @@ -7786,7 +7786,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.3-rc.1" +version = "0.4.3-rc.2" dependencies = [ "color-eyre", "dirs-next", @@ -7918,7 +7918,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.50-rc.1" +version = "0.1.50-rc.2" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 54dbdcaf6b..0535824f8f 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.2-rc.1" +version = "0.2.2-rc.2" [[bin]] name = "nat-detection" @@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 24431f40ad..b82edf1b91 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.12-rc.1" +version = "0.3.12-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -48,10 +48,10 @@ reqwest = { version = 
"0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.10.2-rc.1", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.4.2-rc.1", path = "../sn_peers_acquisition" } +sn-node-manager = { version = "0.10.2-rc.2", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.4.2-rc.2", path = "../sn_peers_acquisition" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.10-rc.1", path = "../sn_service_management" } +sn_service_management = { version = "0.3.10-rc.2", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info index 9d8bd27a42..28385361a8 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -7,4 +7,4 @@ # Both of these numbers are used in the packaged version number, which is a collective version # number for all the released binaries. release-cycle: 2 -release-cycle-counter: 1 +release-cycle-counter: 2 diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index b80b2d7036..c06016b83b 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.2.4-rc.1" +version = "0.2.4-rc.2" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 62b7431918..a529ab13f3 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.11-rc.1" +version = "0.1.11-rc.2" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index b10188f54a..cf87bce642 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.94.1-rc.1" +version = "0.94.1-rc.2" [[bin]] path = "src/bin/main.rs" @@ -58,11 +58,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ rmp-serde = "1.1.1" rpassword = "7.3.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.1" } -sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } 
-sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.2" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -84,7 +84,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.109.1-rc.1", features = [ +sn_client = { path = "../sn_client", version = "0.109.1-rc.2", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index 55440f72c7..c6f6a21c05 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.109.1-rc.1" +version = "0.109.1-rc.2" [features] default = [] @@ -49,16 +49,16 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = "~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2", optional = true } eyre = { version = "0.6.8", optional = true } [dev-dependencies] @@ -67,8 +67,8 @@ dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2", features = [ "test-utils", ] } @@ -83,7 +83,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index 0a6602dc26..ca88bdbb83 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.32-rc.1" +version = "0.4.32-rc.2" [features] default = ["gifting"] @@ -37,13 +37,13 @@ 
indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.1" } -sn_cli = { path = "../sn_cli", version = "0.94.1-rc.1" } -sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.2" } +sn_cli = { path = "../sn_cli", version = "0.94.1-rc.2" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 08381c957b..e2b0bf0da5 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.32-rc.1" +version = "0.2.32-rc.2" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index ffdd1a94d1..fec2e25735 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.12-rc.1" +version = "0.1.12-rc.2" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 1d4e8738ee..9259531e80 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.2-rc.1" +version = "0.17.2-rc.2" [features] default = ["libp2p/quic"] @@ -53,10 +53,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.11-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } +sn_build_info = { path="../sn_build_info", version = "0.1.11-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 5dabe3a0bf..8b3e8e47a1 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = 
"0.110.1-rc.1" +version = "0.110.1-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -50,14 +50,14 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.2" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -84,11 +84,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1", features = [ +sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index b3d72afb75..e0cc764574 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.2-rc.1" +version = "0.10.2-rc.2" [[bin]] name = "safenode-manager" @@ -44,12 +44,12 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1" } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.2" } sn-releases = "0.2.6" -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] 
} diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 724c570c81..4cba3d2535 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.27-rc.1" +version = "0.6.27-rc.2" [[bin]] name = "safenode_rpc_client" @@ -23,13 +23,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version="0.53", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", version = "0.109.1-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_node = { path = "../sn_node", version = "0.110.1-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_node = { path = "../sn_node", version = "0.110.1-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 7a0554c756..4c81cb01f1 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.2-rc.1" +version = "0.4.2-rc.2" [features] local-discovery = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version="0.53", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.1", optional = true} +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false} tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 6a29597b65..e2244f9db7 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.7-rc.1" +version = "0.17.7-rc.2" [features] default = [] @@ -27,8 +27,8 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff 
--git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index ccb3ceac62..fa2b80ff50 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.17-rc.1" +version = "0.3.17-rc.2" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index da581f6f3a..5dffe2aede 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.10-rc.1" +version = "0.3.10-rc.2" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.1", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 6503cc4f04..3ea9b276fd 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.10-rc.1" +version = "0.18.10-rc.2" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index c0530bf767..49706ca994 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.3-rc.1" +version = "0.4.3-rc.2" [dependencies] color-eyre = "~0.6.2" diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index dd185abd84..93fe27a81a 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.50-rc.1" +version = "0.1.50-rc.2" [dependencies] From dcf8ba29bae1750668cab17d2eae2a4ea93191c2 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Sat, 24 Aug 2024 19:05:58 +0100 Subject: [PATCH 110/115] docs: changelog for `2024.08.2.2` release The previous entries had information about binary versions, highlights, and merged pull requests. These have been removed. I decided the changelog should only contain the detailed changes, and those other things should be on the release notes. 
--- CHANGELOG.md | 179 +++++++++++++++++++++++---------------------------- 1 file changed, 80 insertions(+), 99 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eda6eec7e0..75abab8ee8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,109 +9,101 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## 2024-08-27 -### Binaries - -* `faucet` v0.4.32 -* `nat-detection` v0.2.2 -* `node-launchpad` v0.3.12 -* `safe` v0.94.1 -* `safenode` v0.110.1 -* `safenode-manager` v0.10.2 -* `safenodemand` v0.10.2 -* `safenode_rpc_client` v0.6.27 -* `sn_auditor` v0.2.4 - -## 2024-07-25 - -### Binaries - -* `faucet` v0.4.31 -* `nat-detection` v0.2.1 -* `node-launchpad` v0.3.11 -* `safe` v0.94.0 -* `safenode` v0.110.0 -* `safenode-manager` v0.10.1 -* `safenodemand` v0.10.1 -* `safenode_rpc_client` v0.6.26 -* `sn_auditor` v0.2.3 - -### 🔦 Highlights - -* The introduction of a record-store cache has significantly reduced the node's disk IO. As a side - effect, the CPU does less work, and performance improves. RAM usage has increased by around 25MB per - node, but we view this as a reasonable trade off. -* The node's relay server now supports more connections: when running with `--home-network`, up to - 256 will be supported, and otherwise, it will be 1024. Along with minor tweaks to utilize the - relay server properly, this should hopefully result in less connections being dropped. -* Reward forwarding is more robust. -* Chunk verification is now probabilistic, which should reduce messaging. In combination with - replication messages also being reduced, this should result in a bandwidth usage reduction of - ~20%. -* Replication messages are less frequent, reducing bandwidth by ~20% per node. -* Bad nodes and nodes with a mismatched protocol are now added to a block list. This reduces the - chance of a network interference and the impact of a bad node in the network. -* For the time being, hole punching has been removed. It was causing handshake time outs, resulting - in home nodes being less stable. It will be re-enabled in the future. -* Wallet password encryption enhances security, and in the case of secret key leakage, prevents - unauthorized access. -* Native Apple Silicon (M-series) binaries have been added to our releases, meaning M-series Mac - users do not have to rely on running Intel binaries with Rosetta. - -### Merged Pull Requests - -2024-07-11 [#1945](https://github.com/maidsafe/safe_network/pull/1945) -- feat: double spend spam protection +### Network -2024-07-11 [#1952](https://github.com/maidsafe/safe_network/pull/1952) -- fix(auditor): create auditor directory if it doesn't exist +#### Added -2024-07-11 [#1951](https://github.com/maidsafe/safe_network/pull/1951) -- test(spend_simulation): add more attacks +- The node will now report its bandwidth usage through the metrics endpoint. +- The metrics server has a new `/metadata` path which will provide static information about the node, + including peer ID and version. +- The metrics server exposes more metrics on store cost derivation. These include relevant record + count and number of payments received. +- The metrics server exposes metrics related to bad node detection. +- Test to confirm main key can’t verify signature signed by child key. +- Avoid excessively high quotes by pruning records that are not relevant. 
-2024-07-11 [#1953](https://github.com/maidsafe/safe_network/pull/1953) -- chore/fix(resources): use more portable shebang +#### Changed -2024-07-12 [#1959](https://github.com/maidsafe/safe_network/pull/1959) -- refactor outdated conn removal +- Bad node detection and bootstrap intervals have been increased. This should reduce the number + of messages being sent. +- The spend parent verification strategy was refactored to be more aligned with the public + network. +- Nodes now prioritize local work over new work from the network, which reduces memory footprint. +- Multiple GET queries to the same address are now de-duplicated and will result in a single query + being processed. +- Improve efficiency of command handling and the record store cache. +- A parent spend is now trusted with a majority of close group nodes, rather than all of them. This + increases the chance of the spend being stored successfully when some percentage of nodes are slow + to respond. -2024-07-12 [#1964](https://github.com/maidsafe/safe_network/pull/1964) -- refactor(cli)!: `wallet address` and `wallet create` changes +#### Fixed -2024-07-15 [#1946](https://github.com/maidsafe/safe_network/pull/1946) -- docs(sn_client): Basic documentation +- The amount of bytes a home node could send and receive per relay connection is increased. This + solves a problem where transmission of data is interrupted, causing home nodes to malfunction. +- Fetching the network contacts now times out and retries. Previously we would wait for an excessive + amount of time, which could cause the node to hang during start up. +- If a node has been shunned, we inform that node before blocking all communication to it. +- The current wallet balance metric is updated more frequently and will now reflect the correct + state. +- Avoid burnt spend during forwarding by correctly handling repeated CashNotes and confirmed spends. +- Fix logging for CashNote and confirmed spend disk ops +- Check whether a CashNote has already been received to avoid duplicate CashNotes in the wallet. -2024-07-15 [#1966](https://github.com/maidsafe/safe_network/pull/1966) -- fix(network): do not add bootstrap peer as relay candidate +### Node Manager -2024-07-16 [#1969](https://github.com/maidsafe/safe_network/pull/1969) -- chore(network): force close connection if there is a protocol mistmatch +#### Added -2024-07-16 [#1972](https://github.com/maidsafe/safe_network/pull/1972) -- feat(safenode_rpc_client): added `--version` flag +- The `local run` command supports `--metrics-port`, `--node-port` and `--rpc-port` arguments. +- The `start` command waits for the node to connect to the network before attempting to start the + next node. If it takes more than 300 seconds to connect, we consider that a failure and move to the + next node. The `--connection-timeout` argument can be used to vary the timeout. If you prefer the + old behaviour, you can use the `--interval` argument, which will continue to apply a static, + time-based interval. -2024-07-17 [#1973](https://github.com/maidsafe/safe_network/pull/1973) -- Auditor supplement features +#### Changed -2024-07-17 [#1975](https://github.com/maidsafe/safe_network/pull/1975) -- feat(networking): remove self.close_group and checks there as unused +- On an upgrade, the node registry is saved after each node is processed, as opposed to waiting + until the end. This means if there is an unexpected failure, the registry will have the + information about which nodes have already been upgraded. 
-2024-07-18 [#1976](https://github.com/maidsafe/safe_network/pull/1976) -- chore(networking): make ChunkVerification probabalistic +### Launchpad -2024-07-18 [#1949](https://github.com/maidsafe/safe_network/pull/1949) -- feat(wallet): wallet secret key file encryption +#### Added -2024-07-18 [#1977](https://github.com/maidsafe/safe_network/pull/1977) -- Reduce replication msg processing +- The user can choose a different drive for the node's data directory. +- New sections in the UI: `Options` and `Help`. +- A navigation bar has been added with `Status`, `Options` and `Help` sections. +- The node's logs can be viewed from the `Options` section. -2024-07-18 [#1983](https://github.com/maidsafe/safe_network/pull/1983) -- fix(node): remove cn from disk and flush to confirmed_spends during forwarding +#### Changed -2024-07-18 [#1980](https://github.com/maidsafe/safe_network/pull/1980) -- feat(networking): add small record cache +- Increased spacing for title and paragraphs. +- Increased spacing on footer. +- Increased spacing on box titles. +- Moved `Discord Username` from the top title into the `Device Status` section. +- Made the general layout of `Device Status` more compact. -2024-07-18 [#1982](https://github.com/maidsafe/safe_network/pull/1982) -- feat(network): implement blocklist behaviour +### Client -2024-07-18 [#1984](https://github.com/maidsafe/safe_network/pull/1984) -- chore(node): move sn_client to dev deps +#### Added -2024-07-18 [#1985](https://github.com/maidsafe/safe_network/pull/1985) -- Fix Nano count disappearing from Launchpad after restart +- The `safe files download` command now displays duration per file. -2024-07-19 [#1971](https://github.com/maidsafe/safe_network/pull/1971) -- feat!: limit error surface +#### Changed -2024-07-19 [#1986](https://github.com/maidsafe/safe_network/pull/1986) -- Add native Apple Silicon binaries to the release artifacts +- Adjust the put and get configuration scheme to align the client with a more realistic network + which would have some percentage of slow nodes. +- Improved spend logging to help debug the upload process. -2024-07-19 [#1955](https://github.com/maidsafe/safe_network/pull/1955) -- feat(networking): relax relay limits +#### Fixed -2024-07-24 [#1990](https://github.com/maidsafe/safe_network/pull/1990) -- chore: implement new process in release workflow +- Avoid a corrupt wallet by terminating the payment process during an unrecoverable error. -### Detailed Changes +## 2024-07-25 -#### Network +### Network -##### Added +#### Added - Protection against an attack allowing bad nodes or clients to shadow a spend (make it disappear) through spamming. @@ -128,7 +120,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 effect, the CPU does less work, and performance improves. RAM usage has increased by around 25MB per node, but we view this as a reasonable trade off. -##### Changed +#### Changed - For the time being, hole punching has been removed. It was causing handshake time outs, resulting in home nodes being less stable. It will be re-enabled in the future. @@ -139,16 +131,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 replication messages also being reduced, this should result in a bandwidth usage reduction of ~20%. -##### Fixed +#### Fixed - During payment forwarding, CashNotes are removed from disk and confirmed spends are stored to disk. This is necessary for resolving burnt spend attempts for forwarded payments. 
- Fix a bug where the auditor was not storing data to disk because of a missing directory. - Bootstrap peers are not added as relay candidates as we do not want to overwhelm them. -#### Client +### Client -##### Added +#### Added - Basic global documentation for the `sn_client` crate. - Option to encrypt the wallet private key with a password, in a file called @@ -164,45 +156,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 be used with the mnemonic to create a new private key. - A new `wallet encrypt` command encrypts an existing wallet. -##### Changed +#### Changed - The `wallet address` command no longer creates a new wallet if no wallet exists. - The `wallet create` command creates a wallet using the account mnemonic instead of requiring a hex-encoded secret key. - The `wallet create` `--key` and `--derivation` arguments are mutually exclusive. -#### Launchpad +### Launchpad -##### Fixed +#### Fixed - The `Total Nanos Earned` stat no longer resets on restart. -#### RPC Client +### RPC Client -##### Added +#### Added - A `--version` argument shows the binary version -#### Other +### Other -##### Added +#### Added - Native Apple Silicon (M-series) binaries have been added to our releases, meaning M-series Mac users do not have to rely on running Intel binaries with Rosetta. ## 2024-07-10 -### Binaries - -* `faucet` v0.4.30 -* `nat-detection` v0.2.0 -* `node-launchpad` v0.3.10 -* `safe` v0.93.9 -* `safenode` v0.109.0 -* `safenode-manager` v0.10.0 -* `sn_auditor` v0.2.2 -* `sn_node_rpc_client` v0.6.25 - ### Network #### Added From ecf1c2991dc07f87f387525d66317f51bc1d1765 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 26 Aug 2024 19:35:07 +0530 Subject: [PATCH 111/115] refactor(metrics): revert store cost metrics breaking change --- sn_networking/src/metrics/mod.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index ddaaf8a874..a7fdfbeee1 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -132,35 +132,34 @@ impl NetworkMetricsRecorder { ); // store cost - let store_cost_sub_registry = sub_registry.sub_registry_with_prefix("store_cost"); let store_cost = Gauge::default(); - store_cost_sub_registry.register( + sub_registry.register( "store_cost", "The store cost of the node", store_cost.clone(), ); let relevant_records = Gauge::default(); - store_cost_sub_registry.register( + sub_registry.register( "relevant_records", - "The number of records that we're responsible for", + "The number of records that we're responsible for. This is used to calculate the store cost", relevant_records.clone(), ); let max_records = Gauge::default(); - store_cost_sub_registry.register( + sub_registry.register( "max_records", - "The maximum number of records that we can store", + "The maximum number of records that we can store. This is used to calculate the store cost", max_records.clone(), ); let received_payment_count = Gauge::default(); - store_cost_sub_registry.register( + sub_registry.register( "received_payment_count", - "The number of payments received by our node", + "The number of payments received by our node. This is used to calculate the store cost", received_payment_count.clone(), ); let live_time = Gauge::default(); - store_cost_sub_registry.register( + sub_registry.register( "live_time", - "The time for which the node has been alive", + "The time for which the node has been alive. 
This is used to calculate the store cost", live_time.clone(), ); From 2d9baecb904d1392e4ccf9fe0321c4cd63138b0c Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 26 Aug 2024 15:51:40 +0100 Subject: [PATCH 112/115] chore(release): release candidate 2024.08.2.2 ====================== New Crate Versions ====================== sn_auditor: 0.2.4-rc.3 sn_build_info: 0.1.11-rc.3 sn_cli: 0.94.1-rc.3 sn_client: 0.109.1-rc.3 sn_faucet: 0.4.32-rc.3 sn_logging: 0.2.32-rc.3 sn_metrics: 0.1.12-rc.3 nat-detection: 0.2.2-rc.3 sn_networking: 0.17.2-rc.3 sn_node: 0.110.1-rc.3 node-launchpad: 0.3.12-rc.3 sn_node_manager: 0.10.2-rc.3 sn_node_rpc_client: 0.6.27-rc.3 sn_peers_acquisition: 0.4.2-rc.3 sn_protocol: 0.17.7-rc.3 sn_registers: 0.3.17-rc.3 sn_service_management: 0.3.10-rc.3 sn_transfers: 0.18.10-rc.3 test_utils: 0.4.3-rc.3 token_supplies: 0.1.50-rc.3 ======================= New Binary Versions ======================= faucet: 0.4.32-rc.3 nat-detection: 0.2.2-rc.3 node-launchpad: 0.3.12-rc.3 safe: 0.94.1-rc.3 safenode: 0.110.1-rc.3 safenode-manager: 0.10.2-rc.3 safenode_rpc_client: 0.6.27-rc.3 safenodemand: 0.10.2-rc.3 sn_auditor: 0.2.4-rc.3 --- Cargo.lock | 40 ++++++++++++++++---------------- nat-detection/Cargo.toml | 4 ++-- node-launchpad/Cargo.toml | 8 +++---- release-cycle-info | 2 +- sn_auditor/Cargo.toml | 8 +++---- sn_build_info/Cargo.toml | 2 +- sn_cli/Cargo.toml | 14 +++++------ sn_client/Cargo.toml | 18 +++++++------- sn_faucet/Cargo.toml | 16 ++++++------- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 10 ++++---- sn_node/Cargo.toml | 24 +++++++++---------- sn_node_manager/Cargo.toml | 12 +++++----- sn_node_rpc_client/Cargo.toml | 16 ++++++------- sn_peers_acquisition/Cargo.toml | 4 ++-- sn_protocol/Cargo.toml | 6 ++--- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++---- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- token_supplies/Cargo.toml | 2 +- 22 files changed, 102 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5fe83a0b4d..7dd19d7cd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4549,7 +4549,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.2-rc.2" +version = "0.2.2-rc.3" dependencies = [ "clap", "clap-verbosity-flag", @@ -4664,7 +4664,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.12-rc.2" +version = "0.3.12-rc.3" dependencies = [ "ansi-to-tui", "atty", @@ -6944,7 +6944,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.2-rc.2" +version = "0.10.2-rc.3" dependencies = [ "assert_cmd", "assert_fs", @@ -7006,7 +7006,7 @@ dependencies = [ [[package]] name = "sn_auditor" -version = "0.2.4-rc.2" +version = "0.2.4-rc.3" dependencies = [ "blsttc", "clap", @@ -7040,14 +7040,14 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.11-rc.2" +version = "0.1.11-rc.3" dependencies = [ "vergen", ] [[package]] name = "sn_cli" -version = "0.94.1-rc.2" +version = "0.94.1-rc.3" dependencies = [ "aes 0.7.5", "base64 0.22.1", @@ -7089,7 +7089,7 @@ dependencies = [ [[package]] name = "sn_client" -version = "0.109.1-rc.2" +version = "0.109.1-rc.3" dependencies = [ "assert_matches", "async-trait", @@ -7172,7 +7172,7 @@ dependencies = [ [[package]] name = "sn_faucet" -version = "0.4.32-rc.2" +version = "0.4.32-rc.3" dependencies = [ "assert_fs", "base64 0.22.1", @@ -7204,7 +7204,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.32-rc.2" 
+version = "0.2.32-rc.3" dependencies = [ "chrono", "color-eyre", @@ -7229,7 +7229,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.12-rc.2" +version = "0.1.12-rc.3" dependencies = [ "clap", "color-eyre", @@ -7243,7 +7243,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.17.2-rc.2" +version = "0.17.2-rc.3" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.110.1-rc.2" +version = "0.110.1-rc.3" dependencies = [ "assert_fs", "assert_matches", @@ -7340,7 +7340,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.27-rc.2" +version = "0.6.27-rc.3" dependencies = [ "assert_fs", "async-trait", @@ -7367,7 +7367,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.4.2-rc.2" +version = "0.4.2-rc.3" dependencies = [ "clap", "lazy_static", @@ -7383,7 +7383,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.7-rc.2" +version = "0.17.7-rc.3" dependencies = [ "blsttc", "bytes", @@ -7410,7 +7410,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.17-rc.2" +version = "0.3.17-rc.3" dependencies = [ "blsttc", "crdts", @@ -7427,7 +7427,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.10-rc.2" +version = "0.3.10-rc.3" dependencies = [ "async-trait", "dirs-next", @@ -7453,7 +7453,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.18.10-rc.2" +version = "0.18.10-rc.3" dependencies = [ "assert_fs", "blsttc", @@ -7786,7 +7786,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.3-rc.2" +version = "0.4.3-rc.3" dependencies = [ "color-eyre", "dirs-next", @@ -7918,7 +7918,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.50-rc.2" +version = "0.1.50-rc.3" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 0535824f8f..486e848543 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.2-rc.2" +version = "0.2.2-rc.3" [[bin]] name = "nat-detection" @@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index b82edf1b91..1ab9928bc8 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.12-rc.2" +version = "0.3.12-rc.3" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -48,10 +48,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.10.2-rc.2", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.4.2-rc.2", path = "../sn_peers_acquisition" } +sn-node-manager = { version = "0.10.2-rc.3", path = 
"../sn_node_manager" } +sn_peers_acquisition = { version = "0.4.2-rc.3", path = "../sn_peers_acquisition" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.10-rc.2", path = "../sn_service_management" } +sn_service_management = { version = "0.3.10-rc.3", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info index 28385361a8..df33ca2ed8 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -7,4 +7,4 @@ # Both of these numbers are used in the packaged version number, which is a collective version # number for all the released binaries. release-cycle: 2 -release-cycle-counter: 2 +release-cycle-counter: 3 diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index c06016b83b..a018d1ee65 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.2.4-rc.2" +version = "0.2.4-rc.3" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index a529ab13f3..9826c79007 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.11-rc.2" +version = "0.1.11-rc.3" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index cf87bce642..6d9fcd0f52 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.94.1-rc.2" +version = "0.94.1-rc.3" [[bin]] path = "src/bin/main.rs" @@ -58,11 +58,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ rmp-serde = "1.1.1" rpassword = "7.3.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.2" } -sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.3" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } 
+sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -84,7 +84,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.109.1-rc.2", features = [ +sn_client = { path = "../sn_client", version = "0.109.1-rc.3", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index c6f6a21c05..8c520414a1 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.109.1-rc.2" +version = "0.109.1-rc.3" [features] default = [] @@ -49,16 +49,16 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = "~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3", optional = true } eyre = { version = "0.6.8", optional = true } [dev-dependencies] @@ -67,8 +67,8 @@ dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3", features = [ "test-utils", ] } @@ -83,7 +83,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index ca88bdbb83..91b83980aa 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.32-rc.2" +version = "0.4.32-rc.3" [features] default = ["gifting"] @@ -37,13 +37,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.2" } -sn_cli = { path = "../sn_cli", version = "0.94.1-rc.2" } -sn_client = { path = 
"../sn_client", version = "0.109.1-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.3" } +sn_cli = { path = "../sn_cli", version = "0.94.1-rc.3" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index e2b0bf0da5..f9fb9321e8 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.32-rc.2" +version = "0.2.32-rc.3" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index fec2e25735..037b2a1807 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.12-rc.2" +version = "0.1.12-rc.3" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 9259531e80..e49c46d72b 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.2-rc.2" +version = "0.17.2-rc.3" [features] default = ["libp2p/quic"] @@ -53,10 +53,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.11-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } +sn_build_info = { path="../sn_build_info", version = "0.1.11-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 8b3e8e47a1..ceba8d50d9 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.110.1-rc.2" +version = "0.110.1-rc.3" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -50,14 +50,14 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.2" } -sn_peers_acquisition = { path = 
"../sn_peers_acquisition", version = "0.4.2-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.3" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -84,11 +84,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2", features = [ +sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index e0cc764574..a0ead22dea 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.2-rc.2" +version = "0.10.2-rc.3" [[bin]] name = "safenode-manager" @@ -44,12 +44,12 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2" } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.3" } sn-releases = "0.2.6" -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 4cba3d2535..755f9f5c0c 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.27-rc.2" +version = 
"0.6.27-rc.3" [[bin]] name = "safenode_rpc_client" @@ -23,13 +23,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version="0.53", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", version = "0.109.1-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_node = { path = "../sn_node", version = "0.110.1-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_node = { path = "../sn_node", version = "0.110.1-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 4c81cb01f1..4a97e334e0 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.2-rc.2" +version = "0.4.2-rc.3" [features] local-discovery = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version="0.53", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.2", optional = true} +sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false} tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index e2244f9db7..55b8da88f9 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.7-rc.2" +version = "0.17.7-rc.3" [features] default = [] @@ -27,8 +27,8 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index fa2b80ff50..d9f882f9b5 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.17-rc.2" +version = "0.3.17-rc.3" [features] test-utils = 
[] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 5dffe2aede..3141f73865 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.10-rc.2" +version = "0.3.10-rc.3" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.2", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 3ea9b276fd..1e59d12d46 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.10-rc.2" +version = "0.18.10-rc.3" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 49706ca994..deada5c55f 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.3-rc.2" +version = "0.4.3-rc.3" [dependencies] color-eyre = "~0.6.2" diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index 93fe27a81a..eb99cfab91 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.50-rc.2" +version = "0.1.50-rc.3" [dependencies] From 9f25908bd4c88aa53f59805f2423e2f13158498a Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 27 Aug 2024 10:51:37 +0100 Subject: [PATCH 113/115] Revert "feat(networking): re-introduce DCUtR" This reverts commit 0c687b6c8a2f7042dfc4ac0184f5156961ccd606. 
--- Cargo.lock | 40 +++----------------------------- sn_networking/Cargo.toml | 1 - sn_networking/src/driver.rs | 2 -- sn_networking/src/event/mod.rs | 6 ----- sn_networking/src/event/swarm.rs | 12 ---------- sn_networking/src/metrics/mod.rs | 6 ----- 6 files changed, 3 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d62df4424..7dd19d7cd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3760,7 +3760,6 @@ dependencies = [ "libp2p-autonat", "libp2p-connection-limits", "libp2p-core", - "libp2p-dcutr", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3857,29 +3856,6 @@ dependencies = [ "void", ] -[[package]] -name = "libp2p-dcutr" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4f7bb7fa2b9e6cad9c30a6f67e3ff5c1e4b658c62b6375e35861a85f9c97bf3" -dependencies = [ - "asynchronous-codec 0.6.2", - "either", - "futures", - "futures-bounded", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-identity", - "libp2p-swarm", - "lru 0.11.1", - "quick-protobuf", - "quick-protobuf-codec 0.2.0", - "thiserror", - "tracing", - "void", -] - [[package]] name = "libp2p-dns" version = "0.41.1" @@ -3941,7 +3917,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru 0.12.3", + "lru", "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", @@ -4027,7 +4003,6 @@ dependencies = [ "futures", "instant", "libp2p-core", - "libp2p-dcutr", "libp2p-identify", "libp2p-identity", "libp2p-kad", @@ -4149,7 +4124,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru 0.12.3", + "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -4321,15 +4296,6 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" -[[package]] -name = "lru" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" -dependencies = [ - "hashbrown 0.14.5", -] - [[package]] name = "lru" version = "0.12.3" @@ -6023,7 +5989,7 @@ dependencies = [ "compact_str", "crossterm", "itertools 0.12.1", - "lru 0.12.3", + "lru", "paste", "serde", "stability", diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 5fe538018b..e49c46d72b 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -30,7 +30,6 @@ libp2p = { version = "0.53", features = [ "request-response", "cbor", "identify", - "dcutr", "tcp", "relay", "noise", diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index b598ae5e7b..75bacf09f3 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -201,7 +201,6 @@ pub(super) struct NodeBehaviour { pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle, pub(super) relay_client: libp2p::relay::client::Behaviour, pub(super) relay_server: libp2p::relay::Behaviour, - pub(super) dcutr: libp2p::dcutr::Behaviour, pub(super) kademlia: kad::Behaviour, pub(super) request_response: request_response::cbor::Behaviour, } @@ -617,7 +616,6 @@ impl NetworkBuilder { identify, #[cfg(feature = "local-discovery")] mdns, - dcutr: libp2p::dcutr::Behaviour::new(peer_id), }; #[cfg(not(target_arch = "wasm32"))] diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index ef34676d5c..5e82742d6a 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -42,7 +42,6 @@ pub(super) enum NodeEvent { #[cfg(feature = 
"local-discovery")] Mdns(Box), Identify(Box), - Dcutr(Box), RelayClient(Box), RelayServer(Box), Void(void::Void), @@ -79,11 +78,6 @@ impl From for NodeEvent { NodeEvent::Identify(Box::new(event)) } } -impl From for NodeEvent { - fn from(event: libp2p::dcutr::Event) -> Self { - NodeEvent::Dcutr(Box::new(event)) - } -} impl From for NodeEvent { fn from(event: libp2p::relay::client::Event) -> Self { NodeEvent::RelayClient(Box::new(event)) diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 56889a9deb..78496bbae4 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -60,18 +60,6 @@ impl SwarmDriver { event_string = "kad_event"; self.handle_kad_event(kad_event)?; } - SwarmEvent::Behaviour(NodeEvent::Dcutr(event)) => { - #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&(*event)); - } - - event_string = "dcutr_event"; - info!( - "Dcutr with remote peer: {:?} is: {:?}", - event.remote_peer_id, event.result - ); - } SwarmEvent::Behaviour(NodeEvent::RelayClient(event)) => { event_string = "relay_client_event"; diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index c4fd0d69bf..a7fdfbeee1 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -254,12 +254,6 @@ impl Recorder for NetworkMetricsRecorder { } } -impl Recorder for NetworkMetricsRecorder { - fn record(&self, event: &libp2p::dcutr::Event) { - self.libp2p_metrics.record(event) - } -} - impl Recorder for NetworkMetricsRecorder { fn record(&self, event: &libp2p::relay::Event) { self.libp2p_metrics.record(event) From 5e8fb24b859e6afb48d4878799fd966326527940 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 27 Aug 2024 10:52:03 +0100 Subject: [PATCH 114/115] Revert "feat(networking): increase circuit bytes limit" This reverts commit 296412e1a423acfdceefc1e6cbc2e05f11c98a55. --- sn_networking/src/driver.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 75bacf09f3..b18b532074 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -598,8 +598,6 @@ impl NetworkBuilder { max_circuits: 1024, // The total amount of relayed connections at any given moment. max_circuits_per_peer: 256, // Amount of relayed connections per peer (both dst and src) circuit_src_rate_limiters: vec![], // No extra rate limiting for now - // We should at least be able to relay packets with chunks etc. 
- max_circuit_bytes: MAX_PACKET_SIZE as u64, ..Default::default() }; libp2p::relay::Behaviour::new(peer_id, relay_server_cfg) From 87a06ff2b5fe53e7967bb8c9fb931b95de05b141 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 27 Aug 2024 11:07:02 +0100 Subject: [PATCH 115/115] chore(release): stable release 2024.08.2.3 ====================== New Crate Versions ====================== sn_auditor: 0.2.4 sn_build_info: 0.1.11 sn_cli: 0.94.1 sn_client: 0.109.1 sn_faucet: 0.4.32 sn_logging: 0.2.32 sn_metrics: 0.1.12 nat-detection: 0.2.2 sn_networking: 0.17.2 sn_node: 0.110.1 node-launchpad: 0.3.12 sn_node_manager: 0.10.2 sn_node_rpc_client: 0.6.27 sn_peers_acquisition: 0.4.2 sn_protocol: 0.17.7 sn_registers: 0.3.17 sn_service_management: 0.3.10 sn_transfers: 0.18.10 test_utils: 0.4.3 token_supplies: 0.1.50 ======================= New Binary Versions ======================= faucet: 0.4.32 nat-detection: 0.2.2 node-launchpad: 0.3.12 safe: 0.94.1 safenode: 0.110.1 safenode-manager: 0.10.2 safenode_rpc_client: 0.6.27 safenodemand: 0.10.2 sn_auditor: 0.2.4 --- Cargo.lock | 40 ++++++++++++++++---------------- nat-detection/Cargo.toml | 4 ++-- node-launchpad/Cargo.toml | 8 +++---- sn_auditor/Cargo.toml | 8 +++---- sn_build_info/Cargo.toml | 2 +- sn_cli/Cargo.toml | 14 +++++------ sn_client/Cargo.toml | 18 +++++++------- sn_faucet/Cargo.toml | 16 ++++++------- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 10 ++++---- sn_node/Cargo.toml | 24 +++++++++---------- sn_node_manager/Cargo.toml | 12 +++++----- sn_node_rpc_client/Cargo.toml | 16 ++++++------- sn_peers_acquisition/Cargo.toml | 4 ++-- sn_protocol/Cargo.toml | 6 ++--- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++---- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- token_supplies/Cargo.toml | 2 +- 21 files changed, 101 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7dd19d7cd5..577410be8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4549,7 +4549,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.2-rc.3" +version = "0.2.2" dependencies = [ "clap", "clap-verbosity-flag", @@ -4664,7 +4664,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.12-rc.3" +version = "0.3.12" dependencies = [ "ansi-to-tui", "atty", @@ -6944,7 +6944,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.2-rc.3" +version = "0.10.2" dependencies = [ "assert_cmd", "assert_fs", @@ -7006,7 +7006,7 @@ dependencies = [ [[package]] name = "sn_auditor" -version = "0.2.4-rc.3" +version = "0.2.4" dependencies = [ "blsttc", "clap", @@ -7040,14 +7040,14 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.11-rc.3" +version = "0.1.11" dependencies = [ "vergen", ] [[package]] name = "sn_cli" -version = "0.94.1-rc.3" +version = "0.94.1" dependencies = [ "aes 0.7.5", "base64 0.22.1", @@ -7089,7 +7089,7 @@ dependencies = [ [[package]] name = "sn_client" -version = "0.109.1-rc.3" +version = "0.109.1" dependencies = [ "assert_matches", "async-trait", @@ -7172,7 +7172,7 @@ dependencies = [ [[package]] name = "sn_faucet" -version = "0.4.32-rc.3" +version = "0.4.32" dependencies = [ "assert_fs", "base64 0.22.1", @@ -7204,7 +7204,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.32-rc.3" +version = "0.2.32" dependencies = [ "chrono", "color-eyre", @@ -7229,7 +7229,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = 
"0.1.12-rc.3" +version = "0.1.12" dependencies = [ "clap", "color-eyre", @@ -7243,7 +7243,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.17.2-rc.3" +version = "0.17.2" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.110.1-rc.3" +version = "0.110.1" dependencies = [ "assert_fs", "assert_matches", @@ -7340,7 +7340,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.27-rc.3" +version = "0.6.27" dependencies = [ "assert_fs", "async-trait", @@ -7367,7 +7367,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.4.2-rc.3" +version = "0.4.2" dependencies = [ "clap", "lazy_static", @@ -7383,7 +7383,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.7-rc.3" +version = "0.17.7" dependencies = [ "blsttc", "bytes", @@ -7410,7 +7410,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.17-rc.3" +version = "0.3.17" dependencies = [ "blsttc", "crdts", @@ -7427,7 +7427,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.10-rc.3" +version = "0.3.10" dependencies = [ "async-trait", "dirs-next", @@ -7453,7 +7453,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.18.10-rc.3" +version = "0.18.10" dependencies = [ "assert_fs", "blsttc", @@ -7786,7 +7786,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.3-rc.3" +version = "0.4.3" dependencies = [ "color-eyre", "dirs-next", @@ -7918,7 +7918,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.50-rc.3" +version = "0.1.50" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 486e848543..914cc1f349 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.2-rc.3" +version = "0.2.2" [[bin]] name = "nat-detection" @@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3" } +sn_networking = { path = "../sn_networking", version = "0.17.2" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 1ab9928bc8..74c2f37ad9 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.12-rc.3" +version = "0.3.12" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -48,10 +48,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.10.2-rc.3", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.4.2-rc.3", path = "../sn_peers_acquisition" } +sn-node-manager = { version = "0.10.2", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.4.2", path = "../sn_peers_acquisition" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.10-rc.3", path = "../sn_service_management" } +sn_service_management = 
{ version = "0.3.10", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index a018d1ee65..3aa7408673 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.2.4-rc.3" +version = "0.2.4" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } +sn_client = { path = "../sn_client", version = "0.109.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 9826c79007..5f7234d406 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.11-rc.3" +version = "0.1.11" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index 6d9fcd0f52..47a2dd6de3 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.94.1-rc.3" +version = "0.94.1" [[bin]] path = "src/bin/main.rs" @@ -58,11 +58,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ rmp-serde = "1.1.1" rpassword = "7.3.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.3" } -sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11" } +sn_client = { path = "../sn_client", version = "0.109.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -84,7 +84,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.109.1-rc.3", features = [ +sn_client = { path = "../sn_client", version = "0.109.1", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index 8c520414a1..b1fd1be42a 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" 
repository = "https://github.com/maidsafe/safe_network" -version = "0.109.1-rc.3" +version = "0.109.1" [features] default = [] @@ -49,16 +49,16 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_networking = { path = "../sn_networking", version = "0.17.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7" } +sn_registers = { path = "../sn_registers", version = "0.3.17" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = "~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2", optional = true } eyre = { version = "0.6.8", optional = true } [dev-dependencies] @@ -67,8 +67,8 @@ dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_registers = { path = "../sn_registers", version = "0.3.17", features = [ "test-utils", ] } @@ -83,7 +83,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index 91b83980aa..f6520473fa 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.32-rc.3" +version = "0.4.32" [features] default = ["gifting"] @@ -37,13 +37,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.3" } -sn_cli = { path = "../sn_cli", version = "0.94.1-rc.3" } -sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11" } +sn_cli = { path = "../sn_cli", version = "0.94.1" } +sn_client = { path = "../sn_client", version = "0.109.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7" } 
+sn_transfers = { path = "../sn_transfers", version = "0.18.10" } tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index f9fb9321e8..63a149c2e8 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.32-rc.3" +version = "0.2.32" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index 037b2a1807..47154778ba 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.12-rc.3" +version = "0.1.12" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index e49c46d72b..466a920e48 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.2-rc.3" +version = "0.17.2" [features] default = ["libp2p/quic"] @@ -53,10 +53,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.11-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } +sn_build_info = { path="../sn_build_info", version = "0.1.11" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10" } +sn_registers = { path = "../sn_registers", version = "0.3.17" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index ceba8d50d9..92c24e07de 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.110.1-rc.3" +version = "0.110.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -50,14 +50,14 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.11-rc.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.3" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_networking = { path = "../sn_networking", version = "0.17.2" } +sn_protocol = { path = "../sn_protocol", 
version = "0.17.7" } +sn_registers = { path = "../sn_registers", version = "0.3.17" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -84,11 +84,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3", features = [ +sn_client = { path = "../sn_client", version = "0.109.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.18.10", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index a0ead22dea..732b560319 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.2-rc.3" +version = "0.10.2" [[bin]] name = "safenode-manager" @@ -44,12 +44,12 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3" } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.3" } +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7" } +sn_service_management = { path = "../sn_service_management", version = "0.3.10" } sn-releases = "0.2.6" -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 755f9f5c0c..e38f8dfb2a 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.27-rc.3" +version = "0.6.27" [[bin]] name = "safenode_rpc_client" @@ -23,13 +23,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version="0.53", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", version = "0.109.1-rc.3" } -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_node = { path = "../sn_node", version = "0.110.1-rc.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.10-rc.3" } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_client = { path = "../sn_client", version = "0.109.1" } +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_node = { path = "../sn_node", 
version = "0.110.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.3.10" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 4a97e334e0..0036835a3f 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.2-rc.3" +version = "0.4.2" [features] local-discovery = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version="0.53", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_networking = { path = "../sn_networking", version = "0.17.2-rc.3", optional = true} +sn_networking = { path = "../sn_networking", version = "0.17.2", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false} tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 55b8da88f9..93ffa24797 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.7-rc.3" +version = "0.17.7" [features] default = [] @@ -27,8 +27,8 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } -sn_registers = { path = "../sn_registers", version = "0.3.17-rc.3" } +sn_transfers = { path = "../sn_transfers", version = "0.18.10" } +sn_registers = { path = "../sn_registers", version = "0.3.17" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index d9f882f9b5..f1a80ab393 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.17-rc.3" +version = "0.3.17" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 3141f73865..f3ccfe6acc 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.10-rc.3" +version = "0.3.10" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.32-rc.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.7-rc.3", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.32" } +sn_protocol = { path = "../sn_protocol", version = "0.17.7", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.10-rc.3" } +sn_transfers = { path = 
"../sn_transfers", version = "0.18.10" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 1e59d12d46..6b009d51fb 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.10-rc.3" +version = "0.18.10" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index deada5c55f..5dfe93d186 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.3-rc.3" +version = "0.4.3" [dependencies] color-eyre = "~0.6.2" diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index eb99cfab91..7b5b640541 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.50-rc.3" +version = "0.1.50" [dependencies]