From 0277b31cea0c9f7b9b2b61e49718fd8eb6821ab0 Mon Sep 17 00:00:00 2001
From: Floris Bruynooghe
Date: Mon, 28 Aug 2023 18:10:24 +0200
Subject: [PATCH] refactor: Reduce amount of info logging (#1418)

## Description

There is a bunch of netcheck work that happens periodically and does not
need info-level logging. Similarly, this reduces the log level of some very
frequent logging in the derp client, magicsock and iroh.

Lastly, log_concise_report is updated to avoid doing so many allocations
while building the log message.

## Notes & open questions

This does not yet (knowingly) address the issue of the iOS debug console
becoming unusable due to logging.

## Change checklist

- [x] Self-review.
- [x] ~~Documentation updates if relevant.~~
- [x] ~~Tests if relevant.~~
---
 iroh-net/src/derp/http/client.rs   | 99 +++++++++++++++---------------
 iroh-net/src/magicsock.rs          |  7 ---
 iroh-net/src/netcheck.rs           | 48 ++++++++-------
 iroh-net/src/netcheck/reportgen.rs | 16 +++--
 iroh/src/node.rs                   | 20 +++---
 5 files changed, 92 insertions(+), 98 deletions(-)

diff --git a/iroh-net/src/derp/http/client.rs b/iroh-net/src/derp/http/client.rs
index 5746b49af5..deb49d7ed7 100644
--- a/iroh-net/src/derp/http/client.rs
+++ b/iroh-net/src/derp/http/client.rs
@@ -18,7 +18,7 @@ use tokio::sync::oneshot;
 use tokio::sync::Mutex;
 use tokio::task::{JoinHandle, JoinSet};
 use tokio::time::Instant;
-use tracing::{debug, info_span, instrument, trace, warn, Instrument};
+use tracing::{debug, info, info_span, instrument, trace, warn, Instrument};
 use url::Url;
 
 use crate::derp::{
@@ -508,7 +508,7 @@ impl Client {
             .await
             .map_err(ClientError::Hyper)?
         } else {
-            tracing::debug!("Starting handshake");
+            debug!("Starting handshake");
             let (mut request_sender, connection) = hyper::client::conn::Builder::new()
                 .handshake(tcp_stream)
                 .await
@@ -606,7 +606,7 @@ impl Client {
             .ok_or_else(|| ClientError::InvalidUrl("missing url port".into()))?;
         let addr = SocketAddr::new(dst_ip, port);
 
-        tracing::debug!("connecting to {}", addr);
+        debug!("connecting to {}", addr);
         let tcp_stream = TcpStream::connect(addr).await?;
         Ok(tcp_stream)
     }
@@ -836,15 +836,15 @@ impl Client {
         match client.recv().await {
             Ok(msg) => {
                 if let Ok(region) = self.current_region().await {
-                    tracing::trace!("[DERP] <- {} ({:?})", self.target_string(&region), msg);
+                    trace!("[DERP] <- {} ({:?})", self.target_string(&region), msg);
                 } else if let Some(url) = self.url() {
-                    tracing::trace!("[DERP] <- {url} ({:?})", msg);
+                    trace!("[DERP] <- {url} ({:?})", msg);
                 }
 
                 if let ReceivedMessage::Pong(ping) = msg {
                     if let Some(chan) = self.unregister_ping(ping).await {
                         if chan.send(()).is_err() {
-                            tracing::warn!("pong recieved for ping {ping:?}, but the receiving channel was closed");
+                            warn!("pong recieved for ping {ping:?}, but the receiving channel was closed");
                         }
                         continue;
                     }
@@ -970,7 +970,7 @@ impl Client {
                     key
                 }
                 Err(e) => {
-                    tracing::warn!("error connecting to derp server {e}");
+                    warn!("error connecting to derp server {e}");
                     tokio::time::sleep(MESH_CLIENT_REDIAL_DELAY).await;
                     continue;
                 }
@@ -981,14 +981,14 @@ impl Client {
             }
             let peers_present = PeersPresent::new(server_public_key);
-            tracing::info!("Connected to mesh derp server {server_public_key:?}");
+            info!("Connected to mesh derp server {server_public_key:?}");
 
             // receive detail loop
             loop {
                 let (msg, conn_gen) = match self.recv_detail().await {
                     Ok(res) => res,
                     Err(e) => {
-                        tracing::warn!("recv error: {e:?}");
+                        warn!("recv error: {e:?}");
                         tokio::time::sleep(MESH_CLIENT_REDIAL_DELAY).await;
                         break;
                     }
                 };
@@ -1001,7 +1001,7 @@ impl Client {
                     // for 
the same peer key in the derp server in the future, we may // be okay to just listen for future peer present // messages without re establishing this connection as a "watcher" - tracing::trace!("new connection: {} != {}", conn_gen, last_conn_gen); + trace!("new connection: {} != {}", conn_gen, last_conn_gen); break; } match msg { @@ -1064,39 +1064,42 @@ enum PeersPresentMsg { impl PeersPresent { fn new(remote_server_key: PublicKey) -> Self { let (send, mut recv) = channel(PEERS_PRESENT_QUEUE); - let actor_task = tokio::spawn(async move { - let mut map = HashSet::new(); - let start = Instant::now() + PEERS_PRESENT_LOGGING_DELAY; - let mut status_logging_interval = - tokio::time::interval_at(start, PEERS_PRESENT_LOGGING_INTERVAL); - loop { - tokio::select! { - biased; - msg = recv.recv() => { - match msg { - Some(m) => match m { - PeersPresentMsg::PeerPresent(key) => { - map.insert(key); - } - PeersPresentMsg::PeerGone(key) => { - map.remove(&key); - } - }, - None => { - tracing::warn!("sender dropped, closing `PeersPresent` actor loop"); - break; - }, - } - }, - _ = status_logging_interval.tick() => { - tracing::info!( - "Peers present on Derp Server {:?}:\n{map:?}", - remote_server_key - ); + let actor_task = tokio::spawn( + async move { + let mut map = HashSet::new(); + let start = Instant::now() + PEERS_PRESENT_LOGGING_DELAY; + let mut status_logging_interval = + tokio::time::interval_at(start, PEERS_PRESENT_LOGGING_INTERVAL); + loop { + tokio::select! { + biased; + msg = recv.recv() => { + match msg { + Some(m) => match m { + PeersPresentMsg::PeerPresent(key) => { + map.insert(key); + } + PeersPresentMsg::PeerGone(key) => { + map.remove(&key); + } + }, + None => { + warn!("sender dropped, closing `PeersPresent` actor loop"); + break; + }, + } + }, + _ = status_logging_interval.tick() => { + debug!( + "Peers present on Derp Server {:?}:\n{map:?}", + remote_server_key + ); + } } } } - }.instrument(info_span!("peers-present.actor"))); + .instrument(info_span!("peers-present.actor")), + ); Self { actor_task, actor_channel: send, @@ -1141,7 +1144,7 @@ impl PacketForwarder for Client { } } } - tracing::warn!("attempted three times to forward packet from {srckey:?} to {dstkey:?}, failed. Dropping packet."); + warn!("attempted three times to forward packet from {srckey:?} to {dstkey:?}, failed. Dropping packet."); }.instrument(info_span!("packet-forwarder"))); } } @@ -1282,7 +1285,7 @@ mod tests { let packet_forwarder_handler = PacketForwarderHandler::new(server_channel_s); let mesh_client_key = mesh_client.public_key(); - tracing::info!("mesh client public key: {mesh_client_key:?}"); + info!("mesh client public key: {mesh_client_key:?}"); let (send, mut recv) = tokio::sync::mpsc::channel(32); // spawn a task to run the mesh client @@ -1303,14 +1306,14 @@ mod tests { .server_url(url) .build(SecretKey::generate())?; let normal_client_key = normal_client.public_key(); - tracing::info!("normal client public key: {normal_client:?}"); + info!("normal client public key: {normal_client:?}"); let _ = normal_client.connect().await?; // wait for "add packet forwarder" message match server_channel_r.recv().await { Some(msg) => match msg { ServerMessage::AddPacketForwarder { key, .. 
} => { - tracing::info!("received `ServerMessage::AddPacketForwarder` for {key:?}"); + debug!("received `ServerMessage::AddPacketForwarder` for {key:?}"); assert!(key == mesh_client_key || key == normal_client_key); } _ => bail!("expected `ServerMessage::AddPacketForwarder`, got {msg:?}"), @@ -1328,15 +1331,11 @@ mod tests { match server_channel_r.recv().await { Some(msg) => match msg { ServerMessage::RemovePacketForwarder(key) => { - tracing::info!( - "received `ServerMessage::RemovePacketForwarder` for {key:?}" - ); + debug!("received `ServerMessage::RemovePacketForwarder` for {key:?}"); break; } ServerMessage::AddPacketForwarder { key, .. } => { - tracing::info!( - "received `ServerMessage::RemovePacketForwarder` for {key:?}" - ); + debug!("received `ServerMessage::RemovePacketForwarder` for {key:?}"); assert!(key == mesh_client_key || key == normal_client_key); } _ => bail!("expected `ServerMessage::RemovePacketForwarder`, got {msg:?}"), diff --git a/iroh-net/src/magicsock.rs b/iroh-net/src/magicsock.rs index 39f8b4d2de..7766136e53 100644 --- a/iroh-net/src/magicsock.rs +++ b/iroh-net/src/magicsock.rs @@ -2179,13 +2179,6 @@ impl Actor { // remove moribund nodes in the next step below. for n in &self.net_map.as_ref().unwrap().peers { if self.peer_map.endpoint_for_node_key(&n.key).is_none() { - // info!( - // "inserting peer's endpoint {:?} - {:?} {:#?} {:#?}", - // self.inner.public_key, // our own node's public key - // n.key.clone(), // public key of node endpoint being created - // self.peer_map, // map of all endpoints we know about - // n, // node being added - // ); info!( peer = ?n.key, "inserting peer's endpoint in PeerMap" diff --git a/iroh-net/src/netcheck.rs b/iroh-net/src/netcheck.rs index fdf26929d5..97595bfe3c 100644 --- a/iroh-net/src/netcheck.rs +++ b/iroh-net/src/netcheck.rs @@ -14,7 +14,7 @@ use tokio::net::UdpSocket; use tokio::sync::{self, mpsc, oneshot}; use tokio::time::{Duration, Instant}; use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info, info_span, trace, warn, Instrument}; +use tracing::{debug, error, info_span, trace, warn, Instrument}; use crate::net::ip::to_canonical; use crate::util::CancelOnDrop; @@ -691,58 +691,61 @@ impl Actor { } fn log_concise_report(&self, r: &Report, dm: &DerpMap) { - let mut log = "report: ".to_string(); - log += &format!("udp={}", r.udp); + // Since we are to String the writes are infallible. 
+ use std::fmt::Write; + + let mut log = String::with_capacity(256); + write!(log, "report: ").ok(); + write!(log, "udp={}", r.udp).ok(); if !r.ipv4 { - log += &format!(" v4={}", r.ipv4) + write!(log, "v v4={}", r.ipv4).ok(); } if !r.udp { - log += &format!(" icmpv4={}", r.icmpv4) + write!(log, " icmpv4={}", r.icmpv4).ok(); } - log += &format!(" v6={}", r.ipv6); + write!(log, " v6={}", r.ipv6).ok(); if !r.ipv6 { - log += &format!(" v6os={}", r.os_has_ipv6); + write!(log, " v6os={}", r.os_has_ipv6).ok(); } - log += &format!(" mapvarydest={:?}", r.mapping_varies_by_dest_ip); - log += &format!(" hair={:?}", r.hair_pinning); + write!(log, " mapvarydest={:?}", r.mapping_varies_by_dest_ip).ok(); + write!(log, " hair={:?}", r.hair_pinning).ok(); if let Some(probe) = &r.portmap_probe { - log += &format!(" {}", probe); + write!(log, " {}", probe).ok(); } else { - log += " portmap=?"; + write!(log, " portmap=?").ok(); } if let Some(ipp) = r.global_v4 { - log += &format!(" v4a={ipp}"); + write!(log, " v4a={ipp}").ok(); } if let Some(ipp) = r.global_v6 { - log += &format!(" v6a={ipp}"); + write!(log, " v6a={ipp}").ok(); } if let Some(c) = r.captive_portal { - log += &format!(" captiveportal={c}"); + write!(log, " captiveportal={c}").ok(); } - log += &format!(" derp={}", r.preferred_derp); + write!(log, " derp={}", r.preferred_derp).ok(); if r.preferred_derp != 0 { - log += " derpdist="; + write!(log, " derpdist=").ok(); let mut need_comma = false; for rid in &dm.region_ids() { if let Some(d) = r.region_v4_latency.get(*rid) { if need_comma { - log += ","; + write!(log, ",").ok(); } - log += &format!("{}v4:{}", rid, d.as_millis()); + write!(log, "{}v4:{}", rid, d.as_millis()).ok(); need_comma = true; } if let Some(d) = r.region_v6_latency.get(*rid) { if need_comma { - log += ","; + write!(log, ",").ok(); } - log += &format!("{}v6:{}", rid, d.as_millis()); + write!(log, "{}v6:{}", rid, d.as_millis()).ok(); need_comma = true; } } } - - info!("{}", log); + debug!("{}", log); } } @@ -836,6 +839,7 @@ mod tests { use bytes::BytesMut; use tokio::time; + use tracing::info; use crate::defaults::DEFAULT_DERP_STUN_PORT; use crate::derp::{DerpNode, DerpRegion, UseIpv4, UseIpv6}; diff --git a/iroh-net/src/netcheck/reportgen.rs b/iroh-net/src/netcheck/reportgen.rs index 2c0ee2948f..ca5f528b82 100644 --- a/iroh-net/src/netcheck/reportgen.rs +++ b/iroh-net/src/netcheck/reportgen.rs @@ -30,9 +30,7 @@ use rand::seq::IteratorRandom; use tokio::net::UdpSocket; use tokio::sync::{mpsc, oneshot}; use tokio::time::{self, Instant}; -use tracing::{ - debug, debug_span, error, info, info_span, instrument, trace, warn, Instrument, Span, -}; +use tracing::{debug, debug_span, error, info_span, instrument, trace, warn, Instrument, Span}; use super::NetcheckMetrics; use crate::defaults::DEFAULT_DERP_STUN_PORT; @@ -260,7 +258,7 @@ impl Actor { // Drive the portmapper. 
pm = &mut port_mapping, if self.outstanding_tasks.port_mapper => { - info!(report=?pm, "Portmapper probe report"); + debug!(report=?pm, "Portmapper probe report"); self.report.portmap_probe = pm; port_mapping.inner = None; self.outstanding_tasks.port_mapper = false; @@ -336,7 +334,7 @@ impl Actor { } fn handle_probe_report(&mut self, probe_report: ProbeReport) { - info!("finished probe: {:?}", probe_report); + debug!("finished probe: {:?}", probe_report); let derp_node = probe_report.probe.node(); if let Some(latency) = probe_report.delay { self.report @@ -419,7 +417,7 @@ impl Actor { timeout *= 2; } let reportcheck = self.addr(); - info!( + debug!( reports=self.report.region_latency.len(), delay=?timeout, "Have enough probe reports, aborting further probes soon", @@ -532,11 +530,11 @@ impl Actor { match captive_portal_check.await { Ok(Ok(found)) => Some(found), Ok(Err(err)) => { - info!("check_captive_portal error: {:?}", err); + warn!("check_captive_portal error: {:?}", err); None } Err(_) => { - info!("check_captive_portal timed out"); + warn!("check_captive_portal timed out"); None } } @@ -932,7 +930,7 @@ async fn check_captive_portal(dm: &DerpMap, preferred_derp: Option) -> Resu .map(|s| s.to_str().unwrap_or_default()) == Some(&expected_response); - info!( + debug!( "check_captive_portal url={} status_code={} valid_response={}", res.url(), res.status(), diff --git a/iroh/src/node.rs b/iroh/src/node.rs index 31cbaf60a2..b86aa17b01 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -53,7 +53,7 @@ use quic_rpc::{RpcClient, RpcServer, ServiceEndpoint}; use tokio::sync::{mpsc, RwLock}; use tokio::task::JoinError; use tokio_util::sync::CancellationToken; -use tracing::{debug, trace, warn}; +use tracing::{debug, error, info, trace, warn}; use crate::dial::Ticket; use crate::download::Downloader; @@ -477,7 +477,7 @@ where handle_rpc_request(msg, chan, &handler, &rt); } Err(e) => { - tracing::info!("rpc request error: {:?}", e); + info!("rpc request error: {:?}", e); } } }, @@ -488,7 +488,7 @@ where handle_rpc_request(msg, chan, &handler, &rt); } Err(_) => { - tracing::info!("last controller dropped, shutting down"); + info!("last controller dropped, shutting down"); break; } } @@ -498,7 +498,7 @@ where let alpn = match get_alpn(&mut connecting).await { Ok(alpn) => alpn, Err(err) => { - tracing::error!("invalid handshake: {:?}", err); + error!("invalid handshake: {:?}", err); continue; } }; @@ -897,7 +897,7 @@ impl RpcHandler { { use crate::collection::{Blob, Collection}; use crate::util::io::pathbuf_from_name; - tracing::trace!("exporting collection {} to {}", hash, path.display()); + trace!("exporting collection {} to {}", hash, path.display()); tokio::fs::create_dir_all(&path).await?; let collection = db.get(&hash).context("collection not there")?; let mut reader = collection.data_reader().await?; @@ -908,7 +908,7 @@ impl RpcHandler { if let Some(parent) = path.parent() { tokio::fs::create_dir_all(parent).await?; } - tracing::trace!("exporting blob {} to {}", hash, path.display()); + trace!("exporting blob {} to {}", hash, path.display()); let id = progress.new_id(); let progress1 = progress.clone(); db.export(*hash, path, mode, move |offset| { @@ -947,7 +947,7 @@ impl RpcHandler { ) -> anyhow::Result<()> { let local = self.inner.rt.local_pool().clone(); let hash = msg.hash; - tracing::info!("share: {:?}", msg); + debug!("share: {:?}", msg); let conn = self .inner .endpoint @@ -1119,11 +1119,11 @@ impl RpcHandler { } async fn shutdown(self, request: ShutdownRequest) { if request.force 
{ - tracing::info!("hard shutdown requested"); + info!("hard shutdown requested"); std::process::exit(0); } else { // trigger a graceful shutdown - tracing::info!("graceful shutdown requested"); + info!("graceful shutdown requested"); self.inner.cancel_token.cancel(); } } @@ -1179,7 +1179,7 @@ fn handle_rpc_request< let handler = handler.clone(); rt.main().spawn(async move { use ProviderRequest::*; - tracing::info!( + info!( "handling rpc request: {:?} {}", msg, std::any::type_name::()