From 79cfe6f21e0d4a46309cacafc877f988b4f4b959 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 20 Apr 2026 13:14:19 +0200 Subject: [PATCH 1/2] Add `update_and_persist_node_metrics` helper Extract the repeated "acquire write lock on `node_metrics`, mutate a field or two, then write the encoded struct to the kv-store" idiom into a single helper in `io::utils`. As a side effect, `write_node_metrics` is inlined into the helper. Co-Authored-By: HAL 9000 --- src/builder.rs | 18 ++++++++---------- src/chain/bitcoind.rs | 40 ++++++++++++++++++++-------------------- src/chain/electrum.rs | 37 ++++++++++++++++--------------------- src/chain/esplora.rs | 39 +++++++++++++++++---------------------- src/io/utils.rs | 15 +++++++++++---- src/lib.rs | 38 ++++++++++++++++++++------------------ src/scoring.rs | 10 +++++----- 7 files changed, 97 insertions(+), 100 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 3d12ee103..14cb15a94 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -57,7 +57,7 @@ use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{ read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, - read_scorer, write_node_metrics, + read_scorer, update_and_persist_node_metrics, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ @@ -1771,15 +1771,13 @@ fn build_with_store_internal( )); // Reset the RGS sync timestamp in case we somehow switch gossip sources - { - let mut locked_node_metrics = node_metrics.write().expect("lock"); - locked_node_metrics.latest_rgs_snapshot_timestamp = None; - write_node_metrics(&*locked_node_metrics, &*kv_store, Arc::clone(&logger)) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; - } + update_and_persist_node_metrics(&node_metrics, &*kv_store, Arc::clone(&logger), |m| { + m.latest_rgs_snapshot_timestamp = None + }) + 
.map_err(|e| { + log_error!(logger, "Failed writing to store: {}", e); + BuildError::WriteFailed + })?; p2p_source }, GossipSourceConfig::RapidGossipSync(rgs_server) => { diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index cbe094462..2bf059f4e 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -39,7 +39,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -203,15 +203,18 @@ impl BitcoindChainSource { *self.latest_chain_tip.write().expect("lock") = Some(chain_tip); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - }); + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| { + m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + }, + ) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); } break; }, @@ -454,11 +457,10 @@ impl BitcoindChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); 
- locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + })?; Ok(()) } @@ -568,11 +570,9 @@ impl BitcoindChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 5199c135d..c62cbb526 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -30,7 +30,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; @@ -141,16 +141,12 @@ impl ElectrumChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - 
&*locked_node_metrics, - &*self.kv_store, - &*self.logger, - )?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt, + )?; Ok(()) }, Err(e) => Err(e), @@ -238,11 +234,12 @@ impl ElectrumChainSource { if let Ok(_) = res { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt, + )?; } res @@ -271,11 +268,9 @@ impl ElectrumChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index d0c683c74..5825a0984 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -22,7 +22,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, 
NodeMetrics}; @@ -122,16 +122,13 @@ impl EsploraChainSource { .duration_since(UNIX_EPOCH) .ok() .map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - &*self.kv_store, - &*self.logger - )?; - } - Ok(()) + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt, + )?; + Ok(()) }, Err(e) => Err(e), }, @@ -263,12 +260,12 @@ impl EsploraChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt, + )?; Ok(()) }, Err(e) => { @@ -348,11 +345,9 @@ impl EsploraChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().expect("lock"); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } diff --git a/src/io/utils.rs b/src/io/utils.rs index eef71ec0b..ff78c7e91 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -11,7 +11,7 @@ use std::ops::Deref; #[cfg(unix)] use std::os::unix::fs::OpenOptionsExt; use std::path::Path; -use std::sync::Arc; +use std::sync::{Arc, 
RwLock}; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; @@ -346,13 +346,20 @@ where }) } -pub(crate) fn write_node_metrics<L: Deref>( - node_metrics: &NodeMetrics, kv_store: &DynStore, logger: L, +/// Take a write lock on `node_metrics`, apply `update`, and persist the result to `kv_store`. +/// +/// The write lock is held across the KV-store write, preserving the invariant that readers only +/// observe the mutation once it has been durably persisted (or the persist has failed). +pub(crate) fn update_and_persist_node_metrics<L: Deref>( + node_metrics: &RwLock<NodeMetrics>, kv_store: &DynStore, logger: L, + update: impl FnOnce(&mut NodeMetrics), ) -> Result<(), Error> where L::Target: LdkLogger, { - let data = node_metrics.encode(); + let mut locked_node_metrics = node_metrics.write().expect("lock"); + update(&mut *locked_node_metrics); + let data = locked_node_metrics.encode(); KVStoreSync::write( &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, diff --git a/src/lib.rs b/src/lib.rs index dd82c39f9..1dd559e69 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -143,7 +143,7 @@ use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; -use io::utils::write_node_metrics; +use io::utils::update_and_persist_node_metrics; pub use lightning; use lightning::chain::BestBlock; use lightning::impl_writeable_tlv_based; @@ -320,15 +320,16 @@ impl Node { gossip_sync_logger, "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() - ); - { - let mut locked_node_metrics = gossip_node_metrics.write().expect("lock"); - locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); - write_node_metrics(&*locked_node_metrics, &*gossip_sync_store, Arc::clone(&gossip_sync_logger)) - .unwrap_or_else(|e| { - log_error!(gossip_sync_logger, "Persistence failed: {}", e); - }); - } + ); + update_and_persist_node_metrics( 
&gossip_node_metrics, + &*gossip_sync_store, + Arc::clone(&gossip_sync_logger), + |m| m.latest_rgs_snapshot_timestamp = Some(updated_timestamp), + ) + .unwrap_or_else(|e| { + log_error!(gossip_sync_logger, "Persistence failed: {}", e); + }); } Err(e) => { log_error!( @@ -552,14 +553,15 @@ impl Node { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = bcast_node_metrics.write().expect("lock"); - locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*bcast_store, Arc::clone(&bcast_logger)) - .unwrap_or_else(|e| { - log_error!(bcast_logger, "Persistence failed: {}", e); - }); - } + update_and_persist_node_metrics( + &bcast_node_metrics, + &*bcast_store, + Arc::clone(&bcast_logger), + |m| m.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt, + ) + .unwrap_or_else(|e| { + log_error!(bcast_logger, "Persistence failed: {}", e); + }); } else { debug_assert!(false, "We checked whether the node may announce, so node alias should always be set"); continue diff --git a/src/scoring.rs b/src/scoring.rs index f2099cca9..8abc4eab6 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -13,7 +13,7 @@ use crate::io::utils::write_external_pathfinding_scores_to_cache; use crate::logger::LdkLogger; use crate::runtime::Runtime; use crate::types::DynStore; -use crate::{write_node_metrics, Logger, NodeMetrics, Scorer}; +use crate::{update_and_persist_node_metrics, Logger, NodeMetrics, Scorer}; /// Start a background task that periodically downloads scores via an external url and merges them into the local /// pathfinding scores. 
@@ -86,10 +86,10 @@ async fn sync_external_scores( .duration_since(SystemTime::UNIX_EPOCH) .expect("system time must be after Unix epoch"); scorer.lock().expect("lock").merge(liquidities, duration_since_epoch); - let mut locked_node_metrics = node_metrics.write().expect("lock"); - locked_node_metrics.latest_pathfinding_scores_sync_timestamp = - Some(duration_since_epoch.as_secs()); - write_node_metrics(&*locked_node_metrics, &*kv_store, logger).unwrap_or_else(|e| { + update_and_persist_node_metrics(&node_metrics, &*kv_store, logger, |m| { + m.latest_pathfinding_scores_sync_timestamp = Some(duration_since_epoch.as_secs()); + }) + .unwrap_or_else(|e| { log_error!(logger, "Persisting node metrics failed: {}", e); }); log_trace!(logger, "External scores merged successfully"); From d1065dbfe9b0d2155ee044f3ed4b0662f656270f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 20 Apr 2026 13:17:08 +0200 Subject: [PATCH 2/2] Read the RGS sync timestamp from the network graph MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, after each successful Rapid Gossip Sync update the background task wrote `latest_rgs_snapshot_timestamp` to the persisted `NodeMetrics` immediately, while the network graph itself is only flushed to disk later by LDK's background processor. A crash in that window left the on-disk metric ahead of the on-disk graph — on restart we'd resume RGS from the newer timestamp and permanently skip the updates that were never persisted together with the graph. Instead, seed the RGS start timestamp from `NetworkGraph::get_last_rapid_gossip_sync_timestamp`, which is part of the graph's own serialized state and therefore lands on disk atomically with the channel updates it describes. The same source now backs the RGS timestamp reported via `NodeStatus::latest_rgs_snapshot_timestamp`, so the reported value always matches what's reflected in the graph. 
Worst case after a crash is that we refetch the snapshots since the last persisted graph — an idempotent operation — rather than silently losing them. The `latest_rgs_snapshot_timestamp` field is retired from `NodeMetrics`, and TLV slot 6 is kept readable for backwards compatibility via LDK's `legacy` TLV grammar. Old persisted records still deserialize; new records no longer carry slot 6. The dead "reset RGS timestamp on gossip-source switch" block in the P2P builder branch also goes away, since the graph's timestamp remains the correct resume point across a P2P→RGS switch. Co-Authored-By: HAL 9000 --- src/builder.rs | 12 ++------- src/lib.rs | 71 ++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 57 insertions(+), 26 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 14cb15a94..e3866c0c1 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -57,7 +57,7 @@ use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{ read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, - read_scorer, update_and_persist_node_metrics, + read_scorer, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ @@ -1770,19 +1770,11 @@ fn build_with_store_internal( Arc::clone(&logger), )); - // Reset the RGS sync timestamp in case we somehow switch gossip sources - update_and_persist_node_metrics(&node_metrics, &*kv_store, Arc::clone(&logger), |m| { - m.latest_rgs_snapshot_timestamp = None - }) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; p2p_source }, GossipSourceConfig::RapidGossipSync(rgs_server) => { let latest_sync_timestamp = - node_metrics.read().expect("lock").latest_rgs_snapshot_timestamp.unwrap_or(0); + network_graph.get_last_rapid_gossip_sync_timestamp().unwrap_or(0); Arc::new(GossipSource::new_rgs( rgs_server.clone(), latest_sync_timestamp, diff --git 
a/src/lib.rs b/src/lib.rs index 1dd559e69..faeb6d339 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -297,9 +297,7 @@ impl Node { if self.gossip_source.is_rgs() { let gossip_source = Arc::clone(&self.gossip_source); - let gossip_sync_store = Arc::clone(&self.kv_store); let gossip_sync_logger = Arc::clone(&self.logger); - let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); @@ -315,21 +313,12 @@ impl Node { _ = interval.tick() => { let now = Instant::now(); match gossip_source.update_rgs_snapshot().await { - Ok(updated_timestamp) => { + Ok(_updated_timestamp) => { log_info!( gossip_sync_logger, "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() ); - update_and_persist_node_metrics( - &gossip_node_metrics, - &*gossip_sync_store, - Arc::clone(&gossip_sync_logger), - |m| m.latest_rgs_snapshot_timestamp = Some(updated_timestamp), - ) - .unwrap_or_else(|e| { - log_error!(gossip_sync_logger, "Persistence failed: {}", e); - }); } Err(e) => { log_error!( @@ -780,7 +769,7 @@ impl Node { let latest_fee_rate_cache_update_timestamp = locked_node_metrics.latest_fee_rate_cache_update_timestamp; let latest_rgs_snapshot_timestamp = - locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); + self.network_graph.get_last_rapid_gossip_sync_timestamp().map(|val| val as u64); let latest_pathfinding_scores_sync_timestamp = locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = @@ -2110,7 +2099,6 @@ pub(crate) struct NodeMetrics { latest_lightning_wallet_sync_timestamp: Option<u64>, latest_onchain_wallet_sync_timestamp: Option<u64>, latest_fee_rate_cache_update_timestamp: Option<u64>, - latest_rgs_snapshot_timestamp: Option<u32>, latest_pathfinding_scores_sync_timestamp: Option<u64>, latest_node_announcement_broadcast_timestamp: 
Option<u64>, } @@ -2121,7 +2109,6 @@ impl Default for NodeMetrics { latest_lightning_wallet_sync_timestamp: None, latest_onchain_wallet_sync_timestamp: None, latest_fee_rate_cache_update_timestamp: None, - latest_rgs_snapshot_timestamp: None, latest_pathfinding_scores_sync_timestamp: None, latest_node_announcement_broadcast_timestamp: None, } @@ -2133,7 +2120,8 @@ impl_writeable_tlv_based!(NodeMetrics, { (1, latest_pathfinding_scores_sync_timestamp, option), (2, latest_onchain_wallet_sync_timestamp, option), (4, latest_fee_rate_cache_update_timestamp, option), - (6, latest_rgs_snapshot_timestamp, option), + // 6 used to be latest_rgs_snapshot_timestamp + (6, _legacy_latest_rgs_snapshot_timestamp, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::<Option<u32>> )), (8, latest_node_announcement_broadcast_timestamp, option), // 10 used to be latest_channel_monitor_archival_height (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::<Option<u32>> )), @@ -2174,3 +2162,54 @@ pub(crate) fn new_channel_anchor_reserve_sats( } }) } + +#[cfg(test)] +mod tests { + use super::*; + use lightning::util::ser::{Readable, Writeable}; + + #[test] + fn node_metrics_reads_legacy_rgs_snapshot_timestamp() { + // Pre-#615, `NodeMetrics` persisted `latest_rgs_snapshot_timestamp` as an optional + // `u32` at TLV slot 6. The field has since been retired, but we must still read + // records written by older versions without failing. The shadow struct below + // mirrors main's `NodeMetrics` layout 1:1 so the byte stream we decode matches + // what an older on-disk record actually looked like. 
+ #[derive(Debug)] + struct OldNodeMetrics { + latest_lightning_wallet_sync_timestamp: Option<u64>, + latest_onchain_wallet_sync_timestamp: Option<u64>, + latest_fee_rate_cache_update_timestamp: Option<u64>, + latest_rgs_snapshot_timestamp: Option<u32>, + latest_pathfinding_scores_sync_timestamp: Option<u64>, + latest_node_announcement_broadcast_timestamp: Option<u64>, + } + impl_writeable_tlv_based!(OldNodeMetrics, { + (0, latest_lightning_wallet_sync_timestamp, option), + (1, latest_pathfinding_scores_sync_timestamp, option), + (2, latest_onchain_wallet_sync_timestamp, option), + (4, latest_fee_rate_cache_update_timestamp, option), + (6, latest_rgs_snapshot_timestamp, option), + (8, latest_node_announcement_broadcast_timestamp, option), + // 10 used to be latest_channel_monitor_archival_height + (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &OldNodeMetrics| None::<Option<u32>> )), + }); + + let old = OldNodeMetrics { + latest_lightning_wallet_sync_timestamp: Some(1_000), + latest_onchain_wallet_sync_timestamp: Some(1_100), + latest_fee_rate_cache_update_timestamp: Some(1_200), + latest_rgs_snapshot_timestamp: Some(1_700_000_000), + latest_pathfinding_scores_sync_timestamp: Some(1_300), + latest_node_announcement_broadcast_timestamp: Some(2_000), + }; + let bytes = old.encode(); + + let new = NodeMetrics::read(&mut &bytes[..]).unwrap(); + assert_eq!(new.latest_lightning_wallet_sync_timestamp, Some(1_000)); + assert_eq!(new.latest_onchain_wallet_sync_timestamp, Some(1_100)); + assert_eq!(new.latest_fee_rate_cache_update_timestamp, Some(1_200)); + assert_eq!(new.latest_pathfinding_scores_sync_timestamp, Some(1_300)); + assert_eq!(new.latest_node_announcement_broadcast_timestamp, Some(2_000)); + } +}