Skip to content

Call timer_tick_occurred in BackgroundProcessor #872

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 13 additions & 11 deletions background-processor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@ use std::time::{Duration, Instant};
/// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
/// writing it to disk/backups by invoking the callback given to it at startup.
/// ChannelManager persistence should be done in the background.
/// * Calling `ChannelManager::timer_chan_freshness_every_min()` every minute (can be done in the
/// * Calling `ChannelManager::timer_tick_occurred()` and
/// `PeerManager::timer_tick_occurred()` every minute (can be done in the
/// background).
///
/// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
Expand All @@ -42,9 +43,9 @@ pub struct BackgroundProcessor {
}

#[cfg(not(test))]
const CHAN_FRESHNESS_TIMER: u64 = 60;
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const CHAN_FRESHNESS_TIMER: u64 = 1;
const FRESHNESS_TIMER: u64 = 1;

impl BackgroundProcessor {
/// Start a background thread that takes care of responsibilities enumerated in the top-level
Expand Down Expand Up @@ -101,9 +102,10 @@ impl BackgroundProcessor {
log_trace!(logger, "Terminating background processor.");
return Ok(());
}
if current_time.elapsed().as_secs() > CHAN_FRESHNESS_TIMER {
log_trace!(logger, "Calling manager's timer_chan_freshness_every_min");
channel_manager.timer_chan_freshness_every_min();
if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
channel_manager.timer_tick_occurred();
peer_manager.timer_tick_occurred();
current_time = Instant::now();
}
}
Expand Down Expand Up @@ -294,16 +296,16 @@ mod tests {
}

#[test]
fn test_chan_freshness_called() {
// Test that ChannelManager's `timer_chan_freshness_every_min` is called every
// `CHAN_FRESHNESS_TIMER`.
let nodes = create_nodes(1, "test_chan_freshness_called".to_string());
fn test_timer_tick_called() {
// Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
// `FRESHNESS_TIMER`.
let nodes = create_nodes(1, "test_timer_tick_called".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
let desired_log = "Calling manager's timer_chan_freshness_every_min".to_string();
let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
break
}
Expand Down
2 changes: 1 addition & 1 deletion lightning/src/ln/channel.rs
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
/// Liveness is called to fluctuate given peer disconnection/monitor failures/closing.
/// If channel is public, network should have a liveness view announced by us on a
/// best-effort, which means we may filter out some status transitions to avoid spam.
/// See further timer_chan_freshness_every_min.
/// See further timer_tick_occurred.
#[derive(PartialEq)]
enum UpdateStatus {
/// Status has been gossiped.
Expand Down
12 changes: 6 additions & 6 deletions lightning/src/ln/channelmanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -338,7 +338,7 @@ pub(super) struct ChannelHolder<Signer: Sign> {
}

/// Events which we process internally but cannot be processed immediately at the generation site
/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
enum BackgroundEvent {
/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
Expand Down Expand Up @@ -403,7 +403,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
///
/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
Expand Down Expand Up @@ -1959,10 +1959,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
events.append(&mut new_events);
}

/// Free the background events, generally called from timer_chan_freshness_every_min.
/// Free the background events, generally called from timer_tick_occurred.
///
/// Exposed for testing to allow us to process events quickly without generating accidental
/// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
/// BroadcastChannelUpdate events in timer_tick_occurred.
///
/// Expects the caller to have a total_consistency_lock read lock.
fn process_background_events(&self) {
Expand Down Expand Up @@ -1991,7 +1991,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
/// This method handles all the details, and must be called roughly once per minute.
///
/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
pub fn timer_chan_freshness_every_min(&self) {
pub fn timer_tick_occurred(&self) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.process_background_events();

Expand Down Expand Up @@ -3274,7 +3274,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
// We cannot broadcast our latest local state via monitor update (as
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_chan_freshness_every_min, guaranteeing we're running normally.
// timer_tick_occurred, guaranteeing we're running normally.
if let Some((funding_txo, update)) = failure.0.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
Expand Down
8 changes: 4 additions & 4 deletions lightning/src/ln/functional_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7557,7 +7557,7 @@ fn test_check_htlc_underpaying() {

#[test]
fn test_announce_disable_channels() {
// Create 2 channels between A and B. Disconnect B. Call timer_chan_freshness_every_min and check for generated
// Create 2 channels between A and B. Disconnect B. Call timer_tick_occurred and check for the generated
// ChannelUpdate. Reconnect B, reestablish, and check that no further ChannelUpdate is generated.

let chanmon_cfgs = create_chanmon_cfgs(2);
Expand All @@ -7573,8 +7573,8 @@ fn test_announce_disable_channels() {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

nodes[0].node.timer_chan_freshness_every_min(); // dirty -> stagged
nodes[0].node.timer_chan_freshness_every_min(); // staged -> fresh
nodes[0].node.timer_tick_occurred(); // dirty -> staged
nodes[0].node.timer_tick_occurred(); // staged -> fresh
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
for e in msg_events {
Expand Down Expand Up @@ -7613,7 +7613,7 @@ fn test_announce_disable_channels() {
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

nodes[0].node.timer_chan_freshness_every_min();
nodes[0].node.timer_tick_occurred();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}

Expand Down