Commit df237ba

Merge pull request #2391 from TheBlueMatt/2023-07-all-compl-actions
Handle pre-startup and closed-channel monitor update completion actions
2 parents 07606c1 + 550cf91

2 files changed: +64 -13 lines changed

lightning/src/ln/chanmon_update_fail_tests.rs (+11 -4)

@@ -2346,6 +2346,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
     check_added_monitors!(nodes[0], 0);
 
+    let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
     nodes[0].node.claim_funds(payment_preimage_0);
@@ -2365,8 +2366,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
         // disconnect the peers. Note that the fuzzer originally found this issue because
         // deserializing a ChannelManager in this state causes an assertion failure.
         if reload_a {
-            let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
             reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+            persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+            persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
         } else {
             nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
         }
@@ -2406,9 +2408,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
             assert_eq!(pending_cs.commitment_signed, cs);
         } else { panic!(); }
 
-        // There should be no monitor updates as we are still pending awaiting a failed one.
-        check_added_monitors!(nodes[0], 0);
-        check_added_monitors!(nodes[1], 0);
+        if reload_a {
+            // The two pending monitor updates were replayed (but are still pending).
+            check_added_monitors(&nodes[0], 2);
+        } else {
+            // There should be no monitor updates as we are still pending awaiting a failed one.
+            check_added_monitors(&nodes[0], 0);
+        }
+        check_added_monitors(&nodes[1], 0);
     }
 
     // If we finish updating the monitor, we should free the holding cell right away (this did
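
The reload branch above leans on two test-harness behaviors: a persister whose return status can be forced to InProgress, and a counter of how many monitor updates a node registered (check_added_monitors). As a rough, self-contained illustration of that pattern only — plain Rust, not the rust-lightning test utilities; StubPersister, UpdateStatus, and the method names are invented for this sketch:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

// Hypothetical stand-ins for this sketch only; the real types live in rust-lightning.
#[derive(Clone, Copy, PartialEq, Debug)]
enum UpdateStatus { Completed, InProgress }

struct StubPersister {
    // Statuses to hand back for upcoming persist calls, newest last.
    queued_rets: Mutex<Vec<UpdateStatus>>,
    // How many monitor updates have been handed to us since the last check.
    updates_seen: AtomicUsize,
}

impl StubPersister {
    fn new() -> Self {
        StubPersister { queued_rets: Mutex::new(Vec::new()), updates_seen: AtomicUsize::new(0) }
    }
    // Queue the status to return for an upcoming update, like set_update_ret above.
    fn set_update_ret(&self, status: UpdateStatus) {
        self.queued_rets.lock().unwrap().push(status);
    }
    // Called by an (imaginary) chain monitor for every new monitor update.
    fn persist_update(&self) -> UpdateStatus {
        self.updates_seen.fetch_add(1, Ordering::SeqCst);
        self.queued_rets.lock().unwrap().pop().unwrap_or(UpdateStatus::Completed)
    }
    // Analogue of check_added_monitors: assert how many updates arrived, then reset.
    fn check_added_updates(&self, expected: usize) {
        assert_eq!(self.updates_seen.swap(0, Ordering::SeqCst), expected);
    }
}

fn main() {
    let persister = StubPersister::new();
    // Mirror the reload_a case: force the next two updates to report InProgress,
    // then confirm exactly two updates were replayed and are still pending.
    persister.set_update_ret(UpdateStatus::InProgress);
    persister.set_update_ret(UpdateStatus::InProgress);
    assert_eq!(persister.persist_update(), UpdateStatus::InProgress);
    assert_eq!(persister.persist_update(), UpdateStatus::InProgress);
    persister.check_added_updates(2);
}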

lightning/src/ln/channelmanager.rs (+53 -9)

@@ -530,6 +530,13 @@ enum BackgroundEvent {
         funding_txo: OutPoint,
         update: ChannelMonitorUpdate
     },
+    /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
+    /// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending
+    /// on a channel.
+    MonitorUpdatesComplete {
+        counterparty_node_id: PublicKey,
+        channel_id: [u8; 32],
+    },
 }
 
 #[derive(Debug)]
@@ -4194,6 +4201,22 @@ where
                 }
                 let _ = handle_error!(self, res, counterparty_node_id);
             },
+            BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
+                let per_peer_state = self.per_peer_state.read().unwrap();
+                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                    let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                    let peer_state = &mut *peer_state_lock;
+                    if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+                        handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+                    } else {
+                        let update_actions = peer_state.monitor_update_blocked_actions
+                            .remove(&channel_id).unwrap_or(Vec::new());
+                        mem::drop(peer_state_lock);
+                        mem::drop(per_peer_state);
+                        self.handle_monitor_update_completion_actions(update_actions);
+                    }
+                }
+            },
         }
     }
     NotifyOption::DoPersist
@@ -5004,24 +5027,29 @@ where
         if peer_state_mutex_opt.is_none() { return }
         peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
         let peer_state = &mut *peer_state_lock;
-        let mut channel = {
-            match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
-                hash_map::Entry::Occupied(chan) => chan,
-                hash_map::Entry::Vacant(_) => return,
-            }
-        };
+        let channel =
+            if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+                chan
+            } else {
+                let update_actions = peer_state.monitor_update_blocked_actions
+                    .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+                mem::drop(peer_state_lock);
+                mem::drop(per_peer_state);
+                self.handle_monitor_update_completion_actions(update_actions);
+                return;
+            };
         let remaining_in_flight =
             if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
                 pending.retain(|upd| upd.update_id > highest_applied_update_id);
                 pending.len()
             } else { 0 };
         log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
-            highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(),
+            highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
             remaining_in_flight);
-        if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
+        if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
             return;
         }
-        handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
+        handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel);
     }
 
     /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -8521,6 +8549,16 @@ where
                     update: update.clone(),
                 });
             }
+            if $chan_in_flight_upds.is_empty() {
+                // We had some updates to apply, but it turns out they had completed before we
+                // were serialized, we just weren't notified of that. Thus, we may have to run
+                // the completion actions for any monitor updates, but otherwise are done.
+                pending_background_events.push(
+                    BackgroundEvent::MonitorUpdatesComplete {
+                        counterparty_node_id: $counterparty_node_id,
+                        channel_id: $funding_txo.to_channel_id(),
+                    });
+            }
             if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
                 log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
                 return Err(DecodeError::InvalidValue);
@@ -8913,6 +8951,12 @@ where
                     blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
                         .entry(blocked_channel_outpoint.to_channel_id())
                         .or_insert_with(Vec::new).push(blocking_action.clone());
+                } else {
+                    // If the channel we were blocking has closed, we don't need to
+                    // worry about it - the blocked monitor update should never have
+                    // been released from the `Channel` object so it can't have
+                    // completed, and if the channel closed there's no reason to bother
+                    // anymore.
                 }
             }
         }
