
Commit 1c7b692

Simplify cases in handle_new_monitor_update macro
By giving up on a tiny bit of parallelism and tweaking the return types, we can make the `handle_new_monitor_update` macro a bit clearer - now the only case where it is called after a monitor has already been updated is when the monitor was initially committed.
1 parent 5e528ff commit 1c7b692
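
For readers skimming the diff below: the macro's success type changes from `()` to `bool` - `Ok(true)` means the monitor (or monitor update) was applied and completed synchronously, `Ok(false)` means it is still in flight, and `Err(..)` still means the channel was force-closed on a permanent storage failure. Callers that only care about success or failure tack on `.map(|_| ())`. The following is a minimal standalone sketch of that convention, not LDK code; `UpdateStatus` and `ChannelClosed` are illustrative stand-ins for `ChannelMonitorUpdateStatus` and the macro's shutdown error.

// Minimal sketch (not LDK code) of the Ok(bool) convention this commit introduces.

enum UpdateStatus { Completed, InProgress, PermanentFailure }

#[derive(Debug)]
struct ChannelClosed; // stand-in for the macro's MsgHandleErrInternal shutdown error

// Stand-in for what handle_new_monitor_update! now evaluates to.
fn handle_new_monitor_update(status: UpdateStatus) -> Result<bool, ChannelClosed> {
	match status {
		// Persisted synchronously; in LDK the monitor-update-completion actions run here too.
		UpdateStatus::Completed => Ok(true),
		// Still pending; messages are held until the update completes.
		UpdateStatus::InProgress => Ok(false),
		// Permanent storage failure; the channel gets force-closed.
		UpdateStatus::PermanentFailure => Err(ChannelClosed),
	}
}

fn main() -> Result<(), ChannelClosed> {
	// A caller like the send-payment path distinguishes the two Ok cases:
	match handle_new_monitor_update(UpdateStatus::InProgress)? {
		true => println!("update complete, keep going"),
		false => println!("update in flight, defer (cf. APIError::MonitorUpdateInProgress)"),
	}

	// Callers that only need success/failure drop the bool, as the diff does with `.map(|_| ())`:
	let _: Result<(), ChannelClosed> =
		handle_new_monitor_update(UpdateStatus::Completed).map(|_| ());
	Ok(())
}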

1 file changed: +37 -31 lines changed

lightning/src/ln/channelmanager.rs

+37 -31
@@ -1860,7 +1860,7 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_ALREADY_APPLIED, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
@@ -1871,13 +1871,13 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					log_bytes!($chan.context.channel_id()[..]));
-				Ok(())
+				Ok(false)
 			},
 			ChannelMonitorUpdateStatus::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 					log_bytes!($chan.context.channel_id()[..]));
 				update_maps_on_chan_removal!($self, &$chan.context);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+				let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 					"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 					$chan.context.get_user_id(), $chan.context.force_shutdown(false),
 					$self.get_channel_update_for_broadcast(&$chan).ok()));
@@ -1889,16 +1889,16 @@ macro_rules! handle_new_monitor_update {
 				if $chan.no_monitor_updates_pending() {
 					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
-				Ok(())
+				Ok(true)
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, ALREADY_APPLIED) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_ALREADY_APPLIED, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		let update_res = $self.chain_monitor.update_channel($funding_txo, &$update);
-		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_ALREADY_APPLIED, $remove)
+		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_INITIAL_MONITOR, $remove)
 	} };
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
@@ -2316,7 +2316,8 @@ where
 
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt.take() {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}
 
 				if chan_entry.get().is_shutdown() {
@@ -3040,19 +3041,18 @@ where
 				}, onion_packet, None, &self.logger);
 				match break_chan_entry!(self, send_res, chan) {
 					Some(monitor_update) => {
-						let update_id = monitor_update.update_id;
-						let update_res = self.chain_monitor.update_channel(funding_txo, &monitor_update);
-						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED) {
-							break Err(e);
-						}
-						if update_res == ChannelMonitorUpdateStatus::InProgress {
-							// Note that MonitorUpdateInProgress here indicates (per function
-							// docs) that we will resend the commitment update once monitor
-							// updating completes. Therefore, we must return an error
-							// indicating that it is unsafe to retry the payment wholesale,
-							// which we do in the send_payment check for
-							// MonitorUpdateInProgress, below.
-							return Err(APIError::MonitorUpdateInProgress);
+						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+							Err(e) => break Err(e),
+							Ok(false) => {
+								// Note that MonitorUpdateInProgress here indicates (per function
+								// docs) that we will resend the commitment update once monitor
+								// updating completes. Therefore, we must return an error
+								// indicating that it is unsafe to retry the payment wholesale,
+								// which we do in the send_payment check for
+								// MonitorUpdateInProgress, below.
+								return Err(APIError::MonitorUpdateInProgress);
+							},
+							Ok(true) => {},
 						}
 					},
 					None => { },
@@ -4087,21 +4087,26 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
-					let update_res = self.chain_monitor.update_channel(funding_txo, &update);
-
+					let mut updated_chan = false;
 					let res = {
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 							let peer_state = &mut *peer_state_lock;
 							match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 								hash_map::Entry::Occupied(mut chan) => {
-									handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED)
+									updated_chan = true;
+									handle_new_monitor_update!(self, funding_txo, update,
+										peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 								},
 								hash_map::Entry::Vacant(_) => Ok(()),
 							}
 						} else { Ok(()) }
 					};
+					if !updated_chan {
+						// TODO: Track this as in-flight even though the channel is closed.
+						let _ = self.chain_monitor.update_channel(funding_txo, &update);
+					}
 					// TODO: If this channel has since closed, we're likely providing a payment
 					// preimage update, which we must ensure is durable! We currently don't,
 					// however, ensure that.
@@ -5219,7 +5224,7 @@ where
 
 				let chan = e.insert(chan);
 				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
-					per_peer_state, chan, MANUALLY_REMOVING_ALREADY_APPLIED,
+					per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 					{ peer_state.channel_by_id.remove(&new_channel_id) });
 
 				// Note that we reply with the new channel_id in error messages if we gave up on the
@@ -5232,7 +5237,7 @@ where
 				if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
 					res.0 = None;
 				}
-				res
+				res.map(|_| ())
 			}
 		}
 	}
@@ -5253,7 +5258,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -5262,7 +5267,7 @@ where
 						shutdown_finish.0.take();
 					}
 				}
-				res
+				res.map(|_| ())
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 		}
@@ -5350,7 +5355,8 @@ where
 
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}
 				break Ok(());
 			},
@@ -5547,7 +5553,7 @@ where
 				let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
 				if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan)
+						peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) }
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5684,7 +5690,7 @@ where
 				let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 				let res = if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan)
+						peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) };
 				(htlcs_to_fail, res)
 			},
