Skip to content

Commit c843f68

Browse files
committed
Move most handle_new_monitor_update calls to pass the update
Most of the calls to the `handle_new_monitor_update` macro had the exact same pattern — calling `update_monitor` followed by the macro. Given that this common pattern will grow to first pushing the new monitor update onto an in-flight set and then calling `update_monitor`, unifying the pattern into a single macro now avoids more code churn in the coming commits.
1 parent 1433e9e commit c843f68

File tree

1 file changed

+22
-29
lines changed

1 file changed

+22
-29
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 22 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -1860,7 +1860,7 @@ macro_rules! handle_monitor_update_completion {
18601860
}
18611861

18621862
macro_rules! handle_new_monitor_update {
1863-
($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
1863+
($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_ALREADY_APPLIED, $remove: expr) => { {
18641864
// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
18651865
// any case so that it won't deadlock.
18661866
debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
@@ -1893,8 +1893,15 @@ macro_rules! handle_new_monitor_update {
18931893
},
18941894
}
18951895
} };
1896-
($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
1897-
handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
1896+
($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, ALREADY_APPLIED) => {
1897+
handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_ALREADY_APPLIED, $chan_entry.remove_entry())
1898+
};
1899+
($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
1900+
let update_res = $self.chain_monitor.update_channel($funding_txo, &$update);
1901+
handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_ALREADY_APPLIED, $remove)
1902+
} };
1903+
($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
1904+
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
18981905
}
18991906
}
19001907

@@ -2309,9 +2316,7 @@ where
23092316

23102317
// Update the monitor with the shutdown script if necessary.
23112318
if let Some(monitor_update) = monitor_update_opt.take() {
2312-
let update_id = monitor_update.update_id;
2313-
let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), &monitor_update);
2314-
break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
2319+
break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
23152320
}
23162321

23172322
if chan_entry.get().is_shutdown() {
@@ -3037,7 +3042,7 @@ where
30373042
Some(monitor_update) => {
30383043
let update_id = monitor_update.update_id;
30393044
let update_res = self.chain_monitor.update_channel(funding_txo, &monitor_update);
3040-
if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
3045+
if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED) {
30413046
break Err(e);
30423047
}
30433048
if update_res == ChannelMonitorUpdateStatus::InProgress {
@@ -4091,7 +4096,7 @@ where
40914096
let peer_state = &mut *peer_state_lock;
40924097
match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
40934098
hash_map::Entry::Occupied(mut chan) => {
4094-
handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan)
4099+
handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED)
40954100
},
40964101
hash_map::Entry::Vacant(_) => Ok(()),
40974102
}
@@ -4677,9 +4682,7 @@ where
46774682
log_bytes!(chan_id), action);
46784683
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
46794684
}
4680-
let update_id = monitor_update.update_id;
4681-
let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &monitor_update);
4682-
let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
4685+
let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
46834686
peer_state, per_peer_state, chan);
46844687
if let Err(e) = res {
46854688
// TODO: This is a *critical* error - we probably updated the outbound edge
@@ -5216,7 +5219,8 @@ where
52165219

52175220
let chan = e.insert(chan);
52185221
let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
5219-
per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
5222+
per_peer_state, chan, MANUALLY_REMOVING_ALREADY_APPLIED,
5223+
{ peer_state.channel_by_id.remove(&new_channel_id) });
52205224

52215225
// Note that we reply with the new channel_id in error messages if we gave up on the
52225226
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
@@ -5249,7 +5253,7 @@ where
52495253
let monitor = try_chan_entry!(self,
52505254
chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
52515255
let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
5252-
let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
5256+
let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED);
52535257
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
52545258
// We weren't able to watch the channel to begin with, so no updates should be made on
52555259
// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -5346,9 +5350,7 @@ where
53465350

53475351
// Update the monitor with the shutdown script if necessary.
53485352
if let Some(monitor_update) = monitor_update_opt {
5349-
let update_id = monitor_update.update_id;
5350-
let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), &monitor_update);
5351-
break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
5353+
break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
53525354
}
53535355
break Ok(());
53545356
},
@@ -5544,9 +5546,7 @@ where
55445546
let funding_txo = chan.get().context.get_funding_txo();
55455547
let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
55465548
if let Some(monitor_update) = monitor_update_opt {
5547-
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), &monitor_update);
5548-
let update_id = monitor_update.update_id;
5549-
handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
5549+
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
55505550
peer_state, per_peer_state, chan)
55515551
} else { Ok(()) }
55525552
},
@@ -5683,9 +5683,7 @@ where
56835683
let funding_txo = chan.get().context.get_funding_txo();
56845684
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
56855685
let res = if let Some(monitor_update) = monitor_update_opt {
5686-
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), &monitor_update);
5687-
let update_id = monitor_update.update_id;
5688-
handle_new_monitor_update!(self, update_res, update_id,
5686+
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
56895687
peer_state_lock, peer_state, per_peer_state, chan)
56905688
} else { Ok(()) };
56915689
(htlcs_to_fail, res)
@@ -5961,11 +5959,8 @@ where
59615959
if let Some(monitor_update) = monitor_opt {
59625960
has_monitor_update = true;
59635961

5964-
let update_res = self.chain_monitor.update_channel(
5965-
funding_txo.expect("channel is live"), &monitor_update);
5966-
let update_id = monitor_update.update_id;
59675962
let channel_id: [u8; 32] = *channel_id;
5968-
let res = handle_new_monitor_update!(self, update_res, update_id,
5963+
let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
59695964
peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
59705965
peer_state.channel_by_id.remove(&channel_id));
59715966
if res.is_err() {
@@ -6307,9 +6302,7 @@ where
63076302
if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
63086303
log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
63096304
log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
6310-
let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, &monitor_update);
6311-
let update_id = monitor_update.update_id;
6312-
if let Err(e) = handle_new_monitor_update!(self, update_res, update_id,
6305+
if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
63136306
peer_state_lck, peer_state, per_peer_state, chan)
63146307
{
63156308
errors.push((e, counterparty_node_id));

0 commit comments

Comments
 (0)