Skip to content

Commit 5491193

Browse files
committed
Use new monitor persistence flow in funding_signed handling
In the previous commit, we moved all our `ChannelMonitorUpdate` pipelines to use a new async path via the `handle_new_monitor_update` macro. This avoids having two message sending pathways and simply sends messages in the "monitor update completed" flow, which is shared between sync and async monitor updates. Here we reuse the new macro for handling `funding_signed` messages when doing an initial `ChannelMonitor` persistence. This provides a similar benefit, simplifying the code a trivial amount, but importantly allows us to fully remove the original `handle_monitor_update_res` macro.
1 parent 476b9df commit 5491193

File tree

2 files changed

+32
-112
lines changed

2 files changed

+32
-112
lines changed

lightning/src/ln/channel.rs

Lines changed: 6 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -2318,13 +2318,9 @@ impl<Signer: Sign> Channel<Signer> {
23182318

23192319
/// Handles a funding_signed message from the remote end.
23202320
/// If this call is successful, broadcast the funding transaction (and not before!)
2321-
pub fn funding_signed<K: Deref, L: Deref>(
2322-
&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, keys_source: &K, logger: &L
2323-
) -> Result<(ChannelMonitor<<K::Target as KeysInterface>::Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError>
2324-
where
2325-
K::Target: KeysInterface,
2326-
L::Target: Logger
2327-
{
2321+
pub fn funding_signed<L: Deref>(
2322+
&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L
2323+
) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
23282324
if !self.is_outbound() {
23292325
return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
23302326
}
@@ -2395,7 +2391,9 @@ impl<Signer: Sign> Channel<Signer> {
23952391

23962392
log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
23972393

2398-
Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
2394+
let need_channel_ready = self.check_get_channel_ready(0).is_some();
2395+
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2396+
Ok(channel_monitor)
23992397
}
24002398

24012399
/// Handles a channel_ready message from our peer. If we've already sent our channel_ready

lightning/src/ln/channelmanager.rs

Lines changed: 26 additions & 104 deletions
Original file line number | Diff line number | Diff line change
@@ -1467,69 +1467,6 @@ macro_rules! remove_channel {
14671467
}
14681468
}
14691469

1470-
macro_rules! handle_monitor_update_res {
1471-
($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
1472-
match $err {
1473-
ChannelMonitorUpdateStatus::PermanentFailure => {
1474-
log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
1475-
update_maps_on_chan_removal!($self, $chan);
1476-
let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
1477-
$chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
1478-
(res, true)
1479-
},
1480-
ChannelMonitorUpdateStatus::InProgress => {
1481-
log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
1482-
log_bytes!($chan_id[..]),
1483-
if $resend_commitment && $resend_raa {
1484-
match $action_type {
1485-
RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
1486-
RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
1487-
}
1488-
} else if $resend_commitment { "commitment" }
1489-
else if $resend_raa { "RAA" }
1490-
else { "nothing" },
1491-
(&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
1492-
(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
1493-
(&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
1494-
if !$resend_commitment {
1495-
debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
1496-
}
1497-
if !$resend_raa {
1498-
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
1499-
}
1500-
$chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
1501-
(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
1502-
},
1503-
ChannelMonitorUpdateStatus::Completed => {
1504-
(Ok(()), false)
1505-
},
1506-
}
1507-
};
1508-
($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
1509-
let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
1510-
if drop {
1511-
$entry.remove_entry();
1512-
}
1513-
res
1514-
} };
1515-
($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
1516-
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
1517-
handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
1518-
} };
1519-
($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
1520-
handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
1521-
};
1522-
($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
1523-
handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
1524-
};
1525-
($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
1526-
handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
1527-
};
1528-
($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
1529-
handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
1530-
};
1531-
}
1532-
15331470
macro_rules! send_channel_ready {
15341471
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
15351472
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
@@ -1587,9 +1524,9 @@ macro_rules! handle_new_monitor_update {
15871524
res
15881525
},
15891526
ChannelMonitorUpdateStatus::Completed => {
1590-
if $chan.get_next_monitor_update()
1591-
.expect("We can't be processing a monitor udpate if it isn't queued")
1592-
.update_id == $update_id &&
1527+
if ($update_id == 0 || $chan.get_next_monitor_update()
1528+
.expect("We can't be processing a monitor update if it isn't queued")
1529+
.update_id == $update_id) &&
15931530
$chan.get_latest_monitor_update_id() == $update_id
15941531
{
15951532
let mut updates = $chan.monitor_updating_restored(&$self.logger,
@@ -4829,45 +4766,30 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
48294766
}
48304767

48314768
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
4832-
let funding_tx = {
4833-
let best_block = *self.best_block.read().unwrap();
4834-
let mut channel_lock = self.channel_state.lock().unwrap();
4835-
let channel_state = &mut *channel_lock;
4836-
match channel_state.by_id.entry(msg.channel_id) {
4837-
hash_map::Entry::Occupied(mut chan) => {
4838-
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4839-
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4840-
}
4841-
let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) {
4842-
Ok(update) => update,
4843-
Err(e) => try_chan_entry!(self, Err(e), chan),
4844-
};
4845-
match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
4846-
ChannelMonitorUpdateStatus::Completed => {},
4847-
e => {
4848-
let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
4849-
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
4850-
// We weren't able to watch the channel to begin with, so no updates should be made on
4851-
// it. Previously, full_stack_target found an (unreachable) panic when the
4852-
// monitor update contained within `shutdown_finish` was applied.
4853-
if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
4854-
shutdown_finish.0.take();
4855-
}
4856-
}
4857-
return res
4858-
},
4859-
}
4860-
if let Some(msg) = channel_ready {
4861-
send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
4769+
let best_block = *self.best_block.read().unwrap();
4770+
let mut channel_lock = self.channel_state.lock().unwrap();
4771+
let channel_state = &mut *channel_lock;
4772+
match channel_state.by_id.entry(msg.channel_id) {
4773+
hash_map::Entry::Occupied(mut chan) => {
4774+
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4775+
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4776+
}
4777+
let monitor = try_chan_entry!(self,
4778+
chan.get_mut().funding_signed(&msg, best_block, &self.logger), chan);
4779+
let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
4780+
let mut res = handle_new_monitor_update!(self, update_res, 0, channel_lock, channel_state.pending_msg_events, chan);
4781+
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
4782+
// We weren't able to watch the channel to begin with, so no updates should be made on
4783+
// it. Previously, full_stack_target found an (unreachable) panic when the
4784+
// monitor update contained within `shutdown_finish` was applied.
4785+
if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
4786+
shutdown_finish.0.take();
48624787
}
4863-
funding_tx
4864-
},
4865-
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4866-
}
4867-
};
4868-
log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
4869-
self.tx_broadcaster.broadcast_transaction(&funding_tx);
4870-
Ok(())
4788+
}
4789+
res
4790+
},
4791+
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4792+
}
48714793
}
48724794

48734795
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {

0 commit comments

Comments
 (0)