Commit 1948a6b

Use new monitor persistence flow in funding_signed handling
In the previous commit, we moved all our `ChannelMonitorUpdate` pipelines to use a new async path via the `handle_new_monitor_update` macro. This avoids having two message sending pathways and simply sends messages in the "monitor update completed" flow, which is shared between sync and async monitor updates. Here we reuse the new macro for handling `funding_signed` messages when doing an initial `ChannelMonitor` persistence. This provides a similar benefit, simplifying the code a trivial amount, but importantly allows us to fully remove the original `handle_monitor_update_res` macro.
1 parent f1ad14a

2 files changed (+31 -107 lines)
lightning/src/ln/channel.rs (5 additions & 3 deletions)
@@ -2356,9 +2356,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// If this call is successful, broadcast the funding transaction (and not before!)
 	pub fn funding_signed<SP: Deref, L: Deref>(
 		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError>
+	) -> Result<ChannelMonitor<Signer>, ChannelError>
 	where
-		SP::Target: SignerProvider,
+		SP::Target: SignerProvider<Signer = Signer>,
 		L::Target: Logger
 	{
 		if !self.is_outbound() {
@@ -2431,7 +2431,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
 		log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
 
-		Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
+		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+		Ok(channel_monitor)
 	}
 
 	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
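Net effect of this hunk: `funding_signed` no longer hands the caller the funding transaction and an optional `channel_ready` to send; instead it records internally, via `monitor_updating_paused`, whether a `channel_ready` is owed, and the shared completion flow emits it once the initial monitor persist finishes. A rough model of that bookkeeping (fields and signatures are illustrative, not the real `Channel` internals):

// Illustrative-only model of the need_channel_ready bookkeeping; the real
// Channel tracks considerably more pending state while paused.
#[derive(Default)]
struct Channel {
    // Set from check_get_channel_ready(..).is_some() at funding_signed time.
    resend_channel_ready: bool,
    monitor_update_in_flight: bool,
}

impl Channel {
    fn monitor_updating_paused(&mut self, need_channel_ready: bool) {
        self.monitor_update_in_flight = true;
        self.resend_channel_ready = need_channel_ready;
    }
    // Run by the shared completion flow once the initial ChannelMonitor
    // persist is durable; only then does channel_ready go out.
    fn monitor_updating_restored(&mut self) -> Option<&'static str> {
        self.monitor_update_in_flight = false;
        if std::mem::take(&mut self.resend_channel_ready) {
            Some("send channel_ready")
        } else {
            None
        }
    }
}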

lightning/src/ln/channelmanager.rs (26 additions & 104 deletions)
@@ -1357,69 +1357,6 @@ macro_rules! remove_channel {
 	}
 }
 
-macro_rules! handle_monitor_update_res {
-	($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
-		match $err {
-			ChannelMonitorUpdateStatus::PermanentFailure => {
-				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
-				update_maps_on_chan_removal!($self, $chan);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
-					$chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
-				(res, true)
-			},
-			ChannelMonitorUpdateStatus::InProgress => {
-				log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
-					log_bytes!($chan_id[..]),
-					if $resend_commitment && $resend_raa {
-						match $action_type {
-							RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
-							RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
-						}
-					} else if $resend_commitment { "commitment" }
-					else if $resend_raa { "RAA" }
-					else { "nothing" },
-					(&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
-					(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
-					(&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
-				if !$resend_commitment {
-					debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
-				}
-				if !$resend_raa {
-					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
-				}
-				$chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
-				(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
-			},
-			ChannelMonitorUpdateStatus::Completed => {
-				(Ok(()), false)
-			},
-		}
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-		let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
-		if drop {
-			$entry.remove_entry();
-		}
-		res
-	} };
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
-		debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-	} };
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
-	};
-}
-
 macro_rules! send_channel_ready {
 	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
 		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
@@ -4523,49 +4460,34 @@ where
 	}
 
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-		let funding_tx = {
-			let best_block = *self.best_block.read().unwrap();
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-				.ok_or_else(|| {
-					debug_assert!(false);
-					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-				})?;
+		let best_block = *self.best_block.read().unwrap();
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+			})?;
 
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) {
-						Ok(update) => update,
-						Err(e) => try_chan_entry!(self, Err(e), chan),
-					};
-					match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-						ChannelMonitorUpdateStatus::Completed => {},
-						e => {
-							let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-							if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-								// We weren't able to watch the channel to begin with, so no updates should be made on
-								// it. Previously, full_stack_target found an (unreachable) panic when the
-								// monitor update contained within `shutdown_finish` was applied.
-								if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-									shutdown_finish.0.take();
-								}
-							}
-							return res
-						},
-					}
-					if let Some(msg) = channel_ready {
-						send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				let monitor = try_chan_entry!(self,
+					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
+				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+					// We weren't able to watch the channel to begin with, so no updates should be made on
+					// it. Previously, full_stack_target found an (unreachable) panic when the
+					// monitor update contained within `shutdown_finish` was applied.
+					if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+						shutdown_finish.0.take();
 					}
-					funding_tx
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
-			}
-		};
-		log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
-		self.tx_broadcaster.broadcast_transaction(&funding_tx);
-		Ok(())
+				}
+				res
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
 	}
 
 	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
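Note that the handler no longer broadcasts the funding transaction inline: with the initial `watch_channel` status routed through `handle_new_monitor_update!` (as update ID 0), the broadcast and any `channel_ready` are deferred to the shared completion flow, so neither can precede a durable `ChannelMonitor` write. Below is a hedged sketch of the persister side of that contract, with stand-in names rather than the real `Persist` trait:

// Stand-in sketch of an async initial persist: report InProgress now,
// signal completion later. Not the real rust-lightning Persist signatures.
use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
#[allow(dead_code)]
enum ChannelMonitorUpdateStatus { Completed, InProgress }

fn main() {
    let (done_tx, done_rx) = mpsc::channel::<u64>();

    // "Persist" update 0 (the initial ChannelMonitor write) on a background
    // thread and immediately report InProgress to the caller.
    let status = {
        thread::spawn(move || {
            // ... durably write the monitor to disk here ...
            done_tx.send(0).expect("receiver alive");
        });
        ChannelMonitorUpdateStatus::InProgress
    };
    println!("watch_channel returned {status:?}; funding tx not broadcast yet");

    // Once the write is durable, a completion event drives the shared
    // "monitor update completed" flow (broadcast funding tx, send channel_ready).
    let completed_id = done_rx.recv().unwrap();
    println!("update {completed_id} persisted; completion flow may now run");
}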
