Commit c9b53c5
Use new monitor persistence flow in funding_signed handling
In the previous commit, we moved all our `ChannelMonitorUpdate` pipelines to use a new async path via the `handle_new_monitor_update` macro. This avoids having two message-sending pathways and simply sends messages in the "monitor update completed" flow, which is shared between sync and async monitor updates.

Here we reuse the new macro for handling `funding_signed` messages when doing an initial `ChannelMonitor` persistence. This provides a similar benefit, simplifying the code a trivial amount, but importantly allows us to fully remove the original `handle_monitor_update_res` macro.
1 parent 70a6c55 commit c9b53c5
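
To make the "single pathway" point concrete, here is a minimal, self-contained sketch of the shared completion flow. The names below (`MonitorUpdateStatus`, `Channel`, the string messages) are heavily simplified stand-ins loosely modeled on this commit, not LDK's real API:

	// Persistence outcome reported by the monitor store.
	enum MonitorUpdateStatus {
		Completed,  // the write finished synchronously
		InProgress, // the write will finish later, asynchronously
	}

	struct Channel {
		paused: bool,              // true while a monitor update is in flight
		pending_msgs: Vec<String>, // messages held back until persistence completes
	}

	impl Channel {
		// The ONE place messages are released, shared by sync and async paths.
		fn monitor_updating_restored(&mut self) -> Vec<String> {
			self.paused = false;
			std::mem::take(&mut self.pending_msgs)
		}
	}

	fn handle_new_monitor_update(chan: &mut Channel, status: MonitorUpdateStatus) -> Vec<String> {
		match status {
			// Sync persister: run the completion path immediately.
			MonitorUpdateStatus::Completed => chan.monitor_updating_restored(),
			// Async persister: pause; the same completion path runs once
			// the backgrounded write signals completion.
			MonitorUpdateStatus::InProgress => {
				chan.paused = true;
				Vec::new()
			}
		}
	}

	fn main() {
		let mut chan = Channel { paused: false, pending_msgs: vec!["channel_ready".to_string()] };

		// Async case: nothing is sent at the call site...
		assert!(handle_new_monitor_update(&mut chan, MonitorUpdateStatus::InProgress).is_empty());
		assert!(chan.paused);
		// ...until completion, when the SAME restore path releases the message.
		assert_eq!(chan.monitor_updating_restored(), vec!["channel_ready".to_string()]);
	}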

File tree: 2 files changed, +33 -109 lines

lightning/src/ln/channel.rs (+5, -3)

@@ -2322,9 +2322,9 @@ impl<Signer: Sign> Channel<Signer> {
 	/// If this call is successful, broadcast the funding transaction (and not before!)
 	pub fn funding_signed<SP: Deref, L: Deref>(
 		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError>
+	) -> Result<ChannelMonitor<Signer>, ChannelError>
 	where
-		SP::Target: SignerProvider,
+		SP::Target: SignerProvider<Signer=Signer>,
 		L::Target: Logger
 	{
 		if !self.is_outbound() {
@@ -2397,7 +2397,9 @@ impl<Signer: Sign> Channel<Signer> {
 
 		log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
 
-		Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
+		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+		Ok(channel_monitor)
 	}
 
 	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
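
The `channel.rs` side is essentially a handoff: rather than returning the funding transaction and an optional `channel_ready` for the caller to send, the channel records, via `monitor_updating_paused`, what it will owe the peer once the initial monitor persistence completes. A rough model of that pause/restore contract, using stand-in types rather than LDK's real ones:

	// Stand-in for what the completion flow drains from the channel.
	struct RestoreUpdates {
		channel_ready: Option<&'static str>,
		funding_broadcastable: Option<&'static str>,
	}

	struct Channel {
		funding_tx: Option<&'static str>,
		need_channel_ready: bool,
	}

	impl Channel {
		// Called from funding_signed: note what completion must emit.
		fn monitor_updating_paused(&mut self, need_channel_ready: bool) {
			self.need_channel_ready = need_channel_ready;
		}

		// Called when the initial persistence completes (immediately for a
		// sync persister, later for an async one).
		fn monitor_updating_restored(&mut self) -> RestoreUpdates {
			RestoreUpdates {
				channel_ready: if self.need_channel_ready { Some("channel_ready") } else { None },
				funding_broadcastable: self.funding_tx.take(),
			}
		}
	}

	fn main() {
		let mut chan = Channel { funding_tx: Some("funding tx"), need_channel_ready: false };
		chan.monitor_updating_paused(true);
		let updates = chan.monitor_updating_restored();
		// The completion flow, not funding_signed's caller, gets the broadcast:
		assert_eq!(updates.funding_broadcastable, Some("funding tx"));
		assert_eq!(updates.channel_ready, Some("channel_ready"));
	}

This also suggests why the `log_info!`/`broadcast_transaction` calls disappear from `internal_funding_signed` below: under the shared flow, broadcasting the funding transaction happens from the "monitor update completed" path once the write finishes.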

lightning/src/ln/channelmanager.rs (+28, -106)

@@ -1309,69 +1309,6 @@ macro_rules! remove_channel {
 	}
 }
 
-macro_rules! handle_monitor_update_res {
-	($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
-		match $err {
-			ChannelMonitorUpdateStatus::PermanentFailure => {
-				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
-				update_maps_on_chan_removal!($self, $chan);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
-					$chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
-				(res, true)
-			},
-			ChannelMonitorUpdateStatus::InProgress => {
-				log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
-					log_bytes!($chan_id[..]),
-					if $resend_commitment && $resend_raa {
-						match $action_type {
-							RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
-							RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
-						}
-					} else if $resend_commitment { "commitment" }
-					else if $resend_raa { "RAA" }
-					else { "nothing" },
-					(&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
-					(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
-					(&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
-				if !$resend_commitment {
-					debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
-				}
-				if !$resend_raa {
-					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
-				}
-				$chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
-				(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
-			},
-			ChannelMonitorUpdateStatus::Completed => {
-				(Ok(()), false)
-			},
-		}
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-		let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
-		if drop {
-			$entry.remove_entry();
-		}
-		res
-	} };
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
-		debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-	} };
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
-	};
-}
-
 macro_rules! send_channel_ready {
 	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
 		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
@@ -1429,9 +1366,9 @@ macro_rules! handle_new_monitor_update {
 				res
 			},
 			ChannelMonitorUpdateStatus::Completed => {
-				if $chan.get_next_monitor_update()
-					.expect("We can't be processing a monitor udpate if it isn't queued")
-					.update_id == $update_id &&
+				if ($update_id == 0 || $chan.get_next_monitor_update()
+					.expect("We can't be processing a monitor update if it isn't queued")
+					.update_id == $update_id) &&
 					$chan.get_latest_monitor_update_id() == $update_id
 				{
 					let mut updates = $chan.monitor_updating_restored(&$self.logger,
@@ -4380,48 +4317,33 @@ where
 	}
 
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-		let funding_tx = {
-			let best_block = *self.best_block.read().unwrap();
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
-			if let None = peer_state_mutex_opt {
-				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
-			}
+		let best_block = *self.best_block.read().unwrap();
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+		if let None = peer_state_mutex_opt {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+		}
 
-			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) {
-						Ok(update) => update,
-						Err(e) => try_chan_entry!(self, Err(e), chan),
-					};
-					match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-						ChannelMonitorUpdateStatus::Completed => {},
-						e => {
-							let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-							if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-								// We weren't able to watch the channel to begin with, so no updates should be made on
-								// it. Previously, full_stack_target found an (unreachable) panic when the
-								// monitor update contained within `shutdown_finish` was applied.
-								if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-									shutdown_finish.0.take();
-								}
-							}
-							return res
-						},
-					}
-					if let Some(msg) = channel_ready {
-						send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				let monitor = try_chan_entry!(self,
+					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
+				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+					// We weren't able to watch the channel to begin with, so no updates should be made on
+					// it. Previously, full_stack_target found an (unreachable) panic when the
+					// monitor update contained within `shutdown_finish` was applied.
+					if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+						shutdown_finish.0.take();
 					}
-					funding_tx
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
-			}
-		};
-		log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
-		self.tx_broadcaster.broadcast_transaction(&funding_tx);
-		Ok(())
+				}
+				res
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
 	}
 
 	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
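
The `$update_id == 0` escape hatch added to `handle_new_monitor_update!` exists precisely for this call site: the initial persistence handed to `watch_channel` is not a queued `ChannelMonitorUpdate`, so `get_next_monitor_update()` has nothing to return and the old `.expect(...)` would panic. A simplified model of the Completed-arm condition, with assumed struct and helper names for illustration:

	// Hypothetical stand-ins for the channel's monitor-update bookkeeping.
	struct Channel {
		queued_update_ids: Vec<u64>, // ids of in-flight ChannelMonitorUpdates
		latest_monitor_update_id: u64,
	}

	impl Channel {
		fn get_next_monitor_update_id(&self) -> Option<u64> {
			self.queued_update_ids.first().copied()
		}
	}

	// Mirrors: ($update_id == 0 || next queued id == $update_id)
	//          && latest id == $update_id
	fn update_unblocks_channel(chan: &Channel, update_id: u64) -> bool {
		(update_id == 0 || chan.get_next_monitor_update_id() == Some(update_id))
			&& chan.latest_monitor_update_id == update_id
	}

	fn main() {
		// Initial funding persistence: nothing queued, id 0 -> proceeds.
		let initial = Channel { queued_update_ids: vec![], latest_monitor_update_id: 0 };
		assert!(update_unblocks_channel(&initial, 0));

		// A later update only restores the channel when it is both the next
		// queued update and the latest one issued.
		let later = Channel { queued_update_ids: vec![5, 6], latest_monitor_update_id: 6 };
		assert!(!update_unblocks_channel(&later, 5));
	}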
