Skip to content

Commit 809dabc

Browse files
committed
Use the new monitor persistence flow for funding_created handling
Building on the previous commits, this finishes our transition to doing all message-sending in the monitor update completion pipeline, unifying our immediate and async `ChannelMonitor` update and persistence flows.
1 parent 5e91e3e commit 809dabc

File tree

3 files changed

+41
-52
lines changed

3 files changed

+41
-52
lines changed

lightning/src/ln/channel.rs

+8-8
Original file line numberDiff line numberDiff line change
@@ -2232,11 +2232,8 @@ impl<Signer: Sign> Channel<Signer> {
22322232

22332233
pub fn funding_created<K: Deref, L: Deref>(
22342234
&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, keys_source: &K, logger: &L
2235-
) -> Result<(msgs::FundingSigned, ChannelMonitor<<K::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
2236-
where
2237-
K::Target: KeysInterface,
2238-
L::Target: Logger
2239-
{
2235+
) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
2236+
where K::Target: KeysInterface<Signer = Signer>, L::Target: Logger {
22402237
if self.is_outbound() {
22412238
return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
22422239
}
@@ -2310,10 +2307,13 @@ impl<Signer: Sign> Channel<Signer> {
23102307

23112308
log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
23122309

2310+
let need_channel_ready = self.check_get_channel_ready(0).is_some();
2311+
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2312+
23132313
Ok((msgs::FundingSigned {
23142314
channel_id: self.channel_id,
23152315
signature
2316-
}, channel_monitor, self.check_get_channel_ready(0)))
2316+
}, channel_monitor))
23172317
}
23182318

23192319
/// Handles a funding_signed message from the remote end.
@@ -3702,7 +3702,7 @@ impl<Signer: Sign> Channel<Signer> {
37023702
///
37033703
/// [`chain::Watch`]: crate::chain::Watch
37043704
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3705-
pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3705+
fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
37063706
resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
37073707
mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
37083708
mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
@@ -7108,7 +7108,7 @@ mod tests {
71087108
}]};
71097109
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
71107110
let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
7111-
let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
7111+
let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
71127112

71137113
// Node B --> Node A: funding signed
71147114
let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);

lightning/src/ln/channelmanager.rs

+30-41
Original file line numberDiff line numberDiff line change
@@ -4230,55 +4230,27 @@ where
42304230
}
42314231

42324232
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
4233+
let best_block = *self.best_block.read().unwrap();
4234+
42334235
let per_peer_state = self.per_peer_state.read().unwrap();
42344236
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
42354237
if let None = peer_state_mutex_opt {
42364238
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id))
42374239
}
4238-
let ((funding_msg, monitor, mut channel_ready), mut chan) = {
4239-
let best_block = *self.best_block.read().unwrap();
4240-
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4241-
let peer_state = &mut *peer_state_lock;
4240+
4241+
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4242+
let peer_state = &mut *peer_state_lock;
4243+
let ((funding_msg, monitor), chan) =
42424244
match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
42434245
hash_map::Entry::Occupied(mut chan) => {
42444246
(try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
42454247
},
42464248
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
4247-
}
4248-
};
4249-
// Because we have exclusive ownership of the channel here we can release the peer_state
4250-
// lock before watch_channel
4251-
match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
4252-
ChannelMonitorUpdateStatus::Completed => {},
4253-
ChannelMonitorUpdateStatus::PermanentFailure => {
4254-
// Note that we reply with the new channel_id in error messages if we gave up on the
4255-
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
4256-
// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
4257-
// any messages referencing a previously-closed channel anyway.
4258-
// We do not propagate the monitor update to the user as it would be for a monitor
4259-
// that we didn't manage to store (and that we don't care about - we don't respond
4260-
// with the funding_signed so the channel can never go on chain).
4261-
let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
4262-
assert!(failed_htlcs.is_empty());
4263-
return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
4264-
},
4265-
ChannelMonitorUpdateStatus::InProgress => {
4266-
// There's no problem signing a counterparty's funding transaction if our monitor
4267-
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
4268-
// accepted payment from yet. We do, however, need to wait to send our channel_ready
4269-
// until we have persisted our monitor.
4270-
chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
4271-
channel_ready = None; // Don't send the channel_ready now
4272-
},
4273-
}
4274-
// It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
4275-
// peer exists, despite the inner PeerState potentially having no channels after removing
4276-
// the channel above.
4277-
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4278-
let peer_state = &mut *peer_state_lock;
4249+
};
4250+
42794251
match peer_state.channel_by_id.entry(funding_msg.channel_id) {
42804252
hash_map::Entry::Occupied(_) => {
4281-
return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
4253+
Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
42824254
},
42834255
hash_map::Entry::Vacant(e) => {
42844256
let mut id_to_peer = self.id_to_peer.lock().unwrap();
@@ -4292,17 +4264,34 @@ where
42924264
i_e.insert(chan.get_counterparty_node_id());
42934265
}
42944266
}
4267+
4268+
// There's no problem signing a counterparty's funding transaction if our monitor
4269+
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
4270+
// accepted payment from yet. We do, however, need to wait to send our channel_ready
4271+
// until we have persisted our monitor.
42954272
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
42964273
node_id: counterparty_node_id.clone(),
42974274
msg: funding_msg,
42984275
});
4299-
if let Some(msg) = channel_ready {
4300-
send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
4276+
4277+
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
4278+
4279+
let chan = e.insert(chan);
4280+
let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, 42);
4281+
4282+
// Note that we reply with the new channel_id in error messages if we gave up on the
4283+
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
4284+
// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
4285+
// any messages referencing a previously-closed channel anyway.
4286+
// We do not propagate the monitor update to the user as it would be for a monitor
4287+
// that we didn't manage to store (and that we don't care about - we don't respond
4288+
// with the funding_signed so the channel can never go on chain).
4289+
if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
4290+
res.0 = None;
43014291
}
4302-
e.insert(chan);
4292+
res
43034293
}
43044294
}
4305-
Ok(())
43064295
}
43074296

43084297
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {

lightning/src/ln/functional_tests.rs

+3-3
Original file line numberDiff line numberDiff line change
@@ -8684,9 +8684,9 @@ fn test_duplicate_chan_id() {
86848684
};
86858685
check_added_monitors!(nodes[0], 0);
86868686
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
8687-
// At this point we'll try to add a duplicate channel monitor, which will be rejected, but
8688-
// still needs to be cleared here.
8689-
check_added_monitors!(nodes[1], 1);
8687+
// At this point we'll look up if the channel_id is present and immediately fail the channel
8688+
// without trying to persist the `ChannelMonitor`.
8689+
check_added_monitors!(nodes[1], 0);
86908690

86918691
// ...still, nodes[1] will reject the duplicate channel.
86928692
{

0 commit comments

Comments (0)