Skip to content

Commit e0bb80d

Browse files
committed
Use the new monitor persistence flow for funding_created handling
Building on the previous commits, this finishes our transition to doing all message-sending in the monitor update completion pipeline, unifying our immediate- and async- `ChannelMonitor` update and persistence flows.
1 parent dee2d73 commit e0bb80d

File tree

3 files changed

+41
-52
lines changed

3 files changed

+41
-52
lines changed

lightning/src/ln/channel.rs

+8-8
Original file line numberDiff line numberDiff line change
@@ -2232,11 +2232,8 @@ impl<Signer: Sign> Channel<Signer> {
22322232

22332233
pub fn funding_created<K: Deref, L: Deref>(
22342234
&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, keys_source: &K, logger: &L
2235-
) -> Result<(msgs::FundingSigned, ChannelMonitor<<K::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
2236-
where
2237-
K::Target: KeysInterface,
2238-
L::Target: Logger
2239-
{
2235+
) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
2236+
where K::Target: KeysInterface<Signer = Signer>, L::Target: Logger {
22402237
if self.is_outbound() {
22412238
return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
22422239
}
@@ -2310,10 +2307,13 @@ impl<Signer: Sign> Channel<Signer> {
23102307

23112308
log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
23122309

2310+
let need_channel_ready = self.check_get_channel_ready(0).is_some();
2311+
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2312+
23132313
Ok((msgs::FundingSigned {
23142314
channel_id: self.channel_id,
23152315
signature
2316-
}, channel_monitor, self.check_get_channel_ready(0)))
2316+
}, channel_monitor))
23172317
}
23182318

23192319
/// Handles a funding_signed message from the remote end.
@@ -3702,7 +3702,7 @@ impl<Signer: Sign> Channel<Signer> {
37023702
///
37033703
/// [`chain::Watch`]: crate::chain::Watch
37043704
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3705-
pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3705+
fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
37063706
resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
37073707
mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
37083708
mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
@@ -7108,7 +7108,7 @@ mod tests {
71087108
}]};
71097109
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
71107110
let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
7111-
let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
7111+
let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
71127112

71137113
// Node B --> Node A: funding signed
71147114
let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);

lightning/src/ln/channelmanager.rs

+30-41
Original file line numberDiff line numberDiff line change
@@ -4229,55 +4229,27 @@ where
42294229
}
42304230

42314231
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
4232+
let best_block = *self.best_block.read().unwrap();
4233+
42324234
let per_peer_state = self.per_peer_state.read().unwrap();
42334235
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
42344236
if let None = peer_state_mutex_opt {
42354237
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id))
42364238
}
4237-
let ((funding_msg, monitor, mut channel_ready), mut chan) = {
4238-
let best_block = *self.best_block.read().unwrap();
4239-
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4240-
let peer_state = &mut *peer_state_lock;
4239+
4240+
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4241+
let peer_state = &mut *peer_state_lock;
4242+
let ((funding_msg, monitor), chan) =
42414243
match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
42424244
hash_map::Entry::Occupied(mut chan) => {
42434245
(try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
42444246
},
42454247
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
4246-
}
4247-
};
4248-
// Because we have exclusive ownership of the channel here we can release the peer_state
4249-
// lock before watch_channel
4250-
match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
4251-
ChannelMonitorUpdateStatus::Completed => {},
4252-
ChannelMonitorUpdateStatus::PermanentFailure => {
4253-
// Note that we reply with the new channel_id in error messages if we gave up on the
4254-
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
4255-
// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
4256-
// any messages referencing a previously-closed channel anyway.
4257-
// We do not propagate the monitor update to the user as it would be for a monitor
4258-
// that we didn't manage to store (and that we don't care about - we don't respond
4259-
// with the funding_signed so the channel can never go on chain).
4260-
let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
4261-
assert!(failed_htlcs.is_empty());
4262-
return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
4263-
},
4264-
ChannelMonitorUpdateStatus::InProgress => {
4265-
// There's no problem signing a counterparty's funding transaction if our monitor
4266-
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
4267-
// accepted payment from yet. We do, however, need to wait to send our channel_ready
4268-
// until we have persisted our monitor.
4269-
chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
4270-
channel_ready = None; // Don't send the channel_ready now
4271-
},
4272-
}
4273-
// It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
4274-
// peer exists, despite the inner PeerState potentially having no channels after removing
4275-
// the channel above.
4276-
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4277-
let peer_state = &mut *peer_state_lock;
4248+
};
4249+
42784250
match peer_state.channel_by_id.entry(funding_msg.channel_id) {
42794251
hash_map::Entry::Occupied(_) => {
4280-
return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
4252+
Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
42814253
},
42824254
hash_map::Entry::Vacant(e) => {
42834255
let mut id_to_peer = self.id_to_peer.lock().unwrap();
@@ -4291,17 +4263,34 @@ where
42914263
i_e.insert(chan.get_counterparty_node_id());
42924264
}
42934265
}
4266+
4267+
// There's no problem signing a counterparty's funding transaction if our monitor
4268+
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
4269+
// accepted payment from yet. We do, however, need to wait to send our channel_ready
4270+
// until we have persisted our monitor.
42944271
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
42954272
node_id: counterparty_node_id.clone(),
42964273
msg: funding_msg,
42974274
});
4298-
if let Some(msg) = channel_ready {
4299-
send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
4275+
4276+
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
4277+
4278+
let chan = e.insert(chan);
4279+
let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, 42);
4280+
4281+
// Note that we reply with the new channel_id in error messages if we gave up on the
4282+
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
4283+
// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
4284+
// any messages referencing a previously-closed channel anyway.
4285+
// We do not propagate the monitor update to the user as it would be for a monitor
4286+
// that we didn't manage to store (and that we don't care about - we don't respond
4287+
// with the funding_signed so the channel can never go on chain).
4288+
if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
4289+
res.0 = None;
43004290
}
4301-
e.insert(chan);
4291+
res
43024292
}
43034293
}
4304-
Ok(())
43054294
}
43064295

43074296
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {

lightning/src/ln/functional_tests.rs

+3-3
Original file line numberDiff line numberDiff line change
@@ -8684,9 +8684,9 @@ fn test_duplicate_chan_id() {
86848684
};
86858685
check_added_monitors!(nodes[0], 0);
86868686
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
8687-
// At this point we'll try to add a duplicate channel monitor, which will be rejected, but
8688-
// still needs to be cleared here.
8689-
check_added_monitors!(nodes[1], 1);
8687+
// At this point we'll look up if the channel_id is present and immediately fail the channel
8688+
// without trying to persist the `ChannelMonitor`.
8689+
check_added_monitors!(nodes[1], 0);
86908690

86918691
// ...still, nodes[1] will reject the duplicate channel.
86928692
{

0 commit comments

Comments
 (0)