Delay RAA-after-next processing until PaymentSent is handled #2112

Merged
17 changes: 1 addition & 16 deletions lightning-invoice/src/utils.rs
@@ -1373,22 +1373,7 @@ mod test {
assert_eq!(other_events.borrow().len(), 1);
check_payment_claimable(&other_events.borrow()[0], payment_hash, payment_secret, payment_amt, payment_preimage_opt, invoice.recover_payee_pub_key());
do_claim_payment_along_route(&nodes[0], &[&vec!(&nodes[fwd_idx])[..]], false, payment_preimage);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => {
assert_eq!(payment_preimage, *ev_preimage);
assert_eq!(payment_hash, *ev_hash);
assert_eq!(fee_paid_msat, &Some(0));
},
_ => panic!("Unexpected event")
}
match events[1] {
Event::PaymentPathSuccessful { payment_hash: hash, .. } => {
assert_eq!(hash, Some(payment_hash));
},
_ => panic!("Unexpected event")
}
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

#[test]
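The sixteen deleted lines above collapse into a single call to the shared `expect_payment_sent` helper from `functional_test_utils.rs`. As a rough sketch of what that helper asserts in the single-path case (the parameter names and exact checks here are assumptions inferred from the call sites in this diff, not the real implementation):

fn expect_payment_sent(node: &Node<'_, '_, '_>, expected_preimage: PaymentPreimage,
	expected_fee_msat: Option<u64>, expect_per_path_claims: bool,
	expect_post_event_monitor_update: bool,
) {
	let events = node.node.get_and_clear_pending_events();
	// Single-path case: PaymentSent, optionally followed by PaymentPathSuccessful.
	assert_eq!(events.len(), if expect_per_path_claims { 2 } else { 1 });
	match &events[0] {
		Event::PaymentSent { payment_preimage, fee_paid_msat, .. } => {
			assert_eq!(*payment_preimage, expected_preimage);
			if expected_fee_msat.is_some() { assert_eq!(*fee_paid_msat, expected_fee_msat); }
		},
		_ => panic!("Unexpected event"),
	}
	if expect_per_path_claims {
		if let Event::PaymentPathSuccessful { .. } = events[1] {} else { panic!("Unexpected event"); }
	}
	// New with this PR: handling PaymentSent can release a held ChannelMonitorUpdate,
	// which the caller opts into checking via the final flag.
	if expect_post_event_monitor_update {
		check_added_monitors(node, 1);
	}
}

The same flag semantics explain the mechanical swaps elsewhere in this diff: retired `expect_payment_sent_without_paths!(...)` call sites become `expect_payment_sent(..., false, false)`, and bare `expect_payment_sent!(...)` call sites become `expect_payment_sent(..., true, false)`.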
8 changes: 4 additions & 4 deletions lightning/src/chain/chainmonitor.rs
@@ -805,7 +805,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L
#[cfg(test)]
mod tests {
use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
@@ -888,7 +888,7 @@ mod tests {

let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -901,7 +901,7 @@ mod tests {
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
@@ -985,7 +985,7 @@ mod tests {
}
}

expect_payment_sent!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
}

#[test]
5 changes: 5 additions & 0 deletions lightning/src/events/mod.rs
@@ -507,6 +507,11 @@ pub enum Event {
/// payment is no longer retryable, due either to the [`Retry`] provided or
/// [`ChannelManager::abandon_payment`] having been called for the corresponding payment.
///
/// In exceedingly rare cases, it is possible that an [`Event::PaymentFailed`] is generated for
/// a payment after an [`Event::PaymentSent`] event for this same payment has already been
/// received and processed. In this case, the [`Event::PaymentFailed`] event MUST be ignored,
/// and the payment MUST be treated as having succeeded.
///
/// [`Retry`]: crate::ln::channelmanager::Retry
/// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
PaymentFailed {
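The new paragraph imposes an ordering rule on event consumers: a stale `Event::PaymentFailed` can trail an already-handled `Event::PaymentSent`. A minimal sketch of a consumer honoring that rule (the dedup set and handler shape are illustrative assumptions, not LDK API; module paths follow the 0.0.115-era layout):

use std::collections::HashSet;

use lightning::events::Event;
use lightning::ln::PaymentHash;

fn handle_payment_event(event: Event, fully_resolved: &mut HashSet<PaymentHash>) {
	match event {
		Event::PaymentSent { payment_hash, .. } => {
			// Record the success before acknowledging the event so a later,
			// stale PaymentFailed for the same payment can be recognized.
			fully_resolved.insert(payment_hash);
		},
		Event::PaymentFailed { payment_hash, .. } => {
			if fully_resolved.contains(&payment_hash) {
				// Per the docs above: PaymentSent already fired for this
				// payment, so this event MUST be ignored.
				return;
			}
			// ... otherwise surface the failure to the user ...
		},
		_ => {},
	}
}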
79 changes: 72 additions & 7 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -1403,6 +1403,7 @@ fn claim_while_disconnected_monitor_update_fail() {
MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);

@@ -1440,7 +1441,7 @@ fn claim_while_disconnected_monitor_update_fail() {

nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
expect_payment_sent!(nodes[0], payment_preimage_1);
expect_payment_path_successful!(nodes[0]);

claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
@@ -2196,7 +2197,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);

nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
expect_payment_path_successful!(nodes[0]);
}
@@ -2449,7 +2450,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
assert_eq!(updates.update_add_htlcs.len(), 1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
updates.commitment_signed
@@ -2466,7 +2467,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
check_added_monitors!(nodes[1], 1);

commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);

let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
@@ -2567,7 +2568,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
if htlc_status == HTLCStatusAtDupClaim::Cleared {
commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
expect_payment_path_successful!(nodes[0]);
@@ -2598,7 +2599,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
}
if htlc_status != HTLCStatusAtDupClaim::Cleared {
commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
@@ -2797,7 +2798,7 @@ fn double_temp_error() {
assert_eq!(node_id, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
check_added_monitors!(nodes[0], 0);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
check_added_monitors!(nodes[0], 1);
nodes[0].node.process_pending_htlc_forwards();
@@ -3024,3 +3025,67 @@ fn test_inbound_reload_without_init_mon() {
do_test_inbound_reload_without_init_mon(false, true);
do_test_inbound_reload_without_init_mon(false, false);
}

#[test]
fn test_blocked_chan_preimage_release() {
// Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
// be handled, HTLC preimage `ChannelMonitorUpdate`s will still go out.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

create_announced_chan_between_nodes(&nodes, 0, 1).2;
create_announced_chan_between_nodes(&nodes, 1, 2).2;

send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);

// Tee up two payments in opposite directions across nodes[1], one it sends (generating a
// PaymentSent event) and one it forwards.
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);

// Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
nodes[2].node.claim_funds(payment_preimage_1);
check_added_monitors(&nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);

let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
check_added_monitors(&nodes[1], 0);

// Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
// claim an HTLC on its channel with nodes[2], but that channel is blocked on the above
// `PaymentSent` event.
nodes[0].node.claim_funds(payment_preimage_2);
check_added_monitors(&nodes[0], 1);
expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);

let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Finish the CS dance between nodes[0] and nodes[1].
do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
check_added_monitors(&nodes[1], 0);

let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);
if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }

// The event processing should release the last RAA update.
check_added_monitors(&nodes[1], 1);

// When we fetch the next message for nodes[2], the message getter will generate its next
// commitment update, producing a further monitor update.
let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
check_added_monitors(&nodes[1], 1);

nodes[2].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
}
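The two `check_added_monitors(&nodes[1], ...)` calls bracketing the event fetch are the crux of the test: the RAA-generated `ChannelMonitorUpdate` for nodes[1]'s channel with nodes[2] appears only after the `PaymentSent` event is processed. For a consumer, that ordering means events should be persisted before the handler returns; a hedged sketch (`persist_event` is a hypothetical store hook, and the closure-as-handler form assumes LDK's blanket `EventHandler` impl for `Fn(Event)`):

use lightning::events::{Event, EventsProvider};

// Hypothetical stand-in for the caller's durable event journal.
fn persist_event(event: &Event) { let _ = event; }

fn drain_events<P: EventsProvider>(provider: &P) {
	provider.process_pending_events(&|event: Event| {
		// Persist before returning: once this handler returns, the held RAA
		// ChannelMonitorUpdate may be released and the event will not be
		// replayed after a restart.
		persist_event(&event);
	});
}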
40 changes: 32 additions & 8 deletions lightning/src/ln/channel.rs
@@ -3201,7 +3201,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger,
{
@@ -3382,6 +3382,22 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
}
}

let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
let release_state_str =
if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
macro_rules! return_with_htlcs_to_fail {
($htlcs_to_fail: expr) => {
if !release_monitor {
self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
update: monitor_update,
});
return Ok(($htlcs_to_fail, None));
} else {
return Ok(($htlcs_to_fail, Some(monitor_update)));
}
}
}

if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
// We can't actually generate a new commitment transaction (incl by freeing holding
// cells) while we can't update the monitor, so we just return what we have.
@@ -3400,7 +3416,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.context.monitor_pending_failures.append(&mut revoked_htlcs);
self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
return_with_htlcs_to_fail!(Vec::new());
}

match self.free_holding_cell_htlcs(fee_estimator, logger) {
@@ -3410,8 +3426,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.context.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);

log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
log_bytes!(self.context.channel_id()), release_state_str);

self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
return_with_htlcs_to_fail!(htlcs_to_fail);
},
(None, htlcs_to_fail) => {
if require_commitment {
@@ -3422,14 +3441,19 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.context.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);

log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
log_bytes!(self.context.channel_id()),
update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
release_state_str);

self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
return_with_htlcs_to_fail!(htlcs_to_fail);
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
log_bytes!(self.context.channel_id()), release_state_str);

self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
return_with_htlcs_to_fail!(htlcs_to_fail);
}
}
}
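Taken together, the new `hold_mon_update` argument and the `return_with_htlcs_to_fail!` macro form a small gate in front of monitor-update release. A simplified model of that gate (`blocked_monitor_updates` and `PendingChannelMonitorUpdate` are the names from the diff; the surrounding types are reduced stand-ins):

struct ChannelMonitorUpdate; // stand-in for the real update type

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

struct ChannelContext {
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}

impl ChannelContext {
	/// Either hand the update back for immediate persistence ("Releasing" in
	/// the new log lines) or queue it ("Holding"/"Blocked") until the pending
	/// event is handled.
	fn gate_monitor_update(&mut self, update: ChannelMonitorUpdate, hold: bool)
		-> Option<ChannelMonitorUpdate>
	{
		let release = self.blocked_monitor_updates.is_empty() && !hold;
		if release {
			Some(update)
		} else {
			self.blocked_monitor_updates.push(PendingChannelMonitorUpdate { update });
			None
		}
	}
}

When `revoke_and_ack` runs with `hold_mon_update` set because a `PaymentSent` event is still pending, the RAA-generated update lands in the queue and is flushed only once that event has been handled, which is exactly the ordering `test_blocked_chan_preimage_release` observes.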