Rename Event PaymentFailed -> PaymentPathFailed #1084

Merged
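This PR renames the `Event::PaymentFailed` variant to `Event::PaymentPathFailed` and adds a `path` field (the `Vec<RouteHop>` of the failed path), so each failed path of a potentially multi-path payment is reported individually; `all_paths_failed` still indicates whether this was the payment's last outstanding path. Downstream consumers mostly just rename their match arms. A minimal sketch of that migration, using only field names visible in the diff below (the `Event` import path is assumed from the crate layout at the time):

```rust
// Hypothetical downstream event handler, updated for the rename. Field names come
// from this PR's diff; the module path for Event is an assumption.
use lightning::util::events::Event;

fn handle_payment_event(event: &Event) {
    match event {
        // Previously: Event::PaymentFailed { payment_hash, rejected_by_dest, .. }
        Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, path, .. } => {
            println!("payment {:?}: a {}-hop path failed (rejected by destination: {}, all paths failed: {})",
                     payment_hash, path.len(), rejected_by_dest, all_paths_failed);
        },
        _ => {},
    }
}
```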
2 changes: 1 addition & 1 deletion fuzz/src/chanmon_consistency.rs
@@ -827,7 +827,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
}
},
events::Event::PaymentSent { .. } => {},
events::Event::PaymentFailed { .. } => {},
events::Event::PaymentPathFailed { .. } => {},
events::Event::PaymentForwarded { .. } if $node == 1 => {},
events::Event::PendingHTLCsForwardable { .. } => {
nodes[$node].process_pending_htlc_forwards();
6 changes: 3 additions & 3 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -78,7 +78,7 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
};

// TODO: Once we hit the chain with the failure transaction we should check that we get a
// PaymentFailed event
// PaymentPathFailed event

assert_eq!(nodes[0].node.list_channels().len(), 0);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
@@ -267,7 +267,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
check_closed_broadcast!(nodes[0], true);

// TODO: Once we hit the chain with the failure transaction we should check that we get a
// PaymentFailed event
// PaymentPathFailed event

assert_eq!(nodes[0].node.list_channels().len(), 0);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -1819,7 +1819,7 @@ fn test_monitor_update_on_pending_forwards() {

let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
if let Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } = events[0] {
assert_eq!(payment_hash, payment_hash_1);
assert!(rejected_by_dest);
} else { panic!("Unexpected event!"); }
13 changes: 8 additions & 5 deletions lightning/src/ln/channelmanager.rs
@@ -489,7 +489,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
/// The session_priv bytes of outbound payments which are pending resolution.
/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
/// (if the channel has been force-closed), however we track them here to prevent duplicative
/// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
/// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
@@ -2875,18 +2875,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
self.fail_htlc_backwards_internal(channel_state,
htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
},
HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => {
HTLCSource::OutboundRoute { session_priv, mpp_id, path, .. } => {
let mut session_priv_bytes = [0; 32];
session_priv_bytes.copy_from_slice(&session_priv[..]);
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
if sessions.get_mut().remove(&session_priv_bytes) {
self.pending_events.lock().unwrap().push(
events::Event::PaymentFailed {
events::Event::PaymentPathFailed {
payment_hash,
rejected_by_dest: false,
network_update: None,
all_paths_failed: sessions.get().len() == 0,
path: path.clone(),
#[cfg(test)]
error_code: None,
#[cfg(test)]
@@ -2951,11 +2952,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
// process_onion_failure we should close that channel as it implies our
// next-hop is needlessly blaming us!
self.pending_events.lock().unwrap().push(
events::Event::PaymentFailed {
events::Event::PaymentPathFailed {
payment_hash: payment_hash.clone(),
rejected_by_dest: !payment_retryable,
network_update,
all_paths_failed,
path: path.clone(),
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
@@ -2977,11 +2979,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
// TODO: For non-temporary failures, we really should be closing the
// channel here as we apparently can't relay through them anyway.
self.pending_events.lock().unwrap().push(
events::Event::PaymentFailed {
events::Event::PaymentPathFailed {
payment_hash: payment_hash.clone(),
rejected_by_dest: path.len() == 1,
network_update: None,
all_paths_failed,
path: path.clone(),
#[cfg(test)]
error_code: Some(*failure_code),
#[cfg(test)]
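Each of the three sites above that push the failure event now clones the HTLC source's `path` into it. Pieced together from the fields this PR constructs and matches, the renamed variant looks roughly like the sketch below; it is a reconstruction, not the upstream definition, which lives in the crate's events module (in particular, the `error_code`/`error_data` types are assumptions):

```rust
// Approximate shape of the renamed variant, reconstructed only from the fields this
// PR constructs and matches. The real definition lives in lightning::util::events;
// the error_code / error_data types shown here are assumptions.
use lightning::ln::PaymentHash;
use lightning::routing::network_graph::NetworkUpdate;
use lightning::routing::router::RouteHop;

pub enum Event {
    // ... other variants elided ...
    PaymentPathFailed {
        payment_hash: PaymentHash,
        rejected_by_dest: bool,
        network_update: Option<NetworkUpdate>,
        all_paths_failed: bool,
        // New in this PR: the hops of the path that failed.
        path: Vec<RouteHop>,
        #[cfg(test)]
        error_code: Option<u16>,
        #[cfg(test)]
        error_data: Option<Vec<u8>>,
    },
}
```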
9 changes: 6 additions & 3 deletions lightning/src/ln/functional_test_utils.rs
@@ -1073,7 +1073,7 @@ macro_rules! expect_payment_failed_with_update {
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentFailed { ref payment_hash, rejected_by_dest, ref network_update, ref error_code, ref error_data, .. } => {
Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, ref network_update, ref error_code, ref error_data, .. } => {
assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
assert!(error_code.is_some(), "expected error_code.is_some() = true");
@@ -1102,7 +1102,7 @@ macro_rules! expect_payment_failed {
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
assert!(error_code.is_some(), "expected error_code.is_some() = true");
@@ -1399,10 +1399,13 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
let events = origin_node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentFailed { payment_hash, rejected_by_dest, all_paths_failed, .. } => {
Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, ref path, .. } => {
assert_eq!(payment_hash, our_payment_hash);
assert!(rejected_by_dest);
assert_eq!(all_paths_failed, i == expected_paths.len() - 1);
for (idx, hop) in expected_route.iter().enumerate() {
assert_eq!(hop.node.get_our_node_id(), path[idx].pubkey);
}
},
_ => panic!("Unexpected event"),
}
30 changes: 15 additions & 15 deletions lightning/src/ln/functional_tests.rs
@@ -3020,7 +3020,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
_ => panic!("Unexepected event"),
}
match events[1] {
Event::PaymentFailed { ref payment_hash, .. } => {
Event::PaymentPathFailed { ref payment_hash, .. } => {
assert_eq!(*payment_hash, fourth_payment_hash);
},
_ => panic!("Unexpected event"),
@@ -3076,7 +3076,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);
match events[0] {
Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
assert!(failed_htlcs.insert(payment_hash.0));
// If we delivered B's RAA we got an unknown preimage error, not something
// that we should update our routing table for.
@@ -3087,14 +3087,14 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
assert!(failed_htlcs.insert(payment_hash.0));
assert!(network_update.is_some());
},
_ => panic!("Unexpected event"),
}
match events[2] {
Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
assert!(failed_htlcs.insert(payment_hash.0));
assert!(network_update.is_some());
},
@@ -3190,7 +3190,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
assert_eq!(events.len(), 2);
// Check that Alice fails backward the pending HTLC from the second payment.
match events[0] {
Event::PaymentFailed { payment_hash, .. } => {
Event::PaymentPathFailed { payment_hash, .. } => {
assert_eq!(payment_hash, failed_payment_hash);
},
_ => panic!("Unexpected event"),
@@ -3392,7 +3392,7 @@ fn test_simple_peer_disconnect() {
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => {
assert_eq!(payment_hash, payment_hash_5);
assert!(rejected_by_dest);
},
@@ -4192,7 +4192,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
//
// If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
// ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
// PaymentFailed event appearing). However, because we may not serialize the relevant
// PaymentPathFailed event appearing). However, because we may not serialize the relevant
// ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
// consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
// and de-duplicates ChannelMonitor events.
@@ -5518,7 +5518,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
let mut as_failds = HashSet::new();
let mut as_updates = 0;
for event in as_events.iter() {
if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
assert!(as_failds.insert(*payment_hash));
if *payment_hash != payment_hash_2 {
assert_eq!(*rejected_by_dest, deliver_last_raa);
@@ -5543,7 +5543,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
let mut bs_failds = HashSet::new();
let mut bs_updates = 0;
for event in bs_events.iter() {
if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
assert!(bs_failds.insert(*payment_hash));
if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
assert_eq!(*rejected_by_dest, deliver_last_raa);
@@ -6068,7 +6068,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match &events[0] {
&Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed } => {
&Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
assert_eq!(our_payment_hash.clone(), *payment_hash);
assert_eq!(*rejected_by_dest, false);
assert_eq!(*all_paths_failed, true);
@@ -6155,7 +6155,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match &events[0] {
&Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed } => {
&Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
assert_eq!(payment_hash_2.clone(), *payment_hash);
assert_eq!(*rejected_by_dest, false);
assert_eq!(*all_paths_failed, true);
@@ -7107,12 +7107,12 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let events = nodes[0].node.get_and_clear_pending_events();
// Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
// Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx
assert_eq!(events.len(), 2);
let mut first_failed = false;
for event in events {
match event {
Event::PaymentFailed { payment_hash, .. } => {
Event::PaymentPathFailed { payment_hash, .. } => {
if payment_hash == payment_hash_1 {
assert!(!first_failed);
first_failed = true;
@@ -7202,14 +7202,14 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
assert_eq!(events.len(), 2);
let first;
match events[0] {
Event::PaymentFailed { payment_hash, .. } => {
Event::PaymentPathFailed { payment_hash, .. } => {
if payment_hash == dust_hash { first = true; }
else { first = false; }
},
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentFailed { payment_hash, .. } => {
Event::PaymentPathFailed { payment_hash, .. } => {
if first { assert_eq!(payment_hash, non_dust_hash); }
else { assert_eq!(payment_hash, dust_hash); }
},
2 changes: 1 addition & 1 deletion lightning/src/ln/onion_route_tests.rs
@@ -163,7 +163,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:

let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
if let &Event::PaymentFailed { payment_hash:_, ref rejected_by_dest, ref network_update, ref error_code, error_data: _, ref all_paths_failed } = &events[0] {
if let &Event::PaymentPathFailed { payment_hash:_, ref rejected_by_dest, ref network_update, ref error_code, error_data: _, ref all_paths_failed, path: _ } = &events[0] {
assert_eq!(*rejected_by_dest, !expected_retryable);
assert_eq!(*all_paths_failed, true);
assert_eq!(*error_code, expected_error_code);
13 changes: 8 additions & 5 deletions lightning/src/routing/network_graph.rs
@@ -113,7 +113,7 @@ impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate,
impl<C: Deref, L: Deref> EventHandler for NetGraphMsgHandler<C, L>
where C::Target: chain::Access, L::Target: Logger {
fn handle_event(&self, event: &Event) {
if let Event::PaymentFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event {
if let Event::PaymentPathFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event {
if let Some(network_update) = network_update {
self.handle_network_update(network_update);
}
@@ -127,7 +127,7 @@ where C::Target: chain::Access, L::Target: Logger {
/// Provides interface to help with initial routing sync by
/// serving historical announcements.
///
/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentFailed`] to the
/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentPathFailed`] to the
/// [`NetworkGraph`].
pub struct NetGraphMsgHandler<C: Deref, L: Deref>
where C::Target: chain::Access, L::Target: Logger
@@ -1725,10 +1725,11 @@ mod tests {

assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());

net_graph_msg_handler.handle_event(&Event::PaymentFailed {
net_graph_msg_handler.handle_event(&Event::PaymentPathFailed {
payment_hash: PaymentHash([0; 32]),
rejected_by_dest: false,
all_paths_failed: true,
path: vec![],
network_update: Some(NetworkUpdate::ChannelUpdateMessage {
msg: valid_channel_update,
}),
@@ -1748,10 +1749,11 @@ mod tests {
}
};

net_graph_msg_handler.handle_event(&Event::PaymentFailed {
net_graph_msg_handler.handle_event(&Event::PaymentPathFailed {
payment_hash: PaymentHash([0; 32]),
rejected_by_dest: false,
all_paths_failed: true,
path: vec![],
network_update: Some(NetworkUpdate::ChannelClosed {
short_channel_id,
is_permanent: false,
@@ -1770,10 +1772,11 @@

// Permanent closing deletes a channel
{
net_graph_msg_handler.handle_event(&Event::PaymentFailed {
net_graph_msg_handler.handle_event(&Event::PaymentPathFailed {
payment_hash: PaymentHash([0; 32]),
rejected_by_dest: false,
all_paths_failed: true,
path: vec![],
network_update: Some(NetworkUpdate::ChannelClosed {
short_channel_id,
is_permanent: true,
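Since `NetGraphMsgHandler` implements `EventHandler` for exactly this event (see the `handle_event` change above), an application can keep routing path-failure events through it to apply any attached `NetworkUpdate`, and separately use `all_paths_failed` to drive its own retry decision. A sketch under those assumptions; the wiring and retry policy are illustrative, not part of this PR:

```rust
// Sketch only: forward the failure to the graph handler (as the tests above do), then
// decide whether the payment as a whole should be retried. Import paths are assumed
// from the crate layout at the time of this PR.
use lightning::util::events::{Event, EventHandler};

fn on_event<H: EventHandler>(net_graph_msg_handler: &H, event: &Event) {
    // Applies any NetworkUpdate carried by the failure (bad channel_update, closed channel, ...).
    net_graph_msg_handler.handle_event(event);

    if let Event::PaymentPathFailed { rejected_by_dest, all_paths_failed, path, .. } = event {
        if !*rejected_by_dest && *all_paths_failed {
            // Every path of this payment has failed and the recipient did not reject it,
            // so this is where a sender would typically re-route and retry.
            let _hops_on_last_failed_path = path.len();
        }
    }
}
```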
2 changes: 1 addition & 1 deletion lightning/src/routing/router.rs
@@ -28,7 +28,7 @@ use core::cmp;
use core::ops::Deref;

/// A hop in a route
#[derive(Clone, Hash, PartialEq, Eq)]
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct RouteHop {
/// The node_id of the node at this hop.
pub pubkey: PublicKey,
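The only router-side change is deriving `Debug` on `RouteHop`, presumably so the path now embedded in the failure event remains easy to print. A tiny illustration of what that enables; the surrounding handler is hypothetical:

```rust
// Illustrative only: with Debug derived on RouteHop, a failed path can be dumped
// wholesale when handling the renamed event. The Event import path is assumed.
use lightning::util::events::Event;

fn log_failed_path(event: &Event) {
    if let Event::PaymentPathFailed { path, .. } = event {
        // The derived impl prints every RouteHop field (pubkey, short_channel_id, fee_msat, ...).
        eprintln!("failed path: {:?}", path);
    }
}
```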