Skip to content

Commit fdbfa1e

Browse files
committed
Consider channels "live" even if they are awaiting a monitor update
We use `Channel::is_live()` to gate inclusion of a channel in `ChannelManager::list_usable_channels()` and when sending an HTLC to select whether a channel is available for forwarding through/sending to. In both of these cases, we almost certainly want `Channel::is_live()` to include channels which are simply pending a monitor update, as some clients may update monitors asynchronously; rejecting HTLCs because a monitor update is still pending would therefore introduce a race condition. After lightningdevkit#851, we always ensure any holding cells are free'd when sending P2P messages, making this much more trivially correct - instead of having to ensure that we always have a matching holding cell free any time we add something to the holding cell, we can simply rely on the fact that it always happens. Fixes lightningdevkit#661.
1 parent b7d249a commit fdbfa1e

File tree

3 files changed

+122
-87
lines changed

3 files changed

+122
-87
lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 110 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -921,40 +921,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
921921
commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
922922
check_added_monitors!(nodes[1], 0);
923923

924-
let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
925-
assert_eq!(events_2.len(), 1);
926-
match events_2.remove(0) {
927-
MessageSendEvent::UpdateHTLCs { node_id, updates } => {
928-
assert_eq!(node_id, nodes[0].node.get_our_node_id());
929-
assert!(updates.update_fulfill_htlcs.is_empty());
930-
assert_eq!(updates.update_fail_htlcs.len(), 1);
931-
assert!(updates.update_fail_malformed_htlcs.is_empty());
932-
assert!(updates.update_add_htlcs.is_empty());
933-
assert!(updates.update_fee.is_none());
934-
935-
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
936-
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
937-
938-
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
939-
assert_eq!(msg_events.len(), 1);
940-
match msg_events[0] {
941-
MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
942-
assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
943-
assert_eq!(msg.contents.flags & 2, 2); // temp disabled
944-
},
945-
_ => panic!("Unexpected event"),
946-
}
947-
948-
let events = nodes[0].node.get_and_clear_pending_events();
949-
assert_eq!(events.len(), 1);
950-
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
951-
assert_eq!(payment_hash, payment_hash_3);
952-
assert!(!rejected_by_dest);
953-
} else { panic!("Unexpected event!"); }
954-
},
955-
_ => panic!("Unexpected event type!"),
956-
};
957-
958924
let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
959925
// Try to route another payment backwards from 2 to make sure 1 holds off on responding
960926
let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]);
@@ -970,10 +936,15 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
970936
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
971937
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
972938
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
973-
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
974939
(Some(payment_preimage_4), Some(payment_hash_4))
975940
} else { (None, None) };
976941

942+
// Call forward_pending_htlcs first to make sure we don't have any issues attempting (and
943+
// failing) to forward an HTLC while a channel is still awaiting monitor update restoration.
944+
expect_pending_htlcs_forwardable!(nodes[1]);
945+
check_added_monitors!(nodes[1], 0);
946+
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
947+
977948
// Restore monitor updating, ensuring we immediately get a fail-back update and a
978949
// update_add update.
979950
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
@@ -1020,14 +991,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
1020991

1021992
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
1022993
commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
1023-
let events_4 = nodes[0].node.get_and_clear_pending_events();
1024-
assert_eq!(events_4.len(), 1);
1025-
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
1026-
assert_eq!(payment_hash, payment_hash_1);
1027-
assert!(rejected_by_dest);
1028-
} else { panic!("Unexpected event!"); }
994+
expect_payment_failed!(nodes[0], payment_hash_1, true);
1029995

1030996
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
997+
let as_cs;
1031998
if test_ignore_second_cs {
1032999
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
10331000
check_added_monitors!(nodes[2], 1);
@@ -1043,40 +1010,83 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
10431010

10441011
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
10451012
check_added_monitors!(nodes[1], 1);
1046-
let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1047-
assert!(as_cs.update_add_htlcs.is_empty());
1013+
as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1014+
1015+
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1016+
check_added_monitors!(nodes[1], 1);
1017+
} else {
1018+
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1019+
check_added_monitors!(nodes[2], 1);
1020+
1021+
let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
1022+
assert_eq!(bs_revoke_and_commit.len(), 2);
1023+
match bs_revoke_and_commit[0] {
1024+
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1025+
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1026+
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
1027+
check_added_monitors!(nodes[1], 1);
1028+
},
1029+
_ => panic!("Unexpected event"),
1030+
}
1031+
1032+
as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1033+
1034+
match bs_revoke_and_commit[1] {
1035+
MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1036+
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1037+
assert!(updates.update_add_htlcs.is_empty());
1038+
assert!(updates.update_fail_htlcs.is_empty());
1039+
assert!(updates.update_fail_malformed_htlcs.is_empty());
1040+
assert!(updates.update_fulfill_htlcs.is_empty());
1041+
assert!(updates.update_fee.is_none());
1042+
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1043+
check_added_monitors!(nodes[1], 1);
1044+
},
1045+
_ => panic!("Unexpected event"),
1046+
}
1047+
}
1048+
assert_eq!(as_cs.update_add_htlcs.len(), 1);
10481049
assert!(as_cs.update_fail_htlcs.is_empty());
10491050
assert!(as_cs.update_fail_malformed_htlcs.is_empty());
10501051
assert!(as_cs.update_fulfill_htlcs.is_empty());
10511052
assert!(as_cs.update_fee.is_none());
1052-
1053-
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1054-
check_added_monitors!(nodes[1], 1);
10551053
let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
10561054

1055+
1056+
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
10571057
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
10581058
check_added_monitors!(nodes[2], 1);
10591059
let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10601060

10611061
nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
10621062
check_added_monitors!(nodes[2], 1);
1063-
assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1063+
let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
10641064

10651065
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
10661066
check_added_monitors!(nodes[1], 1);
10671067
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1068-
} else {
1069-
commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
1070-
}
1068+
1069+
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1070+
check_added_monitors!(nodes[1], 1);
1071+
let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1072+
1073+
nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1074+
check_added_monitors!(nodes[2], 1);
1075+
assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1076+
10711077

10721078
expect_pending_htlcs_forwardable!(nodes[2]);
10731079

10741080
let events_6 = nodes[2].node.get_and_clear_pending_events();
1075-
assert_eq!(events_6.len(), 1);
1081+
assert_eq!(events_6.len(), 2);
10761082
match events_6[0] {
10771083
Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
10781084
_ => panic!("Unexpected event"),
10791085
};
1086+
match events_6[1] {
1087+
Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1088+
_ => panic!("Unexpected event"),
1089+
};
10801090

10811091
if test_ignore_second_cs {
10821092
expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1611,9 +1621,9 @@ fn first_message_on_recv_ordering() {
16111621
fn test_monitor_update_fail_claim() {
16121622
// Basic test for monitor update failures when processing claim_funds calls.
16131623
// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1614-
// update to claim the payment. We then send a payment C->B->A, making the forward of this
1615-
// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
1616-
// updating and claim the payment on B.
1624+
// update to claim the payment. We then send two payments C->B->A, which are held at B.
1625+
// Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1626+
// the payments from C onwards to A.
16171627
let chanmon_cfgs = create_chanmon_cfgs(3);
16181628
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
16191629
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -1629,12 +1639,19 @@ fn test_monitor_update_fail_claim() {
16291639

16301640
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
16311641
assert!(nodes[1].node.claim_funds(payment_preimage_1));
1642+
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
16321643
check_added_monitors!(nodes[1], 1);
16331644

1645+
// Note that at this point there is a pending commitment transaction update for A being held by
1646+
// B. Even when we go to send the payment from C through B to A, B will not update this
1647+
// already-signed commitment transaction and will instead wait for it to resolve before
1648+
// forwarding the payment onwards.
1649+
16341650
let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
1651+
let route;
16351652
{
16361653
let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
1637-
let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
1654+
route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
16381655
nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
16391656
check_added_monitors!(nodes[2], 1);
16401657
}
@@ -1649,29 +1666,19 @@ fn test_monitor_update_fail_claim() {
16491666
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
16501667
let events = nodes[1].node.get_and_clear_pending_msg_events();
16511668
assert_eq!(events.len(), 0);
1652-
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
16531669
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
16541670

1655-
let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1656-
nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
1657-
commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
1658-
1659-
let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
1660-
assert_eq!(msg_events.len(), 1);
1661-
match msg_events[0] {
1662-
MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
1663-
assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
1664-
assert_eq!(msg.contents.flags & 2, 2); // temp disabled
1665-
},
1666-
_ => panic!("Unexpected event"),
1667-
}
1671+
let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1672+
nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
1673+
check_added_monitors!(nodes[2], 1);
16681674

1669-
let events = nodes[2].node.get_and_clear_pending_events();
1675+
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
16701676
assert_eq!(events.len(), 1);
1671-
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1672-
assert_eq!(payment_hash, payment_hash_2);
1673-
assert!(!rejected_by_dest);
1674-
} else { panic!("Unexpected event!"); }
1677+
let payment_event = SendEvent::from_event(events.pop().unwrap());
1678+
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1679+
let events = nodes[1].node.get_and_clear_pending_msg_events();
1680+
assert_eq!(events.len(), 0);
1681+
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
16751682

16761683
// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
16771684
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1681,12 +1688,37 @@ fn test_monitor_update_fail_claim() {
16811688
let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
16821689
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
16831690
commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1691+
expect_payment_sent!(nodes[0], payment_preimage_1);
1692+
1693+
// Get the payment forwards, note that they were batched into one commitment update.
1694+
expect_pending_htlcs_forwardable!(nodes[1]);
1695+
check_added_monitors!(nodes[1], 1);
1696+
let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1697+
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1698+
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1699+
commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1700+
expect_pending_htlcs_forwardable!(nodes[0]);
16841701

16851702
let events = nodes[0].node.get_and_clear_pending_events();
1686-
assert_eq!(events.len(), 1);
1687-
if let Event::PaymentSent { payment_preimage, .. } = events[0] {
1688-
assert_eq!(payment_preimage, payment_preimage_1);
1689-
} else { panic!("Unexpected event!"); }
1703+
assert_eq!(events.len(), 2);
1704+
match events[0] {
1705+
Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
1706+
assert_eq!(payment_hash_2, *payment_hash);
1707+
assert!(payment_preimage.is_none());
1708+
assert_eq!(payment_secret_2, *payment_secret);
1709+
assert_eq!(1_000_000, amt);
1710+
},
1711+
_ => panic!("Unexpected event"),
1712+
}
1713+
match events[1] {
1714+
Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
1715+
assert_eq!(payment_hash_3, *payment_hash);
1716+
assert!(payment_preimage.is_none());
1717+
assert_eq!(payment_secret_3, *payment_secret);
1718+
assert_eq!(1_000_000, amt);
1719+
},
1720+
_ => panic!("Unexpected event"),
1721+
}
16901722
}
16911723

16921724
#[test]

lightning/src/ln/channel.rs

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3513,7 +3513,7 @@ impl<Signer: Sign> Channel<Signer> {
35133513
/// is_usable() and considers things like the channel being temporarily disabled.
35143514
/// Allowed in any state (including after shutdown)
35153515
pub fn is_live(&self) -> bool {
3516-
self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) == 0)
3516+
self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
35173517
}
35183518

35193519
/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
@@ -4030,9 +4030,13 @@ impl<Signer: Sign> Channel<Signer> {
40304030

40314031
/// Adds a pending outbound HTLC to this channel, note that you probably want
40324032
/// send_htlc_and_commit instead cause you'll want both messages at once.
4033-
/// This returns an option instead of a pure UpdateAddHTLC as we may be in a state where we are
4034-
/// waiting on the remote peer to send us a revoke_and_ack during which time we cannot add new
4035-
/// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
4033+
/// This returns an option instead of a pure UpdateAddHTLC as we may be in a state where we
4034+
/// cannot add HTLCs on the wire.
4035+
/// In cases where we're waiting on the remote peer to send us a revoke_and_ack, if we did, we
4036+
/// wouldn't be able to determine what they actually ACK'ed when we do receive the
4037+
/// revoke_and_ack.
4038+
/// In cases where we're marked MonitorUpdateFailed, we cannot commit to a new state as we may
4039+
/// not yet have sent the previous commitment update messages and will need to regenerate them.
40364040
/// You MUST call send_commitment prior to any other calls on this Channel
40374041
/// If an Err is returned, it's a ChannelError::Ignore!
40384042
pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
@@ -4052,14 +4056,14 @@ impl<Signer: Sign> Channel<Signer> {
40524056
return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
40534057
}
40544058

4055-
if (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
4059+
if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
40564060
// Note that this should never really happen, if we're !is_live() on receipt of an
40574061
// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
40584062
// the user to send directly into a !is_live() channel. However, if we
40594063
// disconnected during the time the previous hop was doing the commitment dance we may
40604064
// end up getting here after the forwarding delay. In any case, returning an
40614065
// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
4062-
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected/frozen for channel monitor update".to_owned()));
4066+
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
40634067
}
40644068

40654069
let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats();
@@ -4104,7 +4108,7 @@ impl<Signer: Sign> Channel<Signer> {
41044108
}
41054109

41064110
// Now update local state:
4107-
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
4111+
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
41084112
self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
41094113
amount_msat,
41104114
payment_hash,

lightning/src/ln/channelmanager.rs

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -677,8 +677,7 @@ pub struct ChannelDetails {
677677
/// point after the funding transaction received enough confirmations).
678678
pub is_funding_locked: bool,
679679
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
680-
/// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
681-
/// channel is not currently negotiating a shutdown.
680+
/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
682681
///
683682
/// This is a strict superset of `is_funding_locked`.
684683
pub is_usable: bool,

0 commit comments

Comments
 (0)