Skip to content

Commit eab577c

Browse files
committed
Consider channels "live" even if they are awaiting a monitor update
We use `Channel::is_live()` to gate inclusion of a channel in `ChannelManager::list_usable_channels()` and when sending an HTLC to select whether a channel is available for forwarding through/sending to. In both of these cases, we almost certainly want `Channel::is_live()` to include channels which are simply pending a monitor update, as some clients may update monitors asynchronously; rejecting HTLCs merely because a monitor update is still pending would introduce a race condition. After lightningdevkit#851, we always ensure any holding cells are freed when sending P2P messages, making this much more trivially correct - instead of having to ensure that a matching holding-cell free occurs any time we add something to the holding cell, we can simply rely on the fact that it always happens. Fixes lightningdevkit#661.
1 parent 2940099 commit eab577c

File tree

3 files changed

+122
-87
lines changed

3 files changed

+122
-87
lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 110 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -922,40 +922,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
922922
commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
923923
check_added_monitors!(nodes[1], 0);
924924

925-
let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
926-
assert_eq!(events_2.len(), 1);
927-
match events_2.remove(0) {
928-
MessageSendEvent::UpdateHTLCs { node_id, updates } => {
929-
assert_eq!(node_id, nodes[0].node.get_our_node_id());
930-
assert!(updates.update_fulfill_htlcs.is_empty());
931-
assert_eq!(updates.update_fail_htlcs.len(), 1);
932-
assert!(updates.update_fail_malformed_htlcs.is_empty());
933-
assert!(updates.update_add_htlcs.is_empty());
934-
assert!(updates.update_fee.is_none());
935-
936-
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
937-
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
938-
939-
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
940-
assert_eq!(msg_events.len(), 1);
941-
match msg_events[0] {
942-
MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
943-
assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
944-
assert_eq!(msg.contents.flags & 2, 2); // temp disabled
945-
},
946-
_ => panic!("Unexpected event"),
947-
}
948-
949-
let events = nodes[0].node.get_and_clear_pending_events();
950-
assert_eq!(events.len(), 1);
951-
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
952-
assert_eq!(payment_hash, payment_hash_3);
953-
assert!(!rejected_by_dest);
954-
} else { panic!("Unexpected event!"); }
955-
},
956-
_ => panic!("Unexpected event type!"),
957-
};
958-
959925
let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
960926
// Try to route another payment backwards from 2 to make sure 1 holds off on responding
961927
let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]);
@@ -971,10 +937,15 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
971937
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
972938
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
973939
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
974-
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
975940
(Some(payment_preimage_4), Some(payment_hash_4))
976941
} else { (None, None) };
977942

943+
// Call forward_pending_htlcs first to make sure we don't have any issues attempting (and
944+
// failing) to forward an HTLC while a channel is still awaiting monitor update restoration.
945+
expect_pending_htlcs_forwardable!(nodes[1]);
946+
check_added_monitors!(nodes[1], 0);
947+
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
948+
978949
// Restore monitor updating, ensuring we immediately get a fail-back update and a
979950
// update_add update.
980951
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
@@ -1021,14 +992,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
1021992

1022993
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
1023994
commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
1024-
let events_4 = nodes[0].node.get_and_clear_pending_events();
1025-
assert_eq!(events_4.len(), 1);
1026-
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
1027-
assert_eq!(payment_hash, payment_hash_1);
1028-
assert!(rejected_by_dest);
1029-
} else { panic!("Unexpected event!"); }
995+
expect_payment_failed!(nodes[0], payment_hash_1, true);
1030996

1031997
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
998+
let as_cs;
1032999
if test_ignore_second_cs {
10331000
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
10341001
check_added_monitors!(nodes[2], 1);
@@ -1044,40 +1011,83 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
10441011

10451012
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
10461013
check_added_monitors!(nodes[1], 1);
1047-
let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1048-
assert!(as_cs.update_add_htlcs.is_empty());
1014+
as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1015+
1016+
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1017+
check_added_monitors!(nodes[1], 1);
1018+
} else {
1019+
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1020+
check_added_monitors!(nodes[2], 1);
1021+
1022+
let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
1023+
assert_eq!(bs_revoke_and_commit.len(), 2);
1024+
match bs_revoke_and_commit[0] {
1025+
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1026+
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1027+
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
1028+
check_added_monitors!(nodes[1], 1);
1029+
},
1030+
_ => panic!("Unexpected event"),
1031+
}
1032+
1033+
as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1034+
1035+
match bs_revoke_and_commit[1] {
1036+
MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1037+
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1038+
assert!(updates.update_add_htlcs.is_empty());
1039+
assert!(updates.update_fail_htlcs.is_empty());
1040+
assert!(updates.update_fail_malformed_htlcs.is_empty());
1041+
assert!(updates.update_fulfill_htlcs.is_empty());
1042+
assert!(updates.update_fee.is_none());
1043+
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1044+
check_added_monitors!(nodes[1], 1);
1045+
},
1046+
_ => panic!("Unexpected event"),
1047+
}
1048+
}
1049+
assert_eq!(as_cs.update_add_htlcs.len(), 1);
10491050
assert!(as_cs.update_fail_htlcs.is_empty());
10501051
assert!(as_cs.update_fail_malformed_htlcs.is_empty());
10511052
assert!(as_cs.update_fulfill_htlcs.is_empty());
10521053
assert!(as_cs.update_fee.is_none());
1053-
1054-
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1055-
check_added_monitors!(nodes[1], 1);
10561054
let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
10571055

1056+
1057+
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
10581058
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
10591059
check_added_monitors!(nodes[2], 1);
10601060
let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10611061

10621062
nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
10631063
check_added_monitors!(nodes[2], 1);
1064-
assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1064+
let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
10651065

10661066
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
10671067
check_added_monitors!(nodes[1], 1);
10681068
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1069-
} else {
1070-
commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
1071-
}
1069+
1070+
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1071+
check_added_monitors!(nodes[1], 1);
1072+
let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1073+
1074+
nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1075+
check_added_monitors!(nodes[2], 1);
1076+
assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1077+
10721078

10731079
expect_pending_htlcs_forwardable!(nodes[2]);
10741080

10751081
let events_6 = nodes[2].node.get_and_clear_pending_events();
1076-
assert_eq!(events_6.len(), 1);
1082+
assert_eq!(events_6.len(), 2);
10771083
match events_6[0] {
10781084
Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
10791085
_ => panic!("Unexpected event"),
10801086
};
1087+
match events_6[1] {
1088+
Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1089+
_ => panic!("Unexpected event"),
1090+
};
10811091

10821092
if test_ignore_second_cs {
10831093
expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1612,9 +1622,9 @@ fn first_message_on_recv_ordering() {
16121622
fn test_monitor_update_fail_claim() {
16131623
// Basic test for monitor update failures when processing claim_funds calls.
16141624
// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1615-
// update to claim the payment. We then send a payment C->B->A, making the forward of this
1616-
// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
1617-
// updating and claim the payment on B.
1625+
// update to claim the payment. We then send two payments C->B->A, which are held at B.
1626+
// Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1627+
// the payments from C onwards to A.
16181628
let chanmon_cfgs = create_chanmon_cfgs(3);
16191629
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
16201630
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -1630,12 +1640,19 @@ fn test_monitor_update_fail_claim() {
16301640

16311641
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
16321642
assert!(nodes[1].node.claim_funds(payment_preimage_1));
1643+
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
16331644
check_added_monitors!(nodes[1], 1);
16341645

1646+
// Note that at this point there is a pending commitment transaction update for A being held by
1647+
// B. Even when we go to send the payment from C through B to A, B will not update this
1648+
// already-signed commitment transaction and will instead wait for it to resolve before
1649+
// forwarding the payment onwards.
1650+
16351651
let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
1652+
let route;
16361653
{
16371654
let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
1638-
let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
1655+
route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
16391656
nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
16401657
check_added_monitors!(nodes[2], 1);
16411658
}
@@ -1650,29 +1667,19 @@ fn test_monitor_update_fail_claim() {
16501667
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
16511668
let events = nodes[1].node.get_and_clear_pending_msg_events();
16521669
assert_eq!(events.len(), 0);
1653-
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
16541670
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
16551671

1656-
let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1657-
nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
1658-
commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
1659-
1660-
let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
1661-
assert_eq!(msg_events.len(), 1);
1662-
match msg_events[0] {
1663-
MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
1664-
assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
1665-
assert_eq!(msg.contents.flags & 2, 2); // temp disabled
1666-
},
1667-
_ => panic!("Unexpected event"),
1668-
}
1672+
let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1673+
nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
1674+
check_added_monitors!(nodes[2], 1);
16691675

1670-
let events = nodes[2].node.get_and_clear_pending_events();
1676+
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
16711677
assert_eq!(events.len(), 1);
1672-
if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1673-
assert_eq!(payment_hash, payment_hash_2);
1674-
assert!(!rejected_by_dest);
1675-
} else { panic!("Unexpected event!"); }
1678+
let payment_event = SendEvent::from_event(events.pop().unwrap());
1679+
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1680+
let events = nodes[1].node.get_and_clear_pending_msg_events();
1681+
assert_eq!(events.len(), 0);
1682+
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
16761683

16771684
// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
16781685
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1682,12 +1689,37 @@ fn test_monitor_update_fail_claim() {
16821689
let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
16831690
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
16841691
commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1692+
expect_payment_sent!(nodes[0], payment_preimage_1);
1693+
1694+
// Get the payment forwards, note that they were batched into one commitment update.
1695+
expect_pending_htlcs_forwardable!(nodes[1]);
1696+
check_added_monitors!(nodes[1], 1);
1697+
let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1698+
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1699+
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1700+
commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1701+
expect_pending_htlcs_forwardable!(nodes[0]);
16851702

16861703
let events = nodes[0].node.get_and_clear_pending_events();
1687-
assert_eq!(events.len(), 1);
1688-
if let Event::PaymentSent { payment_preimage, .. } = events[0] {
1689-
assert_eq!(payment_preimage, payment_preimage_1);
1690-
} else { panic!("Unexpected event!"); }
1704+
assert_eq!(events.len(), 2);
1705+
match events[0] {
1706+
Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
1707+
assert_eq!(payment_hash_2, *payment_hash);
1708+
assert!(payment_preimage.is_none());
1709+
assert_eq!(payment_secret_2, *payment_secret);
1710+
assert_eq!(1_000_000, amt);
1711+
},
1712+
_ => panic!("Unexpected event"),
1713+
}
1714+
match events[1] {
1715+
Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
1716+
assert_eq!(payment_hash_3, *payment_hash);
1717+
assert!(payment_preimage.is_none());
1718+
assert_eq!(payment_secret_3, *payment_secret);
1719+
assert_eq!(1_000_000, amt);
1720+
},
1721+
_ => panic!("Unexpected event"),
1722+
}
16911723
}
16921724

16931725
#[test]

lightning/src/ln/channel.rs

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3460,7 +3460,7 @@ impl<Signer: Sign> Channel<Signer> {
34603460
/// is_usable() and considers things like the channel being temporarily disabled.
34613461
/// Allowed in any state (including after shutdown)
34623462
pub fn is_live(&self) -> bool {
3463-
self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) == 0)
3463+
self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
34643464
}
34653465

34663466
/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
@@ -3974,9 +3974,13 @@ impl<Signer: Sign> Channel<Signer> {
39743974

39753975
/// Adds a pending outbound HTLC to this channel, note that you probably want
39763976
/// send_htlc_and_commit instead cause you'll want both messages at once.
3977-
/// This returns an option instead of a pure UpdateAddHTLC as we may be in a state where we are
3978-
/// waiting on the remote peer to send us a revoke_and_ack during which time we cannot add new
3979-
/// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
3977+
/// This returns an option instead of a pure UpdateAddHTLC as we may be in a state where we
3978+
/// cannot add HTLCs on the wire.
3979+
/// In cases where we're waiting on the remote peer to send us a revoke_and_ack, if we did, we
3980+
/// wouldn't be able to determine what they actually ACK'ed when we do receive the
3981+
/// revoke_and_ack.
3982+
/// In cases where we're marked MonitorUpdateFailed, we cannot commit to a new state as we may
3983+
/// not yet have sent the previous commitment update messages and will need to regenerate them.
39803984
/// You MUST call send_commitment prior to any other calls on this Channel
39813985
/// If an Err is returned, it's a ChannelError::Ignore!
39823986
pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
@@ -3996,14 +4000,14 @@ impl<Signer: Sign> Channel<Signer> {
39964000
return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
39974001
}
39984002

3999-
if (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
4003+
if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
40004004
// Note that this should never really happen, if we're !is_live() on receipt of an
40014005
// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
40024006
// the user to send directly into a !is_live() channel. However, if we
40034007
// disconnected during the time the previous hop was doing the commitment dance we may
40044008
// end up getting here after the forwarding delay. In any case, returning an
40054009
// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
4006-
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected/frozen for channel monitor update".to_owned()));
4010+
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
40074011
}
40084012

40094013
let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats();
@@ -4048,7 +4052,7 @@ impl<Signer: Sign> Channel<Signer> {
40484052
}
40494053

40504054
// Now update local state:
4051-
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
4055+
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
40524056
self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
40534057
amount_msat,
40544058
payment_hash,

lightning/src/ln/channelmanager.rs

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -678,8 +678,7 @@ pub struct ChannelDetails {
678678
/// point after the funding transaction received enough confirmations).
679679
pub is_funding_locked: bool,
680680
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
681-
/// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
682-
/// channel is not currently negotiating a shutdown.
681+
/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
683682
///
684683
/// This is a strict superset of `is_funding_locked`.
685684
pub is_usable: bool,

0 commit comments

Comments
 (0)