Commit 72f4bc3

Block the mon update removing a preimage until upstream mon writes
When we forward a payment and receive an `update_fulfill_htlc` message from the downstream channel, we immediately claim the HTLC on the upstream channel, before even doing a `commitment_signed` dance on the downstream channel. This implies that our `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money by writing the preimage down, then we write the update that resolves giving money on the downstream node.

This is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of course looking forward we want to support asynchronous updates, which may complete in any order.

Thus, here, we enforce the correct ordering by blocking the downstream `ChannelMonitorUpdate` until the upstream one completes. Like the `PaymentSent` event handling we do so only for the `revoke_and_ack` `ChannelMonitorUpdate`, ensuring the preimage-containing upstream update has a full RTT to complete before we actually manage to slow anything down.
1 parent ff36dab commit 72f4bc3
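A minimal, self-contained sketch of the ordering hazard described above (toy code, not LDK - the `PendingUpdate`/`blocked_on` names are invented for illustration): updates are generated preimage-first, and with asynchronous persistence the downstream channel's RAA-time update is simply held while the upstream channel still has an in-flight update.

```rust
// Toy model of the ordering problem and the fix, not LDK code: all names here
// (ChannelId, UpdateKind, PendingUpdate, blocked_on) are hypothetical.

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ChannelId(u8);

#[derive(Debug)]
enum UpdateKind {
    /// Upstream update that writes the payment preimage to disk.
    PreimageClaim,
    /// Downstream update generated when handling the counterparty's RAA.
    RaaCommitment,
}

#[derive(Debug)]
struct PendingUpdate {
    channel: ChannelId,
    kind: UpdateKind,
    /// If set, this update must not be persisted until the named channel has no
    /// in-flight updates (conceptually, what this commit enforces).
    blocked_on: Option<ChannelId>,
}

fn main() {
    let upstream = ChannelId(0);   // A <-> B
    let downstream = ChannelId(1); // B <-> C

    // Updates are *generated* in the safe order: preimage first, RAA second.
    let updates = vec![
        PendingUpdate { channel: upstream, kind: UpdateKind::PreimageClaim, blocked_on: None },
        PendingUpdate { channel: downstream, kind: UpdateKind::RaaCommitment, blocked_on: Some(upstream) },
    ];

    // Track which channels still have an in-flight (async, incomplete) update.
    let mut in_flight: HashMap<ChannelId, usize> = HashMap::new();
    in_flight.insert(upstream, 1); // the preimage update hasn't completed yet

    for update in &updates {
        match update.blocked_on {
            Some(chan) if in_flight.get(&chan).copied().unwrap_or(0) > 0 => {
                // With async persistence the downstream RAA update could otherwise
                // complete before the upstream preimage write; holding it here
                // preserves the generation order.
                println!("holding {:?} on {:?} until {:?} completes", update.kind, update.channel, chan);
            }
            _ => println!("persisting {:?} on {:?}", update.kind, update.channel),
        }
    }
}
```

Because only the `revoke_and_ack`-time update is ever blocked, the upstream preimage write gets a full network round trip to complete before anything is actually delayed.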

File tree

3 files changed: +196 −37 lines changed


lightning/src/ln/chanmon_update_fail_tests.rs

+133 −4
@@ -3021,18 +3021,27 @@ fn test_blocked_chan_preimage_release() {
     check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
     assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-    // Finish the CS dance between nodes[0] and nodes[1].
-    commitment_signed_dance!(nodes[1], nodes[0], as_htlc_fulfill_updates.commitment_signed, false);
+    // Finish the CS dance between nodes[0] and nodes[1]. Note that until the final RAA CS is held
+    // until the full set of `ChannelMonitorUpdate`s on the nodes[1] <-> nodes[2] channel are
+    // complete, while the preimage that we care about ensuring is on disk did make it there above,
+    // the holding logic doesn't care about the type of update, it just cares that there is one.
+    nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
+    check_added_monitors(&nodes[1], 1);
+    let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+    assert!(a.is_none());
+
+    nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
     check_added_monitors(&nodes[1], 0);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
     let events = nodes[1].node.get_and_clear_pending_events();
     assert_eq!(events.len(), 3);
     if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
     if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
     if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
 
-    // The event processing should release the last RAA update.
-    check_added_monitors(&nodes[1], 1);
+    // The event processing should release the last RAA updates on both channels.
+    check_added_monitors(&nodes[1], 2);
 
     // When we fetch the next update the message getter will generate the next update for nodes[2],
     // generating a further monitor update.
@@ -3043,3 +3052,123 @@ fn test_blocked_chan_preimage_release() {
     commitment_signed_dance!(nodes[2], nodes[1], bs_htlc_fulfill_updates.commitment_signed, false);
     expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
+
+fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
+    // When we forward a payment and receive an `update_fulfill_htlc` message from the downstream
+    // channel, we immediately claim the HTLC on the upstream channel, before even doing a
+    // `commitment_signed` dance on the downstream channel. This implies that our
+    // `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money,
+    // then we write the update that resolves giving money on the downstream node. This is safe as
+    // long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of
+    // course this may not be the case. For asynchronous update writes, we have to ensure monitor
+    // updates can block each other, preventing the inversion all together.
+    let chanmon_cfgs = create_chanmon_cfgs(3);
+    let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+    let persister;
+    let new_chain_monitor;
+    let nodes_1_deserialized;
+
+    let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+    let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+    let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+    let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+    // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+    // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+    // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+    let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+    let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+    nodes[2].node.claim_funds(payment_preimage);
+    check_added_monitors(&nodes[2], 1);
+    expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
+    chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+    let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+    nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+    // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+    // for it since the monitor update is marked in-progress.
+    check_added_monitors(&nodes[1], 1);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
+    // won't get the preimage when the nodes reconnect, at which point we have to ensure we get it
+    // from the ChannelMonitor.
+    nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+    check_added_monitors(&nodes[1], 1);
+    if complete_bc_commitment_dance {
+        let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+        nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+        check_added_monitors(&nodes[2], 1);
+        nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+        check_added_monitors(&nodes[2], 1);
+        let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+        // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
+        // preimage in the A <-> B channel, which will prevent it from persisting the
+        // `ChannelMonitorUpdate` here to avoid "losing" the preimage.
+        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
+        check_added_monitors(&nodes[1], 0);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+    }
+
+    // Now reload node B
+    let manager_b = nodes[1].node.encode();
+
+    let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+    reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+    nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+    nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+    // If we used the latest ChannelManager to reload from, we should have both channels still
+    // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+    // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+    // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+    // complete after reconnecting to our peers.
+    persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+    nodes[1].node.timer_tick_occurred();
+    check_added_monitors(&nodes[1], 1);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+    // the end go ahead and do that, though the -2 in `reconnect_nodes` indicates that we
+    // expect to *not* receive the final RAA ChannelMonitorUpdate.
+    if complete_bc_commitment_dance {
+        reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+    } else {
+        reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, -2), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+    }
+
+    reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+    // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+    // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+    // process.
+    let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+    nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+    // When we fetch B's HTLC update messages here (now that the ChannelMonitorUpdate has
+    // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+    // channel.
+    let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+    check_added_monitors(&nodes[1], 1);
+
+    nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+    do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
+
+    expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, false);
+
+    // Finally, check that the payment was, ultimately, seen as sent by node A.
+    expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
+#[test]
+fn test_inverted_mon_completion_order() {
+    do_test_inverted_mon_completion_order(true);
+    do_test_inverted_mon_completion_order(false);
+}
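The new test drives node B's persistence asynchronously: an update is first marked `InProgress` via the test persister and only later reported complete through `channel_monitor_updated`. A rough sketch of that two-phase lifecycle follows (toy types, not the LDK `ChainMonitor` API; `MonitorState` and its methods are invented for illustration):

```rust
// Toy model of asynchronous ChannelMonitorUpdate persistence as exercised by the
// test above; all names here are hypothetical stand-ins, not LDK APIs.

use std::collections::BTreeSet;

#[derive(Default)]
struct MonitorState {
    /// Update ids handed to the persister but not yet reported complete.
    in_progress: BTreeSet<u64>,
    latest_update_id: u64,
}

impl MonitorState {
    /// Persister returned "InProgress": remember the update until it completes.
    fn queue_update(&mut self) -> u64 {
        self.latest_update_id += 1;
        self.in_progress.insert(self.latest_update_id);
        self.latest_update_id
    }

    /// Equivalent in spirit to `channel_monitor_updated(outpoint, update_id)`:
    /// the async write finished, so the update is no longer in flight.
    fn update_completed(&mut self, update_id: u64) {
        self.in_progress.remove(&update_id);
    }

    /// Held messages/updates may only be released once nothing is in flight.
    fn can_release_holds(&self) -> bool {
        self.in_progress.is_empty()
    }
}

fn main() {
    let mut ab_channel = MonitorState::default();
    let preimage_update = ab_channel.queue_update(); // A <-> B preimage write, async
    assert!(!ab_channel.can_release_holds());        // B <-> C RAA update stays blocked

    ab_channel.update_completed(preimage_update);    // persister signals completion
    assert!(ab_channel.can_release_holds());         // now the downstream update may go out
}
```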

lightning/src/ln/channelmanager.rs

+35 −10
@@ -4511,9 +4511,12 @@ where
         self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
     }
 
-    fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
+    fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint) {
         match source {
             HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+                if let Some(pubkey) = next_channel_counterparty_node_id {
+                    debug_assert_eq!(pubkey, path.hops[0].pubkey);
+                }
                 let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                     channel_funding_outpoint: next_channel_outpoint,
                     counterparty_node_id: path.hops[0].pubkey,
@@ -4540,7 +4543,17 @@ where
                             next_channel_id: Some(next_channel_outpoint.to_channel_id()),
                             outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                         },
-                        downstream_counterparty_and_funding_outpoint: None,
+                        downstream_counterparty_and_funding_outpoint:
+                            if let Some(node_id) = next_channel_counterparty_node_id {
+                                Some((node_id, next_channel_outpoint, completed_blocker))
+                            } else {
+                                // We can only get `None` here if we are processing a
+                                // `ChannelMonitor`-originated event, in which case we
+                                // don't care about ensuring we wake the downstream
+                                // channel's monitor updating - the channel is already
+                                // closed.
+                                None
+                            },
                     })
                 } else { None }
             });
@@ -5253,13 +5266,27 @@ where
             match peer_state.channel_by_id.entry(msg.channel_id) {
                 hash_map::Entry::Occupied(mut chan) => {
                     let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
+                    if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+                        peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
+                            .or_insert_with(Vec::new)
+                            .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
+                    }
+                    // Note that we do not need to push an `actions_blocking_raa_monitor_updates`
+                    // entry here, even though we *do* need to block the next RAA coming in from
+                    // generating a monitor update which we let fly. We do this instead in the
+                    // `claim_funds_internal` by attaching a `ReleaseRAAChannelMonitorUpdate`
+                    // action to the event generated when we "claim" the sent payment. This is
+                    // guaranteed to all complete before we process the RAA even though there is no
+                    // lock held through that point as we aren't allowed to see another P2P message
+                    // from the counterparty until we return, but `claim_funds_internal` runs
+                    // first.
                     funding_txo = chan.get().get_funding_txo().expect("We won't accept a fulfill until funded");
                     res
                 },
                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
             }
         };
-        self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
+        self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
         Ok(())
     }
 
@@ -5466,12 +5493,10 @@ where
             match peer_state.channel_by_id.entry(msg.channel_id) {
                 hash_map::Entry::Occupied(mut chan) => {
                     let funding_txo = chan.get().get_funding_txo();
-                    let mon_update_blocked = self.pending_events.lock().unwrap().iter().any(|(_, action)| {
-                        action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                            channel_funding_outpoint: funding_txo.expect("We won't accept an RAA until funded"),
-                            counterparty_node_id: *counterparty_node_id,
-                        })
-                    });
+                    let mon_update_blocked = self.raa_monitor_updates_held(
+                        &peer_state.actions_blocking_raa_monitor_updates,
+                        chan.get().get_funding_txo().expect("We won't accept an RAA until funded"),
+                        *counterparty_node_id);
                     let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
                         chan.get_mut().revoke_and_ack(&msg, &self.logger, mon_update_blocked), chan);
                     let res = if let Some(monitor_update) = monitor_update_opt {
@@ -5652,7 +5677,7 @@ where
                 MonitorEvent::HTLCEvent(htlc_update) => {
                     if let Some(preimage) = htlc_update.payment_preimage {
                         log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                        self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
+                        self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
                     } else {
                         log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                         let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
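The channelmanager.rs changes boil down to a small piece of per-channel bookkeeping: when B handles `update_fulfill_htlc` for a forwarded HTLC it records a blocking action against the upstream channel, and when that channel's RAA arrives the resulting monitor update is held if any blocker is still outstanding. A rough sketch of the pattern (simplified stand-in types, not the real LDK structures):

```rust
// Sketch of the bookkeeping pattern introduced in channelmanager.rs; the names
// (BlockingAction, PeerState, note_forwarded_claim, blocker_completed) are
// simplified stand-ins, not LDK types.

use std::collections::HashMap;

type ChannelId = [u8; 32];

/// Stand-in for `RAAMonitorUpdateBlockingAction`: identifies the upstream HTLC
/// whose preimage must hit disk before the RAA-time monitor update may go out.
#[derive(PartialEq, Eq, Debug, Clone)]
struct BlockingAction {
    upstream_htlc_id: u64,
}

#[derive(Default)]
struct PeerState {
    /// Mirrors `actions_blocking_raa_monitor_updates`: per-channel outstanding blockers.
    actions_blocking_raa_monitor_updates: HashMap<ChannelId, Vec<BlockingAction>>,
}

impl PeerState {
    /// Called (conceptually) while handling `update_fulfill_htlc` for a forwarded HTLC.
    fn note_forwarded_claim(&mut self, chan_id: ChannelId, htlc_id: u64) {
        self.actions_blocking_raa_monitor_updates.entry(chan_id)
            .or_insert_with(Vec::new)
            .push(BlockingAction { upstream_htlc_id: htlc_id });
    }

    /// Called (conceptually) once the upstream preimage-bearing update completes.
    fn blocker_completed(&mut self, chan_id: ChannelId, htlc_id: u64) {
        if let Some(blockers) = self.actions_blocking_raa_monitor_updates.get_mut(&chan_id) {
            blockers.retain(|b| b.upstream_htlc_id != htlc_id);
        }
    }

    /// Analogue of `raa_monitor_updates_held`: if any blocker remains, the RAA's
    /// monitor update must not be released yet.
    fn raa_monitor_updates_held(&self, chan_id: ChannelId) -> bool {
        self.actions_blocking_raa_monitor_updates.get(&chan_id)
            .map(|v| !v.is_empty()).unwrap_or(false)
    }
}

fn main() {
    let chan = [0u8; 32];
    let mut peer = PeerState::default();

    peer.note_forwarded_claim(chan, 42);
    assert!(peer.raa_monitor_updates_held(chan)); // RAA-time update is held

    peer.blocker_completed(chan, 42);
    assert!(!peer.raa_monitor_updates_held(chan)); // safe to release now
}
```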

lightning/src/ln/functional_test_utils.rs

+28 −23
@@ -74,6 +74,20 @@ pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Tra
     let height = node.best_block_info().1 + 1;
     confirm_transactions_at(node, txn, height);
 }
+/// Mine a single block containing the given transaction without extra checks which may impact
+/// ChannelManager state.
+pub fn mine_transaction_without_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+    let height = node.best_block_info().1 + 1;
+    let mut block = Block {
+        header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
+        txdata: Vec::new(),
+    };
+    for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
+        block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+    }
+    block.txdata.push((*tx).clone());
+    do_connect_block_without_checks(node, block, false);
+}
 /// Mine the given transaction at the given height, mining blocks as required to build to that
 /// height
 ///
@@ -202,19 +216,19 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
     assert!(depth >= 1);
     for i in 1..depth {
         let prev_blockhash = block.header.block_hash();
-        do_connect_block(node, block, skip_intermediaries);
+        do_connect_block_with_checks(node, block, skip_intermediaries);
         block = Block {
             header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height + i, bits: 42, nonce: 42 },
             txdata: vec![],
         };
     }
     let hash = block.header.block_hash();
-    do_connect_block(node, block, false);
+    do_connect_block_with_checks(node, block, false);
     hash
 }
 
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
-    do_connect_block(node, block.clone(), false);
+    do_connect_block_with_checks(node, block.clone(), false);
 }
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
@@ -224,8 +238,14 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
     }
 }
 
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+fn do_connect_block_with_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+    call_claimable_balances(node);
+    do_connect_block_without_checks(node, block, skip_intermediaries);
     call_claimable_balances(node);
+    node.node.test_process_background_events();
+}
+
+fn do_connect_block_without_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
     let height = node.best_block_info().1 + 1;
     #[cfg(feature = "std")] {
         eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
@@ -280,8 +300,6 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, sk
             }
         }
     }
-    call_claimable_balances(node);
-    node.node.test_process_background_events();
 }
 
 pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
@@ -1677,20 +1695,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '
     check_added_monitors!(node_a, 1);
 
     // If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
-    let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| {
-        let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate
-            { channel_funding_outpoint, counterparty_node_id }) = action
-        {
-            if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id {
-                assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id());
-                true
-            } else { false }
-        } else { false };
-        if matching_action {
-            if let Event::PaymentSent { .. } = ev {} else { panic!(); }
-        }
-        matching_action
-    });
+    let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id);
     if fail_backwards { assert!(!got_claim); }
     commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim);
 
@@ -2997,7 +3002,7 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
         }
         if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
             let commitment_update = chan_msgs.2.unwrap();
-            if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+            if pending_htlc_adds.1 > 0 { // We use -1/-2 to denote a response commitment_signed
                 assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
             }
             assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
@@ -3013,7 +3018,7 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
                 node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
             }
 
-            if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+            if pending_htlc_adds.1 >= 0 { // We use -1/-2 to denote a response commitment_signed
                 commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
             } else {
                 node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);
@@ -3022,7 +3027,7 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
                 // No commitment_signed so get_event_msg's assert(len == 1) passes
                 node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
                 assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
-                check_added_monitors!(node_a, 1);
+                check_added_monitors(node_a, if pending_htlc_adds.1 == -1 { 1 } else { 0 });
             }
         } else {
             assert!(chan_msgs.2.is_none());
