Skip to content

Commit cdff295

Browse files
committed
Properly enforce that all ChannelMonitorUpdates are ordered
Commit c99d3d7 updated `ChannelMonitorUpdate::update_id` to continue counting up even after the channel is closed. However, it accidentally changed the `ChannelMonitorUpdate` application logic so that it skipped checking that `ChannelMonitorUpdate`s are well-ordered once the channel has been closed (in an attempt to ensure the other checks in the same conditional block were still applied). This commit fixes that oversight.
1 parent 572f6b4 commit cdff295

File tree

3 files changed

+79
-3
lines changed

3 files changed

+79
-3
lines changed

lightning/src/chain/channelmonitor.rs

+5-2
Original file line numberDiff line numberDiff line change
@@ -3150,8 +3150,11 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
31503150
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
31513151
},
31523152
}
3153-
} else if self.latest_update_id + 1 != updates.update_id {
3154-
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
3153+
}
3154+
if updates.update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID {
3155+
if self.latest_update_id + 1 != updates.update_id {
3156+
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
3157+
}
31553158
}
31563159
let mut ret = Ok(());
31573160
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);

lightning/src/ln/monitor_tests.rs

+71-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
//! Further functional tests which test blockchain reorganizations.
1111
1212
use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor};
13-
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource};
13+
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep};
1414
use crate::chain::transaction::OutPoint;
1515
use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
1616
use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
@@ -3175,3 +3175,73 @@ fn test_event_replay_causing_monitor_replay() {
31753175
// Expect the `PaymentSent` to get replayed, this time without the duplicate monitor update
31763176
expect_payment_sent(&nodes[0], payment_preimage, None, false, false /* expected post-event monitor update*/);
31773177
}
3178+
3179+
#[test]
3180+
fn test_update_replay_panics() {
3181+
// Tests that replaying a `ChannelMonitorUpdate` or applying them out-of-order causes a panic.
3182+
let chanmon_cfgs = create_chanmon_cfgs(2);
3183+
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3184+
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3185+
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3186+
3187+
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3188+
let monitor = get_monitor!(nodes[1], chan.2).clone();
3189+
3190+
// Create some updates to apply
3191+
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3192+
let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3193+
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), "".to_owned()).unwrap();
3194+
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
3195+
check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 100_000);
3196+
check_closed_broadcast(&nodes[1], 1, true);
3197+
check_added_monitors(&nodes[1], 1);
3198+
3199+
nodes[1].node.claim_funds(payment_preimage_1);
3200+
check_added_monitors(&nodes[1], 1);
3201+
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3202+
3203+
nodes[1].node.claim_funds(payment_preimage_2);
3204+
check_added_monitors(&nodes[1], 1);
3205+
expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
3206+
3207+
let mut updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap().get_mut(&chan.2).unwrap().split_off(0);
3208+
3209+
// Update `monitor` until there's just one normal updates, an FC update, and a post-FC claim
3210+
// update pending
3211+
for update in updates.drain(..updates.len() - 4) {
3212+
monitor.update_monitor(&update, &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
3213+
}
3214+
assert_eq!(updates.len(), 4);
3215+
assert!(matches!(updates[1].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
3216+
assert!(matches!(updates[2].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. }));
3217+
assert!(matches!(updates[3].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. }));
3218+
3219+
// Ensure applying the force-close update skipping the last normal update fails
3220+
let poisoned_monitor = monitor.clone();
3221+
std::panic::catch_unwind(|| {
3222+
let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger);
3223+
// We should panic, rather than returning an error here.
3224+
}).unwrap_err();
3225+
3226+
// Then apply the last normal and force-close update and make sure applying the preimage
3227+
// updates out-of-order fails.
3228+
monitor.update_monitor(&updates[0], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
3229+
monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
3230+
3231+
let poisoned_monitor = monitor.clone();
3232+
std::panic::catch_unwind(|| {
3233+
let _ = poisoned_monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger);
3234+
// We should panic, rather than returning an error here.
3235+
}).unwrap_err();
3236+
3237+
// Make sure re-applying the force-close update fails
3238+
let poisoned_monitor = monitor.clone();
3239+
std::panic::catch_unwind(|| {
3240+
let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger);
3241+
// We should panic, rather than returning an error here.
3242+
}).unwrap_err();
3243+
3244+
// ...and finally ensure that applying all the updates succeeds.
3245+
monitor.update_monitor(&updates[2], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
3246+
monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
3247+
}

lightning/src/sync/nostd_sync.rs

+3
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@ pub struct Mutex<T: ?Sized> {
99
inner: RefCell<T>,
1010
}
1111

12+
#[cfg(test)]
13+
impl<T: ?Sized> core::panic::RefUnwindSafe for Mutex<T> {}
14+
1215
#[must_use = "if unused the Mutex will immediately unlock"]
1316
pub struct MutexGuard<'a, T: ?Sized + 'a> {
1417
lock: RefMut<'a, T>,

0 commit comments

Comments (0)