Skip to content

Commit 467f253

Browse files
committed
Stop relying on ChannelMonitor persistence after manager read
When we discover we've only partially claimed an MPP HTLC during `ChannelManager` reading, we need to add the payment preimage to all other `ChannelMonitor`s that were a part of the payment. We previously did this with a direct call on the `ChannelMonitor`, requiring users write the full `ChannelMonitor` to disk to ensure that updated information made it. This adds quite a bit of delay during initial startup - fully resilvering each `ChannelMonitor` just to handle this one case is incredibly excessive. Over the past few commits we dropped the need to pass HTLCs directly to the `ChannelMonitor`s, using the background events to provide `ChannelMonitorUpdate`s instead. Thus, here we finally drop the requirement to resilver `ChannelMonitor`s on startup.
1 parent e827f12 commit 467f253

File tree

5 files changed

+21
-11
lines changed

5 files changed

+21
-11
lines changed

fuzz/src/chanmon_consistency.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -768,7 +768,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
768768
chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state);
769769
}
770770
let mut monitor_refs = new_hash_map();
771-
for (outpoint, monitor) in monitors.iter_mut() {
771+
for (outpoint, monitor) in monitors.iter() {
772772
monitor_refs.insert(*outpoint, monitor);
773773
}
774774

lightning/src/ln/channelmanager.rs

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1629,7 +1629,7 @@ where
16291629
/// let mut channel_monitors = read_channel_monitors();
16301630
/// let args = ChannelManagerReadArgs::new(
16311631
/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
1632-
/// router, message_router, logger, default_config, channel_monitors.iter_mut().collect(),
1632+
/// router, message_router, logger, default_config, channel_monitors.iter().collect(),
16331633
/// );
16341634
/// let (block_hash, channel_manager) =
16351635
/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
@@ -12231,9 +12231,12 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
1223112231
/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
1223212232
/// same way you would handle a [`chain::Filter`] call using
1223312233
/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
12234-
/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
12235-
/// 5) Disconnect/connect blocks on the [`ChannelManager`].
12236-
/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
12234+
/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
12235+
/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
12236+
/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
12237+
/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing
12238+
/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is
12239+
/// otherwise not required.
1223712240
/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
1223812241
/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
1223912242
/// the next step.
@@ -12316,7 +12319,7 @@ where
1231612319
/// this struct.
1231712320
///
1231812321
/// This is not exported to bindings users because we have no HashMap bindings
12319-
pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
12322+
pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
1232012323
}
1232112324

1232212325
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
@@ -12339,7 +12342,7 @@ where
1233912342
entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
1234012343
chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
1234112344
default_config: UserConfig,
12342-
mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
12345+
mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
1234312346
) -> Self {
1234412347
Self {
1234512348
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,

lightning/src/ln/functional_test_utils.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -686,7 +686,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
686686
// them to ensure we can write and reload our ChannelManager.
687687
{
688688
let mut channel_monitors = new_hash_map();
689-
for monitor in deserialized_monitors.iter_mut() {
689+
for monitor in deserialized_monitors.iter() {
690690
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
691691
}
692692

@@ -1128,7 +1128,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
11281128
let mut node_read = &chanman_encoded[..];
11291129
let (_, node_deserialized) = {
11301130
let mut channel_monitors = new_hash_map();
1131-
for monitor in monitors_read.iter_mut() {
1131+
for monitor in monitors_read.iter() {
11321132
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
11331133
}
11341134
<(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs {

lightning/src/ln/reload_tests.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -426,7 +426,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
426426
chain_monitor: nodes[0].chain_monitor,
427427
tx_broadcaster: nodes[0].tx_broadcaster,
428428
logger: &logger,
429-
channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
429+
channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
430430
}) { } else {
431431
panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
432432
};
@@ -444,7 +444,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
444444
chain_monitor: nodes[0].chain_monitor,
445445
tx_broadcaster: nodes[0].tx_broadcaster,
446446
logger: &logger,
447-
channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
447+
channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
448448
}).unwrap();
449449
nodes_0_deserialized = nodes_0_deserialized_tmp;
450450
assert!(nodes_0_read.is_empty());

pending_changelog/3322-b.txt

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
API Updates
2+
===========
3+
4+
As a part of adding robustness against several unlikely scenarios, redundant `PaymentClaimed`
5+
`Event`s will be generated more frequently on startup for payments received on LDK 0.1 and
6+
newer. A new `Event::PaymentClaimed::payment_id` field may be used to better differentiate
7+
between redundant payments.

0 commit comments

Comments
 (0)