@@ -1085,7 +1085,7 @@ pub(crate) struct PendingMPPClaim {
}

#[derive(Clone, Debug, PartialEq, Eq)]
- /// When we're claiming a(n MPP) payment, we want to store information about thay payment in the
+ /// When we're claiming a(n MPP) payment, we want to store information about that payment in the
/// [`ChannelMonitor`] so that we can replay the claim without any information from the
/// [`ChannelManager`] at all. This struct stores that information with enough to replay claims
/// against all MPP parts as well as generate an [`Event::PaymentClaimed`].
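The doc comment above sums up the replay design: each ChannelMonitor keeps enough about a claim to re-claim every MPP part and to regenerate the [`Event::PaymentClaimed`] without consulting the `ChannelManager`. As a rough illustration only, "enough to replay" might look something like the following sketch; the names and types here are hypothetical and simplified, not LDK's actual definitions:

// Hypothetical sketch only -- field names and types are illustrative, not LDK's real structs.
struct MppPartClaim {
    channel_id: [u8; 32], // channel the HTLC part arrived on
    htlc_id: u64,         // which HTLC on that channel to claim
    value_msat: u64,      // amount carried by this part
}

struct ReplayableClaim {
    payment_hash: [u8; 32],
    payment_preimage: [u8; 32],
    parts: Vec<MppPartClaim>,                // every MPP part, so the whole claim can be replayed
    sender_intended_total_msat: Option<u64>, // data needed to regenerate Event::PaymentClaimed
}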
@@ -12984,65 +12984,6 @@ where

let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);

- for (_, monitor) in args.channel_monitors.iter() {
- for (payment_hash, (payment_preimage, _)) in monitor.get_stored_preimages() {
- if let Some(payment) = claimable_payments.remove(&payment_hash) {
- log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
- let mut claimable_amt_msat = 0;
- let mut receiver_node_id = Some(our_network_pubkey);
- let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
- if phantom_shared_secret.is_some() {
- let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
- .expect("Failed to get node_id for phantom node recipient");
- receiver_node_id = Some(phantom_pubkey)
- }
- for claimable_htlc in &payment.htlcs {
- claimable_amt_msat += claimable_htlc.value;
-
- // Add a holding-cell claim of the payment to the Channel, which should be
- // applied ~immediately on peer reconnection. Because it won't generate a
- // new commitment transaction we can just provide the payment preimage to
- // the corresponding ChannelMonitor and nothing else.
- //
- // We do so directly instead of via the normal ChannelMonitor update
- // procedure as the ChainMonitor hasn't yet been initialized, implying
- // we're not allowed to call it directly yet. Further, we do the update
- // without incrementing the ChannelMonitor update ID as there isn't any
- // reason to.
- // If we were to generate a new ChannelMonitor update ID here and then
- // crash before the user finishes block connect we'd end up force-closing
- // this channel as well. On the flip side, there's no harm in restarting
- // without the new monitor persisted - we'll end up right back here on
- // restart.
- let previous_channel_id = claimable_htlc.prev_hop.channel_id;
- if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
- let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
- let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
- channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
- }
- }
- if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
- previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
- }
- }
- let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
- pending_events_read.push_back((events::Event::PaymentClaimed {
- receiver_node_id,
- payment_hash,
- purpose: payment.purpose,
- amount_msat: claimable_amt_msat,
- htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
- sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
- onion_fields: payment.onion_fields,
- payment_id: Some(payment_id),
- }, None));
- }
- }
- }
-
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
if let Some(peer_state) = per_peer_state.get(&node_id) {
for (channel_id, actions) in monitor_update_blocked_actions.iter() {
@@ -13143,6 +13084,72 @@ where
default_configuration: args.default_config,
};

+ for (_, monitor) in args.channel_monitors.iter() {
+ for (payment_hash, (payment_preimage, _)) in monitor.get_stored_preimages() {
+ let per_peer_state = channel_manager.per_peer_state.read().unwrap();
+ let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
+ let payment = claimable_payments.claimable_payments.remove(&payment_hash);
+ mem::drop(claimable_payments);
+ if let Some(payment) = payment {
+ log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
+ let mut claimable_amt_msat = 0;
+ let mut receiver_node_id = Some(our_network_pubkey);
+ let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
+ if phantom_shared_secret.is_some() {
+ let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ receiver_node_id = Some(phantom_pubkey)
+ }
+ for claimable_htlc in &payment.htlcs {
+ claimable_amt_msat += claimable_htlc.value;
+
+ // Add a holding-cell claim of the payment to the Channel, which should be
+ // applied ~immediately on peer reconnection. Because it won't generate a
+ // new commitment transaction we can just provide the payment preimage to
+ // the corresponding ChannelMonitor and nothing else.
+ //
+ // We do so directly instead of via the normal ChannelMonitor update
+ // procedure as the ChainMonitor hasn't yet been initialized, implying
+ // we're not allowed to call it directly yet. Further, we do the update
+ // without incrementing the ChannelMonitor update ID as there isn't any
+ // reason to.
+ // If we were to generate a new ChannelMonitor update ID here and then
+ // crash before the user finishes block connect we'd end up force-closing
+ // this channel as well. On the flip side, there's no harm in restarting
+ // without the new monitor persisted - we'll end up right back here on
+ // restart.
+ let previous_channel_id = claimable_htlc.prev_hop.channel_id;
+ let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
+ .get(&claimable_htlc.prev_hop.outpoint).cloned();
+ if let Some(peer_node_id) = peer_node_id_opt {
+ let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+ let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
+ channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
+ }
+ }
+ if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+ previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &channel_manager.tx_broadcaster, &channel_manager.fee_estimator, &channel_manager.logger);
+ }
+ }
+ let mut pending_events = channel_manager.pending_events.lock().unwrap();
+ let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
+ pending_events.push_back((events::Event::PaymentClaimed {
+ receiver_node_id,
+ payment_hash,
+ purpose: payment.purpose,
+ amount_msat: claimable_amt_msat,
+ htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
+ sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
+ onion_fields: payment.onion_fields,
+ payment_id: Some(payment_id),
+ }, None));
+ }
+ }
+ }
+
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
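Note the locking pattern in the added loop: the claim is removed from `claimable_payments` under its lock, the guard is released explicitly with `mem::drop`, and only afterwards are the per-peer channel locks taken, presumably so the two locks are never held at the same time. A generic sketch of that pattern, with placeholder types rather than LDK's own, looks like this:

use std::collections::HashMap;
use std::mem;
use std::sync::Mutex;

// Generic illustration of the pattern in the added loop above: remove the entry
// under one lock, drop that guard explicitly, then take other locks while
// processing the removed value. Types and names here are placeholders.
fn take_then_process(claims: &Mutex<HashMap<u64, String>>, peer_state: &Mutex<Vec<String>>, key: u64) {
    let mut guard = claims.lock().unwrap();
    let payment = guard.remove(&key);
    // Release the claims lock before acquiring peer_state, so both are never held at once.
    mem::drop(guard);
    if let Some(payment) = payment {
        peer_state.lock().unwrap().push(payment);
    }
}

Dropping the guard explicitly rather than relying on end-of-scope makes the release point obvious and keeps it ahead of the later lock acquisitions.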