Expose {prev,next}_user_channel_id fields in PaymentForwarded (#2924)
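
For orientation (an added summary; the sketch below is illustrative, not from the PR page): the change threads the user-provided ids of both the previous and next channels through `claim_funds_internal` and surfaces them as `prev_user_channel_id` and `next_user_channel_id` on `Event::PaymentForwarded`, so forwarding nodes can correlate forwards with their own channel bookkeeping. A minimal consumer sketch, assuming the field names shown in the diff below; `handle_event` and `lookup_label` are hypothetical application-side names:

```rust
use lightning::events::Event;

// Hypothetical application-side helper resolving a user channel id to a label.
fn lookup_label(user_channel_id: u128) -> String {
	format!("channel-{user_channel_id}")
}

// Sketch of an event handler reading the new fields. Both ids are
// `Option<u128>`: they can be `None` for forwards claimed on-chain or
// replayed from a `ChannelMonitor` at startup (see the call sites below).
fn handle_event(event: Event) {
	if let Event::PaymentForwarded {
		prev_user_channel_id, next_user_channel_id, total_fee_earned_msat, ..
	} = event {
		let inbound = prev_user_channel_id.map(lookup_label);
		let outbound = next_user_channel_id.map(lookup_label);
		println!("forwarded {:?} -> {:?}, fee: {:?} msat",
			inbound, outbound, total_fee_earned_msat);
	}
}
```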
```diff
@@ -5733,7 +5733,7 @@ where
 	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
 		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
 		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
-		next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
+		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
 	) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
```
```diff
@@ -5752,11 +5752,10 @@ where
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_channel_id = hop_data.channel_id;
+				let prev_user_channel_id = hop_data.user_channel_id;
 				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
 				#[cfg(debug_assertions)]
 				let claiming_chan_funding_outpoint = hop_data.outpoint;
-				#[cfg(debug_assertions)]
-				let claiming_channel_id = hop_data.channel_id;
 				let res = self.claim_funds_from_hop(hop_data, payment_preimage,
 					|htlc_claim_value_msat, definitely_duplicate| {
 						let chan_to_release =
```
```diff
@@ -5814,7 +5813,7 @@ where
 							BackgroundEvent::MonitorUpdatesComplete {
 								channel_id, ..
 							} =>
-								*channel_id == claiming_channel_id,
+								*channel_id == prev_channel_id,
 						}
 					}), "{:?}", *background_events);
 				}
```
```diff
@@ -5838,12 +5837,14 @@ where
 							"skimmed_fee_msat must always be included in total_fee_earned_msat");
 						Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
 							event: events::Event::PaymentForwarded {
-								total_fee_earned_msat,
-								claim_from_onchain_tx: from_onchain,
 								prev_channel_id: Some(prev_channel_id),
 								next_channel_id: Some(next_channel_id),
-								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+								prev_user_channel_id,
+								next_user_channel_id,
+								total_fee_earned_msat,
 								skimmed_fee_msat,
+								claim_from_onchain_tx: from_onchain,
+								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
 							},
 							downstream_counterparty_and_funding_outpoint: chan_to_release,
 						})
```
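For reference, the public variant implied by the construction above would look roughly as follows; this is a sketch reconstructed from the diff (with a placeholder standing in for LDK's `ChannelId` newtype), not a copy of the LDK source:

```rust
// Placeholder for LDK's 32-byte `ChannelId` newtype.
pub struct ChannelId(pub [u8; 32]);

// Sketch of `Event::PaymentForwarded` as implied by the construction site
// above; the two `*_user_channel_id` fields are the ones added by this PR.
pub enum Event {
	PaymentForwarded {
		prev_channel_id: Option<ChannelId>,
		next_channel_id: Option<ChannelId>,
		prev_user_channel_id: Option<u128>,
		next_user_channel_id: Option<u128>,
		total_fee_earned_msat: Option<u64>,
		skimmed_fee_msat: Option<u64>,
		claim_from_onchain_tx: bool,
		outbound_amount_forwarded_msat: Option<u64>,
	},
	// ... other variants elided
}
```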
```diff
@@ -6817,6 +6818,7 @@ where
 	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
 		let funding_txo;
+		let next_user_channel_id;
 		let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
```
```diff
@@ -6846,6 +6848,7 @@ where
 						// outbound HTLC is claimed. This is guaranteed to all complete before we
 						// process the RAA as messages are processed from single peers serially.
 						funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+						next_user_channel_id = chan.context.get_user_id();
 						res
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
```
```diff
@@ -6857,7 +6860,7 @@ where
 		};
 		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
 			Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
-			funding_txo, msg.channel_id
+			funding_txo, msg.channel_id, Some(next_user_channel_id),
 		);

 		Ok(())
```
```diff
@@ -7359,7 +7362,7 @@ where
 					log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 					self.claim_funds_internal(htlc_update.source, preimage,
 						htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-						false, counterparty_node_id, funding_outpoint, channel_id);
+						false, counterparty_node_id, funding_outpoint, channel_id, None);
 				} else {
 					log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
 					let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
```

Review thread on this line:

Comment: Would we consider including this field in …?

Reply: Thought about that too, but that might require starting to track it in the ….

Reply: We also currently don't have any concept of how to handle user channel ids after a channel closes - is recycling allowed at that point?
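As an aside on the thread above (my illustration, not from the PR): the message path can read `get_user_id()` off the live channel, while the monitor path may refer to a channel that has long been closed, which is why it passes `None`. A toy sketch of that asymmetry, with a hypothetical `channel_by_id` map standing in for LDK's far more involved per-peer channel state:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the per-peer channel map, keyed by channel id.
fn next_user_channel_id_for(
	channel_by_id: &HashMap<[u8; 32], u128>, channel_id: &[u8; 32],
) -> Option<u128> {
	// Live channel available (fulfill-message path): report its user id.
	// Channel already gone (monitor/startup path): nothing to report.
	channel_by_id.get(channel_id).copied()
}

fn main() {
	let mut channels = HashMap::new();
	channels.insert([1u8; 32], 42u128);
	assert_eq!(next_user_channel_id_for(&channels, &[1u8; 32]), Some(42));
	assert_eq!(next_user_channel_id_for(&channels, &[2u8; 32]), None);
}
```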
```diff
@@ -11319,7 +11322,9 @@ where
 			// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
 			// channel is closed we just assume that it probably came from an on-chain claim.
 			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
-				downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
+				downstream_closed, true, downstream_node_id, downstream_funding,
+				downstream_channel_id, None
+			);
 		}

 		//TODO: Broadcast channel update for closed channels, but only after we've made a
```
Comment: FWIW, tests pass when I revert this change.

Reply: Ugh, good catch. That's due to the `map_or` in the test, which makes it too forgiving. I'll see about revisiting that.

Reply: Now pushed a fixup to the test: #2924 (comment). It's not perfect, but I wanted to avoid cluttering the macro by introducing yet another `expected_X` variable just for this slim use case. Let me know if you'd prefer that over the current approach, though.
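
To illustrate the `map_or` pitfall mentioned above (a hypothetical reconstruction, not the actual test macro): an assertion of the shape below passes whenever the event omits the field, so a regression that drops the value entirely would go unnoticed:

```rust
// Hypothetical shapes of the forgiving and strict assertions; not the
// actual macro from the LDK test suite.
fn forgiving_check(actual: Option<u128>, expected: u128) -> bool {
	// Passes vacuously when `actual` is `None`.
	actual.map_or(true, |id| id == expected)
}

fn strict_check(actual: Option<u128>, expected: u128) -> bool {
	// Fails when the field is missing, catching a dropped value.
	actual == Some(expected)
}

fn main() {
	assert!(forgiving_check(None, 42)); // silently passes with no data
	assert!(!strict_check(None, 42));   // correctly flags the missing field
}
```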