@@ -1636,9 +1636,6 @@ pub struct ChannelDetails {
1636
1636
pub counterparty: ChannelCounterparty,
1637
1637
/// The Channel's funding transaction output, if we've negotiated the funding transaction with
1638
1638
/// our counterparty already.
1639
- ///
1640
- /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
1641
- /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
1642
1639
pub funding_txo: Option<OutPoint>,
1643
1640
/// The features which this channel operates with. See individual features for more info.
1644
1641
///
@@ -2296,7 +2293,7 @@ macro_rules! handle_new_monitor_update {
2296
2293
handle_new_monitor_update!($self, $update_res, $chan, _internal,
2297
2294
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
2298
2295
};
2299
- ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
2296
+ ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
2300
2297
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
2301
2298
.or_insert_with(Vec::new);
2302
2299
// During startup, we push monitor updates as background events through to here in
@@ -2753,7 +2750,7 @@ where
2753
2750
2754
2751
// Update the monitor with the shutdown script if necessary.
2755
2752
if let Some(monitor_update) = monitor_update_opt.take() {
2756
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
2753
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
2757
2754
peer_state_lock, peer_state, per_peer_state, chan);
2758
2755
}
2759
2756
} else {
@@ -3414,7 +3411,7 @@ where
3414
3411
}, onion_packet, None, &self.fee_estimator, &&logger);
3415
3412
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
3416
3413
Some(monitor_update) => {
3417
- match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
3414
+ match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
3418
3415
false => {
3419
3416
// Note that MonitorUpdateInProgress here indicates (per function
3420
3417
// docs) that we will resend the commitment update once monitor
@@ -4770,7 +4767,7 @@ where
4770
4767
hash_map::Entry::Occupied(mut chan_phase) => {
4771
4768
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
4772
4769
updated_chan = true;
4773
- handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
4770
+ handle_new_monitor_update!(self, funding_txo, update.clone(),
4774
4771
peer_state_lock, peer_state, per_peer_state, chan);
4775
4772
} else {
4776
4773
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
@@ -5574,7 +5571,7 @@ where
5574
5571
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
5575
5572
}
5576
5573
if !during_init {
5577
- handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
5574
+ handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
5578
5575
peer_state, per_peer_state, chan);
5579
5576
} else {
5580
5577
// If we're running during init we cannot update a monitor directly -
@@ -6570,7 +6567,7 @@ where
6570
6567
}
6571
6568
// Update the monitor with the shutdown script if necessary.
6572
6569
if let Some(monitor_update) = monitor_update_opt {
6573
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
6570
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
6574
6571
peer_state_lock, peer_state, per_peer_state, chan);
6575
6572
}
6576
6573
},
@@ -6852,7 +6849,7 @@ where
6852
6849
let funding_txo = chan.context.get_funding_txo();
6853
6850
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
6854
6851
if let Some(monitor_update) = monitor_update_opt {
6855
- handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
6852
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
6856
6853
peer_state, per_peer_state, chan);
6857
6854
}
6858
6855
Ok(())
@@ -7031,7 +7028,7 @@ where
7031
7028
if let Some(monitor_update) = monitor_update_opt {
7032
7029
let funding_txo = funding_txo_opt
7033
7030
.expect("Funding outpoint must have been set for RAA handling to succeed");
7034
- handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
7031
+ handle_new_monitor_update!(self, funding_txo, monitor_update,
7035
7032
peer_state_lock, peer_state, per_peer_state, chan);
7036
7033
}
7037
7034
htlcs_to_fail
@@ -7372,7 +7369,7 @@ where
7372
7369
if let Some(monitor_update) = monitor_opt {
7373
7370
has_monitor_update = true;
7374
7371
7375
- handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
7372
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
7376
7373
peer_state_lock, peer_state, per_peer_state, chan);
7377
7374
continue 'peer_loop;
7378
7375
}
@@ -8141,7 +8138,7 @@ where
8141
8138
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
8142
8139
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
8143
8140
channel_id);
8144
- handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
8141
+ handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
8145
8142
peer_state_lck, peer_state, per_peer_state, chan);
8146
8143
if further_update_exists {
8147
8144
// If there are more `ChannelMonitorUpdate`s to process, restart at the
@@ -9816,7 +9813,7 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
9816
9813
(2, prev_short_channel_id, required),
9817
9814
(4, prev_htlc_id, required),
9818
9815
(6, prev_funding_outpoint, required),
9819
- // Note that by the time we get past the required read for type 2 above, prev_funding_outpoint will be
9816
+ // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
9820
9817
// filled in, so we can safely unwrap it here.
9821
9818
(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
9822
9819
});
@@ -11079,12 +11076,11 @@ where
11079
11076
Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
11080
11077
} = action {
11081
11078
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
11082
- let channel_id = blocked_channel_id;
11083
11079
log_trace!(logger,
11084
11080
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
11085
- channel_id);
11081
+ blocked_channel_id);
11086
11082
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
11087
- .entry(*channel_id)
11083
+ .entry(*blocked_channel_id)
11088
11084
.or_insert_with(Vec::new).push(blocking_action.clone());
11089
11085
} else {
11090
11086
// If the channel we were blocking has closed, we don't need to
0 commit comments