@@ -749,6 +749,14 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
 
+	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
+	/// but our signer (initially) refused to give us a signature, we should retry at some point in
+	/// the future when the signer indicates it may have a signature for us.
+	///
+	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
+	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
+	signer_pending_commitment_update: bool,
+
 	// pending_update_fee is filled when sending and receiving update_fee.
 	//
 	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
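
As context for the new flag, here is a minimal, self-contained sketch of the remember-and-retry pattern it enables. `MockSigner`, `ChannelSketch`, and `try_send_commitment` are illustrative stand-ins, not LDK API:

```rust
struct MockSigner { ready: bool }

impl MockSigner {
	/// Returns a signature only once the signer is ready, mirroring an
	/// asynchronous signer that may initially refuse to sign.
	fn sign(&self) -> Result<&'static str, ()> {
		if self.ready { Ok("sig") } else { Err(()) }
	}
}

struct ChannelSketch { signer_pending_commitment_update: bool }

impl ChannelSketch {
	/// Try to build a commitment update; on signer failure, record that we
	/// still owe our peer one so a later retry can regenerate it.
	fn try_send_commitment(&mut self, signer: &MockSigner) -> Result<&'static str, ()> {
		match signer.sign() {
			Ok(sig) => { self.signer_pending_commitment_update = false; Ok(sig) }
			Err(()) => { self.signer_pending_commitment_update = true; Err(()) }
		}
	}
}

fn main() {
	let mut chan = ChannelSketch { signer_pending_commitment_update: false };
	let mut signer = MockSigner { ready: false };
	assert!(chan.try_send_commitment(&signer).is_err());
	assert!(chan.signer_pending_commitment_update);
	signer.ready = true; // the signer later indicates a signature is available
	assert!(chan.try_send_commitment(&signer).is_ok());
	assert!(!chan.signer_pending_commitment_update);
}
```
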
@@ -3166,8 +3174,8 @@ impl<SP: Deref> Channel<SP> where
 			self.context.monitor_pending_revoke_and_ack = true;
 			if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
 				// If we were going to send a commitment_signed after the RAA, go ahead and do all
-				// the corresponding HTLC status updates so that get_last_commitment_update
-				// includes the right HTLCs.
+				// the corresponding HTLC status updates so that
+				// get_last_commitment_update_for_send includes the right HTLCs.
 				self.context.monitor_pending_commitment_signed = true;
 				let mut additional_update = self.build_commitment_no_status_check(logger);
 				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
@@ -3541,9 +3549,10 @@ impl<SP: Deref> Channel<SP> where
 			// cells) while we can't update the monitor, so we just return what we have.
 			if require_commitment {
 				self.context.monitor_pending_commitment_signed = true;
-				// When the monitor updating is restored we'll call get_last_commitment_update(),
-				// which does not update state, but we're definitely now awaiting a remote revoke
-				// before we can step forward any more, so set it here.
+				// When the monitor updating is restored we'll call
+				// get_last_commitment_update_for_send(), which does not update state, but we're
+				// definitely now awaiting a remote revoke before we can step forward any more, so
+				// set it here.
 				let mut additional_update = self.build_commitment_no_status_check(logger);
 				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
@@ -3846,9 +3855,11 @@ impl<SP: Deref> Channel<SP> where
 			Some(self.get_last_revoke_and_ack())
 		} else { None };
 		let commitment_update = if self.context.monitor_pending_commitment_signed {
-			self.mark_awaiting_response();
-			Some(self.get_last_commitment_update(logger))
+			self.get_last_commitment_update_for_send(logger).ok()
 		} else { None };
+		if commitment_update.is_some() {
+			self.mark_awaiting_response();
+		}
 
 		self.context.monitor_pending_revoke_and_ack = false;
 		self.context.monitor_pending_commitment_signed = false;
@@ -3909,7 +3920,8 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 
-	fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+	/// Gets the last commitment update for immediate sending to our peer.
+	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
 		let mut update_add_htlcs = Vec::new();
 		let mut update_fulfill_htlcs = Vec::new();
 		let mut update_fail_htlcs = Vec::new();
@@ -3965,13 +3977,26 @@ impl<SP: Deref> Channel<SP> where
 			})
 		} else { None };
 
-		log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
 				&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
 				update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
-		msgs::CommitmentUpdate {
+		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
+			if self.context.signer_pending_commitment_update {
+				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
+				self.context.signer_pending_commitment_update = false;
+			}
+			update
+		} else {
+			if !self.context.signer_pending_commitment_update {
+				log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+				self.context.signer_pending_commitment_update = true;
+			}
+			return Err(());
+		};
+		Ok(msgs::CommitmentUpdate {
 			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
-			commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
-		}
+			commitment_signed,
+		})
 	}
 
 	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
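
Note that the new branch above only logs when the flag actually flips, so repeated rebuild attempts against a still-pending signer do not spam the log. A reduced sketch of that transition-logged latch, with hypothetical `Outcome` and `Latch` types:

```rust
enum Outcome { Signed, SignerPending }

struct Latch { signer_pending_commitment_update: bool }

impl Latch {
	/// Record a signing attempt, logging only on state transitions.
	fn record(&mut self, outcome: Outcome) {
		match outcome {
			Outcome::Signed => if self.signer_pending_commitment_update {
				println!("clearing signer_pending_commitment_update");
				self.signer_pending_commitment_update = false;
			},
			Outcome::SignerPending => if !self.signer_pending_commitment_update {
				println!("setting signer_pending_commitment_update");
				self.signer_pending_commitment_update = true;
			},
		}
	}
}

fn main() {
	let mut latch = Latch { signer_pending_commitment_update: false };
	latch.record(Outcome::SignerPending); // logs the transition
	latch.record(Outcome::SignerPending); // quiet: flag already set
	latch.record(Outcome::Signed);        // logs the transition back
}
```
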
@@ -4151,7 +4176,7 @@ impl<SP: Deref> Channel<SP> where
 		Ok(ReestablishResponses {
 			channel_ready, shutdown_msg, announcement_sigs,
 			raa: required_revoke,
-			commitment_update: Some(self.get_last_commitment_update(logger)),
+			commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
 			order: self.context.resend_order.clone(),
 		})
 	}
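
At this reestablish call site the builder is now fallible, and `.ok()` degrades a pending signer to `None` ("nothing to send right now") rather than the old `expect(..)` panic. A minimal sketch, with `CommitmentUpdate` and a boolean `signer_ready` standing in for `msgs::CommitmentUpdate` and the real signer state:

```rust
struct CommitmentUpdate;

/// Hypothetical stand-in for the fallible builder: errors only while the
/// signer has not yet produced a signature.
fn get_last_commitment_update_for_send(signer_ready: bool) -> Result<CommitmentUpdate, ()> {
	if signer_ready { Ok(CommitmentUpdate) } else { Err(()) }
}

fn main() {
	// Signer not ready: the reestablish response simply omits the update;
	// it is regenerated and sent once the signer unblocks.
	let commitment_update: Option<CommitmentUpdate> =
		get_last_commitment_update_for_send(false).ok();
	assert!(commitment_update.is_none());
}
```
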
@@ -5525,7 +5550,7 @@ impl<SP: Deref> Channel<SP> where
 				}
 
 				let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
-					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
+					.map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
 				signature = res.0;
 				htlc_signatures = res.1;
 
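
Downgrading the error from `ChannelError::Close` to `ChannelError::Ignore` means a signer that cannot sign yet no longer force-closes the channel. A toy model of that severity distinction (mirroring the spirit of `ChannelError`, not its exact definition):

```rust
enum ChannelError { Ignore(String), Close(String) }

fn handle(err: ChannelError) {
	match err {
		// Non-fatal: leave channel state intact and retry signing later.
		ChannelError::Ignore(msg) => println!("ignoring: {}", msg),
		// Fatal: tear the channel down.
		ChannelError::Close(msg) => println!("force-closing: {}", msg),
	}
}

fn main() {
	handle(ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()));
}
```
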
@@ -5848,6 +5873,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				monitor_pending_failures: Vec::new(),
 				monitor_pending_finalized_fulfills: Vec::new(),
 
+				signer_pending_commitment_update: false,
+
 				#[cfg(debug_assertions)]
 				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 				#[cfg(debug_assertions)]
@@ -6502,6 +6529,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 				monitor_pending_failures: Vec::new(),
 				monitor_pending_finalized_fulfills: Vec::new(),
 
+				signer_pending_commitment_update: false,
+
 				#[cfg(debug_assertions)]
 				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 				#[cfg(debug_assertions)]
@@ -7593,6 +7622,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			monitor_pending_failures,
 			monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
 
+			signer_pending_commitment_update: false,
+
 			pending_update_fee,
 			holding_cell_update_fee,
 			next_holder_htlc_id,