
Commit 56f16ea

Merge pull request #349 from ariard/2019-07-data_loss
Implement option_data_loss_protect on both sides
2 parents 127ce29 + 41def65 commit 56f16ea

6 files changed: +272 -31 lines changed

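For context on the feature being merged: option_data_loss_protect extends the channel_reestablish message with two optional fields, which the hunks below populate on send and verify on receipt. A minimal sketch of the shape involved, with field names taken from the diff (the struct layout here is illustrative, not necessarily the crate's exact definition):

use secp256k1::PublicKey;

// Illustrative sketch of the data_loss_protect fields carried in channel_reestablish.
pub struct DataLossProtect {
    // The last per-commitment secret we received from the peer, proving how far
    // along their commitment state we have actually seen.
    pub your_last_per_commitment_secret: [u8; 32],
    // Our current per-commitment point, letting a fallen-behind peer derive the key
    // for its to_remote output on our latest commitment transaction.
    pub my_current_per_commitment_point: PublicKey,
}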

src/ln/channel.rs

Lines changed: 48 additions & 5 deletions
@@ -16,7 +16,7 @@ use secp256k1::{Secp256k1,Signature};
 use secp256k1;
 
 use ln::msgs;
-use ln::msgs::{DecodeError, OptionalField, LocalFeatures};
+use ln::msgs::{DecodeError, OptionalField, LocalFeatures, DataLossProtect};
 use ln::channelmonitor::ChannelMonitor;
 use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
@@ -32,7 +32,7 @@ use util::config::{UserConfig,ChannelConfig};
 
 use std;
 use std::default::Default;
-use std::{cmp,mem};
+use std::{cmp,mem,fmt};
 use std::sync::{Arc};
 
 #[cfg(test)]
@@ -366,10 +366,23 @@ pub const OFFERED_HTLC_SCRIPT_WEIGHT: usize = 133;
 /// Used to return a simple Error back to ChannelManager. Will get converted to a
 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
 /// channel_id in ChannelManager.
-#[derive(Debug)]
 pub(super) enum ChannelError {
     Ignore(&'static str),
     Close(&'static str),
+    CloseDelayBroadcast {
+        msg: &'static str,
+        update: Option<ChannelMonitor>
+    },
+}
+
+impl fmt::Debug for ChannelError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            &ChannelError::Ignore(e) => write!(f, "Ignore : {}", e),
+            &ChannelError::Close(e) => write!(f, "Close : {}", e),
+            &ChannelError::CloseDelayBroadcast { msg, .. } => write!(f, "CloseDelayBroadcast : {}", msg)
+        }
+    }
 }
 
 macro_rules! secp_check {
@@ -2499,6 +2512,22 @@ impl Channel {
             return Err(ChannelError::Close("Peer sent a garbage channel_reestablish"));
         }
 
+        if msg.next_remote_commitment_number > 0 {
+            match msg.data_loss_protect {
+                OptionalField::Present(ref data_loss) => {
+                    if chan_utils::build_commitment_secret(self.local_keys.commitment_seed, INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1) != data_loss.your_last_per_commitment_secret {
+                        return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided"));
+                    }
+                    if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
+                        self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point);
+                        return Err(ChannelError::CloseDelayBroadcast { msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting", update: Some(self.channel_monitor.clone())
+                        });
+                    }
+                },
+                OptionalField::Absent => {}
+            }
+        }
+
         // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
         // remaining cases either succeed or ErrorMessage-fail).
         self.channel_state &= !(ChannelState::PeerDisconnected as u32);
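The arithmetic in the new check can be summarized as follows: protocol commitment numbers count up from 0 while this codebase's internal numbers count down from INITIAL_COMMITMENT_NUMBER, so a peer claiming next_remote_commitment_number = N must prove it by presenting our per-commitment secret at index INITIAL_COMMITMENT_NUMBER - N + 1 (the secret for the commitment just before the one it expects next). A self-contained sketch of that decision logic, with build_secret standing in for chan_utils::build_commitment_secret:

const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

enum ReestablishCheck {
    Fine,         // proof matches and we are not behind
    PeerLying,    // proof does not match the height the peer claims
    WeAreBehind,  // valid proof of a state newer than anything we know: do not broadcast
}

// `build_secret` stands in for chan_utils::build_commitment_secret(seed, idx).
fn check_reestablish(
    next_remote_commitment_number: u64,
    their_proof: [u8; 32],
    cur_local_commitment_transaction_number: u64,
    build_secret: impl Fn(u64) -> [u8; 32],
) -> ReestablishCheck {
    if next_remote_commitment_number == 0 {
        // Nothing has been revoked yet, so there is nothing to prove.
        return ReestablishCheck::Fine;
    }
    // Our secret for the commitment just before the one the peer expects next.
    let expected = build_secret(INITIAL_COMMITMENT_NUMBER - next_remote_commitment_number + 1);
    if expected != their_proof {
        return ReestablishCheck::PeerLying;
    }
    if next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - cur_local_commitment_transaction_number {
        // The peer legitimately knows a state beyond our current one: we lost data.
        return ReestablishCheck::WeAreBehind;
    }
    ReestablishCheck::Fine
}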
@@ -2575,7 +2604,7 @@ impl Channel {
             // now!
             match self.free_holding_cell_htlcs() {
                 Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
-                Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
+                Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
                 Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
                 Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
             }
@@ -3255,6 +3284,20 @@ impl Channel {
     pub fn get_channel_reestablish(&self) -> msgs::ChannelReestablish {
         assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
         assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
+        let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+            let remote_last_secret = self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
+            log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
+            OptionalField::Present(DataLossProtect {
+                your_last_per_commitment_secret: remote_last_secret,
+                my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number + 1))
+            })
+        } else {
+            log_debug!(self, "We haven't seen any revoked secret yet; if this channel has already been updated it means we have fallen behind, and you should wait for the other peer to close");
+            OptionalField::Present(DataLossProtect {
+                your_last_per_commitment_secret: [0;32],
+                my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number))
+            })
+        };
         msgs::ChannelReestablish {
             channel_id: self.channel_id(),
             // The protocol has two different commitment number concepts - the "commitment
@@ -3275,7 +3318,7 @@ impl Channel {
             // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
             // overflow here.
             next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number - 1,
-            data_loss_protect: OptionalField::Absent,
+            data_loss_protect,
         }
     }
 
src/ln/channelmanager.rs

Lines changed: 35 additions & 0 deletions
@@ -208,6 +208,15 @@ impl MsgHandleErrInternal {
                     },
                 }),
             },
+            ChannelError::CloseDelayBroadcast { msg, .. } => HandleError {
+                err: msg,
+                action: Some(msgs::ErrorAction::SendErrorMessage {
+                    msg: msgs::ErrorMessage {
+                        channel_id,
+                        data: msg.to_string()
+                    },
+                }),
+            },
         },
         shutdown_finish: None,
     }
@@ -447,6 +456,7 @@ macro_rules! break_chan_entry {
                 }
                 break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
             },
+            Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
         }
     }
 }
@@ -466,6 +476,31 @@ macro_rules! try_chan_entry {
                 }
                 return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
             },
+            Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
+                log_error!($self, "Channel {} needs to be shut down, but its closing transactions were not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
+                let (channel_id, mut chan) = $entry.remove_entry();
+                if let Some(short_id) = chan.get_short_channel_id() {
+                    $channel_state.short_to_id.remove(&short_id);
+                }
+                if let Some(update) = update {
+                    if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update) {
+                        match e {
+                            // Upstream channel is dead, but we want at least to fail backward HTLCs to save
+                            // downstream channels. In case of PermanentFailure, we are not going to be able
+                            // to claim back the to_remote output on the remote commitment transaction. That doesn't
+                            // make a difference here: we are concerned with the HTLC circuit, not onchain funds.
+                            ChannelMonitorUpdateErr::PermanentFailure => {},
+                            ChannelMonitorUpdateErr::TemporaryFailure => {},
+                        }
+                    }
+                }
+                let mut shutdown_res = chan.force_shutdown();
+                if shutdown_res.0.len() >= 1 {
+                    log_error!($self, "You have a toxic local commitment transaction {} available in the channel monitor; read the comment on ChannelMonitor::get_latest_local_commitment_txn to be informed of the manual action to take", shutdown_res.0[0].txid());
+                }
+                shutdown_res.0.clear();
+                return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+            }
         }
     }
 }
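The comments above carry the key design point: the channel is force-closed so backward HTLCs can still be failed, but the stale local commitment must never reach the broadcaster, because the peer could use the revocation path to punish us. A reduced sketch of that "keep the shutdown, drop the broadcast" shape (the tuple layout mirrors how shutdown_res.0 is used above; the types here are stand-ins, not the crate's):

// Stand-in types: element 0 is the list of transactions a normal force-close
// would broadcast, element 1 is whatever is needed to fail HTLCs backward.
type ShutdownRes = (Vec<String>, Vec<String>);

fn neutralize_toxic_broadcast(mut shutdown_res: ShutdownRes) -> ShutdownRes {
    if let Some(txid) = shutdown_res.0.first() {
        // Keep the operator informed: the stale commitment stays in the monitor only.
        eprintln!("toxic local commitment transaction {} kept out of the broadcast path", txid);
    }
    // Failing HTLCs backward is still safe; broadcasting our old state is not.
    shutdown_res.0.clear();
    shutdown_res
}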

src/ln/channelmonitor.rs

Lines changed: 71 additions & 12 deletions
@@ -456,6 +456,10 @@ pub struct ChannelMonitor {
     payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
 
     destination_script: Script,
+    // Thanks to data loss protection, we may be able to claim our non-HTLC funds
+    // back; this is the script we have to spend from, but we need to
+    // scan every commitment transaction for it.
+    to_remote_rescue: Option<(Script, SecretKey)>,
 
     // Used to track outpoint in the process of being claimed by our transactions. We need to scan all transactions
     // for inputs spending this. If height timer (u32) is expired and claim tx hasn't reached enough confirmations
@@ -535,6 +539,7 @@ impl PartialEq for ChannelMonitor {
            self.current_local_signed_commitment_tx != other.current_local_signed_commitment_tx ||
            self.payment_preimages != other.payment_preimages ||
            self.destination_script != other.destination_script ||
+           self.to_remote_rescue != other.to_remote_rescue ||
            self.our_claim_txn_waiting_first_conf != other.our_claim_txn_waiting_first_conf ||
            self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf
        {
@@ -585,6 +590,7 @@ impl ChannelMonitor {
 
             payment_preimages: HashMap::new(),
             destination_script: destination_script,
+            to_remote_rescue: None,
 
             our_claim_txn_waiting_first_conf: HashMap::new(),
 
@@ -763,6 +769,22 @@ impl ChannelMonitor {
         }
     }
 
+    pub(super) fn provide_rescue_remote_commitment_tx_info(&mut self, their_revocation_point: PublicKey) {
+        match self.key_storage {
+            Storage::Local { ref payment_base_key, .. } => {
+                if let Ok(payment_key) = chan_utils::derive_public_key(&self.secp_ctx, &their_revocation_point, &PublicKey::from_secret_key(&self.secp_ctx, &payment_base_key)) {
+                    let to_remote_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
+                        .push_slice(&Hash160::hash(&payment_key.serialize())[..])
+                        .into_script();
+                    if let Ok(to_remote_key) = chan_utils::derive_private_key(&self.secp_ctx, &their_revocation_point, &payment_base_key) {
+                        self.to_remote_rescue = Some((to_remote_script, to_remote_key));
+                    }
+                }
+            },
+            Storage::Watchtower { .. } => {}
+        }
+    }
+
     /// Informs this monitor of the latest local (ie broadcastable) commitment transaction. The
     /// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
     /// is important that any clones of this channel monitor (including remote clones) by kept
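The rescue script assembled above is just a standard P2WPKH scriptPubKey for the derived payment key: witness version 0 followed by a 20-byte push of HASH160(pubkey). A crate-free sketch of the same byte layout, assuming the 20-byte hash has already been computed:

// 0x00 = OP_0 (witness version 0), 0x14 = push the next 20 bytes.
fn p2wpkh_script_pubkey(pubkey_hash160: [u8; 20]) -> Vec<u8> {
    let mut script = Vec::with_capacity(22);
    script.push(0x00);
    script.push(0x14);
    script.extend_from_slice(&pubkey_hash160);
    script
}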
@@ -852,6 +874,7 @@ impl ChannelMonitor {
             self.current_local_signed_commitment_tx = Some(local_tx);
         }
         self.payment_preimages = other.payment_preimages;
+        self.to_remote_rescue = other.to_remote_rescue;
     }
 
     self.current_remote_commitment_number = cmp::min(self.current_remote_commitment_number, other.current_remote_commitment_number);
@@ -1105,6 +1128,13 @@ impl ChannelMonitor {
 
         self.last_block_hash.write(writer)?;
         self.destination_script.write(writer)?;
+        if let Some((ref to_remote_script, ref local_key)) = self.to_remote_rescue {
+            writer.write_all(&[1; 1])?;
+            to_remote_script.write(writer)?;
+            local_key.write(writer)?;
+        } else {
+            writer.write_all(&[0; 1])?;
+        }
 
         writer.write_all(&byte_utils::be64_to_array(self.our_claim_txn_waiting_first_conf.len() as u64))?;
         for (ref outpoint, claim_tx_data) in self.our_claim_txn_waiting_first_conf.iter() {
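The to_remote_rescue field is serialized behind a single presence byte, mirrored exactly by the deserialization hunk further down (0 = absent, 1 = present, anything else is a decode error). A reduced, crate-free sketch of that pattern, using a fixed 32-byte payload in place of the script-and-key pair:

use std::io::{self, Read, Write};

// Write an optional 32-byte value behind a presence byte: 0 = absent, 1 = present.
fn write_opt32<W: Write>(w: &mut W, value: &Option<[u8; 32]>) -> io::Result<()> {
    match value {
        Some(bytes) => { w.write_all(&[1u8])?; w.write_all(bytes) }
        None => w.write_all(&[0u8]),
    }
}

// Read it back; any presence byte other than 0 or 1 is treated as corrupt data.
fn read_opt32<R: Read>(r: &mut R) -> io::Result<Option<[u8; 32]>> {
    let mut tag = [0u8; 1];
    r.read_exact(&mut tag)?;
    match tag[0] {
        0 => Ok(None),
        1 => {
            let mut v = [0u8; 32];
            r.read_exact(&mut v)?;
            Ok(Some(v))
        }
        _ => Err(io::ErrorKind::InvalidData.into()),
    }
}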
@@ -1733,6 +1763,16 @@ impl ChannelMonitor {
                     txn_to_broadcast.push(spend_tx);
                 }
             }
+        } else if let Some((ref to_remote_rescue, ref local_key)) = self.to_remote_rescue {
+            for (idx, outp) in tx.output.iter().enumerate() {
+                if to_remote_rescue == &outp.script_pubkey {
+                    spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WPKH {
+                        outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 },
+                        key: local_key.clone(),
+                        output: outp.clone(),
+                    });
+                }
+            }
         }
 
         (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs)
@@ -2048,9 +2088,16 @@ impl ChannelMonitor {
             None
         }
 
-    /// Used by ChannelManager deserialization to broadcast the latest local state if it's copy of
-    /// the Channel was out-of-date.
-    pub(super) fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
+    /// Used by ChannelManager deserialization to broadcast the latest local state if its copy of
+    /// the Channel was out-of-date. You may also use it to get a broadcastable local toxic tx in case you
+    /// have fallen behind, i.e. when receiving a channel_reestablish with a proof that our remote side knows
+    /// a higher revocation secret than the local commitment number we are aware of. Broadcasting these
+    /// transactions is UNSAFE, as they allow the remote side to punish you. Nevertheless, you may want to
+    /// broadcast them if the remote doesn't close the channel with its higher commitment transaction after a
+    /// substantial amount of time (a month or even a year) in order to get your funds back. The best option
+    /// may be to contact the other node operator out-of-band and coordinate with them, if that option is
+    /// available to you. In any case, the choice is up to the user.
+    pub fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
         if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
             let mut res = vec![local_tx.tx.clone()];
             match self.key_storage {
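Given that warning, the intended last-resort flow is: wait a long time for the peer to close with its newer state, and only then pull the stale commitment out of the monitor and hand it to your own broadcaster. A hedged sketch of what that call site might look like (it assumes the crate's ChannelMonitor and a broadcaster exposing broadcast_transaction, in the spirit of BroadcasterInterface; exact paths and signatures may differ):

// DANGER: only as a last resort, after the peer has been unreachable for a very
// long time. Broadcasting a stale commitment lets the peer take all channel funds
// via the revocation path.
fn broadcast_stale_commitment_as_last_resort<B: BroadcasterInterface>(monitor: &ChannelMonitor, broadcaster: &B) {
    for tx in monitor.get_latest_local_commitment_txn() {
        broadcaster.broadcast_transaction(&tx);
    }
}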
@@ -2088,19 +2135,21 @@ impl ChannelMonitor {
             }
         };
         if funding_txo.is_none() || (prevout.txid == funding_txo.as_ref().unwrap().0.txid && prevout.vout == funding_txo.as_ref().unwrap().0.index as u32) {
-            let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(tx, height, fee_estimator);
-            txn = remote_txn;
-            spendable_outputs.append(&mut spendable_output);
-            if !new_outputs.1.is_empty() {
-                watch_outputs.push(new_outputs);
-            }
-            if txn.is_empty() {
-                let (local_txn, mut spendable_output, new_outputs) = self.check_spend_local_transaction(tx, height);
+            if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
+                let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(tx, height, fee_estimator);
+                txn = remote_txn;
                 spendable_outputs.append(&mut spendable_output);
-                txn = local_txn;
                 if !new_outputs.1.is_empty() {
                     watch_outputs.push(new_outputs);
                 }
+                if txn.is_empty() {
+                    let (local_txn, mut spendable_output, new_outputs) = self.check_spend_local_transaction(tx, height);
+                    spendable_outputs.append(&mut spendable_output);
+                    txn = local_txn;
+                    if !new_outputs.1.is_empty() {
+                        watch_outputs.push(new_outputs);
+                    }
+                }
             }
             if !funding_txo.is_none() && txn.is_empty() {
                 if let Some(spendable_output) = self.check_spend_closing_transaction(tx) {
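The new outer check recognizes commitment transactions by their BOLT 3 markers: the commitment number is obscured and split across the transaction, with the upper 8 bits of the first input's sequence set to 0x80 and the upper 8 bits of the locktime set to 0x20. A small sketch of that encoding and of the recognition test used above:

// BOLT 3: locktime = (0x20 << 24) | lower 24 bits of the obscured commitment number,
// first input sequence = (0x80 << 24) | upper 24 bits of the obscured commitment number.
fn commitment_locktime_and_sequence(obscured_commitment_number: u64) -> (u32, u32) {
    let locktime = (0x20u32 << 24) | (obscured_commitment_number & 0x00ff_ffff) as u32;
    let sequence = (0x80u32 << 24) | ((obscured_commitment_number >> 24) & 0x00ff_ffff) as u32;
    (locktime, sequence)
}

// The cheap filter applied before running the full remote/local commitment checks.
fn looks_like_commitment_tx(first_input_sequence: u32, lock_time: u32) -> bool {
    (first_input_sequence >> 24) as u8 == 0x80 && (lock_time >> 24) as u8 == 0x20
}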
@@ -2627,6 +2676,15 @@ impl<R: ::std::io::Read> ReadableArgs<R, Arc<Logger>> for (Sha256dHash, ChannelM
 
         let last_block_hash: Sha256dHash = Readable::read(reader)?;
         let destination_script = Readable::read(reader)?;
+        let to_remote_rescue = match <u8 as Readable<R>>::read(reader)? {
+            0 => None,
+            1 => {
+                let to_remote_script = Readable::read(reader)?;
+                let local_key = Readable::read(reader)?;
+                Some((to_remote_script, local_key))
+            }
+            _ => return Err(DecodeError::InvalidValue),
+        };
 
         let our_claim_txn_waiting_first_conf_len: u64 = Readable::read(reader)?;
         let mut our_claim_txn_waiting_first_conf = HashMap::with_capacity(cmp::min(our_claim_txn_waiting_first_conf_len as usize, MAX_ALLOC_SIZE / 128));
@@ -2736,6 +2794,7 @@ impl<R: ::std::io::Read> ReadableArgs<R, Arc<Logger>> for (Sha256dHash, ChannelM
             payment_preimages,
 
             destination_script,
+            to_remote_rescue,
 
             our_claim_txn_waiting_first_conf,
 