BaseSign + ChannelMonitor temporary error support #1538

Closed
82 changes: 53 additions & 29 deletions lightning/src/chain/channelmonitor.rs
@@ -42,7 +42,7 @@ use chain;
use chain::{BestBlock, WatchedOutput};
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, Sign, KeysInterface};
use chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, Sign, KeysInterface, SignError};
use chain::onchaintx::OnchainTxHandler;
use chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use chain::Filter;
@@ -1098,7 +1098,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
broadcaster: &B,
fee_estimator: &F,
logger: &L,
) where
) -> Result<(), SignError> where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
@@ -1111,7 +1111,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
&self,
broadcaster: &B,
logger: &L,
) where
)
where
B::Target: BroadcasterInterface,
L::Target: Logger,
{
@@ -1210,7 +1211,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
/// substantial amount of time (a month or even a year) to get back funds. Best may be to contact
/// out-of-band the other node operator to coordinate with him if option is available to you.
/// In any-case, choice is up to the user.
pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Result<Vec<Transaction>, SignError>
where L::Target: Logger {
self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
}
@@ -1847,7 +1848,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {

/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
/// commitment_tx_infos which contain the payment hash have been revoked.
fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &F, logger: &L)
fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), SignError>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
@@ -1859,19 +1860,19 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr) => {
let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None);
self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger)?;
}
}
if let Some(txid) = self.current_counterparty_commitment_txid {
if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
claim_htlcs!(*commitment_number, txid);
return;
return Ok(());
}
}
if let Some(txid) = self.prev_counterparty_commitment_txid {
if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
claim_htlcs!(*commitment_number, txid);
return;
return Ok(());
}
}

@@ -1885,21 +1886,33 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
// block. Even if not, its a reasonable metric for the bump criteria on the HTLC
// transactions.
let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
if self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger).is_err() {
log_warn!(logger, "Unable to broadcast claims because signer is unavailable, will retry");
}
if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, self.best_block.height());
self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
if self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger).is_err() {
log_warn!(logger, "Unable to broadcast claims for prev tx because signer is unavailable, will retry");
}
}
}
Ok(())
}

pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
where B::Target: BroadcasterInterface,
L::Target: Logger,
{
for tx in self.get_latest_holder_commitment_txn(logger).iter() {
log_info!(logger, "Broadcasting local {}", log_tx!(tx));
broadcaster.broadcast_transaction(tx);
match self.get_latest_holder_commitment_txn(logger) {
Ok(txs) => {
for tx in txs.iter() {
log_info!(logger, "Broadcasting local {}", log_tx!(tx));
broadcaster.broadcast_transaction(tx);
}
}
Err(_) => {
log_warn!(logger, "Unable to broadcast holder tx because signer is unavailable, will retry");
}
}
self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
}
@@ -1945,7 +1958,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
},
ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
log_trace!(logger, "Updating ChannelMonitor with payment preimage");
self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, fee_estimator, logger)
// No further error handling needed
let _ = self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, fee_estimator, logger);
},
ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
log_trace!(logger, "Updating ChannelMonitor with commitment secret");
@@ -2291,10 +2305,11 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
}
}

pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Result<Vec<Transaction>, SignError>
where L::Target: Logger {
log_debug!(logger, "Getting signed latest holder commitment transaction!");
self.holder_tx_signed = true;
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript)?;
let txid = commitment_tx.txid();
let mut holder_transactions = vec![commitment_tx];
for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
@@ -2313,14 +2328,14 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
continue;
} else { None };
if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
&::bitcoin::OutPoint { txid, vout }, &preimage) {
&::bitcoin::OutPoint { txid, vout }, &preimage)? {
holder_transactions.push(htlc_tx);
}
}
}
// We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
// The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation.
holder_transactions
Ok(holder_transactions)
}

#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
@@ -2504,17 +2519,24 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
claimable_outpoints.push(commitment_package);
self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
self.holder_tx_signed = true;
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
match self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
Ok(commitment_tx) => {
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
}
claimable_outpoints.append(&mut new_outpoints);

}
Err(_) => {
log_warn!(logger, "Unable to broadcast holder commitment tx because the signer is not available, will retry");
}
}
claimable_outpoints.append(&mut new_outpoints);
}

// Find which on-chain events have reached their confirmation threshold.
Expand Down Expand Up @@ -2587,7 +2609,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
}
}

self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
if self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger).is_err() {
log_warn!(logger, "Unable to broadcast claims because signer was not available, will retry");
}

// Determine new outputs to watch by comparing against previously known outputs to watch,
// updating the latter in the process.
@@ -3580,7 +3604,7 @@ mod tests {
monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
for &(ref preimage, ref hash) in preimages.iter() {
monitor.provide_payment_preimage(hash, preimage, &broadcaster, &fee_estimator, &logger);
monitor.provide_payment_preimage(hash, preimage, &broadcaster, &fee_estimator, &logger).unwrap();
}

// Now provide a secret, pruning preimages 10-15
38 changes: 24 additions & 14 deletions lightning/src/chain/keysinterface.rs
@@ -187,6 +187,15 @@ impl_writeable_tlv_based_enum!(SpendableOutputDescriptor,
(2, StaticPaymentOutput),
);

/// An error while signing
#[derive(Debug)]
pub enum SignError {
/// The signer is temporarily unavailable
Temporary,
/// A signer internal error
Internal
}
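
Not part of the diff, but as a rough illustration of how an implementor might use these two variants: a signer backed by a remote device could map transport failures to Temporary and key-derivation failures to Internal. All names below (RemoteSigner, RpcError, rpc_sign) are hypothetical.

    // Hypothetical sketch only; these types are not part of rust-lightning.
    #[derive(Debug)]
    enum SignError { Temporary, Internal } // mirrors the enum above

    #[derive(Debug)]
    enum RpcError { Unreachable, Timeout, Derivation }

    struct RemoteSigner;

    impl RemoteSigner {
        // Stand-in for a network round-trip to the signing device.
        fn rpc_sign(&self, _msg: &[u8]) -> Result<Vec<u8>, RpcError> {
            Err(RpcError::Unreachable)
        }

        fn sign_commitment(&self, msg: &[u8]) -> Result<Vec<u8>, SignError> {
            self.rpc_sign(msg).map_err(|e| match e {
                // Device offline or slow: worth retrying once it reconnects.
                RpcError::Unreachable | RpcError::Timeout => SignError::Temporary,
                // Key-derivation or policy failure: not retryable.
                RpcError::Derivation => SignError::Internal,
            })
        }
    }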

Just to confirm we all agree on the direction: in case of a SignError::Temporary yielded by finalize_claim_tx, do we return a PendingEvent::SignTransaction to ChannelMonitor, queued on a new internal buffer?

How would we drain the queue? Inside block_confirmed, or with a new monitor_timer_tick_occured()? I lean toward the latter, as elapsing blocks are a bit precious in Lightning.

If we get a SignError::Internal, maybe we should flow it back up to ChannelManager with a ChannelMonitor::TemporaryFailure and stop accepting HTLCs we won't be able to claim anyway?

Member Author

> Just to confirm we all agree on the direction: in case of a SignError::Temporary yielded by finalize_claim_tx, do we return a PendingEvent::SignTransaction to ChannelMonitor, queued on a new internal buffer?

Initially, I thought we would just call ChannelMonitor.update_claims_view with empty inputs and the current logic would have it retry the requests in self.pending_claim_requests. It seemed like the least invasive approach.

However, it seems that @TheBlueMatt prefers that we just retry the signing/broadcasting part, and not the rest of the logic. Maybe he can clarify how he envisions the triggering of the retry.

As to implementation details - there are already three buffers in ChannelMonitorImpl:

	pending_monitor_events: Vec<MonitorEvent>,
	pending_events: Vec<Event>,
	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,

I'm wondering if any of these could be used for this purpose.

> How would we drain the queue? Inside block_confirmed, or with a new monitor_timer_tick_occured()? I lean toward the latter, as elapsing blocks are a bit precious in Lightning.

The signer may connect at any time, which may not overlap with the block arrival time. So it's probably best that the BackgroundProcessor tick gets triggered by the signer connecting so we can catch up on all housekeeping tasks. ChannelMonitor already implements EventsProvider::process_pending_events, so perhaps it makes sense to add the code there. Or it may be cleaner to add another call as you say.

> If we get a SignError::Internal, maybe we should flow it back up to ChannelManager with a ChannelMonitor::TemporaryFailure and stop accepting HTLCs we won't be able to claim anyway?

The Internal error is just a renaming of the existing Result<_, ()> error returns from BaseSign. It seems like we currently almost always panic, or we should, because it's caused by a key derivation failing, which has an infinitesimal chance of happening and probably indicates a hardware / RNG problem. In any case, it's out of scope of the current effort to change the handling of this case.

Collaborator

> However, it seems that @TheBlueMatt prefers that we just retry the signing/broadcasting part, and not the rest of the logic. Maybe he can clarify how he envisions the triggering of the retry.

I'd imagine we'd leave that up to the user or maybe even do it on every block (though that will basically break ~every test, so maybe we don't do that by default in tests?). If the signer is connected intermittently, hopefully the user has some mechanism to learn when the signer is available so they can re-sign.

> I'd imagine we'd leave that up to the user or maybe even do it on every block (though that will basically break ~every test, so maybe we don't do that by default in tests?).

IIUC the idea is to retry just the signing/broadcasting part, and I'm good with that. The update_claims_view logic is likely to grow more complex in the future (e.g. locking fee-bumping UTXOs), so it's better to split it out now to avoid a dependency bug like the one above.

> As to implementation details - there are already three buffers in ChannelMonitorImpl:

I think the last one, onchain_events_awaiting_threshold_conf, would fit; all of its events are destined to be consumed by ChannelMonitorImpl. Maybe it could be renamed to something like `monitor_events_awaiting_time_point`, where the time point is either a blockchain height or the signer waking up.

> The signer may connect at any time, which may not overlap with the block arrival time. So it's probably best that the BackgroundProcessor tick gets triggered by the signer connecting so we can catch up on all housekeeping tasks. ChannelMonitor already implements EventsProvider::process_pending_events, so perhaps it makes sense to add the code there. Or it may be cleaner to add another call as you say.

Yeah, ChannelMonitorImpl::get_and_clear_pending_events sounds like a good temporary location for now.

> The Internal error is just a renaming of the existing Result<_, ()> error returns from BaseSign. It seems like we currently almost always panic, or we should, because it's caused by a key derivation failing, which has an infinitesimal chance of happening and probably indicates a hardware / RNG problem. In any case, it's out of scope of the current effort to change the handling of this case.

Sure, we can defer how we react to catastrophic hardware failures to the future.
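
To summarize the direction sketched in this thread: work that fails with SignError::Temporary gets queued and retried once the signer becomes available again (or on a timer tick), while Internal stays effectively fatal. A minimal sketch of that pattern, assuming a hypothetical RetryQueue that does not exist in rust-lightning:

    // Hypothetical sketch; SignError mirrors the enum in this diff.
    #[derive(Debug)]
    enum SignError { Temporary, Internal }

    struct RetryQueue<T> { pending: Vec<T> }

    impl<T> RetryQueue<T> {
        fn new() -> Self { RetryQueue { pending: Vec::new() } }

        // Try `op` on `item`; on a Temporary failure, stash the item for later.
        fn run_or_queue(&mut self, item: T, op: &impl Fn(&T) -> Result<(), SignError>) {
            match op(&item) {
                Ok(()) => {},
                Err(SignError::Temporary) => self.pending.push(item),
                // Internal indicates a key-derivation/hardware problem, which the
                // discussion above treats as effectively fatal for now.
                Err(SignError::Internal) => panic!("unrecoverable signer error"),
            }
        }

        // Drain the queue when the signer reconnects, or from a timer tick such
        // as the monitor_timer_tick_occured() floated above.
        fn retry_all(&mut self, op: &impl Fn(&T) -> Result<(), SignError>) {
            for item in std::mem::take(&mut self.pending) {
                self.run_or_queue(item, op);
            }
        }
    }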


/// A trait to sign lightning channel transactions as described in BOLT 3.
///
/// Signing services could be implemented on a hardware wallet. In this case,
@@ -278,7 +287,7 @@ pub trait BaseSign {
//
// TODO: Document the things someone using this interface should enforce before signing.
// TODO: Key derivation failure should panic rather than Err
fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()>;
fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), SignError>;

/// Same as sign_holder_commitment, but exists only for tests to get access to holder commitment
/// transactions which will be broadcasted later, after the channel has moved on to a newer
@@ -301,7 +310,7 @@ pub trait BaseSign {
/// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
/// not allow the spending of any funds by itself (you need our holder revocation_secret to do
/// so).
fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, SignError>;

/// Create a signature for the given input in a transaction spending a commitment transaction
/// HTLC output when our counterparty broadcasts an old state.
@@ -320,7 +329,7 @@ pub trait BaseSign {
///
/// htlc holds HTLC elements (hash, timelock), thus changing the format of the witness script
/// (which is committed to in the BIP 143 signatures).
fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, SignError>;

/// Create a signature for a claiming transaction for a HTLC output on a counterparty's commitment
/// transaction, either offered or received.
@@ -683,13 +692,14 @@ impl BaseSign for InMemorySigner {
Ok(())
}

fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), SignError> {
let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
let funding_redeemscript = make_funding_redeemscript(&funding_pubkey, &self.counterparty_pubkeys().funding_pubkey);
let trusted_tx = commitment_tx.trust();
let sig = trusted_tx.built_transaction().sign(&self.funding_key, &funding_redeemscript, self.channel_value_satoshis, secp_ctx);
let channel_parameters = self.get_channel_parameters();
let htlc_sigs = trusted_tx.get_htlc_sigs(&self.htlc_base_key, &channel_parameters.as_holder_broadcastable(), secp_ctx)?;
let htlc_sigs = trusted_tx.get_htlc_sigs(&self.htlc_base_key, &channel_parameters.as_holder_broadcastable(), secp_ctx)
.map_err(|()| SignError::Internal)?;
Ok((sig, htlc_sigs))
}

@@ -704,26 +714,26 @@ impl BaseSign for InMemorySigner {
Ok((sig, htlc_sigs))
}

fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
let revocation_key = chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_key, &self.revocation_base_key).map_err(|_| ())?;
fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, SignError> {
let revocation_key = chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_key, &self.revocation_base_key).map_err(|_| SignError::Internal)?;
let per_commitment_point = PublicKey::from_secret_key(secp_ctx, &per_commitment_key);
let revocation_pubkey = chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &self.pubkeys().revocation_basepoint).map_err(|_| ())?;
let revocation_pubkey = chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &self.pubkeys().revocation_basepoint).map_err(|_| SignError::Internal)?;
let witness_script = {
let counterparty_delayedpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().delayed_payment_basepoint).map_err(|_| ())?;
let counterparty_delayedpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().delayed_payment_basepoint).map_err(|_| SignError::Internal)?;
chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.holder_selected_contest_delay(), &counterparty_delayedpubkey)
};
let mut sighash_parts = sighash::SighashCache::new(justice_tx);
let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
return Ok(sign(secp_ctx, &sighash, &revocation_key))
}

fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
let revocation_key = chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_key, &self.revocation_base_key).map_err(|_| ())?;
fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, SignError> {
let revocation_key = chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_key, &self.revocation_base_key).map_err(|_| SignError::Internal)?;
let per_commitment_point = PublicKey::from_secret_key(secp_ctx, &per_commitment_key);
let revocation_pubkey = chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &self.pubkeys().revocation_basepoint).map_err(|_| ())?;
let revocation_pubkey = chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &self.pubkeys().revocation_basepoint).map_err(|_| SignError::Internal)?;
let witness_script = {
let counterparty_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().htlc_basepoint).map_err(|_| ())?;
let holder_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.pubkeys().htlc_basepoint).map_err(|_| ())?;
let counterparty_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().htlc_basepoint).map_err(|_| SignError::Internal)?;
let holder_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.pubkeys().htlc_basepoint).map_err(|_| SignError::Internal)?;
chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, self.opt_anchors(), &counterparty_htlcpubkey, &holder_htlcpubkey, &revocation_pubkey)
};
let mut sighash_parts = sighash::SighashCache::new(justice_tx);