
Commit c5648f2

henghonglee authored and jkczyz committed
Use wrapper to add context to logging
Using a decorator pattern, add peer_id and channel_id to Record stored on Logger.
1 parent ce47d24 commit c5648f2
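
The decorator pattern mentioned in the commit message works by wrapping the node-wide logger in a per-channel value that stamps peer and channel context onto each record before forwarding it to the inner logger. Below is a minimal, self-contained sketch of that idea; the types here (`Record`, `PrintLogger`, `WithChannelContext`) are simplified illustrative stand-ins, not LDK's actual `Logger`/`Record`/`WithChannelMonitor` definitions.

// Illustrative sketch only -- simplified stand-ins for LDK's Logger/Record/WithChannelMonitor.
struct Record {
	message: String,
	peer_id: Option<String>,
	channel_id: Option<String>,
}

trait Logger {
	fn log(&self, record: Record);
}

// A plain logger that prints whatever context the record carries.
struct PrintLogger;
impl Logger for PrintLogger {
	fn log(&self, record: Record) {
		println!("[peer {:?}, chan {:?}] {}", record.peer_id, record.channel_id, record.message);
	}
}

// The decorator: wraps another logger and stamps peer/channel context onto
// every record before forwarding it, so call sites only supply the message.
struct WithChannelContext<'a, L: Logger> {
	inner: &'a L,
	peer_id: Option<String>,
	channel_id: Option<String>,
}

impl<'a, L: Logger> Logger for WithChannelContext<'a, L> {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id.clone();
		record.channel_id = self.channel_id.clone();
		self.inner.log(record);
	}
}

fn main() {
	let base = PrintLogger;
	let logger = WithChannelContext {
		inner: &base,
		peer_id: Some("03abc123".to_string()),
		channel_id: Some("0123456789abcdef".to_string()),
	};
	logger.log(Record { message: "Syncing Channel Monitor".to_string(), peer_id: None, channel_id: None });
}

In the diff below, the same shape appears as `WithChannelMonitor::from(&self.logger, &monitor)`, constructed wherever a `ChannelMonitor` is in scope, so individual log call sites no longer need the context threaded through by hand.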

5 files changed: +284 -220 lines


lightning/src/chain/chainmonitor.rs (+37, -23)

@@ -29,7 +29,7 @@ use bitcoin::hash_types::{Txid, BlockHash};
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::WriteableEcdsaChannelSigner;
 use crate::events;
@@ -359,6 +359,7 @@ where C::Target: chain::Filter,
 		process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>
 	) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
 		let monitor = &monitor_state.monitor;
+		let logger = WithChannelMonitor::from(&self.logger, &monitor);
 		let mut txn_outputs;
 		{
 			txn_outputs = process(monitor, txdata);
@@ -375,12 +376,12 @@ where C::Target: chain::Filter,
 				}
 			}

-			log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+			log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
 			match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
 				ChannelMonitorUpdateStatus::Completed =>
-					log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+					log_trace!(logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
 				ChannelMonitorUpdateStatus::InProgress => {
-					log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+					log_debug!(logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
 					pending_monitor_updates.push(update_id);
 				},
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
@@ -619,8 +620,9 @@ where C::Target: chain::Filter,
 	pub fn rebroadcast_pending_claims(&self) {
 		let monitors = self.monitors.read().unwrap();
 		for (_, monitor_holder) in &*monitors {
+			let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
 			monitor_holder.monitor.rebroadcast_pending_claims(
-				&*self.broadcaster, &*self.fee_estimator, &*self.logger
+				&*self.broadcaster, &*self.fee_estimator, &logger
 			)
 		}
 	}
@@ -638,17 +640,19 @@ where
 	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
 		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
+			let logger = WithChannelMonitor::from(&self.logger, &monitor);
 			monitor.block_connected(
-				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
 		});
 	}

 	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
 		let monitor_states = self.monitors.read().unwrap();
 		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
 		for monitor_state in monitor_states.values() {
+			let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
 			monitor_state.monitor.block_disconnected(
-				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+				header, height, &*self.broadcaster, &*self.fee_estimator, &logger);
 		}
 	}
 }
@@ -665,27 +669,30 @@ where
 	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
 		self.process_chain_data(header, None, txdata, |monitor, txdata| {
+			let logger = WithChannelMonitor::from(&self.logger, &monitor);
 			monitor.transactions_confirmed(
-				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
 		});
 	}

 	fn transaction_unconfirmed(&self, txid: &Txid) {
 		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
 		let monitor_states = self.monitors.read().unwrap();
 		for monitor_state in monitor_states.values() {
-			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+			let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
+			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &logger);
 		}
 	}

 	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
 		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
 		self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
+			let logger = WithChannelMonitor::from(&self.logger, &monitor);
 			// While in practice there shouldn't be any recursive calls when given empty txdata,
 			// it's still possible if a chain::Filter implementation returns a transaction.
 			debug_assert!(txdata.is_empty());
 			monitor.best_block_updated(
-				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+				header, height, &*self.broadcaster, &*self.fee_estimator, &logger)
 		});
 	}

@@ -711,29 +718,30 @@ where C::Target: chain::Filter,
 	P::Target: Persist<ChannelSigner>,
 {
 	fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
+		let logger = WithChannelMonitor::from(&self.logger, &monitor);
 		let mut monitors = self.monitors.write().unwrap();
 		let entry = match monitors.entry(funding_outpoint) {
 			hash_map::Entry::Occupied(_) => {
-				log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
+				log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
 				return Err(());
 			},
 			hash_map::Entry::Vacant(e) => e,
 		};
-		log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
+		log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
 		let update_id = MonitorUpdateId::from_new_monitor(&monitor);
 		let mut pending_monitor_updates = Vec::new();
 		let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
 		match persist_res {
 			ChannelMonitorUpdateStatus::InProgress => {
-				log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+				log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
 				pending_monitor_updates.push(update_id);
 			},
 			ChannelMonitorUpdateStatus::Completed => {
-				log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+				log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
 			},
 			ChannelMonitorUpdateStatus::UnrecoverableError => {
 				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-				log_error!(self.logger, "{}", err_str);
+				log_error!(logger, "{}", err_str);
 				panic!("{}", err_str);
 			},
 		}
@@ -765,7 +773,8 @@ where C::Target: chain::Filter,
 			},
 			Some(monitor_state) => {
 				let monitor = &monitor_state.monitor;
-				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
+				let logger = WithChannelMonitor::from(&self.logger, &monitor);
+				log_trace!(logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
 				let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);

 				let update_id = MonitorUpdateId::from_monitor_update(update);
@@ -776,18 +785,18 @@ where C::Target: chain::Filter,
 					// We don't want to persist a `monitor_update` which results in a failure to apply later
 					// while reading `channel_monitor` with updates from storage. Instead, we should persist
 					// the entire `channel_monitor` here.
-					log_warn!(self.logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
+					log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
 					self.persister.update_persisted_channel(funding_txo, None, monitor, update_id)
 				} else {
 					self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id)
 				};
 				match persist_res {
 					ChannelMonitorUpdateStatus::InProgress => {
 						pending_monitor_updates.push(update_id);
-						log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+						log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
 					},
 					ChannelMonitorUpdateStatus::Completed => {
-						log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+						log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
 					},
 					ChannelMonitorUpdateStatus::UnrecoverableError => { /* we'll panic in a moment */ },
 				}
@@ -799,12 +808,16 @@ where C::Target: chain::Filter,
 			}
 		};
 		if let ChannelMonitorUpdateStatus::UnrecoverableError = ret {
+			let logger = WithChannelMonitor::from(
+				&self.logger, &monitors.get(&funding_txo).unwrap().monitor
+			);
+
 			// Take the monitors lock for writing so that we poison it and any future
 			// operations going forward fail immediately.
 			core::mem::drop(monitors);
 			let _poison = self.monitors.write().unwrap();
 			let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-			log_error!(self.logger, "{}", err_str);
+			log_error!(logger, "{}", err_str);
 			panic!("{}", err_str);
 		}
 		ret
@@ -813,12 +826,13 @@ where C::Target: chain::Filter,
 	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
+			let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
 			let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
 			if !is_pending_monitor_update || monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize <= self.highest_chain_height.load(Ordering::Acquire) {
 				if is_pending_monitor_update {
-					log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
-					log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released.");
-					log_error!(self.logger, " This may cause duplicate payment events to be generated.");
+					log_error!(logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
+					log_error!(logger, " To avoid funds-loss, we are allowing monitor updates to be released.");
+					log_error!(logger, " This may cause duplicate payment events to be generated.");
 				}
 				let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
 				if monitor_events.len() > 0 {

lightning/src/chain/channelmonitor.rs (+5, -5)

@@ -4167,11 +4167,11 @@ where
 	L::Target: Logger,
 {
 	fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
-		self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3);
+		self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
 	}

 	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
-		self.0.block_disconnected(header, height, &*self.1, &*self.2, &*self.3);
+		self.0.block_disconnected(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
 	}
 }

@@ -4183,15 +4183,15 @@ where
 	L::Target: Logger,
 {
 	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
-		self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &*self.3);
+		self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
 	}

 	fn transaction_unconfirmed(&self, txid: &Txid) {
-		self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &*self.3);
+		self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
 	}

 	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
-		self.0.best_block_updated(header, height, &*self.1, &*self.2, &*self.3);
+		self.0.best_block_updated(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
 	}

 	fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {

lightning/src/ln/channel.rs (+5, -5)

@@ -34,7 +34,7 @@ use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
 use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 use crate::events::ClosureReason;
@@ -2748,14 +2748,14 @@ impl<SP: Deref> Channel<SP> where
 			funding_redeemscript.clone(), self.context.channel_value_satoshis,
 			obscure_factor,
 			holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
+		let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
 		channel_monitor.provide_initial_counterparty_commitment_tx(
 			counterparty_initial_bitcoin_tx.txid, Vec::new(),
 			self.context.cur_counterparty_commitment_transaction_number,
 			self.context.counterparty_cur_commitment_point.unwrap(),
 			counterparty_initial_commitment_tx.feerate_per_kw(),
 			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);

 		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
 		if self.context.is_batch_funding() {
@@ -6910,13 +6910,13 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 			funding_redeemscript.clone(), self.context.channel_value_satoshis,
 			obscure_factor,
 			holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
+		let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
 		channel_monitor.provide_initial_counterparty_commitment_tx(
 			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
 			self.context.cur_counterparty_commitment_transaction_number + 1,
 			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
 			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);

 		log_info!(logger, "{} funding_signed for peer for channel {}",
 			if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
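
A note on the `&&logger_with_chan_monitor` arguments in the channel.rs hunks above: `provide_initial_counterparty_commitment_tx` presumably keeps LDK's usual `L: Deref, L::Target: Logger` bound, so the wrapper cannot be passed as a plain `&logger_with_chan_monitor`; the extra reference layer makes the argument a `&L` where `L` is `&WithChannelMonitor<..>`, and that reference derefs to a type implementing `Logger`. A small stand-alone sketch of that shape, with illustrative names (`needs_logger`, `Wrapper`, `PrintLogger` are not LDK items):

use std::ops::Deref;

trait Logger {
	fn log(&self, msg: &str);
}

struct PrintLogger;
impl Logger for PrintLogger {
	fn log(&self, msg: &str) { println!("{}", msg); }
}

// A context-adding wrapper that itself implements Logger directly.
struct Wrapper<'a, L: Logger> {
	inner: &'a L,
	channel_id: &'static str,
}
impl<'a, L: Logger> Logger for Wrapper<'a, L> {
	fn log(&self, msg: &str) {
		self.inner.log(&format!("[chan {}] {}", self.channel_id, msg));
	}
}

// Mirrors the `L: Deref, L::Target: Logger` bound assumed for the callee.
fn needs_logger<L: Deref>(logger: &L) where L::Target: Logger {
	logger.log("hello");
}

fn main() {
	let base = PrintLogger;
	let wrapper = Wrapper { inner: &base, channel_id: "0123abcd" };
	// `&&wrapper`: the inner `&wrapper` is the `L` (a reference derefs to the
	// wrapper, which implements Logger); the outer `&` satisfies the `&L` parameter.
	needs_logger(&&wrapper);
}

If the callee instead took `&impl Logger` directly, a single `&logger_with_chan_monitor` would have sufficed; the double reference is simply what the generic `Deref` bound asks for.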

0 commit comments
