@@ -24,7 +24,6 @@ use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
 use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
-use crate::chain::ChannelMonitorUpdateStatus;
 use crate::ln::channelmanager::ChannelManager;
 use crate::routing::router::Router;
 use crate::routing::gossip::NetworkGraph;
@@ -347,7 +346,7 @@ where
 ///
 /// # Pruning stale channel updates
 ///
-/// Stale updates are pruned when consolidation threshold is reached according to `maximum_pending_updates`.
+/// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
 /// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
 /// are deleted.
 /// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
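The pruning rule described in the doc comment above boils down to a single range computation: after a consolidating full monitor write at the latest `update_id`, the update ids in `[update_id - maximum_pending_updates, update_id]` become candidates for lazy removal. A minimal standalone sketch of that arithmetic, using a hypothetical helper name that is not part of this patch:

// Hypothetical helper (illustration only): the range of stale update ids that
// the doc comment above says are deleted after a full write at `latest_update_id`.
fn pruned_update_range(latest_update_id: u64, maximum_pending_updates: u64) -> (u64, u64) {
	// Saturating subtraction guards the earliest updates, whose ids are
	// smaller than `maximum_pending_updates`.
	let start = latest_update_id.saturating_sub(maximum_pending_updates);
	(start, latest_update_id)
}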
@@ -630,7 +629,7 @@ where
 			Err(e) => {
 				log_error!(
 					self.logger,
-					"error writing channel monitor {}/{}/{} reason: {}",
+					"Failed to write ChannelMonitor {}/{}/{} reason: {}",
 					CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
 					CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
 					monitor_name.as_str(),
@@ -672,7 +671,7 @@ where
 					Err(e) => {
 						log_error!(
 							self.logger,
-							"error writing channel monitor update {}/{}/{} reason: {}",
+							"Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
 							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
 							monitor_name.as_str(),
 							update_name.as_str(),
@@ -686,27 +685,24 @@ where
 				// In case of channel-close monitor update, we need to read old monitor before persisting
 				// the new one in order to determine the cleanup range.
 				let maybe_old_monitor = match monitor.get_latest_update_id() {
-					CLOSED_CHANNEL_UPDATE_ID => Some(self.read_monitor(&monitor_name)),
+					CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
 					_ => None
 				};
 
 				// We could write this update, but it meets criteria of our design that calls for a full monitor write.
 				let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
 
-				if let ChannelMonitorUpdateStatus::Completed = monitor_update_status {
+				if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
 					let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
-						match maybe_old_monitor {
-							Some(Ok((_, ref old_monitor))) => {
-								let start = old_monitor.get_latest_update_id();
-								// We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
-								let end = cmp::min(
-									start.saturating_add(self.maximum_pending_updates),
-									CLOSED_CHANNEL_UPDATE_ID - 1,
-								);
-								Some((start, end))
-							}
-							_ => None
-						}
+						maybe_old_monitor.map(|(_, ref old_monitor)| {
+							let start = old_monitor.get_latest_update_id();
+							// We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
+							let end = cmp::min(
+								start.saturating_add(self.maximum_pending_updates),
+								CLOSED_CHANNEL_UPDATE_ID - 1,
+							);
+							(start, end)
+						})
 					} else {
 						let end = monitor.get_latest_update_id();
 						let start = end.saturating_sub(self.maximum_pending_updates);
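The hunk above only refactors how the channel-close cleanup range is produced (a `match` over `Option<Result<..>>` becomes a `map` over the `Option` returned by `.ok()`); the arithmetic itself is unchanged: the range starts at the previously persisted monitor's latest update id and is capped so that `CLOSED_CHANNEL_UPDATE_ID` itself is never pruned. A minimal standalone sketch of that computation, assuming `CLOSED_CHANNEL_UPDATE_ID` is `u64::MAX` as in LDK and using a hypothetical function name:

use core::cmp;

// Assumed value: in LDK this constant is u64::MAX; redefined here only to keep
// the sketch self-contained.
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

// Hypothetical helper (illustration only): cleanup range used when a monitor is
// persisted with CLOSED_CHANNEL_UPDATE_ID, mirroring the closure in the hunk above.
fn closed_channel_cleanup_range(old_latest_update_id: u64, maximum_pending_updates: u64) -> (u64, u64) {
	let start = old_latest_update_id;
	// We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID,
	// so cap the end of the range one below it.
	let end = cmp::min(start.saturating_add(maximum_pending_updates), CLOSED_CHANNEL_UPDATE_ID - 1);
	(start, end)
}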
@@ -746,7 +742,7 @@ where
 			) {
 				log_error!(
 					self.logger,
-					"error cleaning up channel monitor updates for monitor {}, reason: {}",
+					"Failed to clean up channel monitor updates for monitor {}, reason: {}",
 					monitor_name.as_str(),
 					e
 				);