@@ -78,7 +78,7 @@ impl MonitorUpdateId {
/// `Persist` defines behavior for persisting channel monitors: this could mean
/// writing once to disk, and/or uploading to one or more backup services.
///
- /// Each method can return three possible values:
+ /// Each method can return two possible values:
///  * If persistence (including any relevant `fsync()` calls) happens immediately, the
///    implementation should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal
///    channel operation should continue.
@@ -91,10 +91,9 @@ impl MonitorUpdateId {
/// Note that unlike the direct [`chain::Watch`] interface,
/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
- ///  * If persistence fails for some reason, implementations should return
- ///    [`ChannelMonitorUpdateStatus::PermanentFailure`], in which case the channel will likely be
- ///    closed without broadcasting the latest state. See
- ///    [`ChannelMonitorUpdateStatus::PermanentFailure`] for more details.
+ /// If persistence fails for some reason, implementations should still return
+ /// [`ChannelMonitorUpdateStatus::InProgress`] and attempt to shut down or otherwise resolve the
+ /// situation ASAP.
///
/// Third-party watchtowers may be built as a part of an implementation of this trait, with the
/// advantage that you can control whether to resume channel operation depending on if an update
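The doc change above removes `PermanentFailure` from the documented return values, leaving only `Completed` and `InProgress`. As a rough, self-contained sketch of the behaviour the new text asks for (this is not the actual `Persist` trait signature; the struct and field names below are invented for illustration), a persister reports `Completed` only after the write and `fsync()` succeed, and otherwise reports `InProgress`, retries in the background, and later signals completion through `ChainMonitor::channel_monitor_updated`:

```rust
use std::fs::{self, File};
use std::io::Write;
use std::path::PathBuf;
use std::sync::Mutex;

/// Stand-in for `ChannelMonitorUpdateStatus` after this change: only the
/// `Completed` and `InProgress` variants are left for persisters to return.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UpdateStatus {
    Completed,
    InProgress,
}

/// Hypothetical persister that writes each monitor snapshot to its own file.
struct FilePersister {
    dir: PathBuf,
    /// Update ids whose persistence has not finished yet. A background task
    /// would retry these and then call `ChainMonitor::channel_monitor_updated`.
    pending: Mutex<Vec<u64>>,
}

impl FilePersister {
    fn persist(&self, update_id: u64, serialized_monitor: &[u8]) -> UpdateStatus {
        let path = self.dir.join(format!("monitor-{update_id}.bin"));
        let write = || -> std::io::Result<()> {
            let mut file = File::create(&path)?;
            file.write_all(serialized_monitor)?;
            // The docs only allow `Completed` once the relevant fsync() is done.
            file.sync_all()?;
            Ok(())
        };
        match write() {
            Ok(()) => UpdateStatus::Completed,
            Err(e) => {
                // No more hard failures: remember the update and report it as
                // still in flight rather than permanently failed.
                eprintln!("persisting update {update_id} failed ({e}); retrying in background");
                self.pending.lock().unwrap().push(update_id);
                UpdateStatus::InProgress
            }
        }
    }
}

fn main() {
    let dir = std::env::temp_dir().join("persist-sketch");
    fs::create_dir_all(&dir).unwrap();
    let persister = FilePersister { dir, pending: Mutex::new(Vec::new()) };
    assert_eq!(persister.persist(1, b"monitor bytes"), UpdateStatus::Completed);
}
```

The important part is the error branch: instead of surfacing a hard failure that closes the channel, the persister keeps the update pending until it can be durably written.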
@@ -335,11 +334,6 @@ where C::Target: chain::Filter,
        match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
            ChannelMonitorUpdateStatus::Completed =>
                log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
-             ChannelMonitorUpdateStatus::PermanentFailure => {
-                 monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                 self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
-                 self.event_notifier.notify();
-             }
            ChannelMonitorUpdateStatus::InProgress => {
                log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                pending_monitor_updates.push(update_id);
@@ -673,12 +667,12 @@ where C::Target: chain::Filter,
    ///
    /// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
    /// monitors lock.
-     fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
+     fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
        let mut monitors = self.monitors.write().unwrap();
        let entry = match monitors.entry(funding_outpoint) {
            hash_map::Entry::Occupied(_) => {
                log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
-                 return ChannelMonitorUpdateStatus::PermanentFailure
+                 return Err(());
            },
            hash_map::Entry::Vacant(e) => e,
        };
@@ -691,10 +685,6 @@ where C::Target: chain::Filter,
                log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
                pending_monitor_updates.push(update_id);
            },
-             ChannelMonitorUpdateStatus::PermanentFailure => {
-                 log_error!(self.logger, "Persistence of new ChannelMonitor for channel {} failed", log_funding_info!(monitor));
-                 return persist_res;
-             },
            ChannelMonitorUpdateStatus::Completed => {
                log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
            }
@@ -708,7 +698,7 @@ where C::Target: chain::Filter,
            channel_perm_failed: AtomicBool::new(false),
            last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)),
        });
-         persist_res
+         Ok(persist_res)
    }

    /// Note that we persist the given `ChannelMonitor` update while holding the
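Since `watch_channel` now returns `Result<ChannelMonitorUpdateStatus, ()>`, the "monitor already registered" case becomes a plain `Err(())` instead of being folded into the status enum, so callers match on two layers. Below is a hedged, standalone sketch of the new call-site shape; the function only mimics the return type and is not the real `chain::Watch` implementation:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UpdateStatus {
    Completed,
    InProgress,
}

/// Illustrative only: mirrors the shape of the new
/// `watch_channel(...) -> Result<ChannelMonitorUpdateStatus, ()>` return type.
fn watch_channel_sketch(already_registered: bool, persisted_inline: bool) -> Result<UpdateStatus, ()> {
    if already_registered {
        // Previously reported as ChannelMonitorUpdateStatus::PermanentFailure;
        // now an ordinary error the caller must handle.
        return Err(());
    }
    Ok(if persisted_inline { UpdateStatus::Completed } else { UpdateStatus::InProgress })
}

fn main() {
    match watch_channel_sketch(false, true) {
        Ok(UpdateStatus::Completed) => println!("monitor persisted; normal channel operation continues"),
        Ok(UpdateStatus::InProgress) => println!("persistence pending; hold progress until it completes"),
        Err(()) => println!("registration rejected (e.g. duplicate funding outpoint)"),
    }
}
```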
@@ -723,10 +713,10 @@ where C::Target: chain::Filter,
                // We should never ever trigger this from within ChannelManager. Technically a
                // user could use this object with some proxying in between which makes this
                // possible, but in tests and fuzzing, this should be a panic.
-                 #[cfg(any(test, fuzzing))]
+                 #[cfg(debug_assertions)]
                panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
-                 #[cfg(not(any(test, fuzzing)))]
-                 ChannelMonitorUpdateStatus::PermanentFailure
+                 #[cfg(not(debug_assertions))]
+                 ChannelMonitorUpdateStatus::InProgress
            },
            Some(monitor_state) => {
                let monitor = &monitor_state.monitor;
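One thing to note in the hunk above: the gate on the panic moves from `cfg(any(test, fuzzing))` to `cfg(debug_assertions)`, so any debug build now panics when an update arrives for an unregistered channel, while release builds report the update as `InProgress` rather than `PermanentFailure`. A minimal standalone illustration of that debug/release split (the names here are invented for the example):

```rust
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
enum UpdateStatus {
    Completed,
    InProgress,
}

/// Illustrative only. With debug assertions enabled (the default for
/// `cargo build` and `cargo test`) this panics; in `--release` builds it
/// reports the persistence as still in progress instead of a hard failure.
fn status_for_unknown_channel() -> UpdateStatus {
    #[cfg(debug_assertions)]
    panic!("update for a channel that was not yet registered");
    #[cfg(not(debug_assertions))]
    UpdateStatus::InProgress
}

fn main() {
    // Panics in a debug build, prints `InProgress` in a release build.
    println!("{:?}", status_for_unknown_channel());
}
```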
@@ -745,18 +735,14 @@ where C::Target: chain::Filter,
                        pending_monitor_updates.push(update_id);
                        log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
                    },
-                     ChannelMonitorUpdateStatus::PermanentFailure => {
-                         monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                         log_error!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} failed", log_funding_info!(monitor));
-                     },
                    ChannelMonitorUpdateStatus::Completed => {
                        log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
                    },
                }
                if update_res.is_err() {
-                     ChannelMonitorUpdateStatus::PermanentFailure
+                     ChannelMonitorUpdateStatus::InProgress
                } else if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
-                     ChannelMonitorUpdateStatus::PermanentFailure
+                     ChannelMonitorUpdateStatus::InProgress
                } else {
                    persist_res
                }
@@ -831,12 +817,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L

#[cfg(test)]
mod tests {
-     use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
+     use crate::check_added_monitors;
    use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
    use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
    use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
    use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
-     use crate::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
+     use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
    use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
    use crate::ln::functional_test_utils::*;
    use crate::ln::msgs::ChannelMessageHandler;
@@ -988,12 +974,8 @@ mod tests {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, second_payment_hash,
            RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)
-             ), true, APIError::ChannelUnavailable { ref err },
-             assert!(err.contains("ChannelMonitor storage failure")));
-         check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
-         check_closed_broadcast!(nodes[0], true);
-         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
-             [nodes[1].node.get_our_node_id()], 100000);
+             ), false, APIError::MonitorUpdateInProgress, {});
+         check_added_monitors!(nodes[0], 1);

        // However, as the ChainMonitor is still waiting for the original persistence to complete,
        // it won't yet release the MonitorEvents.
@@ -1020,28 +1002,4 @@ mod tests {
        do_chainsync_pauses_events(false);
        do_chainsync_pauses_events(true);
    }
-
-     #[test]
-     fn update_during_chainsync_fails_channel() {
-         let chanmon_cfgs = create_chanmon_cfgs(2);
-         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-         create_announced_chan_between_nodes(&nodes, 0, 1);
-
-         chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-
-         connect_blocks(&nodes[0], 1);
-         // Before processing events, the ChannelManager will still think the Channel is open and
-         // there won't be any ChannelMonitorUpdates
-         assert_eq!(nodes[0].node.list_channels().len(), 1);
-         check_added_monitors!(nodes[0], 0);
-         // ... however once we get events once, the channel will close, creating a channel-closed
-         // ChannelMonitorUpdate.
-         check_closed_broadcast!(nodes[0], true);
-         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() },
-             [nodes[1].node.get_our_node_id()], 100000);
-         check_added_monitors!(nodes[0], 1);
-     }
}