@@ -51,6 +51,7 @@ use lightning::offers::invoice_request::UnsignedInvoiceRequest;
 use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
 use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::errors::APIError;
+use lightning::util::hash_tables::*;
 use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
@@ -66,7 +67,6 @@ use bitcoin::secp256k1::schnorr;
 
 use std::mem;
 use std::cmp::{self, Ordering};
-use hashbrown::{HashSet, hash_map, HashMap};
 use std::sync::{Arc, Mutex};
 use std::sync::atomic;
 use std::io::Cursor;
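
These two hunks swap the direct `hashbrown` imports for LDK's `hash_tables` helpers. A minimal sketch of the resulting call pattern, assuming only what the diff itself shows (a `new_hash_map()` constructor returning a map with the usual `insert`/`get` API):

```rust
use lightning::util::hash_tables::*;

fn example() {
    // new_hash_map() replaces HashMap::new(); the module, not the call
    // site, decides which map implementation backs it.
    let mut by_id = new_hash_map();
    by_id.insert(42u32, "state");
    assert_eq!(by_id.get(&42), Some(&"state"));
}
```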
@@ -157,7 +157,7 @@ impl TestChainMonitor {
 			logger,
 			keys,
 			persister,
-			latest_monitors: Mutex::new(HashMap::new()),
+			latest_monitors: Mutex::new(new_hash_map()),
 		}
 	}
 }
@@ -173,16 +173,13 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 
 	fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
 		let mut map_lock = self.latest_monitors.lock().unwrap();
-		let mut map_entry = match map_lock.entry(funding_txo) {
-			hash_map::Entry::Occupied(entry) => entry,
-			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
-		};
+		let map_entry = map_lock.get_mut(&funding_txo).expect("Didn't have monitor on update call");
 		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
-			read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
+			read(&mut Cursor::new(&map_entry.1), (&*self.keys, &*self.keys)).unwrap().1;
 		deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
-		map_entry.insert((update.update_id, ser.0));
+		*map_entry = (update.update_id, ser.0);
 		self.chain_monitor.update_channel(funding_txo, update)
 	}
 
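
The hunk above drops the `hash_map::Entry` API in favor of `get_mut`, which behaves identically for the must-already-exist case. A standalone sketch of that translation, using `std::collections::HashMap` and a hypothetical `MonitorState` alias purely for illustration:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the (update_id, serialized_monitor) tuple.
type MonitorState = (u64, Vec<u8>);

fn overwrite_existing(map: &mut HashMap<u32, MonitorState>, key: u32, val: MonitorState) {
    // get_mut + expect panics when the key is absent, matching the old
    // Entry::Vacant(_) => panic!(..) arm.
    let entry = map.get_mut(&key).expect("Didn't have monitor on update call");
    // Reading fields through the reference (entry.1) replaces
    // OccupiedEntry::get(), and a whole-value write replaces
    // OccupiedEntry::insert().
    *entry = val;
}
```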
@@ -467,7 +464,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 		($node_id: expr, $fee_estimator: expr) => { {
 			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
 			let node_secret = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, $node_id]).unwrap();
-			let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
+			let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(new_hash_map()) });
 			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
 				Arc::new(TestPersister {
 					update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
@@ -508,13 +505,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 				config.manually_accept_inbound_channels = true;
 			}
 
-			let mut monitors = HashMap::new();
+			let mut monitors = new_hash_map();
 			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
 			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
 				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
 				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
 			}
-			let mut monitor_refs = HashMap::new();
+			let mut monitor_refs = new_hash_map();
 			for (outpoint, monitor) in monitors.iter_mut() {
 				monitor_refs.insert(*outpoint, monitor);
 			}
@@ -981,7 +978,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 				// In case we get 256 payments we may have a hash collision, resulting in the
 				// second claim/fail call not finding the duplicate-hash HTLC, so we have to
 				// deduplicate the calls here.
-				let mut claim_set = HashSet::new();
+				let mut claim_set = new_hash_map();
 				let mut events = nodes[$node].get_and_clear_pending_events();
 				// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
 				// case where we first process a PendingHTLCsForwardable, then claim/fail on a
@@ -1003,7 +1000,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 				for event in events.drain(..) {
 					match event {
 						events::Event::PaymentClaimable { payment_hash, .. } => {
-							if claim_set.insert(payment_hash.0) {
+							if claim_set.insert(payment_hash.0, ()).is_none() {
 								if $fail {
 									nodes[$node].fail_htlc_backwards(&payment_hash);
 								} else {
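
In these last two hunks the dedup `HashSet` becomes a map with `()` values: `insert(key, ())` returns `Option<()>`, which is `None` exactly when `HashSet::insert` would have returned `true` (the key was newly inserted). A quick standalone check of that equivalence, using the std collections for illustration:

```rust
use std::collections::{HashMap, HashSet};

fn main() {
    let (mut set, mut map) = (HashSet::new(), HashMap::new());
    for hash in [[1u8; 32], [2u8; 32], [1u8; 32]] {
        // HashSet::insert returns true iff the value was newly inserted;
        // HashMap::insert(k, ()) returns None in exactly those cases.
        assert_eq!(set.insert(hash), map.insert(hash, ()).is_none());
    }
}
```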