Skip to content

Commit ad39e2f

Browse files
committed
Switch to use FairRwLock
.. as we want to make sure writers will be preferred here, i.e., to avoid a situation where a user calling `list_channels` too often would block message processing.
1 parent 0572957 commit ad39e2f

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

lightning/src/ln/channelmanager.rs

+6-6
Original file line numberDiff line numberDiff line change
@@ -1359,9 +1359,9 @@ where
13591359
///
13601360
/// See `ChannelManager` struct-level documentation for lock order requirements.
13611361
#[cfg(not(any(test, feature = "_test_utils")))]
1362-
per_peer_state: FairRwLock<HashMap<PublicKey, RwLock<PeerState<SP>>>>,
1362+
per_peer_state: FairRwLock<HashMap<PublicKey, FairRwLock<PeerState<SP>>>>,
13631363
#[cfg(any(test, feature = "_test_utils"))]
1364-
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, RwLock<PeerState<SP>>>>,
1364+
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, FairRwLock<PeerState<SP>>>>,
13651365

13661366
/// The set of events which we need to give to the user to handle. In some cases an event may
13671367
/// require some further action after the user handles it (currently only blocking a monitor
@@ -9175,7 +9175,7 @@ where
91759175
res = Err(());
91769176
return NotifyOption::SkipPersistNoEvents;
91779177
}
9178-
e.insert(RwLock::new(PeerState {
9178+
e.insert(FairRwLock::new(PeerState {
91799179
channel_by_id: new_hash_map(),
91809180
inbound_channel_request_by_id: new_hash_map(),
91819181
latest_features: init_msg.features.clone(),
@@ -10744,13 +10744,13 @@ where
1074410744
};
1074510745

1074610746
let peer_count: u64 = Readable::read(reader)?;
10747-
let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, RwLock<PeerState<SP>>)>()));
10747+
let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, FairRwLock<PeerState<SP>>)>()));
1074810748
for _ in 0..peer_count {
1074910749
let peer_pubkey = Readable::read(reader)?;
1075010750
let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
1075110751
let mut peer_state = peer_state_from_chans(peer_chans);
1075210752
peer_state.latest_features = Readable::read(reader)?;
10753-
per_peer_state.insert(peer_pubkey, RwLock::new(peer_state));
10753+
per_peer_state.insert(peer_pubkey, FairRwLock::new(peer_state));
1075410754
}
1075510755

1075610756
let event_count: u64 = Readable::read(reader)?;
@@ -10956,7 +10956,7 @@ where
1095610956
// still open, we need to replay any monitor updates that are for closed channels,
1095710957
// creating the neccessary peer_state entries as we go.
1095810958
let peer_state_rwlock = per_peer_state.entry(counterparty_id).or_insert_with(|| {
10959-
RwLock::new(peer_state_from_chans(new_hash_map()))
10959+
FairRwLock::new(peer_state_from_chans(new_hash_map()))
1096010960
});
1096110961
let mut peer_state = peer_state_rwlock.write().unwrap();
1096210962
handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,

0 commit comments

Comments
 (0)