Commit c7b12ff

Build per_peer_state immediately in ChannelManager deser
Instead of first building a map from peers to their channels and then draining it to build the `per_peer_state`, we build `per_peer_state` up front and store channels in it directly. This avoids an unnecessary map indirection, and it also gives us access to the new fields in `per_peer_state` when reading `Channel`s, which we'll need in a coming commit.
1 parent ccd292e commit c7b12ff
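
For orientation, here is a minimal sketch of the pattern this commit adopts, using simplified stand-in types rather than LDK's actual `PeerState<SP>` and `ChannelPhase`: instead of accumulating channels in a temporary `HashMap<peer, HashMap<channel_id, channel>>` and draining it in a second pass, each deserialized channel goes straight into its (possibly freshly created) per-peer entry.

use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in for LDK's PeerState<SP>; only the field this sketch needs.
#[derive(Default)]
struct PeerState {
    channel_by_id: HashMap<u64, String>,
}

// One pass: create the peer's entry on first sight of one of its channels.
fn build_per_peer_state(
    channels: impl IntoIterator<Item = (u8, u64, String)>, // (peer, channel_id, channel)
) -> HashMap<u8, Mutex<PeerState>> {
    let mut per_peer_state: HashMap<u8, Mutex<PeerState>> = HashMap::new();
    for (peer, channel_id, channel) in channels {
        per_peer_state
            .entry(peer)
            .or_insert_with(|| Mutex::new(PeerState::default()))
            // No locking: holding `&mut` on the map makes access exclusive.
            .get_mut()
            .unwrap()
            .channel_by_id
            .insert(channel_id, channel);
    }
    per_peer_state
}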

File tree: 3 files changed, +32 -33 lines


lightning/src/ln/channelmanager.rs (+24 -33)
@@ -12248,11 +12248,23 @@ where
 		let best_block_height: u32 = Readable::read(reader)?;
 		let best_block_hash: BlockHash = Readable::read(reader)?;
 
-		let mut failed_htlcs = Vec::new();
+		let empty_peer_state = || {
+			PeerState {
+				channel_by_id: new_hash_map(),
+				inbound_channel_request_by_id: new_hash_map(),
+				latest_features: InitFeatures::empty(),
+				pending_msg_events: Vec::new(),
+				in_flight_monitor_updates: BTreeMap::new(),
+				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
+				is_connected: false,
+			}
+		};
 
+		let mut failed_htlcs = Vec::new();
 		let channel_count: u64 = Readable::read(reader)?;
 		let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
-		let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+		let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 		let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = VecDeque::new();
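
Note the changed capacity bound on the new map: `channel_count` is untrusted serialized input, so the preallocation is capped by an allocation budget rather than the flat 128 used for the old temporary map. A sketch of the idiom, with an illustrative budget value and a `[u8; 33]` stand-in for `PublicKey` (not LDK's actual `MAX_ALLOC_SIZE`):

use core::mem;
use std::sync::Mutex;

const MAX_ALLOC_SIZE: usize = 64 * 1024; // illustrative, not LDK's value

struct PeerState; // stand-in

// Cap the claimed element count so the up-front allocation stays within
// the budget no matter what count the (untrusted) input claims.
fn bounded_capacity(claimed_count: u64) -> usize {
    core::cmp::min(
        claimed_count as usize,
        MAX_ALLOC_SIZE / mem::size_of::<([u8; 33], Mutex<PeerState>)>(),
    )
}
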
@@ -12340,17 +12352,10 @@
 				if let Some(funding_txo) = channel.context.get_funding_txo() {
 					outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
 				}
-				match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
-					hash_map::Entry::Occupied(mut entry) => {
-						let by_id_map = entry.get_mut();
-						by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
-					},
-					hash_map::Entry::Vacant(entry) => {
-						let mut by_id_map = new_hash_map();
-						by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
-						entry.insert(by_id_map);
-					}
-				}
+				per_peer_state.entry(channel.context.get_counterparty_node_id())
+					.or_insert_with(|| Mutex::new(empty_peer_state()))
+					.get_mut().unwrap()
+					.channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
 			}
 		} else if channel.is_awaiting_initial_mon_persist() {
 			// If we were persisted and shut down while the initial ChannelMonitor persistence
@@ -12417,27 +12422,13 @@
 			claimable_htlcs_list.push((payment_hash, previous_hops));
 		}
 
-		let peer_state_from_chans = |channel_by_id| {
-			PeerState {
-				channel_by_id,
-				inbound_channel_request_by_id: new_hash_map(),
-				latest_features: InitFeatures::empty(),
-				pending_msg_events: Vec::new(),
-				in_flight_monitor_updates: BTreeMap::new(),
-				monitor_update_blocked_actions: BTreeMap::new(),
-				actions_blocking_raa_monitor_updates: BTreeMap::new(),
-				is_connected: false,
-			}
-		};
-
 		let peer_count: u64 = Readable::read(reader)?;
-		let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 		for _ in 0..peer_count {
-			let peer_pubkey = Readable::read(reader)?;
-			let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
-			let mut peer_state = peer_state_from_chans(peer_chans);
-			peer_state.latest_features = Readable::read(reader)?;
-			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+			let peer_pubkey: PublicKey = Readable::read(reader)?;
+			let latest_features = Readable::read(reader)?;
+			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+				peer_state.get_mut().unwrap().latest_features = latest_features;
+			}
 		}
 
 		let event_count: u64 = Readable::read(reader)?;
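
One behavioral nuance of the rewritten loop above, shown in simplified form below: peers recorded in the serialized peer list but with no funded channels no longer get an empty `per_peer_state` entry; their features are simply skipped, with an entry presumably created again on reconnect. (Stand-in types again; `u64` stands in for `InitFeatures`.)

use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Default)]
struct PeerState {
    latest_features: u64, // stand-in for InitFeatures
}

fn apply_peer_features(
    per_peer_state: &mut HashMap<u8, Mutex<PeerState>>,
    peers: &[(u8, u64)], // (peer_pubkey, latest_features) as read from the stream
) {
    for &(peer_pubkey, latest_features) in peers {
        // Only update peers that got an entry while reading channels;
        // everyone else is skipped rather than instantiated empty.
        if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
            peer_state.get_mut().unwrap().latest_features = latest_features;
        }
    }
}
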
@@ -12649,7 +12640,7 @@
 			// still open, we need to replay any monitor updates that are for closed channels,
 			// creating the necessary peer_state entries as we go.
 			let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
-				Mutex::new(peer_state_from_chans(new_hash_map()))
+				Mutex::new(empty_peer_state())
 			});
 			let mut peer_state = peer_state_mutex.lock().unwrap();
 			handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,

lightning/src/sync/debug_sync.rs (+4 -0)
@@ -311,6 +311,10 @@ impl<T> Mutex<T> {
 		}
 		res
 	}
+
+	pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+		self.inner.get_mut().map_err(|_| ())
+	}
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
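
The new helper mirrors `std::sync::Mutex::get_mut`: exclusive `&mut` access to the mutex proves no other thread can hold the lock, so the inner value can be handed out without taking the lock at all. A quick demonstration with the standard library type:

use std::sync::Mutex;

fn main() {
    let mut m = Mutex::new(vec![1, 2, 3]);
    // `get_mut` needs `&mut self`, so it can neither block nor deadlock;
    // it only fails if the mutex was poisoned by a panicking holder.
    m.get_mut().unwrap().push(4);
    assert_eq!(m.lock().unwrap().len(), 4);
}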

lightning/src/sync/nostd_sync.rs (+4 -0)
@@ -40,6 +40,10 @@ impl<T> Mutex<T> {
 	pub fn into_inner(self) -> LockResult<T> {
 		Ok(self.inner.into_inner())
 	}
+
+	pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+		Ok(self.inner.get_mut())
+	}
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
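
The no-std wrapper's version is infallible (note the unconditional `Ok(...)`), consistent with a cell-backed inner type: with `&mut self` there can be no outstanding borrows, so the value comes back directly with no runtime check. Assuming the wrapper is `RefCell`-backed, the underlying behavior looks like:

use core::cell::RefCell;

fn main() {
    let mut cell = RefCell::new(5u32);
    // `&mut self` guarantees no outstanding borrows, so `get_mut` returns
    // `&mut T` directly -- no Result and no runtime borrow check.
    *cell.get_mut() += 1;
    assert_eq!(cell.into_inner(), 6);
}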
