
Commit ebf1de5

Build per_peer_state immediately in ChannelManager deser
Instead of first building a map from peers to their channels and then draining it to build the `per_peer_state`, we now build `per_peer_state` up front and store channels in it directly. This avoids an unnecessary map indirection, and it also gives us access to the new fields in `per_peer_state` while reading `Channel`s, which we'll need in a coming commit.
1 parent b423a33 commit ebf1de5
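
The heart of the change is the insertion pattern: rather than accumulating channels in an intermediate `HashMap<PublicKey, HashMap<ChannelId, ChannelPhase>>` and moving them into `per_peer_state` afterwards, each channel goes straight into the final map via `entry().or_insert_with()`. A minimal sketch of that pattern, using hypothetical stand-in types rather than the real `ChannelManager` ones:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-ins for the real types (PublicKey, ChannelId, ChannelPhase, PeerState).
type PeerId = [u8; 33];
type ChanId = u64;

#[derive(Default)]
struct PeerState {
	channel_by_id: HashMap<ChanId, String>,
}

fn build_per_peer_state(
	channels: impl IntoIterator<Item = (PeerId, ChanId, String)>,
) -> HashMap<PeerId, Mutex<PeerState>> {
	let mut per_peer_state: HashMap<PeerId, Mutex<PeerState>> = HashMap::new();
	for (peer, chan_id, chan) in channels {
		per_peer_state
			.entry(peer)
			// First channel seen for this peer: create its (empty) state.
			.or_insert_with(|| Mutex::new(PeerState::default()))
			// We hold &mut to the map, so nothing can be holding the lock:
			// get_mut() borrows the inner PeerState without locking.
			.get_mut()
			.unwrap()
			.channel_by_id
			.insert(chan_id, chan);
	}
	per_peer_state
}
```

One `entry()` call replaces the old Occupied/Vacant match, and because deserialization has exclusive ownership of the map, the `Mutex` never actually needs to be locked.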

File tree

3 files changed (+32, -33 lines)

lightning/src/ln/channelmanager.rs (+24, -33)

```diff
@@ -12403,11 +12403,23 @@ where
 		let best_block_height: u32 = Readable::read(reader)?;
 		let best_block_hash: BlockHash = Readable::read(reader)?;
 
-		let mut failed_htlcs = Vec::new();
+		let empty_peer_state = || {
+			PeerState {
+				channel_by_id: new_hash_map(),
+				inbound_channel_request_by_id: new_hash_map(),
+				latest_features: InitFeatures::empty(),
+				pending_msg_events: Vec::new(),
+				in_flight_monitor_updates: BTreeMap::new(),
+				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
+				is_connected: false,
+			}
+		};
 
+		let mut failed_htlcs = Vec::new();
 		let channel_count: u64 = Readable::read(reader)?;
 		let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
-		let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+		let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 		let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = VecDeque::new();
@@ -12495,17 +12507,10 @@ where
 					if let Some(funding_txo) = channel.context.get_funding_txo() {
 						outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
 					}
-					match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
-						hash_map::Entry::Occupied(mut entry) => {
-							let by_id_map = entry.get_mut();
-							by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
-						},
-						hash_map::Entry::Vacant(entry) => {
-							let mut by_id_map = new_hash_map();
-							by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
-							entry.insert(by_id_map);
-						}
-					}
+					per_peer_state.entry(channel.context.get_counterparty_node_id())
+						.or_insert_with(|| Mutex::new(empty_peer_state()))
+						.get_mut().unwrap()
+						.channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
 				}
 			} else if channel.is_awaiting_initial_mon_persist() {
 				// If we were persisted and shut down while the initial ChannelMonitor persistence
@@ -12572,27 +12577,13 @@ where
 			claimable_htlcs_list.push((payment_hash, previous_hops));
 		}
 
-		let peer_state_from_chans = |channel_by_id| {
-			PeerState {
-				channel_by_id,
-				inbound_channel_request_by_id: new_hash_map(),
-				latest_features: InitFeatures::empty(),
-				pending_msg_events: Vec::new(),
-				in_flight_monitor_updates: BTreeMap::new(),
-				monitor_update_blocked_actions: BTreeMap::new(),
-				actions_blocking_raa_monitor_updates: BTreeMap::new(),
-				is_connected: false,
-			}
-		};
-
 		let peer_count: u64 = Readable::read(reader)?;
-		let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 		for _ in 0..peer_count {
-			let peer_pubkey = Readable::read(reader)?;
-			let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
-			let mut peer_state = peer_state_from_chans(peer_chans);
-			peer_state.latest_features = Readable::read(reader)?;
-			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+			let peer_pubkey: PublicKey = Readable::read(reader)?;
+			let latest_features = Readable::read(reader)?;
+			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+				peer_state.get_mut().unwrap().latest_features = latest_features;
+			}
 		}
 
 		let event_count: u64 = Readable::read(reader)?;
@@ -12804,7 +12795,7 @@ where
 				// still open, we need to replay any monitor updates that are for closed channels,
 				// creating the neccessary peer_state entries as we go.
 				let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
-					Mutex::new(peer_state_from_chans(new_hash_map()))
+					Mutex::new(empty_peer_state())
 				});
 				let mut peer_state = peer_state_mutex.lock().unwrap();
 				handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
```
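
One detail worth noting in the first hunk: `per_peer_state` is preallocated from `channel_count`, a length read out of untrusted serialized data, so its capacity is clamped by `MAX_ALLOC_SIZE / mem::size_of::<...>()` rather than trusted outright. A sketch of that defensive-preallocation pattern (the `MAX_ALLOC_SIZE` value below is illustrative, not the constant used in channelmanager.rs):

```rust
use std::collections::HashMap;
use std::mem;

// Illustrative cap on up-front allocation; the real constant lives in
// channelmanager.rs and may differ.
const MAX_ALLOC_SIZE: usize = 16 * 1024;

/// Preallocate for at most `claimed_count` entries, but never reserve more
/// than MAX_ALLOC_SIZE bytes up front. A corrupt count of u64::MAX then costs
/// a bounded allocation instead of an immediate out-of-memory abort; the map
/// still grows on demand if the data really is that large.
fn map_with_bounded_capacity<K, V>(claimed_count: u64) -> HashMap<K, V> {
	let entry_size = mem::size_of::<(K, V)>().max(1); // avoid /0 for ZSTs
	let max_entries = MAX_ALLOC_SIZE / entry_size;
	HashMap::with_capacity(std::cmp::min(claimed_count as usize, max_entries))
}
```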

lightning/src/sync/debug_sync.rs (+4)

```diff
@@ -311,6 +311,10 @@ impl<T> Mutex<T> {
 		}
 		res
 	}
+
+	pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+		self.inner.get_mut().map_err(|_| ())
+	}
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
```

lightning/src/sync/nostd_sync.rs (+4)

```diff
@@ -40,6 +40,10 @@ impl<T> Mutex<T> {
 	pub fn into_inner(self) -> LockResult<T> {
 		Ok(self.inner.into_inner())
 	}
+
+	pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+		Ok(self.inner.get_mut())
+	}
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
```
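
Both `Mutex` shims gain a `get_mut` passthrough with the same shape as `std::sync::Mutex::get_mut`: given `&mut self`, the borrow checker guarantees no other reference to the mutex exists, so no thread can be holding the lock and the inner value can be handed out without any locking (the `debug_sync` version likewise skips its lock-order tracking, since no lock is acquired). A small usage sketch against the standard-library mutex:

```rust
use std::sync::Mutex;

fn main() {
	let mut state = Mutex::new(vec![1u32, 2, 3]);

	// We hold `&mut state`, so no other reference (let alone another thread)
	// can be locking the mutex: get_mut() hands back `&mut Vec<u32>` directly,
	// with no lock acquisition and no possibility of blocking.
	state.get_mut().unwrap().push(4);

	assert_eq!(state.lock().unwrap().len(), 4);
}
```

This is what lets the deserialization code above write channels into a freshly created `Mutex<PeerState>` without ever taking the lock.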
