Skip to content

Commit 3460b37

Browse files
committed
Somewhat optimize the generic Features::requires_unknown_bits
It turns out we spend several percent of our routefinding time just checking, byte by byte, whether nodes and channels require unknown features. While the cost is almost certainly dominated by memory read latency, avoiding the byte-by-byte checks should reduce the branch count slightly, which may reduce the overhead.
1 parent 56d4807 commit 3460b37

File tree

1 file changed

+17
-10
lines changed

1 file changed

+17
-10
lines changed

lightning/src/ln/features.rs

Lines changed: 17 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -788,16 +788,23 @@ impl<T: sealed::Context> Features<T> {
788788
pub fn requires_unknown_bits(&self) -> bool {
789789
// Bitwise AND-ing with all even bits set except for known features will select required
790790
// unknown features.
791-
let byte_count = T::KNOWN_FEATURE_MASK.len();
792-
self.flags.iter().enumerate().any(|(i, &byte)| {
793-
let required_features = 0b01_01_01_01;
794-
let unknown_features = if i < byte_count {
795-
!T::KNOWN_FEATURE_MASK[i]
796-
} else {
797-
0b11_11_11_11
798-
};
799-
(byte & (required_features & unknown_features)) != 0
800-
})
791+
let mut known_chunks = T::KNOWN_FEATURE_MASK.chunks(8);
792+
for chunk in self.flags.chunks(8) {
793+
let mut flag_bytes = [0; 8];
794+
flag_bytes[..chunk.len()].copy_from_slice(&chunk);
795+
let flag_int = u64::from_le_bytes(flag_bytes);
796+
797+
let known_chunk = known_chunks.next().unwrap_or(&[0; 0]);
798+
let mut known_bytes = [0; 8];
799+
known_bytes[..known_chunk.len()].copy_from_slice(&known_chunk);
800+
let known_int = u64::from_le_bytes(known_bytes);
801+
802+
const REQ_MASK: u64 = 0x55555555_55555555;
803+
if flag_int & (REQ_MASK & !known_int) != 0 {
804+
return true;
805+
}
806+
}
807+
false
801808
}
802809

803810
pub(crate) fn supports_unknown_bits(&self) -> bool {

0 commit comments

Comments
 (0)