Skip to content

Commit 7a9bea1

Browse files
committed
Use test/_test_utils to enable single-threaded debug assertions
We have a number of debug assertions which are expected to never fire when running in a single thread. This is just fine in tests, and gives us good coverage of our lockorder requirements, but it not infrequently surprises users, who may run with their own debug assertions enabled in test environments. Instead, we gate these checks by the `cfg(test)` setting as well as the `_test_utils` feature, ensuring they run in our own tests, but not downstream tests.
1 parent 358d980 commit 7a9bea1

File tree

2 files changed

+14
-14
lines changed

2 files changed

+14
-14
lines changed

lightning-net-tokio/src/lib.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -286,7 +286,7 @@ pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Peer
286286
{
287287
let remote_addr = get_addr_from_stream(&stream);
288288
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
289-
#[cfg(debug_assertions)]
289+
#[cfg(test)]
290290
let last_us = Arc::clone(&us);
291291

292292
let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
@@ -307,8 +307,8 @@ pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Peer
307307
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
308308
// keep too many wakers around, this makes sense. The race should be rare (we do
309309
// some work after shutdown()) and an error would be a major memory leak.
310-
#[cfg(debug_assertions)]
311-
assert!(Arc::try_unwrap(last_us).is_ok());
310+
#[cfg(test)]
311+
debug_assert!(Arc::try_unwrap(last_us).is_ok());
312312
}
313313
}
314314
}
@@ -335,7 +335,7 @@ pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Pee
335335
{
336336
let remote_addr = get_addr_from_stream(&stream);
337337
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
338-
#[cfg(debug_assertions)]
338+
#[cfg(test)]
339339
let last_us = Arc::clone(&us);
340340
let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
341341
Some(tokio::spawn(async move {
@@ -381,8 +381,8 @@ pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Pee
381381
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
382382
// keep too many wakers around, this makes sense. The race should be rare (we do
383383
// some work after shutdown()) and an error would be a major memory leak.
384-
#[cfg(debug_assertions)]
385-
assert!(Arc::try_unwrap(last_us).is_ok());
384+
#[cfg(test)]
385+
debug_assert!(Arc::try_unwrap(last_us).is_ok());
386386
}
387387
}
388388
}

lightning/src/ln/channelmanager.rs

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1153,12 +1153,12 @@ macro_rules! handle_error {
11531153
match $internal {
11541154
Ok(msg) => Ok(msg),
11551155
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
1156-
#[cfg(debug_assertions)]
1156+
#[cfg(any(feature = "_test_utils", test))]
11571157
{
11581158
// In testing, ensure there are no deadlocks where the lock is already held upon
11591159
// entering the macro.
1160-
assert!($self.pending_events.try_lock().is_ok());
1161-
assert!($self.per_peer_state.try_write().is_ok());
1160+
debug_assert!($self.pending_events.try_lock().is_ok());
1161+
debug_assert!($self.per_peer_state.try_write().is_ok());
11621162
}
11631163

11641164
let mut msg_events = Vec::with_capacity(2);
@@ -1193,7 +1193,7 @@ macro_rules! handle_error {
11931193
let mut peer_state = peer_state_mutex.lock().unwrap();
11941194
peer_state.pending_msg_events.append(&mut msg_events);
11951195
}
1196-
#[cfg(debug_assertions)]
1196+
#[cfg(any(feature = "_test_utils", test))]
11971197
{
11981198
if let None = per_peer_state.get(&$counterparty_node_id) {
11991199
// This shouldn't occur in tests unless an unknown counterparty_node_id
@@ -1206,10 +1206,10 @@ macro_rules! handle_error {
12061206
=> {
12071207
assert_eq!(*data, expected_error_str);
12081208
if let Some((err_channel_id, _user_channel_id)) = chan_id {
1209-
assert_eq!(*channel_id, err_channel_id);
1209+
debug_assert_eq!(*channel_id, err_channel_id);
12101210
}
12111211
}
1212-
_ => panic!("Unexpected event"),
1212+
_ => debug_assert!(false, "Unexpected event"),
12131213
}
12141214
}
12151215
}
@@ -3565,7 +3565,7 @@ where
35653565
/// Fails an HTLC backwards to the sender of it to us.
35663566
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
35673567
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
3568-
#[cfg(debug_assertions)]
3568+
#[cfg(any(feature = "_test_utils", test))]
35693569
{
35703570
// Ensure that no peer state channel storage lock is held when calling this
35713571
// function.
@@ -3574,7 +3574,7 @@ where
35743574
// this function with any `per_peer_state` peer lock acquired would.
35753575
let per_peer_state = self.per_peer_state.read().unwrap();
35763576
for (_, peer) in per_peer_state.iter() {
3577-
assert!(peer.try_lock().is_ok());
3577+
debug_assert!(peer.try_lock().is_ok());
35783578
}
35793579
}
35803580

0 commit comments

Comments
 (0)