Skip to content

Commit 53d7cda

Browse files
update functional test and check warn message instead of a close channel
Signed-off-by: Vincenzo Palazzo <[email protected]>
1 parent 891c204 commit 53d7cda

File tree

1 file changed

+50
-15
lines changed

1 file changed

+50
-15
lines changed

lightning/src/ln/functional_tests.rs

+50-15
Original file line numberDiff line numberDiff line change
@@ -7282,8 +7282,20 @@ fn test_user_configurable_csv_delay() {
72827282
} else { assert!(false); }
72837283
}
72847284

7285-
#[test]
7286-
fn test_data_loss_protect() {
7285+
/// Describe the test case as an enum, instead of as a boolean in this case!
7286+
/// with the enum there is the possibility to pass more data and
7287+
/// check more corner cases.
7288+
#[derive(Debug)]
7289+
enum DataLossProtectTestCase {
7290+
/// The node that sends the warning message will try to
7291+
/// use the channel, but it can't, because after the
7292+
/// warning message we don't change the channel state.
7293+
UseChannel,
7294+
/// Try to reconnect to the node that has sent the warning message
7295+
TryToReconnect,
7296+
}
7297+
7298+
fn do_test_data_loss_protect(case: DataLossProtectTestCase) {
72877299
// We want to be sure that :
72887300
// * we don't broadcast our Local Commitment Tx in case of fallen behind
72897301
// (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
@@ -7380,22 +7392,45 @@ fn test_data_loss_protect() {
73807392
}
73817393

73827394
// Check we close channel detecting A is fallen-behind
7395+
// Check that we sent the warning message when we detect that A has fallen behind,
7396+
// and that we give A the possibility to recover from the error.
73837397
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7384-
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
7385-
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
7386-
check_added_monitors!(nodes[1], 1);
7398+
let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
7399+
assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
7400+
73877401

73887402
// Check A is able to claim to_remote output
7389-
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
7390-
assert_eq!(node_txn.len(), 1);
7391-
check_spends!(node_txn[0], chan.3);
7392-
assert_eq!(node_txn[0].output.len(), 2);
7393-
mine_transaction(&nodes[0], &node_txn[0]);
7394-
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7395-
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
7396-
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
7397-
assert_eq!(spend_txn.len(), 1);
7398-
check_spends!(spend_txn[0], node_txn[0]);
7403+
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
7404+
// The node B should not broadcast the transaction to force close the channel!
7405+
assert!(node_txn.is_empty());
7406+
// B should now detect that there is something wrong and should force close the channel.
7407+
let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
7408+
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
7409+
7410+
// After the warning message sent by B, we should not be able to
7411+
// use the channel, or successfully reconnect to the channel.
7412+
match case {
7413+
DataLossProtectTestCase::UseChannel => {
7414+
assert!(nodes[0].node.list_usable_channels().is_empty());
7415+
},
7416+
DataLossProtectTestCase::TryToReconnect => {
7417+
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
7418+
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
7419+
let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7420+
7421+
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &retry_reestablish[0]);
7422+
check_added_monitors!(nodes[1], 1);
7423+
let exp_err = "Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided";
7424+
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
7425+
nodes[1].node.get_and_clear_pending_msg_events();
7426+
},
7427+
}
7428+
}
7429+
7430+
#[test]
7431+
fn test_data_loss_protect() {
7432+
do_test_data_loss_protect(DataLossProtectTestCase::UseChannel);
7433+
do_test_data_loss_protect(DataLossProtectTestCase::TryToReconnect);
73997434
}
74007435

74017436
#[test]

0 commit comments

Comments
 (0)