
Commit ae47520

send warning when we receive an old commitment transaction
During a `channel_reestablish` we now send a warning message when we receive an old commitment transaction from the peer. In addition, this commit updates the functional tests to make sure that the receiver generates warning messages. Signed-off-by: Vincenzo Palazzo <[email protected]>
1 parent 5feef25 commit ae47520
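For context, the warning the receiver now sends is a standard BOLT 1 `warning` message (wire type 1), which tells the peer something is wrong without forcing the channel closed. A rough field sketch follows; the names are illustrative, not LDK's actual `msgs::WarningMessage` definition.

```rust
// Rough sketch of a BOLT 1 `warning` message (wire type 1). Unlike `error`,
// a warning signals a problem without requiring the channel to be closed.
// Field names are illustrative only.
struct WarningMessage {
    /// All-zeros refers to the whole connection; otherwise the affected channel.
    channel_id: [u8; 32],
    /// Human-readable reason, e.g. the "very old local commitment transaction" text below.
    data: String,
}
```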

File tree

2 files changed: +74 −15 lines


lightning/src/ln/channel.rs (+9)
@@ -3737,6 +3737,15 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 
+		// Before changing the state of the channel we check if the peer is sending a very
+		// old commitment transaction number; if so, we send a warning message.
+		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
+		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+			return Err(
+				ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
+			);
+		}
+
 		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
 		// remaining cases either succeed or ErrorMessage-fail).
 		self.channel_state &= !(ChannelState::PeerDisconnected as u32);
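To see why this check flags a stale peer, here is a small standalone sketch of the same arithmetic with made-up numbers. It is not LDK code, but it assumes, as `channel.rs` does, that holder commitment numbers count down from `INITIAL_COMMITMENT_NUMBER` ((1 << 48) - 1).

```rust
// Standalone sketch (not LDK code) of the staleness check added above, assuming
// holder commitment numbers count down from INITIAL_COMMITMENT_NUMBER.
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// `cur_holder` is our countdown-style commitment counter; `next_remote` is the
/// ascending index of the next commitment the peer says it expects from us.
fn peer_is_stale(cur_holder: u64, next_remote: u64) -> bool {
    // How many commitment transactions we have already moved past, as an ascending count.
    let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - cur_holder - 1;
    // If the peer expects a commitment more than one step behind ours, it is running
    // on an old state, so we warn instead of changing any channel state.
    next_remote + 1 < our_commitment_transaction
}

fn main() {
    // Hypothetical values: we have advanced past 10 commitments.
    let cur_holder = INITIAL_COMMITMENT_NUMBER - 11;
    assert!(peer_is_stale(cur_holder, 3));   // peer is far behind -> warn
    assert!(!peer_is_stale(cur_holder, 9));  // peer is current enough -> no warning
}
```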

lightning/src/ln/functional_tests.rs (+65 −15)
@@ -7304,8 +7304,20 @@ fn test_user_configurable_csv_delay() {
 	} else { assert!(false); }
 }
 
-#[test]
-fn test_data_loss_protect() {
+/// Describe the test case as an enum instead of as a boolean in this case!
+/// With the enum there is the possibility to pass more data and
+/// check more corner cases.
+#[derive(Debug)]
+enum DataLossProtectTestCase {
+	/// The node that sent the warning message will try to
+	/// use the channel, but it can't, because after the
+	/// warning message we don't change the channel state.
+	UseChannel,
+	/// Try to reconnect to the node that sent the warning message.
+	TryToReconnect,
+}
+
+fn do_test_data_loss_protect(case: DataLossProtectTestCase) {
 	// We want to be sure that :
 	// * we don't broadcast our Local Commitment Tx in case of fallen behind
 	// (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
// (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
@@ -7402,22 +7414,60 @@ fn test_data_loss_protect() {
 	}
 
 	// Check we close channel detecting A is fallen-behind
+	// Check that we sent the warning message when we detected that A has fallen behind,
+	// and that we give A the possibility to recover from the error.
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
-	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
-	check_added_monitors!(nodes[1], 1);
+	let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+	assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
 
 	// Check A is able to claim to_remote output
-	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-	assert_eq!(node_txn.len(), 1);
-	check_spends!(node_txn[0], chan.3);
-	assert_eq!(node_txn[0].output.len(), 2);
-	mine_transaction(&nodes[0], &node_txn[0]);
-	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
-	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
-	assert_eq!(spend_txn.len(), 1);
-	check_spends!(spend_txn[0], node_txn[0]);
+	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	// Node B should not broadcast a transaction to force close the channel!
+	assert!(node_txn.is_empty());
+	// Node A should now detect that it has fallen behind and force close the channel.
+	let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
+
+	// After the warning message sent by B, we should not be able to
+	// use the channel, or reconnect successfully to the channel.
+	match case {
+		DataLossProtectTestCase::UseChannel => {
+			assert!(nodes[0].node.list_usable_channels().is_empty());
+		},
+		DataLossProtectTestCase::TryToReconnect => {
+			nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+			nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+			let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+			nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+			let mut err_msgs_0 = Vec::with_capacity(1);
+			for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+				if let MessageSendEvent::HandleError { ref action, .. } = msg {
+					match action {
+						&ErrorAction::SendErrorMessage { ref msg } => {
+							assert_eq!(msg.data, "Failed to find corresponding channel");
+							err_msgs_0.push(msg.clone());
+						},
+						_ => panic!("Unexpected event!"),
+					}
+				} else {
+					panic!("Unexpected event!");
+				}
+			}
+			assert_eq!(err_msgs_0.len(), 1);
+			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+			assert!(nodes[1].node.list_usable_channels().is_empty());
+			check_added_monitors!(nodes[1], 1);
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+			check_closed_broadcast!(nodes[1], false);
+		},
+	}
+}
+
+#[test]
+fn test_data_loss_protect() {
+	do_test_data_loss_protect(DataLossProtectTestCase::UseChannel);
+	do_test_data_loss_protect(DataLossProtectTestCase::TryToReconnect);
 }
 
 #[test]
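The `check_warn_msg!` helper used in the test lives in the functional test utilities and is not part of this diff. Below is a plausible sketch of what such a helper does, mirroring the `SendErrorMessage` matching in the test body above; it assumes `ErrorAction` has a `SendWarningMessage` variant carrying a warning message with `channel_id` and `data` fields, and the macro name is deliberately different to mark it as a sketch rather than the exact macro from `functional_test_utils.rs`.

```rust
// Hedged sketch of a check_warn_msg!-style helper: drain the node's pending
// message events and return the data of the single warning aimed at the recipient.
// Assumes ErrorAction::SendWarningMessage { msg, .. } exists and that msg exposes
// channel_id/data; adjust to the actual definitions in functional_test_utils.rs.
macro_rules! check_warn_msg_sketch {
	($node: expr, $recipient_node_id: expr, $chan_id: expr) => {{
		let msg_events = $node.node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		match msg_events[0] {
			MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, .. }, node_id } => {
				assert_eq!(node_id, $recipient_node_id);
				assert_eq!(msg.channel_id, $chan_id);
				msg.data.clone()
			},
			_ => panic!("Unexpected event"),
		}
	}};
}
```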
