Skip to content

Commit 163d535

Browse files
committed
Add test for the change in previous commit
- Also add a correction in acquiring locks in the last commit
1 parent 313e824 commit 163d535

File tree

1 file changed

+143
-0
lines changed

1 file changed

+143
-0
lines changed

lightning/src/ln/channelmanager.rs

+143
Original file line numberDiff line numberDiff line change
@@ -2971,6 +2971,86 @@ where
29712971
Ok(counterparty_node_id)
29722972
}
29732973

2974+
#[cfg(test)]
// Test-only variant of a force-close which deliberately removes the counterparty's entry
// from `per_peer_state` midway through, so tests can verify that the resulting
// `BroadcastChannelUpdate` is still queued (in `pending_broadcast_messages`) for later
// rebroadcast when no suitable peer is connected.
//
// Returns the counterparty's node id on success, or `APIError::ChannelUnavailable` if the
// peer or channel cannot be found.
fn test_force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
-> Result<PublicKey, APIError> {
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mutex = per_peer_state.get(peer_node_id)
		.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
	let (update_opt, counterparty_node_id) = {
		let mut peer_state = peer_state_mutex.lock().unwrap();
		// `peer_msg` being present means the counterparty force-closed on us; otherwise we
		// are the one force-closing.
		let closure_reason = if let Some(peer_msg) = peer_msg {
			ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
		} else {
			ClosureReason::HolderForceClosed
		};
		if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
			log_error!(self.logger, "Force-closing channel {}", channel_id);
			self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
			let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
			// Release both the per-peer lock and the top-level read lock before
			// `finish_close_channel` below, which must not run with these held.
			mem::drop(peer_state);
			mem::drop(per_peer_state);
			match chan_phase {
				ChannelPhase::Funded(mut chan) => {
					self.finish_close_channel(chan.context.force_shutdown(broadcast));
					(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
				},
				ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
					self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
					// Unfunded channel has no update
					(None, chan_phase.context().get_counterparty_node_id())
				},
			}
		} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
			log_error!(self.logger, "Force-closing channel {}", &channel_id);
			// N.B. that we don't send any channel close event here: we
			// don't have a user_channel_id, and we never sent any opening
			// events anyway.
			(None, *peer_node_id)
		} else {
			return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
		}
	};
	// NOTE(review): in the two non-`Occupied` branches above, the `per_peer_state` read
	// guard is never dropped (only the `Occupied` path calls `mem::drop(per_peer_state)`),
	// so taking the write lock below would deadlock on those paths. The accompanying test
	// only exercises the funded-channel path — confirm this is intentional for a
	// test-only helper.

	// Test: The peer_state corresponding to counterparty_node is removed at this point
	{
		let mut per_peer_state = self.per_peer_state.write().unwrap();
		per_peer_state.remove(peer_node_id);
	}

	if let Some(update) = update_opt {
		// Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
		// not try to broadcast it via whatever peer we are connected to.
		let brodcast_message_evt = events::MessageSendEvent::BroadcastChannelUpdate {
			msg: update
		};

		let per_peer_state = self.per_peer_state.read().unwrap();

		// Attempt to get the peer_state_mutex for the peer we force-closed on (counterparty).
		// NOTE(review): since that peer's entry was removed just above, this lookup
		// appears to always yield `None` here, making the `Some` arm unreachable —
		// confirm whether a fallback to *any* connected peer was intended instead.
		let peer_state_mutex_opt = per_peer_state.get(peer_node_id);

		match peer_state_mutex_opt {
			Some(peer_state_mutex) => {
				// Peer still present: hand the update to its message queue directly.
				let mut peer_state = peer_state_mutex.lock().unwrap();
				peer_state.pending_msg_events.push(brodcast_message_evt);
			}
			None => {
				// If we could not find the counterparty in our per_peer_state, we pool
				// the messages together in pending_broadcast_messages, and broadcast
				// them later.
				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
				pending_broadcast_messages.push(brodcast_message_evt);
				log_info!(self.logger, "Not able to broadcast channel_update of force-closed channel right now.
				Will try rebroadcasting later.");
			}
		}
	}

	Ok(counterparty_node_id)
}
3053+
29743054
fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
29753055
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
29763056
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
@@ -11456,6 +11536,69 @@ mod tests {
1145611536
}
1145711537
}
1145811538

11539+
// Checks that the `BroadcastChannelUpdate` produced by force-closing a channel — with the
// counterparty's `per_peer_state` entry removed mid-close (see
// `test_force_close_channel_with_peer`) — is either handed to a still-connected peer
// (`connected == true`) or queued in `pending_broadcast_messages` and flushed to the peer
// on the next reconnect (`connected == false`).
fn do_test_rebroadcasting_of_force_close_msg_to_a_peer(connected: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Disconnect the counterparty of the channel we are about to force-close.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	if !connected {
		// Also disconnect node 2 so node 0 has no connected peers at all.
		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	}

	nodes[0].node.test_force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);

	if connected {
		// Assert that the channel_update message has been added to node[2]'s pending msg events
		let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let peer_state_2 = nodes_0_per_peer_state.get(&nodes[2].node.get_our_node_id()).unwrap().lock().unwrap();
		assert_eq!(peer_state_2.pending_msg_events.len(), 1);
	}
	else {
		{
			// Assert that the channel_update was queued in pending_broadcast_messages instead,
			// since no peer was connected to receive it.
			let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
			assert_eq!(pending_broadcast_messages.len(), 1);
		}
		// Now node 0, and 2 reconnects
		// NOTE(review): `nodes[1].node.init_features()` below looks like a copy-paste of the
		// line for node 0's peer — presumably nodes[2]'s features were intended. Harmless
		// here since all three nodes are built from identical (None) configs — confirm.
		nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();

		{
			// Assert that the queued channel_update message has been moved to node[2]'s
			// pending msg events after the reconnect
			let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
			let peer_state_2 = nodes_0_per_peer_state.get(&nodes[2].node.get_our_node_id()).unwrap().lock().unwrap();
			assert_eq!(peer_state_2.pending_msg_events.len(), 1);
		}

		{
			// Assert that pending_broadcast_messages has been drained now that the update
			// was handed to the reconnected peer
			let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
			assert_eq!(pending_broadcast_messages.len(), 0);
		}
	}

	// Drain remaining events so the test harness's end-of-test checks pass.
	let _ = nodes[0].node.get_and_clear_pending_msg_events();
}
11595+
11596+
#[test]
fn test_rebroadcasting_of_force_close_msg_to_a_peer() {
	// Exercise both the no-connected-peer (queue-and-rebroadcast) and the
	// connected-peer (direct delivery) paths, in that order.
	for connected in [false, true] {
		do_test_rebroadcasting_of_force_close_msg_to_a_peer(connected);
	}
}
11601+
1145911602
#[test]
1146011603
fn test_drop_disconnected_peers_when_removing_channels() {
1146111604
let chanmon_cfgs = create_chanmon_cfgs(2);

0 commit comments

Comments
 (0)