@@ -123,7 +123,11 @@ struct Connection {
 	id: u64,
 }
 impl Connection {
-	async fn poll_event_process<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
+	async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH>(
+		peer_manager: PM,
+		mut event_receiver: mpsc::Receiver<()>,
+	) where
+		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync,
 		CMH: Deref + 'static + Send + Sync,
 		RMH: Deref + 'static + Send + Sync,
 		OMH: Deref + 'static + Send + Sync,
@@ -134,7 +138,7 @@ impl Connection {
 		OMH::Target: OnionMessageHandler + Send + Sync,
 		L::Target: Logger + Send + Sync,
 		UMH::Target: CustomMessageHandler + Send + Sync,
-	{
+	{
 		loop {
 			if event_receiver.recv().await.is_none() {
 				return;
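
The heart of the change: accepting a generic `PM: Deref<Target = PeerManager<...>>` instead of a concrete `Arc<PeerManager<...>>` lets callers hand in any smart-pointer-like handle. A minimal sketch of the pattern, with a stand-in `PeerManager` type (the real bound targets `peer_handler::PeerManager<SocketDescriptor, ...>`):

```rust
use std::ops::Deref;
use std::sync::Arc;

// Stand-in for peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>.
struct PeerManager;

// Same shape as the new bound: any cheaply-cloneable handle that derefs to
// the PeerManager is accepted, not just Arc.
fn setup<PM>(peer_manager: PM)
where
	PM: Deref<Target = PeerManager> + 'static + Send + Sync + Clone,
{
	// A second handle can be moved into a spawned task; this is what the
	// Clone bound on schedule_read below is for.
	let _for_task = peer_manager.clone();
	// Deref coercion gives access to the underlying PeerManager.
	let _pm: &PeerManager = &peer_manager;
}

fn main() {
	// Arc-based callers compile unchanged, since Arc<T>: Deref<Target = T> + Clone.
	setup(Arc::new(PeerManager));
	// A &'static reference now works too; &T is Copy, Clone, and Deref.
	static PM: PeerManager = PeerManager;
	setup(&PM);
}
```
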
@@ -143,7 +147,14 @@ impl Connection {
 			}
 		}
 	}
-	async fn schedule_read<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+	async fn schedule_read<PM, CMH, RMH, OMH, L, UMH>(
+		peer_manager: PM,
+		us: Arc<Mutex<Self>>,
+		mut reader: io::ReadHalf<TcpStream>,
+		mut read_wake_receiver: mpsc::Receiver<()>,
+		mut write_avail_receiver: mpsc::Receiver<()>,
+	) where
+		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
 		CMH: Deref + 'static + Send + Sync,
 		RMH: Deref + 'static + Send + Sync,
 		OMH: Deref + 'static + Send + Sync,
@@ -154,10 +165,10 @@ impl Connection {
 		OMH::Target: OnionMessageHandler + 'static + Send + Sync,
 		L::Target: Logger + 'static + Send + Sync,
 		UMH::Target: CustomMessageHandler + 'static + Send + Sync,
-	{
+	{
 		// Create a waker to wake up poll_event_process, above
 		let (event_waker, event_receiver) = mpsc::channel(1);
-		tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+		tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
 
 		// 8KB is nice and big but also should never cause any issues with stack overflowing.
 		let mut buf = [0; 8192];
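
The body change follows from the signature change: generic code cannot name `Arc`, so it leans on the `Clone` bound instead. A tiny sketch with a stand-in type (not the crate's):

```rust
use std::sync::Arc;

struct PeerManager; // stand-in type

fn main() {
	let peer_manager = Arc::new(PeerManager);
	// For PM = Arc<_> this is the same refcount bump Arc::clone performed;
	// for PM = &_ it is just a copy of the reference.
	let handle = peer_manager.clone();
	drop(handle);
}
```
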
@@ -272,7 +283,11 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
+	peer_manager: PM,
+	stream: StdTcpStream,
+) -> impl std::future::Future<Output=()> where
+	PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
 	CMH: Deref + 'static + Send + Sync,
 	RMH: Deref + 'static + Send + Sync,
 	OMH: Deref + 'static + Send + Sync,
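
The doc comment's contract, that the returned future need not be polled because all processing is spawned internally, can be illustrated with a self-contained analogue (a hypothetical `setup` helper, assuming tokio's rt-multi-thread and macros features; not the crate's API):

```rust
use tokio::sync::oneshot;

// All real work is spawned with tokio::spawn; the returned future only
// reports completion, so callers may await it, spawn it, or drop it.
fn setup(work: impl std::future::Future<Output = ()> + Send + 'static)
	-> impl std::future::Future<Output = ()>
{
	let (tx, rx) = oneshot::channel::<()>();
	tokio::spawn(async move {
		work.await;
		let _ = tx.send(());
	});
	async move { let _ = rx.await; }
}

#[tokio::main]
async fn main() {
	let done = setup(async { /* peer I/O would run here */ });
	// Dropping `done` instead would be fine; progress never depends on it.
	done.await;
}
```
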
@@ -321,7 +336,12 @@ pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Peer
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
+	peer_manager: PM,
+	their_node_id: PublicKey,
+	stream: StdTcpStream,
+) -> impl std::future::Future<Output=()> where
+	PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
 	CMH: Deref + 'static + Send + Sync,
 	RMH: Deref + 'static + Send + Sync,
 	OMH: Deref + 'static + Send + Sync,
@@ -399,7 +419,12 @@ pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Pee
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-pub async fn connect_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH>(
+	peer_manager: PM,
+	their_node_id: PublicKey,
+	addr: SocketAddr,
+) -> Option<impl std::future::Future<Output=()>> where
+	PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
 	CMH: Deref + 'static + Send + Sync,
 	RMH: Deref + 'static + Send + Sync,
 	OMH: Deref + 'static + Send + Sync,
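
`connect_outbound` keeps its `Option<impl Future>` shape: `None` when the TCP connection itself fails, otherwise a future that resolves on disconnect. A standalone analogue of that shape (illustrative names, not the crate's API):

```rust
use std::net::SocketAddr;

// None when the TCP connect fails; Some(fut) where fut resolves once the
// connection is torn down (here: immediately, when the stream drops).
async fn connect(addr: SocketAddr) -> Option<impl std::future::Future<Output = ()>> {
	let stream = tokio::net::TcpStream::connect(addr).await.ok()?;
	Some(async move {
		// The real code hands `stream` to the peer-handling machinery.
		drop(stream);
	})
}

#[tokio::main]
async fn main() {
	// 9735 is the conventional Lightning port; no listener is assumed to be
	// running here, so this will typically take the None branch.
	match connect("127.0.0.1:9735".parse().unwrap()).await {
		Some(disconnected) => { tokio::spawn(disconnected); },
		None => println!("TCP connect failed"),
	}
}
```
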