@@ -58,18 +58,18 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(ipv6_flowlabel_exclusive, HZ);
 EXPORT_SYMBOL(ipv6_flowlabel_exclusive);
 
 #define for_each_fl_rcu(hash, fl)				\
-	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
+	for (fl = rcu_dereference(fl_ht[(hash)]);		\
 	     fl != NULL;					\
-	     fl = rcu_dereference_bh(fl->next))
+	     fl = rcu_dereference(fl->next))
 #define for_each_fl_continue_rcu(fl)				\
-	for (fl = rcu_dereference_bh(fl->next);			\
+	for (fl = rcu_dereference(fl->next);			\
 	     fl != NULL;					\
-	     fl = rcu_dereference_bh(fl->next))
+	     fl = rcu_dereference(fl->next))
 
 #define for_each_sk_fl_rcu(np, sfl)				\
-	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
+	for (sfl = rcu_dereference(np->ipv6_fl_list);		\
 	     sfl != NULL;					\
-	     sfl = rcu_dereference_bh(sfl->next))
+	     sfl = rcu_dereference(sfl->next))
 
 static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
 {
@@ -86,11 +86,11 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
 {
 	struct ip6_flowlabel *fl;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	fl = __fl_lookup(net, label);
 	if (fl && !atomic_inc_not_zero(&fl->users))
 		fl = NULL;
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 	return fl;
 }
 
@@ -217,6 +217,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
 
 	fl->label = label & IPV6_FLOWLABEL_MASK;
 
+	rcu_read_lock();
 	spin_lock_bh(&ip6_fl_lock);
 	if (label == 0) {
 		for (;;) {
@@ -240,6 +241,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
 		if (lfl) {
 			atomic_inc(&lfl->users);
 			spin_unlock_bh(&ip6_fl_lock);
+			rcu_read_unlock();
 			return lfl;
 		}
 	}
@@ -249,6 +251,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
 	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
 	atomic_inc(&fl_size);
 	spin_unlock_bh(&ip6_fl_lock);
+	rcu_read_unlock();
 	return NULL;
 }
 
@@ -263,17 +266,17 @@ struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label)
 
 	label &= IPV6_FLOWLABEL_MASK;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	for_each_sk_fl_rcu(np, sfl) {
 		struct ip6_flowlabel *fl = sfl->fl;
 
 		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
 			fl->lastuse = jiffies;
-			rcu_read_unlock_bh();
+			rcu_read_unlock();
 			return fl;
 		}
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(__fl6_sock_lookup);
@@ -475,10 +478,10 @@ static int mem_check(struct sock *sk)
 	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
 		return 0;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	for_each_sk_fl_rcu(np, sfl)
 		count++;
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	if (room <= 0 ||
 	    ((count >= FL_MAX_PER_SOCK ||
@@ -515,7 +518,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
 		return 0;
 	}
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 
 	for_each_sk_fl_rcu(np, sfl) {
 		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
@@ -527,11 +530,11 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
 			freq->flr_linger = sfl->fl->linger / HZ;
 
 			spin_unlock_bh(&ip6_fl_lock);
-			rcu_read_unlock_bh();
+			rcu_read_unlock();
 			return 0;
 		}
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	return -ENOENT;
 }
@@ -581,16 +584,16 @@ static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq)
 	struct ipv6_fl_socklist *sfl;
 	int err;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	for_each_sk_fl_rcu(np, sfl) {
 		if (sfl->fl->label == freq->flr_label) {
 			err = fl6_renew(sfl->fl, freq->flr_linger,
 					freq->flr_expires);
-			rcu_read_unlock_bh();
+			rcu_read_unlock();
 			return err;
 		}
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	if (freq->flr_share == IPV6_FL_S_NONE &&
 	    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
@@ -641,11 +644,11 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
 
 	if (freq->flr_label) {
 		err = -EEXIST;
-		rcu_read_lock_bh();
+		rcu_read_lock();
 		for_each_sk_fl_rcu(np, sfl) {
 			if (sfl->fl->label == freq->flr_label) {
 				if (freq->flr_flags & IPV6_FL_F_EXCL) {
-					rcu_read_unlock_bh();
+					rcu_read_unlock();
 					goto done;
 				}
 				fl1 = sfl->fl;
@@ -654,7 +657,7 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
 				break;
 			}
 		}
-		rcu_read_unlock_bh();
+		rcu_read_unlock();
 
 		if (!fl1)
 			fl1 = fl_lookup(net, freq->flr_label);
@@ -809,7 +812,7 @@ static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
 
 	state->pid_ns = proc_pid_ns(file_inode(seq->file)->i_sb);
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
@@ -828,7 +831,7 @@ static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void ip6fl_seq_stop(struct seq_file *seq, void *v)
 	__releases(RCU)
 {
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
}
 
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
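Every hunk applies the same conversion: reader-side sections drop the RCU-bh flavor, so rcu_read_lock_bh()/rcu_read_unlock_bh() become plain rcu_read_lock()/rcu_read_unlock() and the traversal macros use rcu_dereference() instead of rcu_dereference_bh(), while fl_intern() gains an explicit rcu_read_lock() around its spin_lock_bh() section so the insertion path still runs inside an RCU read-side critical section. This presumably relies on the RCU flavor consolidation (kernels v4.20 and later), where a synchronize_rcu() grace period also covers readers running with BH disabled. Below is a minimal, hedged sketch of the same pattern; my_node, my_head, my_lock, my_lookup and my_insert are hypothetical names, not part of ip6_flowlabel.c.

/*
 * Sketch only: illustrates the rcu_read_lock()/rcu_dereference()
 * pairing used by the hunks above. All identifiers are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct my_node {
	int key;
	refcount_t users;
	struct my_node __rcu *next;
};

static struct my_node __rcu *my_head;
static DEFINE_SPINLOCK(my_lock);

/* Reader side: plain RCU, mirroring fl_lookup(). The node must be
 * pinned (here refcount_inc_not_zero(), like atomic_inc_not_zero()
 * on fl->users) before the read-side critical section ends. */
static struct my_node *my_lookup(int key)
{
	struct my_node *n;

	rcu_read_lock();
	for (n = rcu_dereference(my_head); n; n = rcu_dereference(n->next)) {
		if (n->key == key) {
			if (!refcount_inc_not_zero(&n->users))
				n = NULL;
			break;
		}
	}
	rcu_read_unlock();
	return n;
}

/* Writer side: a BH-disabling spinlock serializes writers, and the
 * whole update runs under rcu_read_lock(), matching the fl_intern()
 * hunks above. */
static void my_insert(struct my_node *n)
{
	rcu_read_lock();
	spin_lock_bh(&my_lock);
	RCU_INIT_POINTER(n->next,
			 rcu_dereference_protected(my_head,
						   lockdep_is_held(&my_lock)));
	rcu_assign_pointer(my_head, n);
	spin_unlock_bh(&my_lock);
	rcu_read_unlock();
}

A release path paired with these readers would free nodes via synchronize_rcu() or kfree_rcu() rather than the _bh variants; after the flavor consolidation those grace periods also wait for BH-disabled readers, which is what makes dropping the _bh flavor in this file safe.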