@@ -323,29 +323,36 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * When we write to a bucket without unlocking, we use rht_assign_locked().
  */
 
-static inline void rht_lock(struct bucket_table *tbl,
-			    struct rhash_lock_head __rcu **bkt)
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+				     struct rhash_lock_head __rcu **bkt)
 {
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	bit_spin_lock(0, (unsigned long *)bkt);
 	lock_map_acquire(&tbl->dep_map);
+	return flags;
 }
 
-static inline void rht_lock_nested(struct bucket_table *tbl,
-				   struct rhash_lock_head __rcu **bucket,
-				   unsigned int subclass)
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+					    struct rhash_lock_head __rcu **bucket,
+					    unsigned int subclass)
 {
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	bit_spin_lock(0, (unsigned long *)bucket);
 	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+	return flags;
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-			      struct rhash_lock_head __rcu **bkt)
+			      struct rhash_lock_head __rcu **bkt,
+			      unsigned long flags)
 {
 	lock_map_release(&tbl->dep_map);
 	bit_spin_unlock(0, (unsigned long *)bkt);
-	local_bh_enable();
+	local_irq_restore(flags);
 }
 
 static inline struct rhash_head *__rht_ptr(
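
The core of the change is above: rht_lock() and rht_lock_nested() switch from local_bh_disable() to local_irq_save(), and return the saved interrupt flags so that rht_unlock() can restore exactly the interrupt state the caller started with. A minimal sketch of the new calling convention (illustrative only; tbl and bkt stand for a bucket table and a bucket pointer already looked up elsewhere):

	unsigned long flags;

	flags = rht_lock(tbl, bkt);	/* irqs off, bit 0 of *bkt spin-locked */
	/* ... inspect or modify the bucket chain ... */
	rht_unlock(tbl, bkt, flags);	/* bit lock released, irq state restored */

Returning the flags instead of stashing them in the table keeps the primitive re-entrant: each lock/unlock pair restores precisely the state it saved, which matters when bucket locks nest across the old and new table during a resize (hence rht_lock_nested() and its lockdep subclass).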
@@ -393,15 +400,16 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
 				     struct rhash_lock_head __rcu **bkt,
-				     struct rhash_head *obj)
+				     struct rhash_head *obj,
+				     unsigned long flags)
 {
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
 	lock_map_release(&tbl->dep_map);
 	rcu_assign_pointer(*bkt, (void *)obj);
 	preempt_enable();
 	__release(bitlock);
-	local_bh_enable();
+	local_irq_restore(flags);
 }
 
 /**
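
rht_assign_unlock() gains the same flags parameter. Note that there is deliberately no bit_spin_unlock() here: bit 0 of the bucket pointer doubles as the lock bit, so publishing the new head with rcu_assign_pointer() (an aligned pointer with bit 0 clear) updates the chain and drops the lock in a single store; preempt_enable() and __release(bitlock) balance the accounting that bit_spin_lock() performed. Conceptually, the unlock tail now behaves like this sketch (not the literal implementation):

	rcu_assign_pointer(*bkt, (void *)obj);	/* new head, lock bit 0 clear */
	local_irq_restore(flags);		/* previously local_bh_enable() */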
@@ -706,6 +714,7 @@ static inline void *__rhashtable_insert_fast(
 	struct rhash_head __rcu **pprev;
 	struct bucket_table *tbl;
 	struct rhash_head *head;
+	unsigned long flags;
 	unsigned int hash;
 	int elasticity;
 	void *data;
@@ -720,11 +729,11 @@ static inline void *__rhashtable_insert_fast(
 	if (!bkt)
 		goto out;
 	pprev = NULL;
-	rht_lock(tbl, bkt);
+	flags = rht_lock(tbl, bkt);
 
 	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
-		rht_unlock(tbl, bkt);
+		rht_unlock(tbl, bkt, flags);
 		rcu_read_unlock();
 		return rhashtable_insert_slow(ht, key, obj);
 	}
@@ -756,9 +765,9 @@ static inline void *__rhashtable_insert_fast(
 			RCU_INIT_POINTER(list->rhead.next, head);
 			if (pprev) {
 				rcu_assign_pointer(*pprev, obj);
-				rht_unlock(tbl, bkt);
+				rht_unlock(tbl, bkt, flags);
 			} else
-				rht_assign_unlock(tbl, bkt, obj);
+				rht_assign_unlock(tbl, bkt, obj, flags);
 			data = NULL;
 			goto out;
 		}
@@ -785,7 +794,7 @@ static inline void *__rhashtable_insert_fast(
 	}
 
 	atomic_inc(&ht->nelems);
-	rht_assign_unlock(tbl, bkt, obj);
+	rht_assign_unlock(tbl, bkt, obj, flags);
 
 	if (rht_grow_above_75(ht, tbl))
 		schedule_work(&ht->run_work);
@@ -797,7 +806,7 @@ static inline void *__rhashtable_insert_fast(
 	return data;
 
 out_unlock:
-	rht_unlock(tbl, bkt);
+	rht_unlock(tbl, bkt, flags);
 	goto out;
 }
 
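All of the __rhashtable_insert_fast() hunks above are the same mechanical conversion: declare flags, capture it from rht_lock(), and hand it to whichever unlock terminates a given path — rht_unlock() where an interior pointer was rewritten, rht_assign_unlock() where the bucket head itself is published. A condensed skeleton of the pattern (the condition name is a stand-in for the real future_tbl check):

	unsigned long flags;

	flags = rht_lock(tbl, bkt);
	if (table_is_resizing) {		/* stand-in condition */
		rht_unlock(tbl, bkt, flags);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}
	/* ... link obj into the chain ... */
	rht_assign_unlock(tbl, bkt, obj, flags);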
@@ -991,6 +1000,7 @@ static inline int __rhashtable_remove_fast_one(
 	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
+	unsigned long flags;
 	unsigned int hash;
 	int err = -ENOENT;
 
@@ -999,7 +1009,7 @@ static inline int __rhashtable_remove_fast_one(
 	if (!bkt)
 		return -ENOENT;
 	pprev = NULL;
-	rht_lock(tbl, bkt);
+	flags = rht_lock(tbl, bkt);
 
 	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		struct rhlist_head *list;
@@ -1043,14 +1053,14 @@ static inline int __rhashtable_remove_fast_one(
 
 		if (pprev) {
 			rcu_assign_pointer(*pprev, obj);
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		} else {
-			rht_assign_unlock(tbl, bkt, obj);
+			rht_assign_unlock(tbl, bkt, obj, flags);
 		}
 		goto unlocked;
 	}
 
-	rht_unlock(tbl, bkt);
+	rht_unlock(tbl, bkt, flags);
 unlocked:
 	if (err > 0) {
 		atomic_dec(&ht->nelems);
@@ -1143,6 +1153,7 @@ static inline int __rhashtable_replace_fast(
 	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
+	unsigned long flags;
 	unsigned int hash;
 	int err = -ENOENT;
 
@@ -1158,7 +1169,7 @@ static inline int __rhashtable_replace_fast(
 		return -ENOENT;
 
 	pprev = NULL;
-	rht_lock(tbl, bkt);
+	flags = rht_lock(tbl, bkt);
 
 	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		if (he != obj_old) {
@@ -1169,15 +1180,15 @@ static inline int __rhashtable_replace_fast(
 		rcu_assign_pointer(obj_new->next, obj_old->next);
 		if (pprev) {
 			rcu_assign_pointer(*pprev, obj_new);
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		} else {
-			rht_assign_unlock(tbl, bkt, obj_new);
+			rht_assign_unlock(tbl, bkt, obj_new, flags);
 		}
 		err = 0;
 		goto unlocked;
 	}
 
-	rht_unlock(tbl, bkt);
+	rht_unlock(tbl, bkt, flags);
 
 unlocked:
 	return err;
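
__rhashtable_remove_fast_one() and __rhashtable_replace_fast() receive the identical treatment, so every bucket lock/unlock pair in the header now saves and restores interrupt state. The payoff is that rhashtable becomes usable from irq-safe contexts: local_bh_enable() may run softirqs and must not be called with interrupts disabled, whereas local_irq_save()/local_irq_restore() is safe there, and a bucket lock taken in process context can no longer deadlock against the same lock taken from hard-interrupt context on the same CPU. A hypothetical caller that this enables (my_lock, my_ht, my_params, and obj are illustrative, not part of the patch):

	static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock, file scope */

	unsigned long irqflags;
	int err;

	spin_lock_irqsave(&my_lock, irqflags);
	/* now legal with irqs off; previously forbidden under BH-only locking */
	err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
	spin_unlock_irqrestore(&my_lock, irqflags);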