
Commit e47877c

htejun authored and davem330 committed
rhashtable: Allow rhashtable to be used from irq-safe contexts
rhashtable currently only does bh-safe synchronization, making it impossible
to use from irq-safe contexts. Switch it to use irq-safe synchronization to
remove the restriction.

v2: Update the lock functions to return the ulong flags value and the unlock
    functions to take the value directly, instead of passing around the
    pointer. Suggested by Linus.

Signed-off-by: Tejun Heo <[email protected]>
Reviewed-by: David Vernet <[email protected]>
Acked-by: Josh Don <[email protected]>
Acked-by: Hao Luo <[email protected]>
Acked-by: Barret Rhoden <[email protected]>
Cc: Linus Torvalds <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
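In practice the change is a new calling convention for the bucket-lock helpers: rht_lock() and rht_lock_nested() now save and disable interrupts via local_irq_save() and return the saved flags word, while rht_unlock() and rht_assign_unlock() take that word back and restore it with local_irq_restore(). A minimal illustrative sketch of the resulting caller pattern follows (the function name is hypothetical, and tbl, bkt and obj are assumed to have been set up as in __rhashtable_insert_fast(); error handling omitted):

/* Illustrative sketch only, not part of the commit. */
static void example_bucket_update(struct bucket_table *tbl,
                                  struct rhash_lock_head __rcu **bkt,
                                  struct rhash_head *obj)
{
        unsigned long flags;

        /* Saves and disables interrupts, takes the bucket bit spinlock,
         * and returns the saved flags word.
         */
        flags = rht_lock(tbl, bkt);

        /* ... modify the bucket chain under the lock ... */

        /* Publishes obj as the new bucket head, drops the lock and
         * restores the saved interrupt state.
         */
        rht_assign_unlock(tbl, bkt, obj, flags);
}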
1 parent b602d00 commit e47877c

File tree

2 files changed: +46 -31 lines changed


include/linux/rhashtable.h

Lines changed: 36 additions & 25 deletions
@@ -323,29 +323,36 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * When we write to a bucket without unlocking, we use rht_assign_locked().
  */
 
-static inline void rht_lock(struct bucket_table *tbl,
-                            struct rhash_lock_head __rcu **bkt)
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+                                     struct rhash_lock_head __rcu **bkt)
 {
-        local_bh_disable();
+        unsigned long flags;
+
+        local_irq_save(flags);
         bit_spin_lock(0, (unsigned long *)bkt);
         lock_map_acquire(&tbl->dep_map);
+        return flags;
 }
 
-static inline void rht_lock_nested(struct bucket_table *tbl,
-                                   struct rhash_lock_head __rcu **bucket,
-                                   unsigned int subclass)
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+                                            struct rhash_lock_head __rcu **bucket,
+                                            unsigned int subclass)
 {
-        local_bh_disable();
+        unsigned long flags;
+
+        local_irq_save(flags);
         bit_spin_lock(0, (unsigned long *)bucket);
         lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+        return flags;
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-                              struct rhash_lock_head __rcu **bkt)
+                              struct rhash_lock_head __rcu **bkt,
+                              unsigned long flags)
 {
         lock_map_release(&tbl->dep_map);
         bit_spin_unlock(0, (unsigned long *)bkt);
-        local_bh_enable();
+        local_irq_restore(flags);
 }
 
 static inline struct rhash_head *__rht_ptr(

@@ -393,15 +400,16 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
                                      struct rhash_lock_head __rcu **bkt,
-                                     struct rhash_head *obj)
+                                     struct rhash_head *obj,
+                                     unsigned long flags)
 {
         if (rht_is_a_nulls(obj))
                 obj = NULL;
         lock_map_release(&tbl->dep_map);
         rcu_assign_pointer(*bkt, (void *)obj);
         preempt_enable();
         __release(bitlock);
-        local_bh_enable();
+        local_irq_restore(flags);
 }
 
 /**

@@ -706,6 +714,7 @@ static inline void *__rhashtable_insert_fast(
         struct rhash_head __rcu **pprev;
         struct bucket_table *tbl;
         struct rhash_head *head;
+        unsigned long flags;
         unsigned int hash;
         int elasticity;
         void *data;

@@ -720,11 +729,11 @@ static inline void *__rhashtable_insert_fast(
         if (!bkt)
                 goto out;
         pprev = NULL;
-        rht_lock(tbl, bkt);
+        flags = rht_lock(tbl, bkt);
 
         if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
-                rht_unlock(tbl, bkt);
+                rht_unlock(tbl, bkt, flags);
                 rcu_read_unlock();
                 return rhashtable_insert_slow(ht, key, obj);
         }

@@ -756,9 +765,9 @@ static inline void *__rhashtable_insert_fast(
                 RCU_INIT_POINTER(list->rhead.next, head);
                 if (pprev) {
                         rcu_assign_pointer(*pprev, obj);
-                        rht_unlock(tbl, bkt);
+                        rht_unlock(tbl, bkt, flags);
                 } else
-                        rht_assign_unlock(tbl, bkt, obj);
+                        rht_assign_unlock(tbl, bkt, obj, flags);
                 data = NULL;
                 goto out;
         }

@@ -785,7 +794,7 @@ static inline void *__rhashtable_insert_fast(
         }
 
         atomic_inc(&ht->nelems);
-        rht_assign_unlock(tbl, bkt, obj);
+        rht_assign_unlock(tbl, bkt, obj, flags);
 
         if (rht_grow_above_75(ht, tbl))
                 schedule_work(&ht->run_work);

@@ -797,7 +806,7 @@ static inline void *__rhashtable_insert_fast(
         return data;
 
 out_unlock:
-        rht_unlock(tbl, bkt);
+        rht_unlock(tbl, bkt, flags);
         goto out;
 }
 

@@ -991,6 +1000,7 @@ static inline int __rhashtable_remove_fast_one(
         struct rhash_lock_head __rcu **bkt;
         struct rhash_head __rcu **pprev;
         struct rhash_head *he;
+        unsigned long flags;
         unsigned int hash;
         int err = -ENOENT;
 

@@ -999,7 +1009,7 @@ static inline int __rhashtable_remove_fast_one(
         if (!bkt)
                 return -ENOENT;
         pprev = NULL;
-        rht_lock(tbl, bkt);
+        flags = rht_lock(tbl, bkt);
 
         rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                 struct rhlist_head *list;

@@ -1043,14 +1053,14 @@ static inline int __rhashtable_remove_fast_one(
 
                 if (pprev) {
                         rcu_assign_pointer(*pprev, obj);
-                        rht_unlock(tbl, bkt);
+                        rht_unlock(tbl, bkt, flags);
                 } else {
-                        rht_assign_unlock(tbl, bkt, obj);
+                        rht_assign_unlock(tbl, bkt, obj, flags);
                 }
                 goto unlocked;
         }
 
-        rht_unlock(tbl, bkt);
+        rht_unlock(tbl, bkt, flags);
 unlocked:
         if (err > 0) {
                 atomic_dec(&ht->nelems);

@@ -1143,6 +1153,7 @@ static inline int __rhashtable_replace_fast(
         struct rhash_lock_head __rcu **bkt;
         struct rhash_head __rcu **pprev;
         struct rhash_head *he;
+        unsigned long flags;
         unsigned int hash;
         int err = -ENOENT;
 

@@ -1158,7 +1169,7 @@ static inline int __rhashtable_replace_fast(
                 return -ENOENT;
 
         pprev = NULL;
-        rht_lock(tbl, bkt);
+        flags = rht_lock(tbl, bkt);
 
         rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                 if (he != obj_old) {

@@ -1169,15 +1180,15 @@ static inline int __rhashtable_replace_fast(
                 rcu_assign_pointer(obj_new->next, obj_old->next);
                 if (pprev) {
                         rcu_assign_pointer(*pprev, obj_new);
-                        rht_unlock(tbl, bkt);
+                        rht_unlock(tbl, bkt, flags);
                 } else {
-                        rht_assign_unlock(tbl, bkt, obj_new);
+                        rht_assign_unlock(tbl, bkt, obj_new, flags);
                 }
                 err = 0;
                 goto unlocked;
         }
 
-        rht_unlock(tbl, bkt);
+        rht_unlock(tbl, bkt, flags);
 
 unlocked:
         return err;

lib/rhashtable.c

Lines changed: 10 additions & 6 deletions
@@ -231,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
         struct rhash_head *head, *next, *entry;
         struct rhash_head __rcu **pprev = NULL;
         unsigned int new_hash;
+        unsigned long flags;
 
         if (new_tbl->nest)
                 goto out;

@@ -253,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 
         new_hash = head_hashfn(ht, new_tbl, entry);
 
-        rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
+        flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+                                SINGLE_DEPTH_NESTING);
 
         head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
         RCU_INIT_POINTER(entry->next, head);
 
-        rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
+        rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
 
         if (pprev)
                 rcu_assign_pointer(*pprev, next);

@@ -276,18 +278,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 {
         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
         struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+        unsigned long flags;
         int err;
 
         if (!bkt)
                 return 0;
-        rht_lock(old_tbl, bkt);
+        flags = rht_lock(old_tbl, bkt);
 
         while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
                 ;
 
         if (err == -ENOENT)
                 err = 0;
-        rht_unlock(old_tbl, bkt);
+        rht_unlock(old_tbl, bkt, flags);
 
         return err;
 }

@@ -590,6 +593,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
         struct bucket_table *new_tbl;
         struct bucket_table *tbl;
         struct rhash_lock_head __rcu **bkt;
+        unsigned long flags;
         unsigned int hash;
         void *data;
 

@@ -607,15 +611,15 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                         new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
                         data = ERR_PTR(-EAGAIN);
                 } else {
-                        rht_lock(tbl, bkt);
+                        flags = rht_lock(tbl, bkt);
                         data = rhashtable_lookup_one(ht, bkt, tbl,
                                                      hash, key, obj);
                         new_tbl = rhashtable_insert_one(ht, bkt, tbl,
                                                         hash, obj, data);
                         if (PTR_ERR(new_tbl) != -EEXIST)
                                 data = ERR_CAST(new_tbl);
 
-                        rht_unlock(tbl, bkt);
+                        rht_unlock(tbl, bkt, flags);
                 }
         } while (!IS_ERR_OR_NULL(new_tbl));
 