
Commit c069001

NeilBrown authored and davem330 committed
rhashtable: clean up dereference of ->future_tbl.
Using rht_dereference_bucket() to dereference ->future_tbl looks
like a type error, and could be confusing.
Using rht_dereference_rcu() to test a pointer for NULL adds an
unnecessary barrier - rcu_access_pointer() is preferred for NULL
tests when no lock is held.

This uses 3 different ways to access ->future_tbl.
- if we know the mutex is held, use rht_dereference()
- if we don't hold the mutex, and are only testing for NULL, use
  rcu_access_pointer()
- otherwise (using RCU protection for true dereference), use
  rht_dereference_rcu().

Note that this includes a simplification of the call to
rhashtable_last_table() - we don't do an extra dereference before
the call any more.

Acked-by: Herbert Xu <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
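For reference, the three access patterns can be shown in a minimal sketch. This is not part of the commit; the example_* wrappers are hypothetical, while rht_dereference(), rcu_access_pointer() and rht_dereference_rcu() are the real helpers referred to above:

/* Illustrative sketch only -- not from this commit.
 * The example_* wrappers are hypothetical.
 */
#include <linux/rhashtable.h>

/* 1. ht->mutex held (e.g. in the rehash worker): a plain
 * lockdep-checked dereference.
 */
static struct bucket_table *example_next_locked(struct rhashtable *ht,
						struct bucket_table *tbl)
{
	return rht_dereference(tbl->future_tbl, ht);
}

/* 2. No mutex held and only testing for NULL: rcu_access_pointer()
 * avoids the read-side ordering a full dereference would imply.
 */
static bool example_rehash_pending(struct bucket_table *tbl)
{
	return rcu_access_pointer(tbl->future_tbl) != NULL;
}

/* 3. Inside an RCU read-side critical section, actually following
 * the pointer: rht_dereference_rcu() accepts either RCU protection
 * or ht->mutex.
 */
static struct bucket_table *example_next_rcu(struct rhashtable *ht,
					     struct bucket_table *tbl)
{
	return rht_dereference_rcu(tbl->future_tbl, ht);
}

Patterns 2 and 3 correspond directly to the call sites changed below; pattern 1 is the form used where ht->mutex is known to be held, as inside rhashtable_last_table() itself.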
1 parent 0ad6644 commit c069001

2 files changed: 5 additions, 6 deletions

include/linux/rhashtable.h

Lines changed: 1 addition & 1 deletion
@@ -595,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
 	lock = rht_bucket_lock(tbl, hash);
 	spin_lock_bh(lock);
 
-	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
 		spin_unlock_bh(lock);
 		rcu_read_unlock();

lib/rhashtable.c

Lines changed: 4 additions & 5 deletions
@@ -226,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct bucket_table *new_tbl = rhashtable_last_table(ht,
-			rht_dereference_rcu(old_tbl->future_tbl, ht));
+	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
 	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
 	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
@@ -467,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 
 fail:
 	/* Do not fail the insert if someone else did a rehash. */
-	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+	if (likely(rcu_access_pointer(tbl->future_tbl)))
 		return 0;
 
 	/* Schedule async rehash to retry allocation in process context. */
@@ -540,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
 		return ERR_CAST(data);
 
-	new_tbl = rcu_dereference(tbl->future_tbl);
+	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (new_tbl)
 		return new_tbl;
 
@@ -599,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			break;
 
 		spin_unlock_bh(lock);
-		tbl = rcu_dereference(tbl->future_tbl);
+		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	}
 
 	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
