Skip to content

Commit ce9b362

Browse files
herbertx authored and davem330 committed
rhashtable: Restore RCU marking on rhash_lock_head
This patch restores the RCU marking on bucket_table->buckets as it really does need RCU protection. Its removal had led to a fatal bug. Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 1748f6a commit ce9b362

File tree

2 files changed

+40
-51
lines changed

2 files changed

+40
-51
lines changed

include/linux/rhashtable.h

Lines changed: 24 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ struct bucket_table {
8484

8585
struct lockdep_map dep_map;
8686

87-
struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
87+
struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
8888
};
8989

9090
/*
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
261261
void *arg);
262262
void rhashtable_destroy(struct rhashtable *ht);
263263

264-
struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
265-
unsigned int hash);
266-
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
267-
unsigned int hash);
268-
struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
269-
struct bucket_table *tbl,
270-
unsigned int hash);
264+
struct rhash_lock_head __rcu **rht_bucket_nested(
265+
const struct bucket_table *tbl, unsigned int hash);
266+
struct rhash_lock_head __rcu **__rht_bucket_nested(
267+
const struct bucket_table *tbl, unsigned int hash);
268+
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
269+
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
271270

272271
#define rht_dereference(p, ht) \
273272
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
284283
#define rht_entry(tpos, pos, member) \
285284
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
286285

287-
static inline struct rhash_lock_head *const *rht_bucket(
286+
static inline struct rhash_lock_head __rcu *const *rht_bucket(
288287
const struct bucket_table *tbl, unsigned int hash)
289288
{
290289
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
291290
&tbl->buckets[hash];
292291
}
293292

294-
static inline struct rhash_lock_head **rht_bucket_var(
293+
static inline struct rhash_lock_head __rcu **rht_bucket_var(
295294
struct bucket_table *tbl, unsigned int hash)
296295
{
297296
return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
298297
&tbl->buckets[hash];
299298
}
300299

301-
static inline struct rhash_lock_head **rht_bucket_insert(
300+
static inline struct rhash_lock_head __rcu **rht_bucket_insert(
302301
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
303302
{
304303
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,15 +324,15 @@ static inline struct rhash_lock_head **rht_bucket_insert(
325324
*/
326325

327326
static inline void rht_lock(struct bucket_table *tbl,
328-
struct rhash_lock_head **bkt)
327+
struct rhash_lock_head __rcu **bkt)
329328
{
330329
local_bh_disable();
331330
bit_spin_lock(0, (unsigned long *)bkt);
332331
lock_map_acquire(&tbl->dep_map);
333332
}
334333

335334
static inline void rht_lock_nested(struct bucket_table *tbl,
336-
struct rhash_lock_head **bucket,
335+
struct rhash_lock_head __rcu **bucket,
337336
unsigned int subclass)
338337
{
339338
local_bh_disable();
@@ -342,7 +341,7 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
342341
}
343342

344343
static inline void rht_unlock(struct bucket_table *tbl,
345-
struct rhash_lock_head **bkt)
344+
struct rhash_lock_head __rcu **bkt)
346345
{
347346
lock_map_release(&tbl->dep_map);
348347
bit_spin_unlock(0, (unsigned long *)bkt);
@@ -365,48 +364,41 @@ static inline struct rhash_head *__rht_ptr(
365364
* access is guaranteed, such as when destroying the table.
366365
*/
367366
static inline struct rhash_head *rht_ptr_rcu(
368-
struct rhash_lock_head *const *p)
367+
struct rhash_lock_head __rcu *const *bkt)
369368
{
370-
struct rhash_lock_head __rcu *const *bkt = (void *)p;
371369
return __rht_ptr(rcu_dereference(*bkt), bkt);
372370
}
373371

374372
static inline struct rhash_head *rht_ptr(
375-
struct rhash_lock_head *const *p,
373+
struct rhash_lock_head __rcu *const *bkt,
376374
struct bucket_table *tbl,
377375
unsigned int hash)
378376
{
379-
struct rhash_lock_head __rcu *const *bkt = (void *)p;
380377
return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
381378
}
382379

383380
static inline struct rhash_head *rht_ptr_exclusive(
384-
struct rhash_lock_head *const *p)
381+
struct rhash_lock_head __rcu *const *bkt)
385382
{
386-
struct rhash_lock_head __rcu *const *bkt = (void *)p;
387383
return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
388384
}
389385

390-
static inline void rht_assign_locked(struct rhash_lock_head **bkt,
386+
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
391387
struct rhash_head *obj)
392388
{
393-
struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
394-
395389
if (rht_is_a_nulls(obj))
396390
obj = NULL;
397-
rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
391+
rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
398392
}
399393

400394
static inline void rht_assign_unlock(struct bucket_table *tbl,
401-
struct rhash_lock_head **bkt,
395+
struct rhash_lock_head __rcu **bkt,
402396
struct rhash_head *obj)
403397
{
404-
struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
405-
406398
if (rht_is_a_nulls(obj))
407399
obj = NULL;
408400
lock_map_release(&tbl->dep_map);
409-
rcu_assign_pointer(*p, obj);
401+
rcu_assign_pointer(*bkt, (void *)obj);
410402
preempt_enable();
411403
__release(bitlock);
412404
local_bh_enable();
@@ -594,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
594586
.ht = ht,
595587
.key = key,
596588
};
597-
struct rhash_lock_head *const *bkt;
589+
struct rhash_lock_head __rcu *const *bkt;
598590
struct bucket_table *tbl;
599591
struct rhash_head *he;
600592
unsigned int hash;
@@ -710,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
710702
.ht = ht,
711703
.key = key,
712704
};
713-
struct rhash_lock_head **bkt;
705+
struct rhash_lock_head __rcu **bkt;
714706
struct rhash_head __rcu **pprev;
715707
struct bucket_table *tbl;
716708
struct rhash_head *head;
@@ -996,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
996988
struct rhash_head *obj, const struct rhashtable_params params,
997989
bool rhlist)
998990
{
999-
struct rhash_lock_head **bkt;
991+
struct rhash_lock_head __rcu **bkt;
1000992
struct rhash_head __rcu **pprev;
1001993
struct rhash_head *he;
1002994
unsigned int hash;
@@ -1148,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
11481140
struct rhash_head *obj_old, struct rhash_head *obj_new,
11491141
const struct rhashtable_params params)
11501142
{
1151-
struct rhash_lock_head **bkt;
1143+
struct rhash_lock_head __rcu **bkt;
11521144
struct rhash_head __rcu **pprev;
11531145
struct rhash_head *he;
11541146
unsigned int hash;

lib/rhashtable.c

Lines changed: 16 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131

3232
union nested_table {
3333
union nested_table __rcu *table;
34-
struct rhash_lock_head *bucket;
34+
struct rhash_lock_head __rcu *bucket;
3535
};
3636

3737
static u32 head_hashfn(struct rhashtable *ht,
@@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
222222
}
223223

224224
static int rhashtable_rehash_one(struct rhashtable *ht,
225-
struct rhash_lock_head **bkt,
225+
struct rhash_lock_head __rcu **bkt,
226226
unsigned int old_hash)
227227
{
228228
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
275275
unsigned int old_hash)
276276
{
277277
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
278-
struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
278+
struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
279279
int err;
280280

281281
if (!bkt)
@@ -485,7 +485,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
485485
}
486486

487487
static void *rhashtable_lookup_one(struct rhashtable *ht,
488-
struct rhash_lock_head **bkt,
488+
struct rhash_lock_head __rcu **bkt,
489489
struct bucket_table *tbl, unsigned int hash,
490490
const void *key, struct rhash_head *obj)
491491
{
@@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
535535
return ERR_PTR(-ENOENT);
536536
}
537537

538-
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
539-
struct rhash_lock_head **bkt,
540-
struct bucket_table *tbl,
541-
unsigned int hash,
542-
struct rhash_head *obj,
543-
void *data)
538+
static struct bucket_table *rhashtable_insert_one(
539+
struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
540+
struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
541+
void *data)
544542
{
545543
struct bucket_table *new_tbl;
546544
struct rhash_head *head;
@@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
591589
{
592590
struct bucket_table *new_tbl;
593591
struct bucket_table *tbl;
594-
struct rhash_lock_head **bkt;
592+
struct rhash_lock_head __rcu **bkt;
595593
unsigned int hash;
596594
void *data;
597595

@@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
11731171
}
11741172
EXPORT_SYMBOL_GPL(rhashtable_destroy);
11751173

1176-
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
1177-
unsigned int hash)
1174+
struct rhash_lock_head __rcu **__rht_bucket_nested(
1175+
const struct bucket_table *tbl, unsigned int hash)
11781176
{
11791177
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
11801178
unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1202,20 +1200,19 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
12021200
}
12031201
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
12041202

1205-
struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
1206-
unsigned int hash)
1203+
struct rhash_lock_head __rcu **rht_bucket_nested(
1204+
const struct bucket_table *tbl, unsigned int hash)
12071205
{
1208-
static struct rhash_lock_head *rhnull;
1206+
static struct rhash_lock_head __rcu *rhnull;
12091207

12101208
if (!rhnull)
12111209
INIT_RHT_NULLS_HEAD(rhnull);
12121210
return __rht_bucket_nested(tbl, hash) ?: &rhnull;
12131211
}
12141212
EXPORT_SYMBOL_GPL(rht_bucket_nested);
12151213

1216-
struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
1217-
struct bucket_table *tbl,
1218-
unsigned int hash)
1214+
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
1215+
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
12191216
{
12201217
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
12211218
unsigned int index = hash & ((1 << tbl->nest) - 1);

0 commit comments

Comments (0)