Commit a7ef23e

Merge branch 'rhashtable-Fix-unprotected-RCU-dereference-in-__rht_ptr'
Herbert Xu says:

====================
rhashtable: Fix unprotected RCU dereference in __rht_ptr

This patch series fixes an unprotected dereference in __rht_ptr. The
first patch is a minimal fix that does not use the correct RCU markings
but is suitable for backport, and the second patch cleans up the RCU
markings.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents: 19016d9 + ce9b362
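For context, the problem the series addresses is that the old __rht_ptr() loaded *bkt itself with a plain read, so even the RCU-reader path in rht_ptr_rcu() ended up dereferencing the bucket slot without an RCU-marked load. After the change (see the diff to include/linux/rhashtable.h below), each caller performs the appropriately protected load (rcu_dereference(), rht_dereference_bucket(), or rcu_dereference_protected()) and passes the result into __rht_ptr(), which only masks off the lock bit or substitutes the nulls marker. The following is a minimal standalone C sketch of just that masking step, assuming a simplified model outside the kernel; the names rhash_head_sketch, nulls_marker() and sketch_rht_ptr() are invented for illustration and are not the kernel API.

/*
 * Standalone sketch (not kernel code): a simplified model of the bucket
 * slot handled by __rht_ptr().  The slot stores a pointer whose low bit
 * doubles as the bucket bit-spin-lock, and an empty bucket is reported
 * as a "nulls marker" derived from the slot address.  All names here
 * are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct rhash_head_sketch {
	struct rhash_head_sketch *next;
};

#define LOCK_BIT 1UL	/* low bit of the slot is the lock, as in rht_lock() */

/* Stand-in for RHT_NULLS_MARKER(bkt): an odd "end of chain" value. */
static struct rhash_head_sketch *nulls_marker(const void *bkt)
{
	return (struct rhash_head_sketch *)((uintptr_t)bkt | 1UL);
}

/*
 * Models the new-style __rht_ptr(): the caller hands in the value it has
 * already read from the slot (with rcu_dereference() or a lockdep-checked
 * equivalent in the kernel), plus the slot address for the nulls marker.
 * The helper itself never reads the slot, so it cannot perform an
 * unmarked load.
 */
static struct rhash_head_sketch *sketch_rht_ptr(uintptr_t slot_val,
						const void *bkt)
{
	uintptr_t p = slot_val & ~LOCK_BIT;

	return p ? (struct rhash_head_sketch *)p : nulls_marker(bkt);
}

int main(void)
{
	struct rhash_head_sketch obj = { .next = NULL };
	uintptr_t full = (uintptr_t)&obj | LOCK_BIT;	/* locked, non-empty bucket */
	uintptr_t empty = LOCK_BIT;			/* locked, empty bucket */

	printf("chain head  : %p (expected %p)\n",
	       (void *)sketch_rht_ptr(full, &full), (void *)&obj);
	printf("empty bucket: %p (nulls marker)\n",
	       (void *)sketch_rht_ptr(empty, &empty));
	return 0;
}

In the patch itself the same split shows up as rht_ptr_rcu(), rht_ptr() and rht_ptr_exclusive() each wrapping __rht_ptr() around an appropriately protected load of *bkt, as the header diff below shows.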

File tree: 2 files changed (+47, -57 lines)


include/linux/rhashtable.h

Lines changed: 31 additions & 38 deletions
@@ -84,7 +84,7 @@ struct bucket_table {
 
 	struct lockdep_map	dep_map;
 
-	struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
+	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-					   unsigned int hash);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-					     unsigned int hash);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-						  struct bucket_table *tbl,
-						  unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
 
 #define rht_dereference(p, ht) \
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_lock_head *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
 	const struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
 	struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,15 +324,15 @@ static inline struct rhash_lock_head **rht_bucket_insert(
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
-			    struct rhash_lock_head **bkt)
+			    struct rhash_lock_head __rcu **bkt)
 {
 	local_bh_disable();
 	bit_spin_lock(0, (unsigned long *)bkt);
 	lock_map_acquire(&tbl->dep_map);
 }
 
 static inline void rht_lock_nested(struct bucket_table *tbl,
-				   struct rhash_lock_head **bucket,
+				   struct rhash_lock_head __rcu **bucket,
 				   unsigned int subclass)
 {
 	local_bh_disable();
@@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-			      struct rhash_lock_head **bkt)
+			      struct rhash_lock_head __rcu **bkt)
 {
 	lock_map_release(&tbl->dep_map);
 	bit_spin_unlock(0, (unsigned long *)bkt);
 	local_bh_enable();
 }
 
-static inline struct rhash_head __rcu *__rht_ptr(
-	struct rhash_lock_head *const *bkt)
+static inline struct rhash_head *__rht_ptr(
+	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
 {
-	return (struct rhash_head __rcu *)
-		((unsigned long)*bkt & ~BIT(0) ?:
+	return (struct rhash_head *)
+		((unsigned long)p & ~BIT(0) ?:
 		 (unsigned long)RHT_NULLS_MARKER(bkt));
 }
 
@@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr(
  * access is guaranteed, such as when destroying the table.
  */
 static inline struct rhash_head *rht_ptr_rcu(
-	struct rhash_lock_head *const *bkt)
+	struct rhash_lock_head __rcu *const *bkt)
 {
-	struct rhash_head __rcu *p = __rht_ptr(bkt);
-
-	return rcu_dereference(p);
+	return __rht_ptr(rcu_dereference(*bkt), bkt);
 }
 
 static inline struct rhash_head *rht_ptr(
-	struct rhash_lock_head *const *bkt,
+	struct rhash_lock_head __rcu *const *bkt,
 	struct bucket_table *tbl,
 	unsigned int hash)
 {
-	return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
+	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
}
 
 static inline struct rhash_head *rht_ptr_exclusive(
-	struct rhash_lock_head *const *bkt)
+	struct rhash_lock_head __rcu *const *bkt)
 {
-	return rcu_dereference_protected(__rht_ptr(bkt), 1);
+	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
 }
 
-static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 				     struct rhash_head *obj)
 {
-	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
-	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
-				     struct rhash_lock_head **bkt,
+				     struct rhash_lock_head __rcu **bkt,
 				     struct rhash_head *obj)
 {
-	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
 	lock_map_release(&tbl->dep_map);
-	rcu_assign_pointer(*p, obj);
+	rcu_assign_pointer(*bkt, (void *)obj);
 	preempt_enable();
 	__release(bitlock);
 	local_bh_enable();
@@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_lock_head *const *bkt;
+	struct rhash_lock_head __rcu *const *bkt;
 	struct bucket_table *tbl;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct bucket_table *tbl;
 	struct rhash_head *head;
@@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
 	struct rhash_head *obj, const struct rhashtable_params params,
 	bool rhlist)
 {
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
 	struct rhash_head *obj_old, struct rhash_head *obj_new,
 	const struct rhashtable_params params)
 {
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	unsigned int hash;

lib/rhashtable.c

Lines changed: 16 additions & 19 deletions
@@ -31,7 +31,7 @@
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head *bucket;
+	struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head **bkt,
+				 struct rhash_lock_head __rcu **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -485,7 +485,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head **bkt,
+				   struct rhash_lock_head __rcu **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	return ERR_PTR(-ENOENT);
 }
 
-static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head **bkt,
-						  struct bucket_table *tbl,
-						  unsigned int hash,
-						  struct rhash_head *obj,
-						  void *data)
+static struct bucket_table *rhashtable_insert_one(
+	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+	void *data)
 {
 	struct bucket_table *new_tbl;
 	struct rhash_head *head;
@@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-					     unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1202,20 +1200,19 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-					   unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash)
 {
-	static struct rhash_lock_head *rhnull;
+	static struct rhash_lock_head __rcu *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
 	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-						  struct bucket_table *tbl,
-						  unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
