@@ -84,7 +84,7 @@ struct bucket_table {
 
 	struct lockdep_map	dep_map;
 
-	struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
+	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
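The `__rcu` marker on the bucket array is what lets sparse check that every access goes through an RCU accessor rather than a plain load or store. A minimal sketch of the contract the annotation enforces, with hypothetical struct and function names (illustrative only, not part of this diff):

	struct bar;

	struct foo {
		struct bar __rcu *ptr;	/* sparse tracks this address space */
	};

	/* Readers must load via rcu_dereference(), under rcu_read_lock(). */
	static struct bar *foo_read(struct foo *f)
	{
		return rcu_dereference(f->ptr);
	}

	/* Updaters must publish via rcu_assign_pointer(). */
	static void foo_write(struct foo *f, struct bar *b)
	{
		rcu_assign_pointer(f->ptr, b);
	}

A plain `f->ptr` dereference would make sparse warn about mixing address spaces, which is exactly the checking the annotations below restore for the bucket pointers.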
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-					   unsigned int hash);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-					     unsigned int hash);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-						  struct bucket_table *tbl,
-						  unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
 
 #define rht_dereference(p, ht) \
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_lock_head *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
 	const struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
 	struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,15 +324,15 @@ static inline struct rhash_lock_head **rht_bucket_insert(
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
-			    struct rhash_lock_head **bkt)
+			    struct rhash_lock_head __rcu **bkt)
 {
 	local_bh_disable();
 	bit_spin_lock(0, (unsigned long *)bkt);
 	lock_map_acquire(&tbl->dep_map);
 }
 
 static inline void rht_lock_nested(struct bucket_table *tbl,
-				   struct rhash_lock_head **bucket,
+				   struct rhash_lock_head __rcu **bucket,
 				   unsigned int subclass)
 {
 	local_bh_disable();
@@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-			      struct rhash_lock_head **bkt)
+			      struct rhash_lock_head __rcu **bkt)
 {
 	lock_map_release(&tbl->dep_map);
 	bit_spin_unlock(0, (unsigned long *)bkt);
 	local_bh_enable();
 }
 
-static inline struct rhash_head __rcu *__rht_ptr(
-	struct rhash_lock_head *const *bkt)
+static inline struct rhash_head *__rht_ptr(
+	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
 {
-	return (struct rhash_head __rcu *)
-		((unsigned long)*bkt & ~BIT(0) ?:
+	return (struct rhash_head *)
+		((unsigned long)p & ~BIT(0) ?:
 		 (unsigned long)RHT_NULLS_MARKER(bkt));
 }
 
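The bucket word does double duty here: BIT(0) is the bucket's bit spin lock, and an all-zero remainder means an empty chain, which __rht_ptr() converts to the bucket's NULLS marker so walkers always see a nulls-terminated list. The rework also moves the RCU dereference out of __rht_ptr(): it now receives an already-loaded value p, so each caller can apply the accessor that matches its locking context. A standalone sketch of the masking step, with simplified stand-ins for BIT(0) and RHT_NULLS_MARKER() (hypothetical names, for illustration only):

	#include <stdint.h>
	#include <stdio.h>

	#define LOCK_BIT	1UL			 /* stand-in for BIT(0) */
	#define NULLS(bkt)	((uintptr_t)(bkt) | 1UL) /* stand-in nulls marker */

	/* Mirror of the masking in __rht_ptr(): strip the lock bit, then map
	 * an empty (all-zero) chain to an odd nulls value derived from the
	 * bucket address. */
	static void *demo_rht_ptr(uintptr_t word, void *bkt)
	{
		uintptr_t head = word & ~LOCK_BIT;

		return (void *)(head ? head : NULLS(bkt));
	}

	int main(void)
	{
		uintptr_t bucket = 0;	/* empty bucket, currently locked */

		/* An empty chain decodes to the nulls marker even while the
		 * lock bit is set. */
		printf("%p\n", demo_rht_ptr(bucket | LOCK_BIT, &bucket));
		return 0;
	}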
@@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr(
  * access is guaranteed, such as when destroying the table.
  */
 static inline struct rhash_head *rht_ptr_rcu(
-	struct rhash_lock_head *const *bkt)
+	struct rhash_lock_head __rcu *const *bkt)
 {
-	struct rhash_head __rcu *p = __rht_ptr(bkt);
-
-	return rcu_dereference(p);
+	return __rht_ptr(rcu_dereference(*bkt), bkt);
 }
 
 static inline struct rhash_head *rht_ptr(
-	struct rhash_lock_head *const *bkt,
+	struct rhash_lock_head __rcu *const *bkt,
 	struct bucket_table *tbl,
 	unsigned int hash)
 {
-	return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
+	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
 }
 
 static inline struct rhash_head *rht_ptr_exclusive(
-	struct rhash_lock_head *const *bkt)
+	struct rhash_lock_head __rcu *const *bkt)
 {
-	return rcu_dereference_protected(__rht_ptr(bkt), 1);
+	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
 }
 
-static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 				     struct rhash_head *obj)
 {
-	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
-	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
-				     struct rhash_lock_head **bkt,
+				     struct rhash_lock_head __rcu **bkt,
 				     struct rhash_head *obj)
 {
-	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
 	lock_map_release(&tbl->dep_map);
-	rcu_assign_pointer(*p, obj);
+	rcu_assign_pointer(*bkt, (void *)obj);
 	preempt_enable();
 	__release(bitlock);
 	local_bh_enable();
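After this hunk the three read helpers differ only in which RCU accessor they feed into __rht_ptr(): rcu_dereference() for plain RCU readers, rht_dereference_bucket() when bucket-lock or lockdep conditions apply, and rcu_dereference_protected(..., 1) when exclusive access is guaranteed. The two assign helpers likewise drop their casts to unmarked pointer types, since *bkt now carries __rcu itself. A hedged sketch of a reader-side bucket walk using these helpers (assuming tbl and hash were already computed; in practice the rht_for_each*_rcu() macros wrap this pattern):

	struct rhash_head *pos;

	rcu_read_lock();
	for (pos = rht_ptr_rcu(rht_bucket(tbl, hash));
	     !rht_is_a_nulls(pos);
	     pos = rcu_dereference(pos->next)) {
		/* inspect the entry via rht_entry(tpos, pos, member) */
	}
	rcu_read_unlock();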
@@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_lock_head *const *bkt;
+	struct rhash_lock_head __rcu *const *bkt;
 	struct bucket_table *tbl;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct bucket_table *tbl;
 	struct rhash_head *head;
@@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
 	struct rhash_head *obj, const struct rhashtable_params params,
 	bool rhlist)
 {
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
 	struct rhash_head *obj_old, struct rhash_head *obj_new,
 	const struct rhashtable_params params)
 {
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	unsigned int hash;