@@ -75,7 +75,6 @@ struct nfs4_xattr_cache {
 	spinlock_t listxattr_lock;
 	struct inode *inode;
 	struct nfs4_xattr_entry *listxattr;
-	struct work_struct work;
 };
 
 struct nfs4_xattr_entry {
@@ -101,8 +100,6 @@ static struct list_lru nfs4_xattr_large_entry_lru;
 
 static struct kmem_cache *nfs4_xattr_cache_cachep;
 
-static struct workqueue_struct *nfs4_xattr_cache_wq;
-
 /*
  * Hashing helper functions.
  */
@@ -365,9 +362,8 @@ nfs4_xattr_cache_unlink(struct inode *inode)
 }
 
 /*
- * Discard a cache. Usually called by a worker, since walking all
- * the entries can take up some cycles that we don't want to waste
- * in the I/O path. Can also be called from the shrinker callback.
+ * Discard a cache. Called by get_cache() if there was an old,
+ * invalid cache. Can also be called from a shrinker callback.
  *
  * The cache is dead, it has already been unlinked from its inode,
  * and no longer appears on the cache LRU list.
@@ -414,21 +410,6 @@ nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
 	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
 }
 
-static void
-nfs4_xattr_discard_cache_worker(struct work_struct *work)
-{
-	struct nfs4_xattr_cache *cache = container_of(work,
-				struct nfs4_xattr_cache, work);
-
-	nfs4_xattr_discard_cache(cache);
-}
-
-static void
-nfs4_xattr_reap_cache(struct nfs4_xattr_cache *cache)
-{
-	queue_work(nfs4_xattr_cache_wq, &cache->work);
-}
-
 /*
  * Get a referenced copy of the cache structure. Avoid doing allocs
  * while holding i_lock. Which means that we do some optimistic allocation,
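For context, the two functions deleted above implement the usual deferred-teardown idiom: embed a work_struct in the object, queue it on a dedicated workqueue, and recover the containing object in the worker via container_of(). A minimal sketch of that idiom, with hypothetical demo_* names that are not part of this patch or of the NFS code:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_cache {
	struct work_struct work;
	/* ... payload ... */
};

static struct workqueue_struct *demo_wq;	/* hypothetical, set up with alloc_workqueue() */

static void demo_discard_worker(struct work_struct *work)
{
	/* Recover the containing object from the embedded work_struct. */
	struct demo_cache *cache = container_of(work, struct demo_cache, work);

	kfree(cache);	/* potentially expensive teardown, kept off the hot path */
}

static void demo_reap(struct demo_cache *cache)
{
	INIT_WORK(&cache->work, demo_discard_worker);
	queue_work(demo_wq, &cache->work);	/* worker runs later in process context */
}

The patch drops this indirection: per the rewritten comment earlier in the diff, a cache is now discarded directly by get_cache() when it replaces an old, invalid cache, or from a shrinker callback.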
@@ -513,10 +494,10 @@ nfs4_xattr_get_cache(struct inode *inode, int add)
 
 out:
 	/*
-	 * Discarding an old cache is done via a workqueue.
+	 * Discard the now orphaned old cache.
 	 */
 	if (oldcache != NULL)
-		nfs4_xattr_reap_cache(oldcache);
+		nfs4_xattr_discard_cache(oldcache);
 
 	return cache;
 }
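Read without the diff markers, the tail of nfs4_xattr_get_cache() after this hunk is simply (reconstructed only from the lines shown above):

out:
	/*
	 * Discard the now orphaned old cache.
	 */
	if (oldcache != NULL)
		nfs4_xattr_discard_cache(oldcache);

	return cache;
}

The discard of a replaced, invalid cache now runs synchronously in the caller's context instead of being handed off to a worker.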
@@ -1008,7 +989,6 @@ static void nfs4_xattr_cache_init_once(void *p)
 	atomic_long_set(&cache->nent, 0);
 	nfs4_xattr_hash_init(cache);
 	cache->listxattr = NULL;
-	INIT_WORK(&cache->work, nfs4_xattr_discard_cache_worker);
 	INIT_LIST_HEAD(&cache->lru);
 	INIT_LIST_HEAD(&cache->dispose);
 }
@@ -1039,13 +1019,9 @@ int __init nfs4_xattr_cache_init(void)
 	if (ret)
 		goto out2;
 
-	nfs4_xattr_cache_wq = alloc_workqueue("nfs4_xattr", WQ_MEM_RECLAIM, 0);
-	if (nfs4_xattr_cache_wq == NULL)
-		goto out1;
-
 	ret = register_shrinker(&nfs4_xattr_cache_shrinker);
 	if (ret)
-		goto out0;
+		goto out1;
 
 	ret = register_shrinker(&nfs4_xattr_entry_shrinker);
 	if (ret)
@@ -1058,8 +1034,6 @@ int __init nfs4_xattr_cache_init(void)
 	unregister_shrinker(&nfs4_xattr_entry_shrinker);
 out:
 	unregister_shrinker(&nfs4_xattr_cache_shrinker);
-out0:
-	destroy_workqueue(nfs4_xattr_cache_wq);
 out1:
 	list_lru_destroy(&nfs4_xattr_cache_lru);
 out2:
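The two init hunks shrink the usual goto-unwind ladder by one rung: with the workqueue allocation gone, a failing register_shrinker() for the cache shrinker jumps straight to out1, and the out0 label (which destroyed the workqueue) disappears. A self-contained sketch of that ladder idiom, using hypothetical setup_*/teardown_* names rather than the real NFS steps:

/* Hypothetical setup/teardown steps, stubbed out for the sketch. */
static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return 0; }
static void teardown_a(void) { }
static void teardown_b(void) { }

static int demo_init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto undo_a;	/* undo everything set up so far */

	ret = setup_c();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return ret;
}

Dropping a middle setup step, as this patch does with alloc_workqueue(), removes exactly one label and retargets the goto of the step that followed it.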
@@ -1079,5 +1053,4 @@ void nfs4_xattr_cache_exit(void)
 	list_lru_destroy(&nfs4_xattr_entry_lru);
 	list_lru_destroy(&nfs4_xattr_cache_lru);
 	kmem_cache_destroy(nfs4_xattr_cache_cachep);
-	destroy_workqueue(nfs4_xattr_cache_wq);
 }