@@ -49,15 +49,20 @@ struct cache_entry {
 
 struct vol_info {
 	char *fullpath;
+	spinlock_t smb_vol_lock;
 	struct smb_vol smb_vol;
 	char *mntdata;
 	struct list_head list;
+	struct list_head rlist;
+	struct kref refcnt;
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
 static struct workqueue_struct *dfscache_wq __read_mostly;
 
 static int cache_ttl;
+static DEFINE_SPINLOCK(cache_ttl_lock);
+
 static struct nls_table *cache_nlsc;
 
 /*
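The three new vol_info members set up the rest of the patch: refcnt gives each volume a proper lifetime, rlist lets the refresh worker queue volumes on a private list, and smb_vol_lock serializes updates to smb_vol. As a minimal sketch of the kref pattern these fields rely on (the demo type and helper names are hypothetical, not from this patch):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct kref refcnt;
	};

	static void demo_release(struct kref *kref)
	{
		struct demo_obj *o = container_of(kref, struct demo_obj, refcnt);

		kfree(o);			/* last reference gone: free */
	}

	static struct demo_obj *demo_alloc(void)
	{
		struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o)
			kref_init(&o->refcnt);	/* count starts at 1 */
		return o;
	}

Each user takes kref_get() while some lock pins the object, and later drops it with kref_put(&o->refcnt, demo_release); the release callback runs exactly once, on the final put.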
@@ -69,7 +74,7 @@ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DEFINE_MUTEX(list_lock);
 
 static LIST_HEAD(vol_list);
-static DEFINE_MUTEX(vol_lock);
+static DEFINE_SPINLOCK(vol_list_lock);
 
 static void refresh_cache_worker(struct work_struct *work);
 
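Turning vol_lock (a mutex) into vol_list_lock (a spinlock) works because every critical section left under it is a short, non-sleeping list operation; anything that can block now runs after the lock is dropped. A hedged sketch of that constraint (demo names hypothetical):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static LIST_HEAD(demo_list);
	static DEFINE_SPINLOCK(demo_lock);

	static void demo_add(struct list_head *entry)
	{
		spin_lock(&demo_lock);
		list_add_tail(entry, &demo_list);	/* O(1), never sleeps */
		spin_unlock(&demo_lock);
	}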
@@ -300,7 +305,6 @@ int dfs_cache_init(void)
 	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
 		INIT_HLIST_HEAD(&cache_htable[i]);
 
-	cache_ttl = -1;
 	cache_nlsc = load_nls_default();
 
 	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
@@ -471,15 +475,15 @@ add_cache_entry(unsigned int hash, const char *path,
 
 	hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]);
 
-	mutex_lock(&vol_lock);
-	if (cache_ttl < 0) {
+	spin_lock(&cache_ttl_lock);
+	if (!cache_ttl) {
 		cache_ttl = ce->ttl;
 		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 	} else {
 		cache_ttl = min_t(int, cache_ttl, ce->ttl);
 		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 	}
-	mutex_unlock(&vol_lock);
+	spin_unlock(&cache_ttl_lock);
 
 	return ce;
 }
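With the cache_ttl = -1 initialization removed above, cache_ttl relies on static zero-initialization, so !cache_ttl now means "no TTL recorded yet"; that is why the old cache_ttl < 0 test changes. The bookkeeping itself is a running minimum, roughly (hypothetical standalone helper, not in the patch):

	#include <linux/kernel.h>

	/* First entry seeds the TTL; later entries can only shrink it, so
	 * the refresh worker always wakes at the smallest TTL seen so far.
	 */
	static int next_cache_ttl(int cache_ttl, int entry_ttl)
	{
		if (!cache_ttl)		/* zero: nothing cached yet */
			return entry_ttl;
		return min_t(int, cache_ttl, entry_ttl);
	}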
@@ -523,21 +527,32 @@ static inline void destroy_slab_cache(void)
 	kmem_cache_destroy(cache_slab);
 }
 
-static inline void free_vol(struct vol_info *vi)
+static void __vol_release(struct vol_info *vi)
 {
-	list_del(&vi->list);
 	kfree(vi->fullpath);
 	kfree(vi->mntdata);
 	cifs_cleanup_volume_info_contents(&vi->smb_vol);
 	kfree(vi);
 }
 
+static void vol_release(struct kref *kref)
+{
+	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
+
+	spin_lock(&vol_list_lock);
+	list_del(&vi->list);
+	spin_unlock(&vol_list_lock);
+	__vol_release(vi);
+}
+
 static inline void free_vol_list(void)
 {
 	struct vol_info *vi, *nvi;
 
-	list_for_each_entry_safe(vi, nvi, &vol_list, list)
-		free_vol(vi);
+	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
+		list_del_init(&vi->list);
+		__vol_release(vi);
+	}
 }
 
 /**
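The old free_vol() is split in two: __vol_release() only frees memory, while vol_release() is the kref release callback that also unlinks the entry from vol_list under the new spinlock. free_vol_list() calls __vol_release() directly because at teardown no other references can remain. A hedged usage sketch (demo helper is hypothetical):

	static void demo_put(struct vol_info *vi)
	{
		/* kref_put() returns 1 only when this put ran vol_release() */
		if (kref_put(&vi->refcnt, vol_release))
			pr_debug("vol_info released\n");
	}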
@@ -1156,10 +1171,13 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 		goto err_free_fullpath;
 
 	vi->mntdata = mntdata;
+	spin_lock_init(&vi->smb_vol_lock);
+	kref_init(&vi->refcnt);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	list_add_tail(&vi->list, &vol_list);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&vol_list_lock);
+
 	return 0;
 
 err_free_fullpath:
@@ -1169,7 +1187,8 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 	return rc;
 }
 
-static inline struct vol_info *find_vol(const char *fullpath)
+/* Must be called with vol_list_lock held */
+static struct vol_info *find_vol(const char *fullpath)
 {
 	struct vol_info *vi;
 
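find_vol() loses its inline hint and gains an explicit locking contract. If one wanted to enforce that contract at runtime rather than by comment, lockdep offers an assertion; a sketch under that assumption, not part of this patch:

	static struct vol_info *find_vol_checked(const char *fullpath)
	{
		lockdep_assert_held(&vol_list_lock);	/* warn if unlocked */
		return find_vol(fullpath);
	}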
@@ -1191,30 +1210,31 @@ static inline struct vol_info *find_vol(const char *fullpath)
  */
 int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
 {
-	int rc;
 	struct vol_info *vi;
 
 	if (!fullpath || !server)
 		return -EINVAL;
 
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
-	mutex_lock(&vol_lock);
-
+	spin_lock(&vol_list_lock);
 	vi = find_vol(fullpath);
 	if (IS_ERR(vi)) {
-		rc = PTR_ERR(vi);
-		goto out;
+		spin_unlock(&vol_list_lock);
+		return PTR_ERR(vi);
 	}
+	kref_get(&vi->refcnt);
+	spin_unlock(&vol_list_lock);
 
 	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
+	spin_lock(&vi->smb_vol_lock);
 	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
 	       sizeof(vi->smb_vol.dstaddr));
-	rc = 0;
+	spin_unlock(&vi->smb_vol_lock);
 
-out:
-	mutex_unlock(&vol_lock);
-	return rc;
+	kref_put(&vi->refcnt, vol_release);
+
+	return 0;
 }
 
 /**
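dfs_cache_update_vol() now follows the classic lookup-then-pin pattern: take a reference while vol_list_lock is held so the volume cannot be released once the lock is dropped, then update dstaddr under the per-volume smb_vol_lock instead of a global mutex. Extracted as a hypothetical helper over the patch's own names:

	static struct vol_info *lookup_and_pin(const char *fullpath)
	{
		struct vol_info *vi;

		spin_lock(&vol_list_lock);
		vi = find_vol(fullpath);
		if (!IS_ERR(vi))
			kref_get(&vi->refcnt);	/* pin before unlocking */
		spin_unlock(&vol_list_lock);
		return vi;
	}

The matching kref_put(&vi->refcnt, vol_release) at the end of the function drops the pin, freeing the volume if dfs_cache_del_vol() already released the list's reference in the meantime.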
@@ -1231,11 +1251,11 @@ void dfs_cache_del_vol(const char *fullpath)
 
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	vi = find_vol(fullpath);
-	if (!IS_ERR(vi))
-		free_vol(vi);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&vol_list_lock);
+
+	kref_put(&vi->refcnt, vol_release);
 }
 
 /* Get all tcons that are within a DFS namespace and can be refreshed */
@@ -1449,27 +1469,52 @@ static void refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
  */
 static void refresh_cache_worker(struct work_struct *work)
 {
-	struct vol_info *vi;
+	struct vol_info *vi, *nvi;
 	struct TCP_Server_Info *server;
-	LIST_HEAD(list);
+	LIST_HEAD(vols);
+	LIST_HEAD(tcons);
 	struct cifs_tcon *tcon, *ntcon;
 
-	mutex_lock(&vol_lock);
-
+	/*
+	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
+	 * for refreshing.
+	 */
+	spin_lock(&vol_list_lock);
 	list_for_each_entry(vi, &vol_list, list) {
 		server = get_tcp_server(&vi->smb_vol);
 		if (!server)
 			continue;
 
-		get_tcons(server, &list);
-		list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
+		kref_get(&vi->refcnt);
+		list_add_tail(&vi->rlist, &vols);
+		put_tcp_server(server);
+	}
+	spin_unlock(&vol_list_lock);
+
+	/* Walk through all TCONs and refresh any expired cache entry */
+	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+		spin_lock(&vi->smb_vol_lock);
+		server = get_tcp_server(&vi->smb_vol);
+		spin_unlock(&vi->smb_vol_lock);
+
+		if (!server)
+			goto next_vol;
+
+		get_tcons(server, &tcons);
+		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
 			refresh_tcon(vi, tcon);
 			list_del_init(&tcon->ulist);
 			cifs_put_tcon(tcon);
 		}
 
 		put_tcp_server(server);
+
+next_vol:
+		list_del_init(&vi->rlist);
+		kref_put(&vi->refcnt, vol_release);
 	}
+
+	spin_lock(&cache_ttl_lock);
 	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&cache_ttl_lock);
 }
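The reworked worker is two-phase: under vol_list_lock it only snapshots eligible volumes onto the private vols list, pinning each with kref_get(); the work that may sleep (get_tcons(), refresh_tcon(), cifs_put_tcon()) then runs with no global lock held. The snapshot phase in isolation, as a hypothetical helper over the patch's own fields (the get_tcp_server() eligibility check is elided for brevity):

	static void snapshot_vols(struct list_head *out)
	{
		struct vol_info *vi;

		spin_lock(&vol_list_lock);
		list_for_each_entry(vi, &vol_list, list) {
			kref_get(&vi->refcnt);		/* keep vi alive unlocked */
			list_add_tail(&vi->rlist, out);
		}
		spin_unlock(&vol_list_lock);
	}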