Commit 06d5737

Paulo Alcantara (SUSE) authored and smfrench committed
cifs: Fix potential deadlock when updating vol in cifs_reconnect()
We can't acquire volume lock while refreshing the DFS cache because cifs_reconnect() may call dfs_cache_update_vol() while we are walking through the volume list.

To prevent that, make vol_info refcounted, create a temp list with all volumes eligible for refreshing, and then use it without any locks held. Besides, replace vol_lock with a spinlock and protect cache_ttl from concurrent accesses or changes.

Signed-off-by: Paulo Alcantara (SUSE) <[email protected]>
Signed-off-by: Steve French <[email protected]>
1 parent ff2f7fc commit 06d5737
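The pattern behind the fix, in miniature: pin the entry with a reference count while briefly holding the list lock, drop the lock, then do the real work. The userspace C sketch below shows only that shape; every name in it (vol, vol_get, vol_put, update_vol, dstaddr) is illustrative rather than taken from the CIFS code, with pthread mutexes and C11 atomics standing in for the kernel's spinlocks and struct kref.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct vol_info: a refcount pins the entry, and a small
 * per-entry lock (the smb_vol_lock analogue) guards its mutable fields. */
struct vol {
	atomic_int refcnt;
	pthread_mutex_t vol_lock;
	int dstaddr;		/* stand-in for smb_vol.dstaddr */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vol *vol_head;	/* one-entry "list" keeps the sketch short */

static void vol_get(struct vol *v)
{
	atomic_fetch_add(&v->refcnt, 1);	/* kref_get() analogue */
}

static void vol_put(struct vol *v)
{
	/* Free on the last reference, like kref_put(..., vol_release). */
	if (atomic_fetch_sub(&v->refcnt, 1) == 1)
		free(v);
}

static int update_vol(int new_addr)
{
	struct vol *v;

	/* Hold the list lock only long enough to find and pin the entry. */
	pthread_mutex_lock(&list_lock);
	v = vol_head;
	if (v)
		vol_get(v);
	pthread_mutex_unlock(&list_lock);

	if (!v)
		return -1;

	/* Work phase: list_lock is no longer held, so a thread walking the
	 * list cannot deadlock against us; the refcount keeps *v alive. */
	pthread_mutex_lock(&v->vol_lock);
	v->dstaddr = new_addr;
	pthread_mutex_unlock(&v->vol_lock);

	vol_put(v);
	return 0;
}

int main(void)
{
	struct vol *v = calloc(1, sizeof(*v));

	atomic_init(&v->refcnt, 1);		/* the list's own reference */
	pthread_mutex_init(&v->vol_lock, NULL);
	vol_head = v;

	update_vol(42);
	printf("dstaddr = %d\n", v->dstaddr);

	vol_head = NULL;
	vol_put(v);				/* drop the list's reference */
	return 0;
}

Keeping list_lock confined to the find-and-pin step is what breaks the deadlock: a thread walking the list never ends up waiting on a thread that is itself waiting to update an entry, and the reference count keeps the entry alive after the lock is dropped.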

File tree

1 file changed (+77, -32 lines)

fs/cifs/dfs_cache.c

Lines changed: 77 additions & 32 deletions
@@ -49,15 +49,20 @@ struct cache_entry {
 
 struct vol_info {
 	char *fullpath;
+	spinlock_t smb_vol_lock;
 	struct smb_vol smb_vol;
 	char *mntdata;
 	struct list_head list;
+	struct list_head rlist;
+	struct kref refcnt;
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
 static struct workqueue_struct *dfscache_wq __read_mostly;
 
 static int cache_ttl;
+static DEFINE_SPINLOCK(cache_ttl_lock);
+
 static struct nls_table *cache_nlsc;
 
 /*
@@ -69,7 +74,7 @@ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DEFINE_MUTEX(list_lock);
 
 static LIST_HEAD(vol_list);
-static DEFINE_MUTEX(vol_lock);
+static DEFINE_SPINLOCK(vol_list_lock);
 
 static void refresh_cache_worker(struct work_struct *work);
 
@@ -300,7 +305,6 @@ int dfs_cache_init(void)
 	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
 		INIT_HLIST_HEAD(&cache_htable[i]);
 
-	cache_ttl = -1;
 	cache_nlsc = load_nls_default();
 
 	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
@@ -471,15 +475,15 @@ add_cache_entry(unsigned int hash, const char *path,
 
 	hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]);
 
-	mutex_lock(&vol_lock);
-	if (cache_ttl < 0) {
+	spin_lock(&cache_ttl_lock);
+	if (!cache_ttl) {
 		cache_ttl = ce->ttl;
 		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 	} else {
 		cache_ttl = min_t(int, cache_ttl, ce->ttl);
 		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 	}
-	mutex_unlock(&vol_lock);
+	spin_unlock(&cache_ttl_lock);
 
 	return ce;
 }
@@ -523,21 +527,32 @@ static inline void destroy_slab_cache(void)
 	kmem_cache_destroy(cache_slab);
 }
 
-static inline void free_vol(struct vol_info *vi)
+static void __vol_release(struct vol_info *vi)
 {
-	list_del(&vi->list);
 	kfree(vi->fullpath);
 	kfree(vi->mntdata);
 	cifs_cleanup_volume_info_contents(&vi->smb_vol);
 	kfree(vi);
 }
 
+static void vol_release(struct kref *kref)
+{
+	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
+
+	spin_lock(&vol_list_lock);
+	list_del(&vi->list);
+	spin_unlock(&vol_list_lock);
+	__vol_release(vi);
+}
+
 static inline void free_vol_list(void)
 {
 	struct vol_info *vi, *nvi;
 
-	list_for_each_entry_safe(vi, nvi, &vol_list, list)
-		free_vol(vi);
+	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
+		list_del_init(&vi->list);
+		__vol_release(vi);
+	}
 }
 
 /**
@@ -1156,10 +1171,13 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 		goto err_free_fullpath;
 
 	vi->mntdata = mntdata;
+	spin_lock_init(&vi->smb_vol_lock);
+	kref_init(&vi->refcnt);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	list_add_tail(&vi->list, &vol_list);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&vol_list_lock);
+
 	return 0;
 
 err_free_fullpath:
@@ -1169,7 +1187,8 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 	return rc;
 }
 
-static inline struct vol_info *find_vol(const char *fullpath)
+/* Must be called with vol_list_lock held */
+static struct vol_info *find_vol(const char *fullpath)
 {
 	struct vol_info *vi;
 
@@ -1191,30 +1210,31 @@ static inline struct vol_info *find_vol(const char *fullpath)
  */
 int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
 {
-	int rc;
 	struct vol_info *vi;
 
 	if (!fullpath || !server)
 		return -EINVAL;
 
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
-	mutex_lock(&vol_lock);
-
+	spin_lock(&vol_list_lock);
 	vi = find_vol(fullpath);
 	if (IS_ERR(vi)) {
-		rc = PTR_ERR(vi);
-		goto out;
+		spin_unlock(&vol_list_lock);
+		return PTR_ERR(vi);
 	}
+	kref_get(&vi->refcnt);
+	spin_unlock(&vol_list_lock);
 
 	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
+	spin_lock(&vi->smb_vol_lock);
 	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
 	       sizeof(vi->smb_vol.dstaddr));
-	rc = 0;
+	spin_unlock(&vi->smb_vol_lock);
 
-out:
-	mutex_unlock(&vol_lock);
-	return rc;
+	kref_put(&vi->refcnt, vol_release);
+
+	return 0;
 }
 
 /**
@@ -1231,11 +1251,11 @@ void dfs_cache_del_vol(const char *fullpath)
 
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	vi = find_vol(fullpath);
-	if (!IS_ERR(vi))
-		free_vol(vi);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&vol_list_lock);
+
+	kref_put(&vi->refcnt, vol_release);
 }
 
 /* Get all tcons that are within a DFS namespace and can be refreshed */
@@ -1449,27 +1469,52 @@ static void refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
  */
 static void refresh_cache_worker(struct work_struct *work)
 {
-	struct vol_info *vi;
+	struct vol_info *vi, *nvi;
 	struct TCP_Server_Info *server;
-	LIST_HEAD(list);
+	LIST_HEAD(vols);
+	LIST_HEAD(tcons);
 	struct cifs_tcon *tcon, *ntcon;
 
-	mutex_lock(&vol_lock);
-
+	/*
+	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
+	 * for refreshing.
	 */
+	spin_lock(&vol_list_lock);
 	list_for_each_entry(vi, &vol_list, list) {
 		server = get_tcp_server(&vi->smb_vol);
 		if (!server)
 			continue;
 
-		get_tcons(server, &list);
-		list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
+		kref_get(&vi->refcnt);
+		list_add_tail(&vi->rlist, &vols);
+		put_tcp_server(server);
+	}
+	spin_unlock(&vol_list_lock);
+
+	/* Walk through all TCONs and refresh any expired cache entry */
+	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+		spin_lock(&vi->smb_vol_lock);
+		server = get_tcp_server(&vi->smb_vol);
+		spin_unlock(&vi->smb_vol_lock);
+
+		if (!server)
+			goto next_vol;
+
+		get_tcons(server, &tcons);
+		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
 			refresh_tcon(vi, tcon);
 			list_del_init(&tcon->ulist);
 			cifs_put_tcon(tcon);
 		}
 
 		put_tcp_server(server);
+
+next_vol:
+		list_del_init(&vi->rlist);
+		kref_put(&vi->refcnt, vol_release);
 	}
+
+	spin_lock(&cache_ttl_lock);
 	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&cache_ttl_lock);
 }
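The reworked refresh_cache_worker() above is the other half of the fix: it snapshots eligible volumes into a temporary list while holding the spinlock, then performs all slow refresh work with no lock held. Below is a compact userspace sketch of that two-phase shape, under the same caveats as before (all names illustrative; the kernel version additionally takes a reference on each snapshotted entry, via kref_get() per vol_info, so nothing can be freed between the two phases).

#include <pthread.h>
#include <stdio.h>

/* Intrusive two-list node: `next` links the long-lived registry and
 * `rnext` links the worker's temporary snapshot, mirroring vol_info's
 * `list` and `rlist` members. */
struct entry {
	int id;
	int eligible;		/* stand-in for server->tcpStatus == CifsGood */
	struct entry *next;	/* registry linkage */
	struct entry *rnext;	/* snapshot linkage */
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *registry;

static void refresh_worker(void)
{
	struct entry *e, *snap = NULL;

	/* Phase 1: under the lock, only collect pointers to eligible
	 * entries; defer all real work. */
	pthread_mutex_lock(&registry_lock);
	for (e = registry; e; e = e->next) {
		if (!e->eligible)
			continue;
		e->rnext = snap;	/* push onto the snapshot list */
		snap = e;
	}
	pthread_mutex_unlock(&registry_lock);

	/* Phase 2: the lock is dropped, so the slow per-entry work here
	 * can never block a thread that needs registry_lock. */
	for (e = snap; e; e = e->rnext)
		printf("refreshing entry %d\n", e->id);
}

int main(void)
{
	struct entry b = { .id = 2, .eligible = 1, .next = NULL, .rnext = NULL };
	struct entry a = { .id = 1, .eligible = 0, .next = &b, .rnext = NULL };

	registry = &a;
	refresh_worker();	/* prints: refreshing entry 2 */
	return 0;
}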
