@@ -1172,19 +1172,30 @@ static bool is_ses_good(struct cifs_ses *ses)
 	return ret;
 }
 
-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses and mark it for reconnect if needed */
+static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
 {
 	struct TCP_Server_Info *server = ses->server;
 	DFS_CACHE_TGT_LIST(old_tl);
 	DFS_CACHE_TGT_LIST(new_tl);
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
+	char *path = NULL;
 	int rc = 0;
 
 	xid = get_xid();
 
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath) {
+		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
+		if (!path)
+			rc = -ENOMEM;
+	}
+	mutex_unlock(&server->refpath_lock);
+	if (!path)
+		goto out;
+
 	down_read(&htable_rw_lock);
 	ce = lookup_cache_entry(path);
 	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
@@ -1218,19 +1229,17 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
 	free_xid(xid);
 	dfs_cache_free_tgts(&old_tl);
 	dfs_cache_free_tgts(&new_tl);
-	return rc;
+	kfree(path);
 }
 
-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+static inline void refresh_ses_referral(struct cifs_ses *ses)
 {
-	struct TCP_Server_Info *server = tcon->ses->server;
-	struct cifs_ses *ses = tcon->ses;
+	__refresh_ses_referral(ses, false);
+}
 
-	mutex_lock(&server->refpath_lock);
-	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
-	mutex_unlock(&server->refpath_lock);
-	return 0;
+static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+{
+	__refresh_ses_referral(ses, true);
 }
 
 /**
@@ -1271,25 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	 */
 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
 
-	return refresh_tcon(tcon, true);
+	force_refresh_ses_referral(tcon->ses);
+	return 0;
 }
 
 /* Refresh all DFS referrals related to DFS tcon */
 void dfs_cache_refresh(struct work_struct *work)
 {
-	struct TCP_Server_Info *server;
 	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
 	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
 
-	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
-		server = ses->server;
-		mutex_lock(&server->refpath_lock);
-		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
-		mutex_unlock(&server->refpath_lock);
-	}
+	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+		refresh_ses_referral(ses);
 
 	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
 			   atomic_read(&dfs_cache_ttl) * HZ);
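Context for the locking change above: the new __refresh_ses_referral() duplicates server->leaf_fullpath while holding refpath_lock and then does the actual refresh on the private copy with the lock released, where the old refresh_tcon() held refpath_lock across the whole refresh. A minimal userspace sketch of that snapshot-under-lock pattern, using pthreads and purely illustrative names (not part of the patch or of the kernel API), looks like this:

/* Illustrative sketch only -- hypothetical names, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t refpath_lock = PTHREAD_MUTEX_INITIALIZER;
static char *leaf_fullpath;	/* shared; may change while the lock is held */

static void refresh_referral(void)
{
	char *path = NULL;

	/* Keep the critical section short: only duplicate the shared string. */
	pthread_mutex_lock(&refpath_lock);
	if (leaf_fullpath)
		path = strdup(leaf_fullpath + 1);	/* skip the leading '\', as the kernel code does */
	pthread_mutex_unlock(&refpath_lock);
	if (!path)
		return;

	/* Slow work (cache lookup, network referral) runs with the lock dropped. */
	printf("refreshing referral for %s\n", path);
	free(path);
}

int main(void)
{
	leaf_fullpath = strdup("\\\\server\\share");
	refresh_referral();
	free(leaf_fullpath);
	return 0;
}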