Commit 06d57378 authored by Paulo Alcantara (SUSE), committed by Steve French

cifs: Fix potential deadlock when updating vol in cifs_reconnect()

We can't acquire the volume lock while refreshing the DFS cache
because cifs_reconnect() may call dfs_cache_update_vol() while we are
walking through the volume list.

To prevent that, make vol_info refcounted, create a temp list with all
volumes eligible for refreshing, and then use it without any locks
held.

Besides, replace vol_lock with a spinlock and protect cache_ttl from
concurrent accesses or changes.
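
The core technique here, pinning entries with a kref while holding a
spinlock and then working on a private snapshot list with no locks
held, can be shown as a minimal standalone sketch (all names below are
illustrative, not the actual fs/cifs symbols):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head list;		/* linked on global_list */
	struct list_head rlist;		/* linked on a private snapshot */
	struct kref refcnt;
};

static LIST_HEAD(global_list);
static DEFINE_SPINLOCK(global_lock);

/* Last reference gone: unlink under the lock, then free. */
static void entry_release(struct kref *kref)
{
	struct entry *e = container_of(kref, struct entry, refcnt);

	spin_lock(&global_lock);
	list_del(&e->list);
	spin_unlock(&global_lock);
	kfree(e);
}

static void refresh_all(void)
{
	struct entry *e, *n;
	LIST_HEAD(snapshot);

	/* Phase 1: hold the lock only long enough to pin the entries. */
	spin_lock(&global_lock);
	list_for_each_entry(e, &global_list, list) {
		kref_get(&e->refcnt);	/* entry can't be freed under us */
		list_add_tail(&e->rlist, &snapshot);
	}
	spin_unlock(&global_lock);

	/*
	 * Phase 2: do the slow work lock-free, so paths that re-enter
	 * the cache (like cifs_reconnect()) cannot deadlock on the lock.
	 */
	list_for_each_entry_safe(e, n, &snapshot, rlist) {
		/* ... refresh e ... */
		list_del_init(&e->rlist);
		kref_put(&e->refcnt, entry_release);
	}
}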
Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent ff2f7fc0
@@ -49,15 +49,20 @@ struct cache_entry {
 struct vol_info {
 	char *fullpath;
+	spinlock_t smb_vol_lock;
 	struct smb_vol smb_vol;
 	char *mntdata;
 	struct list_head list;
+	struct list_head rlist;
+	struct kref refcnt;
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
 static struct workqueue_struct *dfscache_wq __read_mostly;
 
 static int cache_ttl;
+static DEFINE_SPINLOCK(cache_ttl_lock);
+
 static struct nls_table *cache_nlsc;
 
 /*
@@ -69,7 +74,7 @@ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DEFINE_MUTEX(list_lock);
 
 static LIST_HEAD(vol_list);
-static DEFINE_MUTEX(vol_lock);
+static DEFINE_SPINLOCK(vol_list_lock);
 
 static void refresh_cache_worker(struct work_struct *work);
@@ -300,7 +305,6 @@ int dfs_cache_init(void)
 	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
 		INIT_HLIST_HEAD(&cache_htable[i]);
 
-	cache_ttl = -1;
 	cache_nlsc = load_nls_default();
 
 	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
@@ -471,15 +475,15 @@ add_cache_entry(unsigned int hash, const char *path,
 	hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]);
 
-	mutex_lock(&vol_lock);
-	if (cache_ttl < 0) {
+	spin_lock(&cache_ttl_lock);
+	if (!cache_ttl) {
 		cache_ttl = ce->ttl;
 		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 	} else {
 		cache_ttl = min_t(int, cache_ttl, ce->ttl);
 		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
 	}
-	mutex_unlock(&vol_lock);
+	spin_unlock(&cache_ttl_lock);
 
 	return ce;
 }
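
One subtlety worth flagging: dfs_cache_init() no longer sets the -1
sentinel (see the earlier hunk) because cache_ttl has static storage
and is therefore zero-initialized; 0 now means "no cache entry has
recorded a TTL yet", which is what the new !cache_ttl test checks. A
hypothetical distillation of the patched TTL logic (update_ttl() is
not a real function in this file):

#include <linux/kernel.h>
#include <linux/spinlock.h>

static int cache_ttl;	/* static storage => implicitly 0 */
static DEFINE_SPINLOCK(cache_ttl_lock);

static void update_ttl(int entry_ttl)
{
	spin_lock(&cache_ttl_lock);
	if (!cache_ttl)		/* first entry: adopt its TTL */
		cache_ttl = entry_ttl;
	else			/* otherwise keep the smallest TTL */
		cache_ttl = min_t(int, cache_ttl, entry_ttl);
	spin_unlock(&cache_ttl_lock);
}

A spinlock suffices here because nothing done under it sleeps:
queue_delayed_work() and mod_delayed_work() are both callable from
atomic context.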
@@ -523,21 +527,32 @@ static inline void destroy_slab_cache(void)
 	kmem_cache_destroy(cache_slab);
 }
 
-static inline void free_vol(struct vol_info *vi)
+static void __vol_release(struct vol_info *vi)
 {
-	list_del(&vi->list);
 	kfree(vi->fullpath);
 	kfree(vi->mntdata);
 	cifs_cleanup_volume_info_contents(&vi->smb_vol);
 	kfree(vi);
 }
 
+static void vol_release(struct kref *kref)
+{
+	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
+
+	spin_lock(&vol_list_lock);
+	list_del(&vi->list);
+	spin_unlock(&vol_list_lock);
+	__vol_release(vi);
+}
+
 static inline void free_vol_list(void)
 {
 	struct vol_info *vi, *nvi;
 
-	list_for_each_entry_safe(vi, nvi, &vol_list, list)
-		free_vol(vi);
+	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
+		list_del_init(&vi->list);
+		__vol_release(vi);
+	}
 }
 
 /**
@@ -1156,10 +1171,13 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 		goto err_free_fullpath;
 
 	vi->mntdata = mntdata;
+	spin_lock_init(&vi->smb_vol_lock);
+	kref_init(&vi->refcnt);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	list_add_tail(&vi->list, &vol_list);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&vol_list_lock);
 
 	return 0;
 
 err_free_fullpath:
@@ -1169,7 +1187,8 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 	return rc;
 }
 
-static inline struct vol_info *find_vol(const char *fullpath)
+/* Must be called with vol_list_lock held */
+static struct vol_info *find_vol(const char *fullpath)
 {
 	struct vol_info *vi;
@@ -1191,7 +1210,6 @@ static inline struct vol_info *find_vol(const char *fullpath)
  */
 int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
 {
-	int rc;
 	struct vol_info *vi;
 
 	if (!fullpath || !server)
@@ -1199,22 +1217,24 @@ int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	vi = find_vol(fullpath);
 	if (IS_ERR(vi)) {
-		rc = PTR_ERR(vi);
-		goto out;
+		spin_unlock(&vol_list_lock);
+		return PTR_ERR(vi);
 	}
+	kref_get(&vi->refcnt);
+	spin_unlock(&vol_list_lock);
 
 	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
+	spin_lock(&vi->smb_vol_lock);
 	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
 	       sizeof(vi->smb_vol.dstaddr));
+	spin_unlock(&vi->smb_vol_lock);
 
-	rc = 0;
-
-out:
-	mutex_unlock(&vol_lock);
-	return rc;
+	kref_put(&vi->refcnt, vol_release);
+
+	return 0;
 }
 
 /**
@@ -1231,11 +1251,11 @@ void dfs_cache_del_vol(const char *fullpath)
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
-	mutex_lock(&vol_lock);
+	spin_lock(&vol_list_lock);
 	vi = find_vol(fullpath);
-	if (!IS_ERR(vi))
-		free_vol(vi);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&vol_list_lock);
+
+	kref_put(&vi->refcnt, vol_release);
 }
 
 /* Get all tcons that are within a DFS namespace and can be refreshed */
@@ -1449,27 +1469,52 @@ static void refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
  */
 static void refresh_cache_worker(struct work_struct *work)
 {
-	struct vol_info *vi;
+	struct vol_info *vi, *nvi;
 	struct TCP_Server_Info *server;
-	LIST_HEAD(list);
+	LIST_HEAD(vols);
+	LIST_HEAD(tcons);
 	struct cifs_tcon *tcon, *ntcon;
 
-	mutex_lock(&vol_lock);
+	/*
+	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
+	 * for refreshing.
+	 */
+	spin_lock(&vol_list_lock);
 	list_for_each_entry(vi, &vol_list, list) {
 		server = get_tcp_server(&vi->smb_vol);
 		if (!server)
 			continue;
 
-		get_tcons(server, &list);
-		list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
+		kref_get(&vi->refcnt);
+		list_add_tail(&vi->rlist, &vols);
+		put_tcp_server(server);
+	}
+	spin_unlock(&vol_list_lock);
+
+	/* Walk through all TCONs and refresh any expired cache entry */
+	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+		spin_lock(&vi->smb_vol_lock);
+		server = get_tcp_server(&vi->smb_vol);
+		spin_unlock(&vi->smb_vol_lock);
+
+		if (!server)
+			goto next_vol;
+
+		get_tcons(server, &tcons);
+		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
 			refresh_tcon(vi, tcon);
 			list_del_init(&tcon->ulist);
 			cifs_put_tcon(tcon);
 		}
 
 		put_tcp_server(server);
+
+next_vol:
+		list_del_init(&vi->rlist);
+		kref_put(&vi->refcnt, vol_release);
 	}
+
+	spin_lock(&cache_ttl_lock);
 	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	mutex_unlock(&vol_lock);
+	spin_unlock(&cache_ttl_lock);
 }