Commit bf2069d1 authored by Linus Torvalds

Merge tag '6.6-rc5-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fixes from Steve French:

 - fix caching race with open_cached_dir and laundromat cleanup of
   cached dirs (addresses a problem spotted with xfstest run with
   directory leases enabled)

 - reduce excessive resource usage of laundromat threads

* tag '6.6-rc5-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  smb: client: prevent new fids from being removed by laundromat
  smb: client: make laundromat a delayed worker
parents dc9b2e68 81ba1095
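
The second fix replaces the laundromat kthread (one per struct cached_fids, polling with ssleep(1) in the loop removed below) with a delayed work item that re-queues itself once per dir_cache_timeout interval. What follows is a minimal, self-contained sketch of that delayed-work pattern, not the cifs code itself: the demo_cache/demo_laundromat_worker names, the demo_timeout_secs parameter and the use of system_wq are illustrative stand-ins.

/*
 * Sketch of the self-rearming delayed-work pattern applied by this merge.
 * All demo_* names are hypothetical; only the workqueue API calls mirror
 * the real change (INIT_DELAYED_WORK / queue_delayed_work /
 * cancel_delayed_work_sync).
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

static unsigned int demo_timeout_secs = 30;	/* stand-in for dir_cache_timeout */

struct demo_cache {
	spinlock_t lock;
	unsigned long last_used;		/* jiffies of last access */
	struct delayed_work laundromat_work;
};

static struct demo_cache demo;

static void demo_laundromat_worker(struct work_struct *work)
{
	struct demo_cache *cache =
		container_of(work, struct demo_cache, laundromat_work.work);

	spin_lock(&cache->lock);
	if (time_after(jiffies, cache->last_used + demo_timeout_secs * HZ))
		pr_info("demo_cache: entry expired, would be dropped here\n");
	spin_unlock(&cache->lock);

	/* Re-arm: run once per interval instead of polling every second. */
	queue_delayed_work(system_wq, &cache->laundromat_work,
			   demo_timeout_secs * HZ);
}

static int __init demo_init(void)
{
	spin_lock_init(&demo.lock);
	demo.last_used = jiffies;
	INIT_DELAYED_WORK(&demo.laundromat_work, demo_laundromat_worker);
	queue_delayed_work(system_wq, &demo.laundromat_work,
			   demo_timeout_secs * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Same teardown call the merge adds to free_cached_dirs(). */
	cancel_delayed_work_sync(&demo.laundromat_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The actual change queues the work on the dedicated cifsiod_wq workqueue rather than system_wq, as the hunks below show.
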
@@ -15,6 +15,7 @@
 static struct cached_fid *init_cached_dir(const char *path);
 static void free_cached_dir(struct cached_fid *cfid);
 static void smb2_close_cached_fid(struct kref *ref);
+static void cfids_laundromat_worker(struct work_struct *work);

 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						     const char *path,
@@ -169,15 +170,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 		return -ENOENT;
 	}
 	/*
-	 * At this point we either have a lease already and we can just
-	 * return it. If not we are guaranteed to be the only thread accessing
-	 * this cfid.
+	 * Return cached fid if it has a lease. Otherwise, it is either a new
+	 * entry or laundromat worker removed it from @cfids->entries. Caller
+	 * will put last reference if the latter.
 	 */
+	spin_lock(&cfids->cfid_list_lock);
 	if (cfid->has_lease) {
+		spin_unlock(&cfids->cfid_list_lock);
 		*ret_cfid = cfid;
 		kfree(utf16_path);
 		return 0;
 	}
+	spin_unlock(&cfids->cfid_list_lock);

 	/*
 	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
@@ -294,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 			goto oshr_free;
 		}
 	}
+	spin_lock(&cfids->cfid_list_lock);
 	cfid->dentry = dentry;
 	cfid->time = jiffies;
 	cfid->has_lease = true;
+	spin_unlock(&cfids->cfid_list_lock);

 oshr_free:
 	kfree(utf16_path);
@@ -305,24 +311,28 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
 	spin_lock(&cfids->cfid_list_lock);
-	if (rc && !cfid->has_lease) {
-		if (cfid->on_list) {
-			list_del(&cfid->entry);
-			cfid->on_list = false;
-			cfids->num_entries--;
+	if (!cfid->has_lease) {
+		if (rc) {
+			if (cfid->on_list) {
+				list_del(&cfid->entry);
+				cfid->on_list = false;
+				cfids->num_entries--;
+			}
+			rc = -ENOENT;
+		} else {
+			/*
+			 * We are guaranteed to have two references at this
+			 * point. One for the caller and one for a potential
+			 * lease. Release the Lease-ref so that the directory
+			 * will be closed when the caller closes the cached
+			 * handle.
+			 */
+			spin_unlock(&cfids->cfid_list_lock);
+			kref_put(&cfid->refcount, smb2_close_cached_fid);
+			goto out;
 		}
-		rc = -ENOENT;
 	}
 	spin_unlock(&cfids->cfid_list_lock);
-	if (!rc && !cfid->has_lease) {
-		/*
-		 * We are guaranteed to have two references at this point.
-		 * One for the caller and one for a potential lease.
-		 * Release the Lease-ref so that the directory will be closed
-		 * when the caller closes the cached handle.
-		 */
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
-	}
 	if (rc) {
 		if (cfid->is_open)
 			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
@@ -330,7 +340,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 		free_cached_dir(cfid);
 		cfid = NULL;
 	}
-
+out:
 	if (rc == 0) {
 		*ret_cfid = cfid;
 		atomic_inc(&tcon->num_remote_opens);
@@ -572,53 +582,51 @@ static void free_cached_dir(struct cached_fid *cfid)
 	kfree(cfid);
 }

-static int
-cifs_cfids_laundromat_thread(void *p)
+static void cfids_laundromat_worker(struct work_struct *work)
 {
-	struct cached_fids *cfids = p;
+	struct cached_fids *cfids;
 	struct cached_fid *cfid, *q;
-	struct list_head entry;
+	LIST_HEAD(entry);

-	while (!kthread_should_stop()) {
-		ssleep(1);
-		INIT_LIST_HEAD(&entry);
-		if (kthread_should_stop())
-			return 0;
-		spin_lock(&cfids->cfid_list_lock);
-		list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-			if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
-				list_del(&cfid->entry);
-				list_add(&cfid->entry, &entry);
-				cfids->num_entries--;
-			}
-		}
-		spin_unlock(&cfids->cfid_list_lock);
+	cfids = container_of(work, struct cached_fids, laundromat_work.work);

-		list_for_each_entry_safe(cfid, q, &entry, entry) {
+	spin_lock(&cfids->cfid_list_lock);
+	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+		if (cfid->time &&
+		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
 			cfid->on_list = false;
-			list_del(&cfid->entry);
+			list_move(&cfid->entry, &entry);
+			cfids->num_entries--;
+			/* To prevent race with smb2_cached_lease_break() */
+			kref_get(&cfid->refcount);
+		}
+	}
+	spin_unlock(&cfids->cfid_list_lock);
+
+	list_for_each_entry_safe(cfid, q, &entry, entry) {
+		list_del(&cfid->entry);
+		/*
+		 * Cancel and wait for the work to finish in case we are racing
+		 * with it.
+		 */
+		cancel_work_sync(&cfid->lease_break);
+		if (cfid->has_lease) {
 			/*
-			 * Cancel, and wait for the work to finish in
-			 * case we are racing with it.
+			 * Our lease has not yet been cancelled from the server
+			 * so we need to drop the reference.
 			 */
-			cancel_work_sync(&cfid->lease_break);
-			if (cfid->has_lease) {
-				/*
-				 * We lease has not yet been cancelled from
-				 * the server so we need to drop the reference.
-				 */
-				spin_lock(&cfids->cfid_list_lock);
-				cfid->has_lease = false;
-				spin_unlock(&cfids->cfid_list_lock);
-				kref_put(&cfid->refcount, smb2_close_cached_fid);
-			}
+			spin_lock(&cfids->cfid_list_lock);
+			cfid->has_lease = false;
+			spin_unlock(&cfids->cfid_list_lock);
+			kref_put(&cfid->refcount, smb2_close_cached_fid);
 		}
+		/* Drop the extra reference opened above */
+		kref_put(&cfid->refcount, smb2_close_cached_fid);
 	}
-	return 0;
+	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+			   dir_cache_timeout * HZ);
 }

 struct cached_fids *init_cached_dirs(void)
 {
 	struct cached_fids *cfids;
@@ -629,19 +637,10 @@ struct cached_fids *init_cached_dirs(void)
 	spin_lock_init(&cfids->cfid_list_lock);
 	INIT_LIST_HEAD(&cfids->entries);

-	/*
-	 * since we're in a cifs function already, we know that
-	 * this will succeed. No need for try_module_get().
-	 */
-	__module_get(THIS_MODULE);
-	cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
-					cfids, "cifsd-cfid-laundromat");
-	if (IS_ERR(cfids->laundromat)) {
-		cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
-		kfree(cfids);
-		module_put(THIS_MODULE);
-		return NULL;
-	}
+	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+			   dir_cache_timeout * HZ);
+
 	return cfids;
 }
@@ -657,11 +656,7 @@ void free_cached_dirs(struct cached_fids *cfids)
 	if (cfids == NULL)
 		return;

-	if (cfids->laundromat) {
-		kthread_stop(cfids->laundromat);
-		cfids->laundromat = NULL;
-		module_put(THIS_MODULE);
-	}
+	cancel_delayed_work_sync(&cfids->laundromat_work);

 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
@@ -57,7 +57,7 @@ struct cached_fids {
 	spinlock_t cfid_list_lock;
 	int num_entries;
 	struct list_head entries;
-	struct task_struct *laundromat;
+	struct delayed_work laundromat_work;
 };

 extern struct cached_fids *init_cached_dirs(void);
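
The laundromat race fix above follows a two-phase teardown: while cfid_list_lock is held, expired entries are moved to a private list and pinned with an extra kref_get() ("to prevent race with smb2_cached_lease_break()"); the blocking cleanup (cancel_work_sync() and the kref_put() calls) then runs after the lock is released. Below is a short sketch of that pattern using hypothetical demo_* names and structures, not the cifs ones.

/*
 * Two-phase expiry: unlink and pin under the lock, then do the blocking
 * cleanup outside it.  Illustrative only; these names do not exist in cifs.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_entry {
	struct list_head node;
	struct kref refcount;
	struct work_struct lease_break;	/* may be running concurrently */
	bool has_lease;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_entry, refcount));
}

static void demo_expire(struct list_head *entries, spinlock_t *lock)
{
	struct demo_entry *e, *tmp;
	LIST_HEAD(expired);

	/*
	 * Phase 1: unlink candidates while holding the lock and pin them
	 * (the real worker also checks an expiry timestamp here).
	 */
	spin_lock(lock);
	list_for_each_entry_safe(e, tmp, entries, node) {
		list_move(&e->node, &expired);
		kref_get(&e->refcount);		/* keep entry alive after unlock */
	}
	spin_unlock(lock);

	/* Phase 2: sleeping calls are safe now that the lock is dropped. */
	list_for_each_entry_safe(e, tmp, &expired, node) {
		list_del(&e->node);
		cancel_work_sync(&e->lease_break);
		if (e->has_lease) {
			spin_lock(lock);
			e->has_lease = false;
			spin_unlock(lock);
			kref_put(&e->refcount, demo_release);	/* lease ref */
		}
		kref_put(&e->refcount, demo_release);	/* pin from phase 1 */
	}
}

Taking the extra reference before dropping the lock is what lets cancel_work_sync() sleep safely while a concurrent lease break may be putting its own reference.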