Commit 9e992755 authored by Rohith Surabattula, committed by Steve French

cifs: Call close synchronously during unlink/rename/lease break.

During unlink/rename/lease break, the deferred work for close is
scheduled to run immediately, but asynchronously, which can race
with the actual unlink/rename commands.

This change performs the close synchronously instead, which avoids
the race conditions with those commands.
Signed-off-by: Rohith Surabattula <rohiths@microsoft.com>
Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
Cc: stable@vger.kernel.org # 5.13
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 41535701
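All three call sites below apply the same pattern: rather than re-arming the pending deferred-close work and letting it run later on the workqueue, the queued work is cancelled and the deferred reference is dropped inline, so the close completes before the unlink/rename (or the oplock response) proceeds. A simplified before/after fragment of that pattern, using identifiers from the hunks below (locking and list bookkeeping are elided; this is an editorial sketch, not part of the commit):

	/* Before: re-queue the pending work to run "now"; the close still
	 * happens later, asynchronously, on deferredclose_wq. */
	if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
		cifsFileInfo_get(cfile);

	/* After: pull the queued work back and drop the deferred reference
	 * here, so the close is issued before the caller continues. */
	if (cancel_delayed_work(&cfile->deferred))
		_cifsFileInfo_put(cfile, false, false);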
@@ -1611,6 +1611,11 @@ struct dfs_info3_param {
 	int ttl;
 };
 
+struct file_list {
+	struct list_head list;
+	struct cifsFileInfo *cfile;
+};
+
 /*
  * common struct for holding inode info when searching for or updating an
  * inode with new info
@@ -4847,17 +4847,6 @@ void cifs_oplock_break(struct work_struct *work)
 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
 
 oplock_break_ack:
-	/*
-	 * releasing stale oplock after recent reconnect of smb session using
-	 * a now incorrect file handle is not a data integrity issue but do
-	 * not bother sending an oplock release if session to server still is
-	 * disconnected since oplock already released by the server
-	 */
-	if (!cfile->oplock_break_cancelled) {
-		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-							     cinode);
-		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
-	}
 	/*
 	 * When oplock break is received and there are no active
 	 * file handles but cached, then schedule deferred close immediately.
@@ -4865,17 +4854,27 @@ void cifs_oplock_break(struct work_struct *work)
 	 */
 	spin_lock(&CIFS_I(inode)->deferred_lock);
 	is_deferred = cifs_is_deferred_close(cfile, &dclose);
+	spin_unlock(&CIFS_I(inode)->deferred_lock);
 	if (is_deferred &&
 	    cfile->deferred_close_scheduled &&
 	    delayed_work_pending(&cfile->deferred)) {
-		/*
-		 * If there is no pending work, mod_delayed_work queues new work.
-		 * So, Increase the ref count to avoid use-after-free.
-		 */
-		if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-			cifsFileInfo_get(cfile);
+		if (cancel_delayed_work(&cfile->deferred)) {
+			_cifsFileInfo_put(cfile, false, false);
+			goto oplock_break_done;
+		}
 	}
-	spin_unlock(&CIFS_I(inode)->deferred_lock);
+	/*
+	 * releasing stale oplock after recent reconnect of smb session using
+	 * a now incorrect file handle is not a data integrity issue but do
+	 * not bother sending an oplock release if session to server still is
+	 * disconnected since oplock already released by the server
+	 */
+	if (!cfile->oplock_break_cancelled) {
+		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+							     cinode);
+		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+	}
+oplock_break_done:
 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
 	cifs_done_oplock_break(cinode);
 }
@@ -723,20 +723,32 @@ void
 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
 {
 	struct cifsFileInfo *cfile = NULL;
+	struct file_list *tmp_list, *tmp_next_list;
+	struct list_head file_head;
 
 	if (cifs_inode == NULL)
 		return;
 
+	INIT_LIST_HEAD(&file_head);
+	spin_lock(&cifs_inode->open_file_lock);
 	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
 		if (delayed_work_pending(&cfile->deferred)) {
-			/*
-			 * If there is no pending work, mod_delayed_work queues new work.
-			 * So, Increase the ref count to avoid use-after-free.
-			 */
-			if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-				cifsFileInfo_get(cfile);
+			if (cancel_delayed_work(&cfile->deferred)) {
+				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+				if (tmp_list == NULL)
+					continue;
+				tmp_list->cfile = cfile;
+				list_add_tail(&tmp_list->list, &file_head);
+			}
 		}
 	}
+	spin_unlock(&cifs_inode->open_file_lock);
+
+	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+		_cifsFileInfo_put(tmp_list->cfile, true, false);
+		list_del(&tmp_list->list);
+		kfree(tmp_list);
+	}
 }
 
 void
@@ -744,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 {
 	struct cifsFileInfo *cfile;
 	struct list_head *tmp;
+	struct file_list *tmp_list, *tmp_next_list;
+	struct list_head file_head;
 
+	INIT_LIST_HEAD(&file_head);
 	spin_lock(&tcon->open_file_lock);
 	list_for_each(tmp, &tcon->openFileList) {
 		cfile = list_entry(tmp, struct cifsFileInfo, tlist);
 		if (delayed_work_pending(&cfile->deferred)) {
-			/*
-			 * If there is no pending work, mod_delayed_work queues new work.
-			 * So, Increase the ref count to avoid use-after-free.
-			 */
-			if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-				cifsFileInfo_get(cfile);
+			if (cancel_delayed_work(&cfile->deferred)) {
+				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+				if (tmp_list == NULL)
+					continue;
+				tmp_list->cfile = cfile;
+				list_add_tail(&tmp_list->list, &file_head);
+			}
 		}
 	}
 	spin_unlock(&tcon->open_file_lock);
+
+	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+		_cifsFileInfo_put(tmp_list->cfile, true, false);
+		list_del(&tmp_list->list);
+		kfree(tmp_list);
+	}
 }
 
 /* parses DFS refferal V3 structure