Commit 9e992755 authored by Rohith Surabattula, committed by Steve French

cifs: Call close synchronously during unlink/rename/lease break.

During unlink/rename/lease break, the deferred work for close is
scheduled immediately, but in an asynchronous manner, which might
lead to a race with the actual (unlink/rename) commands.

This change schedules the close synchronously, which avoids
the race conditions with other commands.
Signed-off-by: Rohith Surabattula <rohiths@microsoft.com>
Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
Cc: stable@vger.kernel.org # 5.13
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 41535701
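In effect, the fix replaces the asynchronous "schedule the deferred close to run now" pattern with a synchronous cancel-and-close. A condensed before/after view of the core change, excerpted from the diff below (locking and error handling elided):

    /* Before: push the deferred close to run "now", but still
     * asynchronously on the workqueue; if mod_delayed_work() queued new
     * work (returned false), take a reference for that handler. The
     * actual close can therefore still race with unlink/rename. */
    if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
            cifsFileInfo_get(cfile);

    /* After: cancel the pending work; if the cancel won (the work had
     * not started), the caller owns the handler's reference and drops
     * it inline, closing the handle before proceeding. */
    if (cancel_delayed_work(&cfile->deferred))
            _cifsFileInfo_put(cfile, false, false);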
@@ -1611,6 +1611,11 @@ struct dfs_info3_param {
 	int ttl;
 };
 
+struct file_list {
+	struct list_head list;
+	struct cifsFileInfo *cfile;
+};
+
 /*
  * common struct for holding inode info when searching for or updating an
  * inode with new info
@@ -4847,17 +4847,6 @@ void cifs_oplock_break(struct work_struct *work)
 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
 
 oplock_break_ack:
-	/*
-	 * releasing stale oplock after recent reconnect of smb session using
-	 * a now incorrect file handle is not a data integrity issue but do
-	 * not bother sending an oplock release if session to server still is
-	 * disconnected since oplock already released by the server
-	 */
-	if (!cfile->oplock_break_cancelled) {
-		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-							     cinode);
-		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
-	}
 	/*
 	 * When oplock break is received and there are no active
 	 * file handles but cached, then schedule deferred close immediately.
@@ -4865,17 +4854,27 @@ void cifs_oplock_break(struct work_struct *work)
 	 */
 	spin_lock(&CIFS_I(inode)->deferred_lock);
 	is_deferred = cifs_is_deferred_close(cfile, &dclose);
+	spin_unlock(&CIFS_I(inode)->deferred_lock);
 	if (is_deferred &&
 	    cfile->deferred_close_scheduled &&
 	    delayed_work_pending(&cfile->deferred)) {
-		/*
-		 * If there is no pending work, mod_delayed_work queues new work.
-		 * So, Increase the ref count to avoid use-after-free.
-		 */
-		if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-			cifsFileInfo_get(cfile);
+		if (cancel_delayed_work(&cfile->deferred)) {
+			_cifsFileInfo_put(cfile, false, false);
+			goto oplock_break_done;
+		}
 	}
-	spin_unlock(&CIFS_I(inode)->deferred_lock);
+	/*
+	 * releasing stale oplock after recent reconnect of smb session using
+	 * a now incorrect file handle is not a data integrity issue but do
+	 * not bother sending an oplock release if session to server still is
+	 * disconnected since oplock already released by the server
+	 */
+	if (!cfile->oplock_break_cancelled) {
+		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+							     cinode);
+		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+	}
+oplock_break_done:
 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
 	cifs_done_oplock_break(cinode);
 }
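The hunk above relies on the workqueue ownership convention: cancel_delayed_work() returns true only when the work item was still pending, meaning the deferred handler will never run, so the reference held on its behalf must be dropped by the canceller. A minimal sketch of that rule as used here (names taken from the diff; comments are editorial):

    if (cancel_delayed_work(&cfile->deferred)) {
            /* The deferred handler was dequeued before it ran, so the
             * reference it would have released is now ours to drop;
             * this performs the close synchronously, right here. */
            _cifsFileInfo_put(cfile, false, false);
            goto oplock_break_done;
    }
    /* Otherwise the handler is already running (or was never queued);
     * fall through and send the oplock response as before. */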
@@ -723,20 +723,32 @@ void
 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
 {
 	struct cifsFileInfo *cfile = NULL;
+	struct file_list *tmp_list, *tmp_next_list;
+	struct list_head file_head;
 
 	if (cifs_inode == NULL)
 		return;
 
+	INIT_LIST_HEAD(&file_head);
 	spin_lock(&cifs_inode->open_file_lock);
 	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
 		if (delayed_work_pending(&cfile->deferred)) {
-			/*
-			 * If there is no pending work, mod_delayed_work queues new work.
-			 * So, Increase the ref count to avoid use-after-free.
-			 */
-			if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-				cifsFileInfo_get(cfile);
+			if (cancel_delayed_work(&cfile->deferred)) {
+				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+				if (tmp_list == NULL)
+					continue;
+				tmp_list->cfile = cfile;
+				list_add_tail(&tmp_list->list, &file_head);
+			}
 		}
 	}
 	spin_unlock(&cifs_inode->open_file_lock);
+
+	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+		_cifsFileInfo_put(tmp_list->cfile, true, false);
+		list_del(&tmp_list->list);
+		kfree(tmp_list);
+	}
 }
 
 void
@@ -744,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 {
 	struct cifsFileInfo *cfile;
 	struct list_head *tmp;
+	struct file_list *tmp_list, *tmp_next_list;
+	struct list_head file_head;
 
+	INIT_LIST_HEAD(&file_head);
 	spin_lock(&tcon->open_file_lock);
 	list_for_each(tmp, &tcon->openFileList) {
 		cfile = list_entry(tmp, struct cifsFileInfo, tlist);
 		if (delayed_work_pending(&cfile->deferred)) {
-			/*
-			 * If there is no pending work, mod_delayed_work queues new work.
-			 * So, Increase the ref count to avoid use-after-free.
-			 */
-			if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-				cifsFileInfo_get(cfile);
+			if (cancel_delayed_work(&cfile->deferred)) {
+				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+				if (tmp_list == NULL)
+					continue;
+				tmp_list->cfile = cfile;
+				list_add_tail(&tmp_list->list, &file_head);
+			}
 		}
 	}
 	spin_unlock(&tcon->open_file_lock);
+
+	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+		_cifsFileInfo_put(tmp_list->cfile, true, false);
+		list_del(&tmp_list->list);
+		kfree(tmp_list);
+	}
 }
 
 /* parses DFS refferal V3 structure
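Both misc.c helpers above share one shape: candidates are gathered onto a private list under a spinlock with a GFP_ATOMIC allocation, and the potentially blocking _cifsFileInfo_put() runs only after the lock is released. A minimal userspace analogue of that pattern (illustrative only; pthread mutexes and malloc stand in for the kernel primitives, and every name here is invented for the example):

    /* Userspace sketch of "collect under the lock, close outside it". */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct open_file {                  /* analogue of cifsFileInfo */
            struct open_file *next;
            int id;
    };

    struct pending_close {              /* analogue of struct file_list */
            struct pending_close *next;
            struct open_file *file;
    };

    static pthread_mutex_t open_file_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct open_file *open_files;

    static void close_file(struct open_file *f)
    {
            /* Stand-in for _cifsFileInfo_put(): may block, so it must
             * not be called while open_file_lock is held. */
            printf("closing file %d\n", f->id);
    }

    static void close_deferred_files(void)
    {
            struct pending_close *head = NULL, *p, *next;
            struct open_file *f;

            pthread_mutex_lock(&open_file_lock);
            for (f = open_files; f != NULL; f = f->next) {
                    p = malloc(sizeof(*p)); /* GFP_ATOMIC analogue: no sleeping */
                    if (p == NULL)
                            continue;       /* skip on allocation failure */
                    p->file = f;
                    p->next = head;
                    head = p;
            }
            pthread_mutex_unlock(&open_file_lock);

            /* The blocking work happens outside the lock. */
            for (p = head; p != NULL; p = next) {
                    next = p->next;
                    close_file(p->file);
                    free(p);
            }
    }

    int main(void)
    {
            struct open_file a = { NULL, 1 }, b = { &a, 2 };

            open_files = &b;
            close_deferred_files();
            return 0;
    }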