Commit d7d7a66a authored by Shyam Prasad N, committed by Steve French

cifs: avoid use of global locks for high contention data

During analysis of multichannel performance, it was seen that the
global locks cifs_tcp_ses_lock and GlobalMid_Lock, shared between
various data structures, were significant sources of contention.

With this change, we break down the use of these locks by introducing
new locks at more granular levels: server->srv_lock, ses->ses_lock and
tcon->tc_lock to protect the otherwise unprotected fields of the
server, session and tcon structs, and server->mid_lock to protect
mid-related lists and entries at the server level.
Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 1bfa25ee
......@@ -55,7 +55,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
return;
cifs_dbg(VFS, "Dump pending requests:\n");
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
mid_entry->mid_state,
......@@ -78,7 +78,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
mid_entry->resp_buf, 62);
}
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
#endif /* CONFIG_CIFS_DEBUG2 */
}
......@@ -463,7 +463,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n\t\t[NONE]");
seq_puts(m, "\n\n\tMIDs: ");
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
seq_printf(m, "\n\tState: %d com: %d pid:"
" %d cbdata: %p mid %llu\n",
......@@ -473,7 +473,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
mid_entry->callback_data,
mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
seq_printf(m, "\n--\n");
}
if (c == 0)
......
......@@ -141,13 +141,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
server->tcpStatus == CifsNeedNegotiate) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return rc;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
if (!server->session_estab) {
memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
......
......@@ -731,14 +731,17 @@ static void cifs_umount_begin(struct super_block *sb)
tcon = cifs_sb_master_tcon(cifs_sb);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
already tried to force umount this and woken up
all waiting network requests, nothing to do */
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return;
} else if (tcon->tc_count == 1)
tcon->status = TID_EXITING;
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
......
......@@ -605,6 +605,7 @@ inc_rfc1001_len(void *buf, int count)
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
spinlock_t srv_lock; /* protect anything here that is not protected */
__u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
......@@ -622,6 +623,7 @@ struct TCP_Server_Info {
#endif
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
spinlock_t mid_lock; /* protect mid queue and its entries */
struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
......@@ -1008,6 +1010,7 @@ struct cifs_ses {
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
......@@ -1169,6 +1172,7 @@ struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head rlist; /* reconnect list */
spinlock_t tc_lock; /* protect anything here that is not protected */
atomic_t num_local_opens; /* num of all opens including disconnected */
atomic_t num_remote_opens; /* num of all network opens on server */
struct list_head openFileList;
......@@ -1899,33 +1903,78 @@ require use of the stronger protocol */
*/
/****************************************************************************
* Locking notes. All updates to global variables and lists should be
* protected by spinlocks or semaphores.
* Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according
* to the locking order, i.e. if two locks are to be held together, the lock that
* appears higher in this list needs to be taken before the other.
*
* Spinlocks
* ---------
* GlobalMid_Lock protects:
* list operations on pending_mid_q and oplockQ
* updates to XID counters, multiplex id and SMB sequence numbers
* list operations on global DnotifyReqList
* updates to ses->status and TCP_Server_Info->tcpStatus
* updates to server->CurrentMid
* tcp_ses_lock protects:
* list operations on tcp and SMB session lists
* tcon->open_file_lock protects the list of open files hanging off the tcon
* inode->open_file_lock protects the openFileList hanging off the inode
* cfile->file_info_lock protects counters and fields in cifs file struct
* f_owner.lock protects certain per file struct operations
* mapping->page_lock protects certain per page operations
* If you hold a lock that is lower in this list, and you need to take a higher lock
* (or if you think that one of the functions that you're calling may need to), first
* drop the lock you hold, pick up the higher lock, then the lower one. This will
* ensure that locks are picked up only in one direction in the below table
* (top to bottom).
*
* Note that the cifs_tcon.open_file_lock should be taken before,
* not after, the cifsInodeInfo.open_file_lock
* Also, if you expect a function to be called with a lock held, explicitly document
* this in the comments on top of your function definition.
*
* Semaphores
* ----------
* cifsInodeInfo->lock_sem protects:
* the list of locks held by the inode
* Also, try to keep critical sections (lock hold time) as short as
* possible. Blocking or calling other functions with a lock held always
* increases the risk of deadlock.
*
* Following this rule will avoid unnecessary deadlocks, which can get really hard
* to debug. Also, please add any new lock that you introduce to this list in the
* correct order.
*
* Please update this list whenever you introduce new locks in your changes, or in
* case I've missed some existing locks, and ensure that each entry is placed
* according to the expected locking order.
*
* =====================================================================================
* Lock                              Protects                          Initialization fn
* =====================================================================================
* vol_list_lock
* vol_info->ctx_lock                vol_info->ctx
* cifs_sb_info->tlink_tree_lock     cifs_sb_info->tlink_tree          cifs_setup_cifs_sb
* TCP_Server_Info->reconnect_mutex  TCP_Server_Info                   cifs_get_tcp_session
* TCP_Server_Info->srv_mutex        TCP_Server_Info                   cifs_get_tcp_session
* cifs_ses->session_mutex           cifs_ses                          sesInfoAlloc
*                                   cifs_tcon
* cifs_tcon->open_file_lock         cifs_tcon->openFileList           tconInfoAlloc
*                                   cifs_tcon->pending_opens
* cifs_tcon->stat_lock              cifs_tcon->bytes_read             tconInfoAlloc
*                                   cifs_tcon->bytes_written
* cifs_tcp_ses_lock                 cifs_tcp_ses_list                 sesInfoAlloc
* GlobalMid_Lock                    GlobalMaxActiveXid                init_cifs
*                                   GlobalCurrentXid
*                                   GlobalTotalActiveXid
* TCP_Server_Info->srv_lock         (anything in struct not protected by another lock and can change)
* TCP_Server_Info->mid_lock         TCP_Server_Info->pending_mid_q    cifs_get_tcp_session
*                                   ->CurrentMid
*                                   (any changes in mid_q_entry fields)
* TCP_Server_Info->req_lock         TCP_Server_Info->in_flight        cifs_get_tcp_session
*                                   ->credits
*                                   ->echo_credits
*                                   ->oplock_credits
*                                   ->reconnect_instance
* cifs_ses->ses_lock                (anything that is not protected by another lock and can change)
* cifs_ses->iface_lock              cifs_ses->iface_list              sesInfoAlloc
*                                   ->iface_count
*                                   ->iface_last_update
* cifs_ses->chan_lock               cifs_ses->chans
*                                   ->chans_need_reconnect
*                                   ->chans_in_reconnect
* cifs_tcon->tc_lock                (anything that is not protected by another lock and can change)
* cifsInodeInfo->open_file_lock     cifsInodeInfo->openFileList       cifs_alloc_inode
* cifsInodeInfo->writers_lock       cifsInodeInfo->writers            cifsInodeInfo_alloc
* cifsInodeInfo->lock_sem           cifsInodeInfo->llist              cifs_init_once
*                                   ->can_cache_brlcks
* cifsInodeInfo->deferred_lock      cifsInodeInfo->deferred_closes    cifsInodeInfo_alloc
* cached_fid->fid_mutex             cifs_tcon->crfid                  tconInfoAlloc
* cifsFileInfo->fh_mutex            cifsFileInfo                      cifs_new_fileinfo
* cifsFileInfo->file_info_lock      cifsFileInfo->count               cifs_new_fileinfo
*                                   ->invalidHandle                   initiate_cifs_search
*                                   ->oplock_break_cancelled
* cifs_aio_ctx->aio_mutex           cifs_aio_ctx                      cifs_aio_ctx_alloc
****************************************************************************/
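To make the ordering rule above concrete, a minimal hedged sketch (the
helper name is hypothetical; the locks and their relative positions are
the documented ones, with srv_lock higher than mid_lock in the table):

/*
 * A thread that needs both locks takes them top-down. If it already
 * holds mid_lock and then discovers it needs srv_lock, it must first
 * drop mid_lock, take srv_lock, and re-take mid_lock.
 */
static void demo_lock_both(struct TCP_Server_Info *server)
{
	spin_lock(&server->srv_lock);	/* higher in the table: take first */
	spin_lock(&server->mid_lock);	/* lower in the table: take second */
	/* ... touch fields guarded by both locks ... */
	spin_unlock(&server->mid_lock);
	spin_unlock(&server->srv_lock);
}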
#ifdef DECLARE_GLOBALS_HERE
......@@ -1946,9 +1995,7 @@ extern struct list_head cifs_tcp_ses_list;
/*
* This lock protects the cifs_tcp_ses_list, the list of smb sessions per
* tcp session, and the list of tcon's per smb session. It also protects
* the reference counters for the server, smb session, and tcon. It also
* protects some fields in the TCP_Server_Info struct such as dstaddr. Finally,
* changes to the tcon->tidStatus should be done while holding this lock.
* the reference counters for the server, smb session, and tcon.
* Generally, the locks should be taken in order tcp_ses_lock before
* tcon->open_file_lock and that before file->file_info_lock since the
* structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
......
......@@ -74,13 +74,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
struct list_head *tmp1;
/* only send once per connect */
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->ses->ses_lock);
if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->ses->ses_lock);
return;
}
tcon->status = TID_IN_FILES_INVALIDATE;
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->ses->ses_lock);
/* list all files open on tree connection and mark them invalid */
spin_lock(&tcon->open_file_lock);
......@@ -98,10 +98,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
mutex_unlock(&tcon->crfid.fid_mutex);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_FILES_INVALIDATE)
tcon->status = TID_NEED_TCON;
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->tc_lock);
/*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted
......@@ -134,18 +134,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* only tree disconnect, open, and write, (and ulogoff which does not
* have tcon) are allowed as we start force umount
*/
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb_command);
return -ENODEV;
}
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->tc_lock);
retries = server->nr_targets;
......@@ -165,12 +165,12 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
}
/* are we still trying to reconnect? */
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
break;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
if (retries && --retries)
continue;
......@@ -201,13 +201,13 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
rc = -EHOSTDOWN;
goto out;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/*
* need to prevent multiple threads trying to simultaneously
......
......@@ -1526,15 +1526,21 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (!server->is_dfs_conn)
spin_lock(&server->srv_lock);
if (!server->is_dfs_conn) {
spin_unlock(&server->srv_lock);
continue;
}
spin_unlock(&server->srv_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->tc_lock);
if (!tcon->ipc && !tcon->need_reconnect) {
tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons);
}
spin_unlock(&tcon->tc_lock);
}
}
}
......
......@@ -69,6 +69,7 @@ sesInfoAlloc(void)
ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&sesInfoAllocCount);
spin_lock_init(&ret_buf->ses_lock);
ret_buf->ses_status = SES_NEW;
++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
......@@ -126,6 +127,7 @@ tconInfoAlloc(void)
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
++ret_buf->tc_count;
spin_lock_init(&ret_buf->tc_lock);
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
spin_lock_init(&ret_buf->open_file_lock);
......
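The two hunks above initialize the new per-structure locks at allocation
time. A minimal sketch of that pattern (demo_* names hypothetical, not
from the cifs code): the embedded spinlock must be initialized before
the object is published on any shared list.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_tcon {
	spinlock_t tc_lock;		/* guards the fields below */
	struct list_head open_files;
	int tc_count;			/* reference counter */
};

static struct demo_tcon *demo_tcon_alloc(void)
{
	struct demo_tcon *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	spin_lock_init(&t->tc_lock);	/* usable before anyone can take it */
	INIT_LIST_HEAD(&t->open_files);
	t->tc_count = 1;		/* caller holds the initial reference */
	return t;
}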
......@@ -92,17 +92,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct mid_q_entry *mid;
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (compare_mid(mid->mid, buf) &&
mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) {
kref_get(&mid->refcount);
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return mid;
}
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return NULL;
}
......@@ -166,7 +166,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
__u16 last_mid, cur_mid;
bool collision, reconnect = false;
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
/* mid is 16 bit only for CIFS/SMB */
cur_mid = (__u16)((server->CurrentMid) & 0xffff);
......@@ -225,7 +225,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
}
cur_mid++;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
if (reconnect) {
cifs_signal_cifsd_for_reconnect(server, false);
......
......@@ -126,13 +126,13 @@ smb2_add_credits(struct TCP_Server_Info *server,
optype, scredits, add);
}
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect
|| server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
switch (rc) {
case -1:
......@@ -218,12 +218,12 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
spin_lock(&server->req_lock);
} else {
spin_unlock(&server->req_lock);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
spin_lock(&server->req_lock);
scredits = server->credits;
......@@ -319,19 +319,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
{
__u64 mid;
/* for SMB2 we need the current value */
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
mid = server->CurrentMid++;
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return mid;
}
static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
if (server->CurrentMid >= val)
server->CurrentMid -= val;
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
}
static struct mid_q_entry *
......@@ -346,7 +346,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
return NULL;
}
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
......@@ -356,11 +356,11 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return mid;
}
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return NULL;
}
......@@ -403,9 +403,9 @@ smb2_negotiate(const unsigned int xid,
{
int rc;
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
server->CurrentMid = 0;
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
rc = SMB2_negotiate(xid, ses, server);
/* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN)
......@@ -2585,7 +2585,9 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
spin_lock(&tcon->tc_lock);
tcon->need_reconnect = true;
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
pr_warn_once("Server share %s deleted.\n",
tcon->treeName);
......@@ -4561,9 +4563,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid == ses_id) {
spin_lock(&ses->ses_lock);
ses_enc_key = enc ? ses->smb3encryptionkey :
ses->smb3decryptionkey;
memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
spin_unlock(&ses->ses_lock);
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
......@@ -5078,20 +5082,21 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->callback(mid);
} else {
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&GlobalMid_Lock);
spin_lock(&dw->server->srv_lock);
if (dw->server->tcpStatus == CifsNeedReconnect) {
spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_RETRY_NEEDED;
spin_unlock(&GlobalMid_Lock);
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&dw->server->mid_lock);
spin_unlock(&dw->server->srv_lock);
mid->callback(mid);
} else {
spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_REQUEST_SUBMITTED;
mid->mid_flags &= ~(MID_DELETED);
list_add_tail(&mid->qhead,
&dw->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&dw->server->mid_lock);
spin_unlock(&dw->server->srv_lock);
}
}
cifs_mid_q_entry_release(mid);
......
......@@ -162,7 +162,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
/*
* only tree disconnect, open, and write,
......@@ -172,13 +172,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command);
return -ENODEV;
}
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&tcon->tc_lock);
if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
(!tcon->ses->server) || !server)
return -EIO;
......@@ -217,12 +217,12 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
}
/* are we still trying to reconnect? */
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
break;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
if (retries && --retries)
continue;
......@@ -256,13 +256,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
rc = -EHOSTDOWN;
goto out;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/*
* need to prevent multiple threads trying to simultaneously
......@@ -3911,15 +3911,15 @@ SMB2_echo(struct TCP_Server_Info *server)
cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->ops->need_neg &&
server->ops->need_neg(server)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/* No need to send echo on newly established connections */
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
(void **)&req, &total_len);
......
......@@ -640,13 +640,13 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (!is_signed)
return 0;
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->ops->need_neg &&
server->ops->need_neg(server)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return 0;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8);
return 0;
......@@ -762,28 +762,30 @@ static int
smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct smb2_hdr *shdr, struct mid_q_entry **mid)
{
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -ENOENT;
}
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN;
}
if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -EAGAIN;
}
spin_unlock(&server->srv_lock);
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) {
if ((shdr->Command != SMB2_SESSION_SETUP) &&
(shdr->Command != SMB2_NEGOTIATE)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are setting up session */
......@@ -791,19 +793,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
if (ses->ses_status == SES_EXITING) {
if (shdr->Command != SMB2_LOGOFF) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are shutting down the session */
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
*mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL)
return -ENOMEM;
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return 0;
}
......@@ -869,13 +871,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return ERR_PTR(-EAGAIN);
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
smb2_seq_num_into_buf(server, shdr);
......
......@@ -154,9 +154,11 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
spin_lock(&GlobalMid_Lock);
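/* cache the server pointer: kref_put() below may free midEntry */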
struct TCP_Server_Info *server = midEntry->server;
spin_lock(&server->mid_lock);
kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
}
void DeleteMidQEntry(struct mid_q_entry *midEntry)
......@@ -167,12 +169,12 @@ void DeleteMidQEntry(struct mid_q_entry *midEntry)
void
cifs_delete_mid(struct mid_q_entry *mid)
{
spin_lock(&GlobalMid_Lock);
spin_lock(&mid->server->mid_lock);
if (!(mid->mid_flags & MID_DELETED)) {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&mid->server->mid_lock);
DeleteMidQEntry(mid);
}
......@@ -577,12 +579,12 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
} else {
spin_unlock(&server->req_lock);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/*
* For normal commands, reserve the last MAX_COMPOUND
......@@ -725,11 +727,11 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are setting up session */
......@@ -738,19 +740,19 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
if (ses->ses_status == SES_EXITING) {
/* check if SMB session is bad because we are setting it up */
if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are shutting down session */
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
*ppmidQ = AllocMidQEntry(in_buf, ses->server);
if (*ppmidQ == NULL)
return -ENOMEM;
spin_lock(&GlobalMid_Lock);
spin_lock(&ses->server->mid_lock);
list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
spin_unlock(&ses->server->mid_lock);
return 0;
}
......@@ -849,9 +851,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid->mid_state = MID_REQUEST_SUBMITTED;
/* put it on the pending_mid_q */
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
/*
* Need to store the time in mid before calling I/O. For call_async,
......@@ -912,10 +914,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return rc;
case MID_RETRY_NEEDED:
rc = -EAGAIN;
......@@ -935,7 +937,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
__func__, mid->mid, mid->mid_state);
rc = -EIO;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
DeleteMidQEntry(mid);
return rc;
......@@ -1078,12 +1080,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/*
* Wait for all the requests to become available.
......@@ -1186,17 +1188,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
cifs_server_unlock(server);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(server, midQ[i]);
......@@ -1208,14 +1210,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
}
}
......@@ -1259,19 +1261,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = {
.iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len
};
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, &iov, 1);
cifs_server_unlock(server);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&ses->ses_lock);
out:
/*
......@@ -1360,12 +1362,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
......@@ -1419,15 +1421,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(server, midQ);
if (rc != 0) {
send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
add_credits(server, &credits, 0);
return rc;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
}
rc = cifs_sync_mid_result(midQ, server);
......@@ -1505,12 +1507,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO;
}
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
......@@ -1568,12 +1570,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
(server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) {
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
......@@ -1600,21 +1602,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(server, midQ);
if (rc) {
send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock);
spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
return rc;
}
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->mid_lock);
}
/* We got the response - restart system call. */
rstart = 1;
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&server->srv_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
spin_unlock(&server->srv_lock);
rc = cifs_sync_mid_result(midQ, server);
if (rc != 0)
......