Commit 610bc17d authored by Steve French, committed by Steve French

Do not send junk in bcc area of oplock break SMB Lock request, and always let oplock break release

through even if requests are ahead of it waiting for responses to complete.

Signed-off-by: Steve French <sfrench@us.ibm.com>
parent c0f95d0d
Version 1.19 Version 1.19
------------ ------------
Fix /proc/fs/cifs/Stats and DebugData display to handle larger
amounts of return data. Properly limit requests to MAX_REQ (50
is the usual maximum active multiplex SMB/CIFS requests per server).
Version 1.18 Version 1.18
......
...@@ -819,14 +819,20 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, ...@@ -819,14 +819,20 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMB->AndXCommand = 0xFF; /* none */ pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */ pSMB->Fid = smb_file_id; /* netfid stays le */
if(numLock != 0) {
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid); pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
/* BB where to store pid high? */
temp = cpu_to_le64(len); temp = cpu_to_le64(len);
pSMB->Locks[0].LengthLow = (__u32)(len & 0xFFFFFFFF); pSMB->Locks[0].LengthLow = (__u32)(temp & 0xFFFFFFFF);
pSMB->Locks[0].LengthHigh = (__u32)(len>>32); pSMB->Locks[0].LengthHigh = (__u32)(temp>>32);
temp = cpu_to_le64(offset); temp = cpu_to_le64(offset);
pSMB->Locks[0].OffsetLow = (__u32)(offset & 0xFFFFFFFF); pSMB->Locks[0].OffsetLow = (__u32)(temp & 0xFFFFFFFF);
pSMB->Locks[0].OffsetHigh = (__u32)(offset>>32); pSMB->Locks[0].OffsetHigh = (__u32)(temp>>32);
pSMB->ByteCount = sizeof (LOCKING_ANDX_RANGE); pSMB->ByteCount = sizeof (LOCKING_ANDX_RANGE);
} else {
/* oplock break */
pSMB->ByteCount = 0;
}
pSMB->hdr.smb_buf_length += pSMB->ByteCount; pSMB->hdr.smb_buf_length += pSMB->ByteCount;
pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount); pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
......
...@@ -175,7 +175,7 @@ cifs_reconnect(struct TCP_Server_Info *server) ...@@ -175,7 +175,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
if(server->tcpStatus != CifsExiting) if(server->tcpStatus != CifsExiting)
server->tcpStatus = CifsGood; server->tcpStatus = CifsGood;
spin_unlock(&GlobalMid_Lock); spin_unlock(&GlobalMid_Lock);
atomic_set(&server->inFlight,0); /* atomic_set(&server->inFlight,0);*/
wake_up(&server->response_q); wake_up(&server->response_q);
} }
} }
......
...@@ -202,25 +202,30 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, ...@@ -202,25 +202,30 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
/* Ensure that we do not send more than 50 overlapping requests /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or to the same server. We may make this configurable later or
use ses->maxReq */ use ses->maxReq */
if(long_op == -1) {
/* oplock breaks must not be held up */
atomic_inc(&ses->server->inFlight);
} else {
spin_lock(&GlobalMid_Lock); spin_lock(&GlobalMid_Lock);
while(1) { while(1) {
if(atomic_read(&ses->server->inFlight) >= CIFS_MAX_REQ) { if(atomic_read(&ses->server->inFlight) >= CIFS_MAX_REQ){
spin_unlock(&GlobalMid_Lock); spin_unlock(&GlobalMid_Lock);
wait_event(ses->server->request_q,atomic_read(&ses->server->inFlight) < CIFS_MAX_REQ); wait_event(ses->server->request_q,
atomic_read(&ses->server->inFlight)
< CIFS_MAX_REQ);
spin_lock(&GlobalMid_Lock); spin_lock(&GlobalMid_Lock);
} else { } else {
/* can not count locking commands against the total since /* can not count locking commands against total since
they are allowed to block on server */ they are allowed to block on server */
if(long_op < 3) { if(long_op < 3) {
/* update # of requests on the wire to this server */ /* update # of requests on the wire to server */
atomic_inc(&ses->server->inFlight); atomic_inc(&ses->server->inFlight);
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&GlobalMid_Lock);
break; break;
} }
} }
}
/* make sure that we sign in the same order that we send on this socket /* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption and avoid races inside tcp sendmsg code that could cause corruption
of smb data */ of smb data */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment