Commit 48811377 authored by Patrick Mansfield's avatar Patrick Mansfield Committed by James Bottomley

[PATCH] 2/5 scsi-locking-2.5 remove lock hierarchy

Get rid of the lock hierarchy for queue_lock and host_lock (even for the
single_lun case).
parent b60f189f
...@@ -1088,9 +1088,10 @@ static inline int scsi_dev_queue_ready(struct request_queue *q, ...@@ -1088,9 +1088,10 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
/* /*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else * scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. * return 0. We must end up running the queue again whenever 0 is
* returned, else IO can hang.
* *
* Called with queue_lock and host_lock held. * Called with host_lock held.
*/ */
static inline int scsi_host_queue_ready(struct request_queue *q, static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost, struct Scsi_Host *shost,
...@@ -1157,41 +1158,54 @@ static void scsi_request_fn(request_queue_t *q) ...@@ -1157,41 +1158,54 @@ static void scsi_request_fn(request_queue_t *q)
if (blk_queue_plugged(q)) if (blk_queue_plugged(q))
goto completed; goto completed;
if (blk_queue_empty(q))
goto completed;
/* /*
* get next queueable request. We do this early to make sure * get next queueable request. We do this early to make sure
* that the request is fully prepared even if we cannot * that the request is fully prepared even if we cannot
* accept it. If there is no request, we'll detect this * accept it.
* lower down.
*/ */
req = elv_next_request(q); req = elv_next_request(q);
if (!scsi_dev_queue_ready(q, sdev)) if (!req) {
/*
* If the device is busy, a returning I/O will
* restart the queue. Otherwise, we have to plug
* the queue
*/
if (sdev->device_busy == 0)
blk_plug_device(q);
goto completed; goto completed;
}
spin_lock_irqsave(shost->host_lock, flags); if (!scsi_dev_queue_ready(q, sdev))
if (!scsi_host_queue_ready(q, shost, sdev)) goto completed;
goto after_host_lock;
if (sdev->single_lun && sdev->sdev_target->starget_sdev_user &&
(sdev->sdev_target->starget_sdev_user != sdev))
goto after_host_lock;
/* /*
* If we couldn't find a request that could be queued, then we * Remove the request from the request list.
* can also quit.
*/ */
if (blk_queue_empty(q)) if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
goto after_host_lock; blkdev_dequeue_request(req);
if (!req) { sdev->device_busy++;
/* If the device is busy, a returning I/O spin_unlock_irq(q->queue_lock);
* will restart the queue. Otherwise, we have
* to plug the queue */ spin_lock_irqsave(shost->host_lock, flags);
if (sdev->device_busy == 1) if (!scsi_host_queue_ready(q, shost, sdev))
blk_plug_device(q); goto host_lock_held;
goto after_host_lock;
if (sdev->single_lun) {
if (sdev->sdev_target->starget_sdev_user &&
(sdev->sdev_target->starget_sdev_user != sdev))
goto host_lock_held;
else
sdev->sdev_target->starget_sdev_user = sdev;
} }
shost->host_busy++;
spin_unlock_irqrestore(shost->host_lock, flags);
cmd = req->special; cmd = req->special;
/* /*
...@@ -1200,26 +1214,6 @@ static void scsi_request_fn(request_queue_t *q) ...@@ -1200,26 +1214,6 @@ static void scsi_request_fn(request_queue_t *q)
*/ */
BUG_ON(!cmd); BUG_ON(!cmd);
/*
* Finally, before we release the lock, we copy the
* request to the command block, and remove the
* request from the request list. Note that we always
* operate on the queue head - there is absolutely no
* reason to search the list, because all of the
* commands in this queue are for the same device.
*/
if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
blkdev_dequeue_request(req);
if (sdev->single_lun)
sdev->sdev_target->starget_sdev_user = sdev;
shost->host_busy++;
spin_unlock_irqrestore(shost->host_lock, flags);
sdev->device_busy++;
spin_unlock_irq(q->queue_lock);
/* /*
* Finally, initialize any error handling parameters, and set up * Finally, initialize any error handling parameters, and set up
* the timers for timeouts. * the timers for timeouts.
...@@ -1240,8 +1234,21 @@ static void scsi_request_fn(request_queue_t *q) ...@@ -1240,8 +1234,21 @@ static void scsi_request_fn(request_queue_t *q)
completed: completed:
return; return;
after_host_lock: host_lock_held:
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
/*
* lock q, handle tag, requeue req, and decrement device_busy. We
* must return with queue_lock held.
*
* Decrementing device_busy without checking it is OK, as all such
* cases (host limits or settings) should run the queue at some
* later time.
*/
spin_lock_irq(q->queue_lock);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
__elv_add_request(q, req, 0, 0);
sdev->device_busy--;
} }
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment