Commit 00879482 authored by Patrick Mansfield, committed by James Bottomley

[PATCH] 6/7 add and use a per-scsi_device queue_lock

Add and use a per-scsi_device queue_lock.
parent c270e476
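In outline: `device_busy` counts commands outstanding on one device, while `host_busy` counts commands outstanding on the whole host. Before this patch both counters were updated under the host-wide `host_lock`; after it, `device_busy` is protected by the per-device lock (`sdev_lock`, which also serves as that device's request `queue_lock`), so devices no longer contend on `host_lock` for per-device state. A minimal user-space sketch of the before/after locking pattern, with pthread mutexes standing in for spinlocks and hypothetical `*_sketch` types (illustrative only, not kernel code):

```c
/*
 * Illustrative user-space sketch only -- not kernel code.  pthread
 * mutexes stand in for spinlocks; the *_sketch types are hypothetical.
 * Shows the move this patch makes: device_busy leaves the host-wide
 * host_lock and goes under the per-device lock.
 */
#include <pthread.h>
#include <stdio.h>

struct host_sketch {
	pthread_mutex_t host_lock;	/* stand-in for shost->host_lock */
	unsigned short host_busy;
};

struct device_sketch {
	struct host_sketch *host;
	pthread_mutex_t sdev_lock;	/* stand-in for sdev->sdev_lock */
	unsigned short device_busy;
};

/* Old scheme: one host-wide lock serializes both counters. */
static void busy_dec_old(struct device_sketch *sdev)
{
	pthread_mutex_lock(&sdev->host->host_lock);
	sdev->host->host_busy--;
	sdev->device_busy--;	/* contends with every device on the host */
	pthread_mutex_unlock(&sdev->host->host_lock);
}

/* New scheme: per-device state takes only the per-device lock. */
static void busy_dec_new(struct device_sketch *sdev)
{
	pthread_mutex_lock(&sdev->host->host_lock);
	sdev->host->host_busy--;	/* host-wide state keeps host_lock */
	pthread_mutex_unlock(&sdev->host->host_lock);

	pthread_mutex_lock(&sdev->sdev_lock);
	sdev->device_busy--;	/* now independent of other devices */
	pthread_mutex_unlock(&sdev->sdev_lock);
}

int main(void)
{
	struct host_sketch shost = { PTHREAD_MUTEX_INITIALIZER, 2 };
	struct device_sketch sdev = { &shost, PTHREAD_MUTEX_INITIALIZER, 2 };

	busy_dec_old(&sdev);
	busy_dec_new(&sdev);
	printf("host_busy=%u device_busy=%u\n",
	       shost.host_busy, sdev.device_busy);
	return 0;
}
```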
@@ -620,7 +620,6 @@ void scsi_host_busy_dec_and_test(struct Scsi_Host *shost, Scsi_Device *sdev)
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
-	sdev->device_busy--;
 	if (shost->in_recovery && shost->host_failed &&
 	    (shost->host_busy == shost->host_failed))
 	{
...
@@ -447,8 +447,6 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
 	host = SCpnt->device->host;
 
-	ASSERT_LOCK(host->host_lock, 0);
-
 	/* Assign a unique nonzero serial_number. */
 	if (++serial_number == 0)
 		serial_number = 1;
@@ -574,8 +572,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
 {
 	struct Scsi_Host *host = SCpnt->device->host;
 
-	ASSERT_LOCK(host->host_lock, 0);
-
 	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
 	SRpnt->sr_command = SCpnt;
@@ -819,12 +815,11 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
 	struct Scsi_Host *host;
 	Scsi_Device *device;
 	Scsi_Request * SRpnt;
+	unsigned long flags;
 
 	host = SCpnt->device->host;
 	device = SCpnt->device;
 
-	ASSERT_LOCK(host->host_lock, 0);
-
 	/*
 	 * We need to protect the decrement, as otherwise a race condition
 	 * would exist.  Fiddling with SCpnt isn't a problem as the
@@ -833,6 +828,9 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
 	 * shared.
 	 */
 	scsi_host_busy_dec_and_test(host, device);
+	spin_lock_irqsave(SCpnt->device->request_queue->queue_lock, flags);
+	SCpnt->device->device_busy--;
+	spin_unlock_irqrestore(SCpnt->device->request_queue->queue_lock, flags);
 
 	/*
 	 * Clear the flags which say that the device/host is no longer
...
@@ -418,7 +418,7 @@ extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
 			       int block_sectors);
 extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
 extern void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd);
-extern request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost);
+extern request_queue_t *scsi_alloc_queue(struct scsi_device *sdev);
 extern void scsi_free_queue(request_queue_t *q);
 extern int scsi_init_queue(void);
 extern void scsi_exit_queue(void);
@@ -554,6 +554,7 @@ struct scsi_device {
 	struct Scsi_Host *host;
 	request_queue_t *request_queue;
 	volatile unsigned short device_busy;	/* commands actually active on low-level */
+	spinlock_t sdev_lock;		/* also the request queue_lock */
 	spinlock_t list_lock;
 	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
 	struct list_head starved_entry;
...
@@ -431,8 +431,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
 	unsigned long flags;
 	int rtn = SUCCESS;
 
-	ASSERT_LOCK(host->host_lock, 0);
-
 	/*
 	 * we will use a queued command if possible, otherwise we will
 	 * emulate the queuing and calling of completion function ourselves.
@@ -1405,8 +1403,6 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 	struct scsi_device *sdev;
 	unsigned long flags;
 
-	ASSERT_LOCK(shost->host_lock, 0);
-
 	/*
 	 * If the door was locked, we need to insert a door lock request
 	 * onto the head of the SCSI request queue for the device.  There
@@ -1434,18 +1430,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 	 * now that error recovery is done, we will need to ensure that these
 	 * requests are started.
 	 */
-	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(sdev, &shost->my_devices, siblings) {
-		if ((shost->can_queue > 0 &&
-		     (shost->host_busy >= shost->can_queue))
-		    || (shost->host_blocked)
-		    || (shost->host_self_blocked)) {
-			break;
-		}
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 		__blk_run_queue(sdev->request_queue);
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
 /**
...
@@ -92,6 +92,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1,
 		 printk("Inserting command %p into mlqueue\n", cmd));
@@ -130,6 +131,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	 * Decrement the counters, since these commands are no longer
 	 * active on the host/device.
 	 */
+	spin_lock_irqsave(device->request_queue->queue_lock, flags);
+	device->device_busy--;
+	spin_unlock_irqrestore(device->request_queue->queue_lock, flags);
 	scsi_host_busy_dec_and_test(host, device);
 
 	/*
@@ -343,7 +347,7 @@ static int scsi_single_lun_check(struct scsi_device *current_sdev)
  * outstanding for current_sdev, call __blk_run_queue for the next
  * scsi_device on the same target that has requests.
  *
- * Called with queue_lock held.
+ * Called with *no* scsi locks held.
  */
 static void scsi_single_lun_run(struct scsi_device *current_sdev,
 				struct request_queue *q)
@@ -407,9 +411,6 @@ void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 	struct Scsi_Host *shost;
 	unsigned long flags;
 
-	ASSERT_LOCK(q->queue_lock, 0);
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	if (cmd != NULL) {
 
 		/*
@@ -418,6 +419,7 @@ void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 		 * in which case we need to request the blocks that come after
 		 * the bad sector.
 		 */
+		spin_lock_irqsave(q->queue_lock, flags);
 		cmd->request->special = cmd;
 		if (blk_rq_tagged(cmd->request))
 			blk_queue_end_tag(q, cmd->request);
@@ -430,6 +432,7 @@ void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 		cmd->request->flags |= REQ_SPECIAL;
 		cmd->request->flags &= ~REQ_DONTPREP;
 		__elv_add_request(q, cmd->request, 0, 0);
+		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 
 	sdev = q->queuedata;
@@ -438,6 +441,7 @@ void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 		scsi_single_lun_run(sdev, q);
 
 	shost = sdev->host;
+	spin_lock_irqsave(shost->host_lock, flags);
 	while (!list_empty(&shost->starved_list) &&
 	       !shost->host_blocked && !shost->host_self_blocked &&
 	       !((shost->can_queue > 0) &&
@@ -447,15 +451,26 @@ void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 		 * starved queues, call __blk_run_queue.  scsi_request_fn
 		 * drops the queue_lock and can add us back to the
 		 * starved_list.
+		 *
+		 * host_lock protects the starved_list and starved_entry.
+		 * scsi_request_fn must get the host_lock before checking
+		 * or modifying starved_list or starved_entry.
 		 */
 		sdev2 = list_entry(shost->starved_list.next,
 				   struct scsi_device, starved_entry);
 		list_del_init(&sdev2->starved_entry);
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		spin_lock_irqsave(sdev2->request_queue->queue_lock, flags);
 		__blk_run_queue(sdev2->request_queue);
+		spin_unlock_irqrestore(sdev2->request_queue->queue_lock, flags);
+
+		spin_lock_irqsave(shost->host_lock, flags);
 	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
 
+	spin_lock_irqsave(q->queue_lock, flags);
 	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
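The starved_list walk just above uses hand-over-hand locking: `host_lock` protects the list, but `__blk_run_queue` must be called with the target device's `queue_lock` held, so the list lock is dropped before each run and retaken afterwards, which is why the loop re-checks the list head on every iteration. A compact sketch of the pattern, again with pthread mutexes and hypothetical `*_sketch` names standing in for the kernel types (illustrative only):

```c
/*
 * Illustrative sketch of the hand-over-hand locking in the
 * starved_list walk above; pthread mutexes stand in for spinlocks,
 * and the types/names here are hypothetical, not the kernel API.
 */
#include <pthread.h>
#include <stddef.h>

struct dev_sketch {
	pthread_mutex_t queue_lock;	/* per-device lock */
	struct dev_sketch *next;	/* next starved device, or NULL */
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev_sketch *starved_head;	/* protected by host_lock */

/* stand-in for __blk_run_queue(); caller holds dev->queue_lock */
static void run_queue(struct dev_sketch *dev) { (void)dev; }

static void run_starved(void)
{
	pthread_mutex_lock(&host_lock);
	while (starved_head != NULL) {
		struct dev_sketch *dev = starved_head;

		starved_head = dev->next;	/* list_del under host_lock */
		dev->next = NULL;

		/*
		 * Drop the list lock before taking the per-device lock:
		 * holding both here could deadlock against a path that
		 * takes queue_lock first and host_lock second, as
		 * scsi_request_fn now does.
		 */
		pthread_mutex_unlock(&host_lock);

		pthread_mutex_lock(&dev->queue_lock);
		run_queue(dev);
		pthread_mutex_unlock(&dev->queue_lock);

		pthread_mutex_lock(&host_lock);	/* re-check the list head */
	}
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	struct dev_sketch dev = { PTHREAD_MUTEX_INITIALIZER, NULL };

	starved_head = &dev;
	run_starved();
	return 0;
}
```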
@@ -489,8 +504,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	struct request *req = cmd->request;
 	unsigned long flags;
 
-	ASSERT_LOCK(q->queue_lock, 0);
-
 	/*
 	 * If there are blocks left over at the end, set up the command
 	 * to queue the remainder of them.
@@ -588,8 +601,6 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
 	struct request *req = cmd->request;
 
-	ASSERT_LOCK(cmd->device->host->host_lock, 0);
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 */
@@ -670,8 +681,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 	 * would be used if we just wanted to retry, for example.
 	 *
 	 */
-	ASSERT_LOCK(q->queue_lock, 0);
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the
@@ -1075,7 +1084,7 @@ static inline int scsi_check_sdev(struct request_queue *q,
 /*
  * scsi_check_shost: if we can send requests to shost, return 0 else return 1.
  *
- * Called with the queue_lock held.
+ * Called with queue_lock and host_lock held.
  */
 static inline int scsi_check_shost(struct request_queue *q,
 				   struct Scsi_Host *shost,
@@ -1132,8 +1141,7 @@ static void scsi_request_fn(request_queue_t *q)
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_cmnd *cmd;
 	struct request *req;
-
-	ASSERT_LOCK(q->queue_lock, 1);
+	unsigned long flags;
 
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
@@ -1141,7 +1149,7 @@ static void scsi_request_fn(request_queue_t *q)
 	 */
 	for (;;) {
 		if (blk_queue_plugged(q))
-			break;
+			goto completed;
 
 		/*
 		 * get next queueable request.  We do this early to make sure
@@ -1152,28 +1160,29 @@ static void scsi_request_fn(request_queue_t *q)
 		req = elv_next_request(q);
 
 		if (scsi_check_sdev(q, sdev))
-			break;
+			goto completed;
 
+		spin_lock_irqsave(shost->host_lock, flags);
 		if (scsi_check_shost(q, shost, sdev))
-			break;
+			goto after_host_lock;
 
 		if (sdev->single_lun && scsi_single_lun_check(sdev))
-			break;
+			goto after_host_lock;
 
 		/*
 		 * If we couldn't find a request that could be queued, then we
		 * can also quit.
 		 */
 		if (blk_queue_empty(q))
-			break;
+			goto after_host_lock;
 
 		if (!req) {
 			/* If the device is busy, a returning I/O
 			 * will restart the queue.  Otherwise, we have
 			 * to plug the queue */
-			if (sdev->device_busy == 0)
+			if (sdev->device_busy == 1)
 				blk_plug_device(q);
-			break;
+			goto after_host_lock;
 		}
 
 		cmd = req->special;
@@ -1195,11 +1204,9 @@ static void scsi_request_fn(request_queue_t *q)
 		if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
 			blkdev_dequeue_request(req);
 
-		/*
-		 * Now bump the usage count for both the host and the
-		 * device.
-		 */
 		shost->host_busy++;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
 		sdev->device_busy++;
 
 		spin_unlock_irq(q->queue_lock);
@@ -1220,6 +1227,11 @@ static void scsi_request_fn(request_queue_t *q)
 		 */
 		spin_lock_irq(q->queue_lock);
 	}
+
+completed:
+	return;
+after_host_lock:
+	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
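The `completed`/`after_host_lock` labels replace the bare `break`s so that each early exit from `scsi_request_fn` releases exactly the locks it holds: paths that have taken `host_lock` jump to `after_host_lock`, paths that have not jump to `completed`. A small sketch of this label-per-lock-state exit idiom (illustrative; the helpers are hypothetical placeholders and a pthread mutex stands in for `host_lock`):

```c
/*
 * Sketch of the label-per-lock-state exit idiom used by
 * scsi_request_fn above.  Illustrative only: hypothetical helper
 * functions, pthread mutex standing in for the host spinlock.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

static bool queue_plugged(void) { return true; }	/* placeholder check */
static bool host_blocked(void) { return true; }		/* placeholder check */

static void request_fn_sketch(void)
{
	for (;;) {
		if (queue_plugged())
			goto completed;		/* host_lock not taken yet */

		pthread_mutex_lock(&host_lock);
		if (host_blocked())
			goto after_host_lock;	/* must release host_lock */

		/* ... dispatch one request, then drop the lock ... */
		pthread_mutex_unlock(&host_lock);
	}

completed:
	return;
after_host_lock:
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	request_fn_sketch();
	return 0;
}
```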
@@ -1241,15 +1253,20 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 	return BLK_BOUNCE_HIGH;
 }
 
-request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
+request_queue_t *scsi_alloc_queue(struct scsi_device *sdev)
 {
 	request_queue_t *q;
+	struct Scsi_Host *shost;
 
 	q = kmalloc(sizeof(*q), GFP_ATOMIC);
 	if (!q)
 		return NULL;
 	memset(q, 0, sizeof(*q));
 
+	/*
+	 * XXX move host code to scsi_register
+	 */
+	shost = sdev->host;
 	if (!shost->max_sectors) {
 		/*
 		 * Driver imposes no hard sector transfer limit.
@@ -1258,7 +1275,7 @@ request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
 		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
 	}
 
-	blk_init_queue(q, scsi_request_fn, shost->host_lock);
+	blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock);
 	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
...
@@ -415,7 +415,8 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 	 */
 	sdev->borken = 1;
 
-	sdev->request_queue = scsi_alloc_queue(shost);
+	spin_lock_init(&sdev->sdev_lock);
+	sdev->request_queue = scsi_alloc_queue(sdev);
 	if (!sdev->request_queue)
 		goto out_free_dev;
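One subtlety worth keeping in mind when reading the hunks above: `blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock)` makes the queue point at the caller's lock rather than allocating one of its own, so `sdev->request_queue->queue_lock` and `&sdev->sdev_lock` name the same spinlock (which is why the struct comment says "also the request queue_lock"). A tiny sketch of that aliasing, with hypothetical `*_sketch` types rather than the kernel API:

```c
/*
 * Sketch of the queue_lock aliasing set up by
 * blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock): the queue
 * stores the caller's lock pointer rather than owning a lock.
 * Hypothetical *_sketch types, illustrative only.
 */
#include <assert.h>

typedef int spinlock_t;			/* stand-in for the kernel type */

struct queue_sketch {
	spinlock_t *queue_lock;		/* points at the lock passed in */
};

struct device_sketch {
	spinlock_t sdev_lock;
	struct queue_sketch queue;
};

/* stand-in for blk_init_queue(q, request_fn, lock) */
static void init_queue_sketch(struct queue_sketch *q, spinlock_t *lock)
{
	q->queue_lock = lock;
}

int main(void)
{
	struct device_sketch sdev = { 0, { 0 } };

	init_queue_sketch(&sdev.queue, &sdev.sdev_lock);
	/* Taking q->queue_lock *is* taking sdev->sdev_lock. */
	assert(sdev.queue.queue_lock == &sdev.sdev_lock);
	return 0;
}
```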