Commit a10bc12a authored by Douglas Gilbert, committed by Martin K. Petersen

scsi_debug: replace tasklet with work queue

When a negative value was placed in the delay parameter, a tasklet was
scheduled. Change the tasklet to a work queue. Previously a delay of -1
scheduled a high priority tasklet; since there are no high priority work
queues, treat -1 like other negative values in delay and schedule a work
item.
Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent c2206098
...@@ -610,15 +610,15 @@ static LIST_HEAD(sdebug_host_list); ...@@ -610,15 +610,15 @@ static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock); static DEFINE_SPINLOCK(sdebug_host_list_lock);
struct sdebug_hrtimer { /* ... is derived from hrtimer */ struct sdebug_defer {
struct hrtimer hrt; /* must be first element */ struct hrtimer hrt;
struct execute_work ew;
int qa_indx; int qa_indx;
}; };
struct sdebug_queued_cmd { struct sdebug_queued_cmd {
/* in_use flagged by a bit in queued_in_use_bm[] */ /* in_use flagged by a bit in queued_in_use_bm[] */
struct tasklet_struct *tletp; struct sdebug_defer *sd_dp;
struct sdebug_hrtimer *sd_hrtp;
struct scsi_cmnd * a_cmnd; struct scsi_cmnd * a_cmnd;
}; };
static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
...@@ -3349,8 +3349,9 @@ resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ...@@ -3349,8 +3349,9 @@ resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return resp_xdwriteread(scp, lba, num, devip); return resp_xdwriteread(scp, lba, num, devip);
} }
/* When tasklet goes off this function is called. */ /* Queued command completions converge here. */
static void sdebug_q_cmd_complete(unsigned long indx) static void
sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{ {
int qa_indx; int qa_indx;
int retiring = 0; int retiring = 0;
...@@ -3360,7 +3361,7 @@ static void sdebug_q_cmd_complete(unsigned long indx) ...@@ -3360,7 +3361,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
struct sdebug_dev_info *devip; struct sdebug_dev_info *devip;
atomic_inc(&sdebug_completions); atomic_inc(&sdebug_completions);
qa_indx = indx; qa_indx = sd_dp->qa_indx;
if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
pr_err("wild qa_indx=%d\n", qa_indx); pr_err("wild qa_indx=%d\n", qa_indx);
return; return;
...@@ -3411,64 +3412,21 @@ static void sdebug_q_cmd_complete(unsigned long indx) ...@@ -3411,64 +3412,21 @@ static void sdebug_q_cmd_complete(unsigned long indx)
static enum hrtimer_restart static enum hrtimer_restart
sdebug_q_cmd_hrt_complete(struct hrtimer *timer) sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{ {
int qa_indx; struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
int retiring = 0; hrt);
unsigned long iflags; sdebug_q_cmd_complete(sd_dp);
struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
struct sdebug_queued_cmd *sqcp;
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
atomic_inc(&sdebug_completions);
qa_indx = sd_hrtp->qa_indx;
if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
pr_err("wild qa_indx=%d\n", qa_indx);
goto the_end;
}
spin_lock_irqsave(&queued_arr_lock, iflags);
sqcp = &queued_arr[qa_indx];
scp = sqcp->a_cmnd;
if (NULL == scp) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
pr_err("scp is NULL\n");
goto the_end;
}
devip = (struct sdebug_dev_info *)scp->device->hostdata;
if (devip)
atomic_dec(&devip->num_in_q);
else
pr_err("devip=NULL\n");
if (atomic_read(&retired_max_queue) > 0)
retiring = 1;
sqcp->a_cmnd = NULL;
if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
pr_err("Unexpected completion\n");
goto the_end;
}
if (unlikely(retiring)) { /* user has reduced max_queue */
int k, retval;
retval = atomic_read(&retired_max_queue);
if (qa_indx >= retval) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
pr_err("index %d too large\n", retval);
goto the_end;
}
k = find_last_bit(queued_in_use_bm, retval);
if ((k < sdebug_max_queue) || (k == retval))
atomic_set(&retired_max_queue, 0);
else
atomic_set(&retired_max_queue, k + 1);
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
scp->scsi_done(scp); /* callback to mid level */
the_end:
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
/* When work queue schedules work, it calls this function. */
static void
sdebug_q_cmd_wq_complete(struct work_struct *work)
{
/* Recover the enclosing sdebug_defer from the embedded work item,
 * then converge on the same completion path used by the hrtimer
 * callback (sdebug_q_cmd_hrt_complete). */
struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
ew.work);
sdebug_q_cmd_complete(sd_dp);
}
static struct sdebug_dev_info * static struct sdebug_dev_info *
sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
{ {
...@@ -3567,13 +3525,15 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) ...@@ -3567,13 +3525,15 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
} }
} }
/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */ /* If @cmnd found deletes its timer or work queue and returns true; else
static int stop_queued_cmnd(struct scsi_cmnd *cmnd) returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{ {
unsigned long iflags; unsigned long iflags;
int k, qmax, r_qmax; int k, qmax, r_qmax;
struct sdebug_queued_cmd *sqcp; struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip; struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
spin_lock_irqsave(&queued_arr_lock, iflags); spin_lock_irqsave(&queued_arr_lock, iflags);
qmax = sdebug_max_queue; qmax = sdebug_max_queue;
...@@ -3583,64 +3543,63 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd) ...@@ -3583,64 +3543,63 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
for (k = 0; k < qmax; ++k) { for (k = 0; k < qmax; ++k) {
if (test_bit(k, queued_in_use_bm)) { if (test_bit(k, queued_in_use_bm)) {
sqcp = &queued_arr[k]; sqcp = &queued_arr[k];
if (cmnd == sqcp->a_cmnd) { if (cmnd != sqcp->a_cmnd)
devip = (struct sdebug_dev_info *) continue;
cmnd->device->hostdata; /* found command */
if (devip) devip = (struct sdebug_dev_info *)
atomic_dec(&devip->num_in_q); cmnd->device->hostdata;
sqcp->a_cmnd = NULL; if (devip)
spin_unlock_irqrestore(&queued_arr_lock, atomic_dec(&devip->num_in_q);
iflags); sqcp->a_cmnd = NULL;
if ((sdebug_jdelay > 0) || sd_dp = sqcp->sd_dp;
(sdebug_ndelay > 0)) { spin_unlock_irqrestore(&queued_arr_lock,
if (sqcp->sd_hrtp) iflags);
hrtimer_cancel( if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) {
&sqcp->sd_hrtp->hrt); if (sd_dp)
} else if (sdebug_jdelay < 0) { hrtimer_cancel(&sd_dp->hrt);
if (sqcp->tletp) } else if (sdebug_jdelay < 0) {
tasklet_kill(sqcp->tletp); if (sd_dp)
} cancel_work_sync(&sd_dp->ew.work);
clear_bit(k, queued_in_use_bm);
return 1;
} }
clear_bit(k, queued_in_use_bm);
return true;
} }
} }
spin_unlock_irqrestore(&queued_arr_lock, iflags); spin_unlock_irqrestore(&queued_arr_lock, iflags);
return 0; return false;
} }
/* Deletes (stops) timers or tasklets of all queued commands */ /* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void) static void stop_all_queued(void)
{ {
unsigned long iflags; unsigned long iflags;
int k; int k;
struct sdebug_queued_cmd *sqcp; struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip; struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
spin_lock_irqsave(&queued_arr_lock, iflags); spin_lock_irqsave(&queued_arr_lock, iflags);
for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
if (test_bit(k, queued_in_use_bm)) { if (test_bit(k, queued_in_use_bm)) {
sqcp = &queued_arr[k]; sqcp = &queued_arr[k];
if (sqcp->a_cmnd) { if (NULL == sqcp->a_cmnd)
devip = (struct sdebug_dev_info *) continue;
sqcp->a_cmnd->device->hostdata; devip = (struct sdebug_dev_info *)
if (devip) sqcp->a_cmnd->device->hostdata;
atomic_dec(&devip->num_in_q); if (devip)
sqcp->a_cmnd = NULL; atomic_dec(&devip->num_in_q);
spin_unlock_irqrestore(&queued_arr_lock, sqcp->a_cmnd = NULL;
iflags); sd_dp = sqcp->sd_dp;
if ((sdebug_jdelay > 0) || spin_unlock_irqrestore(&queued_arr_lock, iflags);
(sdebug_ndelay > 0)) { if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) {
if (sqcp->sd_hrtp) if (sd_dp)
hrtimer_cancel( hrtimer_cancel(&sd_dp->hrt);
&sqcp->sd_hrtp->hrt); } else if (sdebug_jdelay < 0) {
} else if (sdebug_jdelay < 0) { if (sd_dp)
if (sqcp->tletp) cancel_work_sync(&sd_dp->ew.work);
tasklet_kill(sqcp->tletp);
}
clear_bit(k, queued_in_use_bm);
spin_lock_irqsave(&queued_arr_lock, iflags);
} }
clear_bit(k, queued_in_use_bm);
spin_lock_irqsave(&queued_arr_lock, iflags);
} }
} }
spin_unlock_irqrestore(&queued_arr_lock, iflags); spin_unlock_irqrestore(&queued_arr_lock, iflags);
...@@ -3649,30 +3608,27 @@ static void stop_all_queued(void) ...@@ -3649,30 +3608,27 @@ static void stop_all_queued(void)
/* Free queued command memory on heap */ /* Free queued command memory on heap */
static void free_all_queued(void) static void free_all_queued(void)
{ {
unsigned long iflags;
int k; int k;
struct sdebug_queued_cmd *sqcp; struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
sqcp = &queued_arr[k]; sqcp = &queued_arr[k];
kfree(sqcp->tletp); kfree(sqcp->sd_dp);
sqcp->tletp = NULL; sqcp->sd_dp = NULL;
kfree(sqcp->sd_hrtp);
sqcp->sd_hrtp = NULL;
} }
spin_unlock_irqrestore(&queued_arr_lock, iflags);
} }
static int scsi_debug_abort(struct scsi_cmnd *SCpnt) static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{ {
bool ok;
++num_aborts; ++num_aborts;
if (SCpnt) { if (SCpnt) {
if (SCpnt->device && ok = stop_queued_cmnd(SCpnt);
(SDEBUG_OPT_ALL_NOISE & sdebug_opts)) if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", sdev_printk(KERN_INFO, SCpnt->device,
__func__); "%s: command%s found\n", __func__,
stop_queued_cmnd(SCpnt); ok ? "" : " not");
} }
return SUCCESS; return SUCCESS;
} }
...@@ -3846,6 +3802,7 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -3846,6 +3802,7 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int k, num_in_q, qdepth, inject; int k, num_in_q, qdepth, inject;
struct sdebug_queued_cmd *sqcp = NULL; struct sdebug_queued_cmd *sqcp = NULL;
struct scsi_device *sdp; struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
/* this should never happen */ /* this should never happen */
if (WARN_ON(!cmnd)) if (WARN_ON(!cmnd))
...@@ -3912,8 +3869,8 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -3912,8 +3869,8 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sqcp->a_cmnd = cmnd; sqcp->a_cmnd = cmnd;
cmnd->result = scsi_result; cmnd->result = scsi_result;
spin_unlock_irqrestore(&queued_arr_lock, iflags); spin_unlock_irqrestore(&queued_arr_lock, iflags);
sd_dp = sqcp->sd_dp;
if ((delta_jiff > 0) || (sdebug_ndelay > 0)) { if ((delta_jiff > 0) || (sdebug_ndelay > 0)) {
struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
ktime_t kt; ktime_t kt;
if (delta_jiff > 0) { if (delta_jiff > 0) {
...@@ -3923,30 +3880,27 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -3923,30 +3880,27 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt = ktime_set(ts.tv_sec, ts.tv_nsec); kt = ktime_set(ts.tv_sec, ts.tv_nsec);
} else } else
kt = ktime_set(0, sdebug_ndelay); kt = ktime_set(0, sdebug_ndelay);
if (NULL == sd_hp) { if (NULL == sd_dp) {
sd_hp = kzalloc(sizeof(*sd_hp), GFP_ATOMIC); sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (NULL == sd_hp) if (NULL == sd_dp)
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
sqcp->sd_hrtp = sd_hp; sqcp->sd_dp = sd_dp;
hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC, hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL); HRTIMER_MODE_REL);
sd_hp->hrt.function = sdebug_q_cmd_hrt_complete; sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
sd_hp->qa_indx = k; sd_dp->qa_indx = k;
} }
hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL); hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL);
} else { /* jdelay < 0 */ } else { /* jdelay < 0 */
if (NULL == sqcp->tletp) { if (NULL == sd_dp) {
sqcp->tletp = kzalloc(sizeof(*sqcp->tletp), sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
GFP_ATOMIC); if (NULL == sd_dp)
if (NULL == sqcp->tletp)
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
tasklet_init(sqcp->tletp, sqcp->sd_dp = sd_dp;
sdebug_q_cmd_complete, k); sd_dp->qa_indx = k;
INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
} }
if (-1 == delta_jiff) schedule_work(&sd_dp->ew.work);
tasklet_hi_schedule(sqcp->tletp);
else
tasklet_schedule(sqcp->tletp);
} }
if ((SDEBUG_OPT_Q_NOISE & sdebug_opts) && if ((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
(scsi_result == device_qfull_result)) (scsi_result == device_qfull_result))
...@@ -4149,6 +4103,9 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf, ...@@ -4149,6 +4103,9 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
if (k != sdebug_max_queue) if (k != sdebug_max_queue)
res = -EBUSY; /* have queued commands */ res = -EBUSY; /* have queued commands */
else { else {
/* make sure sdebug_defer instances get
* re-allocated for new delay variant */
free_all_queued();
sdebug_jdelay = jdelay; sdebug_jdelay = jdelay;
sdebug_ndelay = 0; sdebug_ndelay = 0;
} }
...@@ -4181,6 +4138,9 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, ...@@ -4181,6 +4138,9 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
if (k != sdebug_max_queue) if (k != sdebug_max_queue)
res = -EBUSY; /* have queued commands */ res = -EBUSY; /* have queued commands */
else { else {
/* make sure sdebug_defer instances get
* re-allocated for new delay variant */
free_all_queued();
sdebug_ndelay = ndelay; sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY; : DEF_JDELAY;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment