Commit 251a1524 authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Seven fixes, five in drivers.

  The two core changes are a trivial warning removal in scsi_scan.c and
  a change to rescan for capacity when a device makes a user induced
  (via a write to the state variable) offline->running transition to fix
  issues with device mapper"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: core: Fix capacity set to zero after offlinining device
  scsi: sr: Return correct event when media event code is 3
  scsi: ibmvfc: Fix command state accounting and stale response detection
  scsi: core: Avoid printing an error if target_alloc() returns -ENXIO
  scsi: scsi_dh_rdac: Avoid crash during rdac_bus_attach()
  scsi: megaraid_mm: Fix end of loop tests for list_for_each_entry()
  scsi: pm80xx: Fix TMF task completion race condition
parents 0c2e31d2 f0f82e24
...@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev, ...@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr) if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL; err = SCSI_DH_RES_TEMP_UNAVAIL;
else { else {
list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev; h->sdev = sdev;
list_add_rcu(&h->node, &h->ctlr->dh_list);
} }
spin_unlock(&list_lock); spin_unlock(&list_lock);
err = SCSI_DH_OK; err = SCSI_DH_OK;
...@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev ) ...@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock); spin_lock(&list_lock);
if (h->ctlr) { if (h->ctlr) {
list_del_rcu(&h->node); list_del_rcu(&h->node);
h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller); kref_put(&h->ctlr->kref, release_controller);
} }
spin_unlock(&list_lock); spin_unlock(&list_lock);
sdev->handler_data = NULL; sdev->handler_data = NULL;
synchronize_rcu();
kfree(h); kfree(h);
} }
......
...@@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost, ...@@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
struct ibmvfc_event *evt = &pool->events[i]; struct ibmvfc_event *evt = &pool->events[i];
/*
* evt->active states
* 1 = in flight
* 0 = being completed
* -1 = free/freed
*/
atomic_set(&evt->active, -1);
atomic_set(&evt->free, 1); atomic_set(&evt->free, 1);
evt->crq.valid = 0x80; evt->crq.valid = 0x80;
evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
...@@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt) ...@@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
BUG_ON(!ibmvfc_valid_event(pool, evt)); BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1); BUG_ON(atomic_inc_return(&evt->free) != 1);
BUG_ON(atomic_dec_and_test(&evt->active));
spin_lock_irqsave(&evt->queue->l_lock, flags); spin_lock_irqsave(&evt->queue->l_lock, flags);
list_add_tail(&evt->queue_list, &evt->queue->free); list_add_tail(&evt->queue_list, &evt->queue->free);
...@@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list) ...@@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list)
**/ **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{ {
/*
* Anything we are failing should still be active. Otherwise, it
* implies we already got a response for the command and are doing
* something bad like double completing it.
*/
BUG_ON(!atomic_dec_and_test(&evt->active));
if (evt->cmnd) { if (evt->cmnd) {
evt->cmnd->result = (error_code << 16); evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done; evt->done = ibmvfc_scsi_eh_done;
...@@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, ...@@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->done(evt); evt->done(evt);
} else { } else {
atomic_set(&evt->active, 1);
spin_unlock_irqrestore(&evt->queue->l_lock, flags); spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt); ibmvfc_trc_start(evt);
} }
...@@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, ...@@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
return; return;
} }
if (unlikely(atomic_read(&evt->free))) { if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba); crq->ioba);
return; return;
...@@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost ...@@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
return; return;
} }
if (unlikely(atomic_read(&evt->free))) { if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba); crq->ioba);
return; return;
......
...@@ -745,6 +745,7 @@ struct ibmvfc_event { ...@@ -745,6 +745,7 @@ struct ibmvfc_event {
struct ibmvfc_target *tgt; struct ibmvfc_target *tgt;
struct scsi_cmnd *cmnd; struct scsi_cmnd *cmnd;
atomic_t free; atomic_t free;
atomic_t active;
union ibmvfc_iu *xfer_iu; union ibmvfc_iu *xfer_iu;
void (*done)(struct ibmvfc_event *evt); void (*done)(struct ibmvfc_event *evt);
void (*_done)(struct ibmvfc_event *evt); void (*_done)(struct ibmvfc_event *evt);
......
...@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) ...@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd; mimd_t mimd;
uint32_t adapno; uint32_t adapno;
int iterator; int iterator;
bool is_found;
if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT; *rval = -EFAULT;
...@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) ...@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
adapter = NULL; adapter = NULL;
iterator = 0; iterator = 0;
is_found = false;
list_for_each_entry(adapter, &adapters_list_g, list) { list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break; if (iterator++ == adapno) {
is_found = true;
break;
}
} }
if (!adapter) { if (!is_found) {
*rval = -ENODEV; *rval = -ENODEV;
return NULL; return NULL;
} }
...@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc) ...@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno; uint32_t adapno;
int iterator; int iterator;
mraid_mmadp_t* adapter; mraid_mmadp_t* adapter;
bool is_found;
/* /*
* When the kioc returns from driver, make sure it still doesn't * When the kioc returns from driver, make sure it still doesn't
...@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc) ...@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0; iterator = 0;
adapter = NULL; adapter = NULL;
adapno = kioc->adapno; adapno = kioc->adapno;
is_found = false;
con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n")); "ioctl that was timedout before\n"));
list_for_each_entry(adapter, &adapters_list_g, list) { list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break; if (iterator++ == adapno) {
is_found = true;
break;
}
} }
kioc->timedout = 0; kioc->timedout = 0;
if (adapter) { if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc ); mraid_mm_dealloc_kioc( adapter, kioc );
}
} }
else { else {
wake_up(&wait_q); wake_up(&wait_q);
......
...@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev) ...@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
void pm8001_task_done(struct sas_task *task) void pm8001_task_done(struct sas_task *task)
{ {
if (!del_timer(&task->slow_task->timer)) del_timer(&task->slow_task->timer);
return;
complete(&task->slow_task->completion); complete(&task->slow_task->completion);
} }
...@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t) ...@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
{ {
struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task; struct sas_task *task = slow->task;
unsigned long flags;
task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_lock_irqsave(&task->task_state_lock, flags);
complete(&task->slow_task->completion); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
} }
#define PM8001_TASK_TIMEOUT 20 #define PM8001_TASK_TIMEOUT 20
...@@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, ...@@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
} }
res = -TMF_RESP_FUNC_FAILED; res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */ /* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
pm8001_dbg(pm8001_ha, FAIL, tmf->tmf);
"TMF task[%x]timeout.\n", goto ex_err;
tmf->tmf);
goto ex_err;
}
} }
if (task->task_status.resp == SAS_TASK_COMPLETE && if (task->task_status.resp == SAS_TASK_COMPLETE &&
...@@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, ...@@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
wait_for_completion(&task->slow_task->completion); wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED; res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */ /* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
pm8001_dbg(pm8001_ha, FAIL, goto ex_err;
"TMF task timeout.\n");
goto ex_err;
}
} }
if (task->task_status.resp == SAS_TASK_COMPLETE && if (task->task_status.resp == SAS_TASK_COMPLETE &&
......
...@@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, ...@@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget); error = shost->hostt->target_alloc(starget);
if(error) { if(error) {
dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); if (error != -ENXIO)
dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final /* don't want scsi_target_reap to do the final
* put because it will be under the host lock */ * put because it will be under the host lock */
scsi_target_destroy(starget); scsi_target_destroy(starget);
......
...@@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, ...@@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex); mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state); ret = scsi_device_set_state(sdev, state);
/* /*
* If the device state changes to SDEV_RUNNING, we need to run * If the device state changes to SDEV_RUNNING, we need to
* the queue to avoid I/O hang. * rescan the device to revalidate it, and run the queue to
* avoid I/O hang.
*/ */
if (ret == 0 && state == SDEV_RUNNING) if (ret == 0 && state == SDEV_RUNNING) {
scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true); blk_mq_run_hw_queues(sdev->request_queue, true);
}
mutex_unlock(&sdev->state_mutex); mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL; return ret == 0 ? count : -EINVAL;
......
...@@ -221,7 +221,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev) ...@@ -221,7 +221,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
else if (med->media_event_code == 2) else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE; return DISK_EVENT_MEDIA_CHANGE;
else if (med->media_event_code == 3) else if (med->media_event_code == 3)
return DISK_EVENT_EJECT_REQUEST; return DISK_EVENT_MEDIA_CHANGE;
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment