Commit e140f731 authored by Linus Torvalds

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "Mostly small bug fixes and trivial updates.

  The major new core update is a change to the way device, target and
  host reference counting is done to try to make it more robust (this
  change has soaked for a while to try to winkle out any bugs)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: pm8001: Fix typo 'the the' in comment
  scsi: megaraid_sas: Remove redundant variable cmd_type
  scsi: FlashPoint: Remove redundant variable bm_int_st
  scsi: zfcp: Fix missing auto port scan and thus missing target ports
  scsi: core: Call blk_mq_free_tag_set() earlier
  scsi: core: Simplify LLD module reference counting
  scsi: core: Make sure that hosts outlive targets
  scsi: core: Make sure that targets outlive devices
  scsi: ufs: ufs-pci: Correct check for RESET DSM
  scsi: target: core: De-RCU of se_lun and se_lun acl
  scsi: target: core: Fix race during ACL removal
  scsi: ufs: core: Correct ufshcd_shutdown() flow
  scsi: ufs: core: Increase the maximum data buffer size
  scsi: lpfc: Check the return value of alloc_workqueue()
parents abe7a481 c6380f99
@@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
int ret = -EIO;
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port))
if (zfcp_fsf_open_wka_port(wka_port)) {
/* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
goto out;
}
}
mutex_unlock(&wka_port->mutex);
wait_event(wka_port->completion_wq,
wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
return 0;
ret = 0;
goto out;
}
return -EIO;
out:
mutex_unlock(&wka_port->mutex);
return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work)
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
/* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
goto out;
}
wait_event(wka_port->closed,
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
@@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
schedule_delayed_work(&wka_port->work, HZ / 100);
queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
init_waitqueue_head(&wka_port->completion_wq);
init_waitqueue_head(&wka_port->opened);
init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
......
@@ -185,7 +185,8 @@ enum zfcp_fc_wka_status {
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
* @completion_wq: Wait for completion of open/close command
* @opened: Wait for completion of open command
* @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@ enum zfcp_fc_wka_status {
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
wait_queue_head_t completion_wq;
wait_queue_head_t opened;
wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
......
@@ -1907,7 +1907,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
wake_up(&wka_port->completion_wq);
wake_up(&wka_port->opened);
}
/**
@@ -1966,7 +1966,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
wake_up(&wka_port->closed);
}
/**
......
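The three zfcp hunks above belong together: a failed zfcp_fsf_open_wka_port() now rolls the port status back to OFFLINE so a later retry can reopen the well-known-address port, and the shared completion_wq is split into dedicated opened/closed wait queues so that an open waiter can only be woken by an open completion. A minimal sketch of the resulting open path, with hypothetical simplified names (send_open_request() stands in for the FSF request and is only declared here):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct wka_like_port {
	struct mutex mutex;
	wait_queue_head_t opened;	/* woken by the open handler only */
	wait_queue_head_t closed;	/* woken by the close handler only */
	enum { PORT_OFFLINE, PORT_OPENING, PORT_ONLINE, PORT_CLOSING } status;
};

static int send_open_request(struct wka_like_port *p);	/* hypothetical */

static int wka_like_port_get(struct wka_like_port *p)
{
	int ret = -EIO;

	if (mutex_lock_interruptible(&p->mutex))
		return -ERESTARTSYS;
	if (p->status == PORT_OFFLINE || p->status == PORT_CLOSING) {
		p->status = PORT_OPENING;
		if (send_open_request(p)) {
			/* nothing in flight, so undo the state change */
			p->status = PORT_OFFLINE;
			goto out;
		}
	}
	/* a close completion wakes &p->closed, so it cannot wake us here */
	wait_event(p->opened, p->status == PORT_ONLINE ||
			      p->status == PORT_OFFLINE);
	if (p->status == PORT_ONLINE)
		ret = 0;
out:
	mutex_unlock(&p->mutex);
	return ret;
}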
@@ -1712,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
unsigned char thisCard, result, bm_status, bm_int_st;
unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1723,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
......
@@ -190,6 +190,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
/*
* After scsi_remove_host() has returned the scsi LLD module can be
* unloaded and/or the host resources can be released. Hence wait until
* the dependent SCSI targets and devices are gone before returning.
*/
wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -300,8 +309,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
* Any host allocation in this function will be freed in
* scsi_host_dev_release().
* Any resources associated with the SCSI host in this function except
* the tag set will be freed by scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -317,6 +326,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -350,9 +360,6 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
if (shost->tag_set.tags)
scsi_mq_destroy_tags(shost);
kfree(shost->shost_data);
ida_free(&host_index_ida, shost->host_no);
@@ -399,6 +406,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
init_waitqueue_head(&shost->targets_wq);
index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
......
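The hosts.c hunks pair a per-host target counter with the new targets_wq wait queue so that scsi_remove_host() returns only once every dependent target (and thus every device) is gone; the tag set is then freed there and in the scsi_add_host_with_dma() error path instead of in scsi_host_dev_release(). The underlying drain idiom, sketched generically with hypothetical names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct parent {
	atomic_t child_count;
	wait_queue_head_t children_wq;
};

static void free_shared_resources(struct parent *p);	/* hypothetical */

/* Release side: drop the count and wake a waiter that may be draining. */
static void child_release(struct parent *p)
{
	if (atomic_dec_return(&p->child_count) == 0)
		wake_up(&p->children_wq);
}

/* Teardown side: block until the last child is gone before freeing
 * anything the children might still have been using. */
static void parent_remove(struct parent *p)
{
	wait_event(p->children_wq, atomic_read(&p->child_count) == 0);
	free_shared_resources(p);	/* e.g. the blk-mq tag set */
}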
@@ -7948,6 +7948,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
if (!phba->wq)
return -ENOMEM;
/*
* Initialize timers used by driver
......
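alloc_workqueue() returns NULL on allocation failure, and lpfc previously went on to use phba->wq unchecked; the hunk above propagates the failure instead. A hedged sketch of the pattern (the lpfc_like_hba type is hypothetical):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct lpfc_like_hba {
	struct workqueue_struct *wq;	/* deferred-IRQ workqueue */
};

static int setup_deferred_irq_wq(struct lpfc_like_hba *phba)
{
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
	if (!phba->wq)
		return -ENOMEM;	/* unwind in the caller, don't oops later */
	return 0;
}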
@@ -3199,7 +3199,6 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
u8 cmd_type;
u16 pd_index = 0;
u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
@@ -3225,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
switch (cmd_type = megasas_cmd_type(scp)) {
switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
......
@@ -3138,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
* the sas toplogy has formed, please discover the the whole sas domain,
* the sas toplogy has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
......
@@ -586,10 +586,13 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
struct module *mod = sdev->host->hostt->module;
/*
* Decreasing the module reference count before the device reference
* count is safe since scsi_remove_host() only returns after all
* devices have been removed.
*/
module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
......
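The scsi.c hunk inverts the old ordering: module_put() may now run before put_device() because, with the "hosts outlive targets" and "targets outlive devices" changes above, scsi_remove_host() does not return while any scsi_device still holds a reference, so a live device implies a still-loaded LLD module. Seen from the LLD side, with a hypothetical driver type:

#include <scsi/scsi_host.h>

struct example_lld {
	struct Scsi_Host *shost;
};

static void example_lld_remove(struct example_lld *lld)
{
	/*
	 * Returns only after every target and device of this host has
	 * been released, so no scsi_device_put() caller can still be
	 * touching this module once unload proceeds.
	 */
	scsi_remove_host(lld->shost);
	scsi_host_put(lld->shost);
}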
@@ -406,9 +406,14 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
if (atomic_dec_return(&shost->target_count) == 0)
wake_up(&shost->targets_wq);
put_device(parent);
}
@@ -521,6 +526,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
init_waitqueue_head(&starget->sdev_wq);
atomic_inc(&shost->target_count);
retry:
spin_lock_irqsave(shost->host_lock, flags);
......
@@ -443,18 +443,15 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
struct scsi_device *sdev;
struct scsi_device *sdev = container_of(work, struct scsi_device,
ew.work);
struct scsi_target *starget = sdev->sdev_target;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
struct module *mod;
sdev = container_of(work, struct scsi_device, ew.work);
mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -516,19 +513,16 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
if (starget && atomic_dec_return(&starget->sdev_count) == 0)
wake_up(&starget->sdev_wq);
if (parent)
put_device(parent);
module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
/* Set module pointer as NULL in case of module unloading */
if (!try_module_get(sdp->host->hostt->module))
sdp->host->hostt->module = NULL;
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -1535,6 +1529,14 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
/*
* After scsi_remove_target() returns its caller can remove resources
* associated with @starget, e.g. an rport or session. Wait until all
* devices associated with @starget have been removed to prevent that
* a SCSI error handling callback function triggers a use-after-free.
*/
wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
/**
@@ -1645,6 +1647,9 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
atomic_inc(&starget->sdev_count);
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
......
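Two things happen in the scan/sysfs hunks above: scsi_device_dev_release_usercontext() now recovers its scsi_device with container_of() directly at the declaration, and a per-target device counter (sdev_count/sdev_wq) mirrors the host-level scheme so __scsi_remove_target() can wait for the last device to go away. The container_of() idiom in isolation, with a hypothetical structure:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct widget {
	struct execute_work ew;	/* embeds a struct work_struct */
	char *name;
};

static void widget_release_work(struct work_struct *work)
{
	/* map the embedded work item back to its containing object */
	struct widget *w = container_of(work, struct widget, ew.work);

	kfree(w->name);
	kfree(w);
}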
@@ -934,8 +934,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
lacl = rcu_dereference_check(se_deve->se_lun_acl,
lockdep_is_held(&lun->lun_deve_lock));
lacl = se_deve->se_lun_acl;
/*
* spc4r37 p.242:
......
@@ -75,7 +75,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
return TCM_WRITE_PROTECTED;
}
se_lun = rcu_dereference(deve->se_lun);
se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -152,7 +152,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
se_lun = rcu_dereference(deve->se_lun);
se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -216,7 +216,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
lun = rcu_dereference(deve->se_lun);
lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
@@ -243,11 +243,8 @@ void core_free_device_list_for_node(
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
struct se_lun *lun = rcu_dereference_check(deve->se_lun,
lockdep_is_held(&nacl->lun_entry_mutex));
core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
}
@@ -334,8 +331,7 @@ int core_enable_device_list_for_node(
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
lockdep_is_held(&nacl->lun_entry_mutex));
struct se_lun *orig_lun = orig->se_lun;
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
@@ -355,8 +351,8 @@ int core_enable_device_list_for_node(
return -EINVAL;
}
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
new->se_lun = lun;
new->se_lun_acl = lun_acl;
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -374,8 +370,8 @@ int core_enable_device_list_for_node(
return 0;
}
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
new->se_lun = lun;
new->se_lun_acl = lun_acl;
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -434,9 +430,6 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
rcu_assign_pointer(orig->se_lun, NULL);
rcu_assign_pointer(orig->se_lun_acl, NULL);
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
@@ -457,10 +450,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
lockdep_is_held(&nacl->lun_entry_mutex));
if (lun != tmp_lun)
if (lun != deve->se_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
......
@@ -739,8 +739,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (!deve_tmp->se_lun_acl)
continue;
lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
lockdep_is_held(&lun_tmp->lun_deve_lock));
lacl_tmp = deve_tmp->se_lun_acl;
nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
@@ -784,8 +783,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
dest_lun = rcu_dereference_check(deve_tmp->se_lun,
kref_read(&deve_tmp->pr_kref) != 0);
dest_lun = deve_tmp->se_lun;
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, dest_lun, deve_tmp,
@@ -1437,34 +1435,26 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
struct se_lun_acl *lun_acl;
/*
* For nacl->dynamic_node_acl=1
*/
lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
kref_read(&se_deve->pr_kref) != 0);
if (!lun_acl)
if (!se_deve->se_lun_acl)
return 0;
return target_depend_item(&lun_acl->se_lun_group.cg_item);
return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
struct se_lun_acl *lun_acl;
/*
* For nacl->dynamic_node_acl=1
*/
lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
kref_read(&se_deve->pr_kref) != 0);
if (!lun_acl) {
if (!se_deve->se_lun_acl) {
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
target_undepend_item(&lun_acl->se_lun_group.cg_item);
target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
@@ -1751,8 +1741,7 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
kref_read(&dest_se_deve->pr_kref) != 0);
dest_lun = dest_se_deve->se_lun;
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_lun, dest_se_deve,
@@ -3446,8 +3435,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
kref_read(&dest_se_deve->pr_kref) != 0);
struct se_lun *dest_lun = dest_se_deve->se_lun;
spin_unlock(&dev->dev_reservation_lock);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
......
@@ -877,7 +877,6 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -886,9 +885,9 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
lun = rcu_dereference(deve->se_lun);
/* scsiDeviceIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
@@ -1217,7 +1216,6 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -1226,9 +1224,9 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
lun = rcu_dereference(deve->se_lun);
/* scsiDeviceIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
......
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
struct se_device *this_dev;
int rc;
this_lun = rcu_dereference(deve->se_lun);
this_lun = deve->se_lun;
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
......
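All of the target-core hunks above perform the same de-RCU conversion: se_dev_entry->se_lun and ->se_lun_acl lose their __rcu annotation (see the struct se_dev_entry hunk near the end), are assigned plainly rather than through rcu_assign_pointer(), and readers holding nacl->lun_entry_mutex, lun->lun_deve_lock, or a pr_kref reference load them directly instead of via rcu_dereference_check(). A generic before/after sketch with hypothetical names:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct lun;

struct owner {
	struct mutex entry_mutex;	/* serializes all pointer updates */
};

struct entry {
	struct lun __rcu *lun_rcu;	/* old scheme: RCU-published */
	struct lun *lun;		/* new scheme: mutex-protected */
};

/* Before: dereference inside an RCU read-side critical section. */
static struct lun *entry_lun_rcu(struct entry *e)
{
	struct lun *l;

	rcu_read_lock();
	l = rcu_dereference(e->lun_rcu);
	rcu_read_unlock();
	return l;	/* may already be stale here */
}

/* After: the pointer only changes under the owner's mutex and stays
 * valid while the entry is reachable, so a plain load suffices. */
static struct lun *entry_lun(struct owner *o, struct entry *e)
{
	lockdep_assert_held(&o->entry_mutex);
	return e->lun;
}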
@@ -8326,6 +8326,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
@@ -9508,12 +9509,8 @@ EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_shutdown(struct ufs_hba *hba)
{
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
pm_runtime_get_sync(hba->dev);
ufshcd_suspend(hba);
ufshcd_suspend(hba);
out:
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
......
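The new .max_sectors entry is counted in 512-byte block-layer sectors, so (1 << 20) / SECTOR_SIZE = 2048 sectors raises the per-request cap from the SCSI default of 1024 sectors (512 KiB) to 1 MiB. A userspace arithmetic check of that limit (assuming SECTOR_SIZE is 512, as in the block layer):

#include <assert.h>

#define SECTOR_SIZE 512	/* block-layer sector size in bytes */

int main(void)
{
	unsigned int max_sectors = (1 << 20) / SECTOR_SIZE;

	assert(max_sectors == 2048);			/* sectors per request */
	assert(max_sectors * SECTOR_SIZE == 1 << 20);	/* exactly 1 MiB */
	return 0;
}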
@@ -24,7 +24,7 @@ struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
enum {
enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
@@ -42,6 +42,15 @@ static const guid_t intel_dsm_guid =
GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
static bool __intel_dsm_supported(struct intel_host *host,
enum intel_ufs_dsm_func_id fn)
{
return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
}
#define INTEL_DSM_SUPPORTED(host, name) \
__intel_dsm_supported(host, INTEL_DSM_##name)
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
@@ -71,7 +80,7 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
if (!__intel_dsm_supported(intel_host, fn))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
@@ -300,7 +309,7 @@ static int ufs_intel_device_reset(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
if (host->dsm_fns & INTEL_DSM_RESET) {
if (INTEL_DSM_SUPPORTED(host, RESET)) {
u32 result = 0;
int err;
@@ -342,7 +351,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
if (host->dsm_fns & INTEL_DSM_RESET) {
if (INTEL_DSM_SUPPORTED(host, RESET)) {
if (hba->vops->device_reset)
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
} else {
......
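The ufs-pci hunks fix a bitmask bug visible in the removed lines above: host->dsm_fns is a bitmap in which bit n set means DSM function n is implemented, yet the RESET checks tested dsm_fns & INTEL_DSM_RESET, i.e. bit 0 (the FNS query, since INTEL_DSM_RESET == 1), rather than bit 1. The new __intel_dsm_supported() helper performs the shift in one place; stripped to its essence:

#include <linux/types.h>

/* Simplified form of the corrected test: select bit 'fn' of the mask,
 * never use the raw function id itself as a mask. */
static bool dsm_fn_supported(u32 dsm_fns, unsigned int fn)
{
	return fn < 32 && (dsm_fns & (1u << fn));
}

With this shape, a RESET check asks for bit 1 (function 1) instead of accidentally testing bit 0.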
@@ -309,6 +309,8 @@ struct scsi_target {
struct list_head devices;
struct device dev;
struct kref reap_ref; /* last put renders target invisible */
atomic_t sdev_count;
wait_queue_head_t sdev_wq;
unsigned int channel;
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
......
@@ -690,6 +690,9 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
atomic_t target_count;
wait_queue_head_t targets_wq;
/*
* Points to the transport data (if any) which is allocated
* separately
......
@@ -665,9 +665,9 @@ struct se_dev_entry {
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
struct se_lun_acl __rcu *se_lun_acl;
struct se_lun_acl *se_lun_acl;
spinlock_t ua_lock;
struct se_lun __rcu *se_lun;
struct se_lun *se_lun;
#define DEF_PR_REG_ACTIVE 1
unsigned long deve_flags;
struct list_head alua_port_list;
......