Commit 702256e6 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "The bulk of the series are bugfixes for qla2xxx target NPIV support
  that went in for v3.14-rc1.  Also included are a few DIF related
  fixes, a qla2xxx fix (Cc'ed to stable) from Greg W., and vhost/scsi
  protocol version related fix from Venkatesh.

  Also just a heads up that a series to address a number of issues with
  iser-target active I/O reset/shutdown is still being tested, and will
  be included in a separate -rc6 PULL request"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  vhost/scsi: Check LUN structure byte 0 is set to 1, per spec
  qla2xxx: Fix kernel panic on selective retransmission request
  Target/sbc: Don't use sg as iterator in sbc_verify_read
  target: Add DIF sense codes in transport_generic_request_failure
  target/sbc: Fix sbc_dif_copy_prot addr offset bug
  tcm_qla2xxx: Fix NAA formatted name for NPIV WWPNs
  tcm_qla2xxx: Perform configfs depend/undepend for base_tpg
  tcm_qla2xxx: Add NPIV specific enable/disable attribute logic
  qla2xxx: Check + fail when npiv_vports_inuse exists in shutdown
  qla2xxx: Fix qlt_lport_register base_vha callback race
parents 3750c140 7fe412d0
...@@ -790,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt) ...@@ -790,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
} }
/* Called by tcm_qla2xxx configfs code */ /* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt) int qlt_stop_phase1(struct qla_tgt *tgt)
{ {
struct scsi_qla_host *vha = tgt->vha; struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = tgt->ha; struct qla_hw_data *ha = tgt->ha;
unsigned long flags; unsigned long flags;
mutex_lock(&qla_tgt_mutex);
if (!vha->fc_vport) {
struct Scsi_Host *sh = vha->host;
struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
bool npiv_vports;
spin_lock_irqsave(sh->host_lock, flags);
npiv_vports = (fc_host->npiv_vports_inuse);
spin_unlock_irqrestore(sh->host_lock, flags);
if (npiv_vports) {
mutex_unlock(&qla_tgt_mutex);
return -EPERM;
}
}
if (tgt->tgt_stop || tgt->tgt_stopped) { if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n"); "Already in tgt->tgt_stop or tgt_stopped state\n");
dump_stack(); mutex_unlock(&qla_tgt_mutex);
return; return -EPERM;
} }
ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
...@@ -815,6 +830,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt) ...@@ -815,6 +830,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
qlt_clear_tgt_db(tgt, true); qlt_clear_tgt_db(tgt, true);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&vha->vha_tgt.tgt_mutex);
mutex_unlock(&qla_tgt_mutex);
flush_delayed_work(&tgt->sess_del_work); flush_delayed_work(&tgt->sess_del_work);
...@@ -841,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt) ...@@ -841,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */ /* Wait for sessions to clear out (just in case) */
wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
return 0;
} }
EXPORT_SYMBOL(qlt_stop_phase1); EXPORT_SYMBOL(qlt_stop_phase1);
...@@ -3185,7 +3202,8 @@ static void qlt_handle_srr_work(struct work_struct *work) ...@@ -3185,7 +3202,8 @@ static void qlt_handle_srr_work(struct work_struct *work)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
"SRR cmd %p (se_cmd %p, tag %d, op %x), " "SRR cmd %p (se_cmd %p, tag %d, op %x), "
"sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
cmd->sg_cnt, cmd->offset);
qlt_handle_srr(vha, sctio, imm); qlt_handle_srr(vha, sctio, imm);
...@@ -4181,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) ...@@ -4181,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
if (base_vha->fc_vport)
return 0;
mutex_lock(&qla_tgt_mutex); mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex); mutex_unlock(&qla_tgt_mutex);
...@@ -4194,6 +4215,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) ...@@ -4194,6 +4215,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
if (!vha->vha_tgt.qla_tgt) if (!vha->vha_tgt.qla_tgt)
return 0; return 0;
if (vha->fc_vport) {
qlt_release(vha->vha_tgt.qla_tgt);
return 0;
}
mutex_lock(&qla_tgt_mutex); mutex_lock(&qla_tgt_mutex);
list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex); mutex_unlock(&qla_tgt_mutex);
...@@ -4265,6 +4290,12 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, ...@@ -4265,6 +4290,12 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
continue; continue;
} }
if (tgt->tgt_stop) {
pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
host->host_no);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
continue;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!scsi_host_get(host)) { if (!scsi_host_get(host)) {
...@@ -4279,12 +4310,11 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, ...@@ -4279,12 +4310,11 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
scsi_host_put(host); scsi_host_put(host);
continue; continue;
} }
mutex_unlock(&qla_tgt_mutex);
rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
if (rc != 0) if (rc != 0)
scsi_host_put(host); scsi_host_put(host);
mutex_unlock(&qla_tgt_mutex);
return rc; return rc;
} }
mutex_unlock(&qla_tgt_mutex); mutex_unlock(&qla_tgt_mutex);
......
...@@ -1001,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *, ...@@ -1001,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *); extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
extern int qlt_mem_alloc(struct qla_hw_data *); extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *); extern void qlt_mem_free(struct qla_hw_data *);
extern void qlt_stop_phase1(struct qla_tgt *); extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *); extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *); extern void qlt_83xx_iospace_config(struct qla_hw_data *);
......
...@@ -182,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn( ...@@ -182,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
return 0; return 0;
} }
/*
 * Format an NPIV "WWPN,WWNN" pair as a colon-separated ASCII string,
 * e.g. "50:06:01:60:..:..:..:..,50:06:01:60:..:..:..:..".
 *
 * Returns snprintf()'s value (the length that would have been written);
 * buf is NUL-terminated whenever len > 0.
 */
static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
	u64 wwpn, u64 wwnn)
{
	/* Big-endian byte images of the two names, in wire order. */
	u8 pn[8], nn[8];

	put_unaligned_be64(wwpn, pn);
	put_unaligned_be64(wwnn, nn);

	return snprintf(buf, len,
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		pn[0], pn[1], pn[2], pn[3], pn[4], pn[5], pn[6], pn[7],
		nn[0], nn[1], nn[2], nn[3], nn[4], nn[5], nn[6], nn[7]);
}
static char *tcm_qla2xxx_npiv_get_fabric_name(void) static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{ {
return "qla2xxx_npiv"; return "qla2xxx_npiv";
...@@ -227,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) ...@@ -227,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
return lport->lport_naa_name; return lport->lport_naa_name;
} }
/*
 * Fabric WWN callback for an NPIV TPG: return the lport's pre-formatted
 * ASCII "WWPN,WWNN" name buffer.
 */
static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg;

	tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
	return tpg->lport->lport_npiv_name;
}
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{ {
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
...@@ -941,15 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable( ...@@ -941,15 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(
atomic_read(&tpg->lport_tpg_enabled)); atomic_read(&tpg->lport_tpg_enabled));
} }
/*
 * Workqueue handler: take a configfs dependency on the base TPG item and,
 * on success, enable target mode on the physical (base) vha.
 *
 * This runs from process context via schedule_work() rather than directly
 * in the attribute ->store path — presumably to keep configfs_depend_item()
 * out of the configfs store call chain; TODO confirm against configfs
 * locking rules.  Success is reported back to the waiter solely through
 * lport_tpg_enabled; tpg_base_comp is always completed so the ->store
 * caller blocked in wait_for_completion() is released either way.
 */
static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
{
struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
struct tcm_qla2xxx_tpg, tpg_base_work);
struct se_portal_group *se_tpg = &base_tpg->se_tpg;
struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
/* configfs_depend_item() returns 0 on success; pin the TPG item first. */
if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
&se_tpg->tpg_group.cg_item)) {
atomic_set(&base_tpg->lport_tpg_enabled, 1);
qlt_enable_vha(base_vha);
}
/* Wake the ->store caller regardless of whether the depend succeeded. */
complete(&base_tpg->tpg_base_comp);
}
/*
 * Workqueue handler: stop target mode on the base vha and drop the
 * configfs dependency previously taken when the TPG was enabled.
 *
 * lport_tpg_enabled is cleared and the item undepended only when
 * qlt_stop_phase1() returns 0; a non-zero return (e.g. -EPERM while NPIV
 * vports are still in use, per qlt_stop_phase1()) leaves the TPG enabled.
 * tpg_base_comp is always completed to release the waiting ->store caller,
 * which then reports failure if lport_tpg_enabled is still set.
 */
static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
{
struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
struct tcm_qla2xxx_tpg, tpg_base_work);
struct se_portal_group *se_tpg = &base_tpg->se_tpg;
struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
atomic_set(&base_tpg->lport_tpg_enabled, 0);
configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
&se_tpg->tpg_group.cg_item);
}
/* Always wake the ->store caller, success or not. */
complete(&base_tpg->tpg_base_comp);
}
static ssize_t tcm_qla2xxx_tpg_store_enable( static ssize_t tcm_qla2xxx_tpg_store_enable(
struct se_portal_group *se_tpg, struct se_portal_group *se_tpg,
const char *page, const char *page,
size_t count) size_t count)
{ {
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg); struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op; unsigned long op;
...@@ -964,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable( ...@@ -964,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
pr_err("Illegal value for tpg_enable: %lu\n", op); pr_err("Illegal value for tpg_enable: %lu\n", op);
return -EINVAL; return -EINVAL;
} }
if (op) { if (op) {
atomic_set(&tpg->lport_tpg_enabled, 1); if (atomic_read(&tpg->lport_tpg_enabled))
qlt_enable_vha(vha); return -EEXIST;
INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
} else { } else {
if (!vha->vha_tgt.qla_tgt) { if (!atomic_read(&tpg->lport_tpg_enabled))
pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n"); return count;
return -ENODEV;
} INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
} }
init_completion(&tpg->tpg_base_comp);
schedule_work(&tpg->tpg_base_work);
wait_for_completion(&tpg->tpg_base_comp);
if (op) {
if (!atomic_read(&tpg->lport_tpg_enabled))
return -ENODEV;
} else {
if (atomic_read(&tpg->lport_tpg_enabled))
return -EPERM;
}
return count; return count;
} }
...@@ -1053,11 +1065,64 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) ...@@ -1053,11 +1065,64 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
/* /*
* Clear local TPG=1 pointer for non NPIV mode. * Clear local TPG=1 pointer for non NPIV mode.
*/ */
lport->tpg_1 = NULL; lport->tpg_1 = NULL;
kfree(tpg); kfree(tpg);
} }
/*
 * "enable" attribute show for an NPIV TPG: identical to the base TPG's
 * show (prints lport_tpg_enabled), so simply forward to it.
 */
static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
struct se_portal_group *se_tpg,
char *page)
{
return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
}
/*
 * "enable" attribute store for an NPIV TPG.  Accepts "0" or "1":
 * writing 1 brings the NPIV vha into target mode via qlt_enable_vha();
 * writing 0 tears it down via qlt_stop_phase1().  Enabling an already
 * enabled TPG fails with -EEXIST; disabling an already disabled TPG is
 * a no-op that still consumes the write.
 *
 * Unlike the base-TPG store, no configfs depend/undepend work item is
 * needed here, so the transition happens inline.
 */
static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = container_of(se_tpg->se_tpg_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	/* op is unsigned, so "> 1" rejects everything but 0 and 1. */
	if (op > 1) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}

	if (!op) {
		/* Disable: nothing to do if not currently enabled. */
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;
		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
		return count;
	}

	/* Enable: reject a second enable of an already-active TPG. */
	if (atomic_read(&tpg->lport_tpg_enabled))
		return -EEXIST;
	atomic_set(&tpg->lport_tpg_enabled, 1);
	qlt_enable_vha(vha);
	return count;
}
/* Declare the NPIV <tpg>/enable configfs attribute (owner rw, world r). */
TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
/* NPIV TPG base attribute table: only "enable" is exposed. */
static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
&tcm_qla2xxx_npiv_tpg_enable.attr,
NULL,
};
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
struct se_wwn *wwn, struct se_wwn *wwn,
struct config_group *group, struct config_group *group,
...@@ -1650,6 +1715,9 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, ...@@ -1650,6 +1715,9 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
struct scsi_qla_host *npiv_vha; struct scsi_qla_host *npiv_vha;
struct tcm_qla2xxx_lport *lport = struct tcm_qla2xxx_lport *lport =
(struct tcm_qla2xxx_lport *)target_lport_ptr; (struct tcm_qla2xxx_lport *)target_lport_ptr;
struct tcm_qla2xxx_lport *base_lport =
(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
struct tcm_qla2xxx_tpg *base_tpg;
struct fc_vport_identifiers vport_id; struct fc_vport_identifiers vport_id;
if (!qla_tgt_mode_enabled(base_vha)) { if (!qla_tgt_mode_enabled(base_vha)) {
...@@ -1657,6 +1725,13 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, ...@@ -1657,6 +1725,13 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
return -EPERM; return -EPERM;
} }
if (!base_lport || !base_lport->tpg_1 ||
!atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
pr_err("qla2xxx base_lport or tpg_1 not available\n");
return -EPERM;
}
base_tpg = base_lport->tpg_1;
memset(&vport_id, 0, sizeof(vport_id)); memset(&vport_id, 0, sizeof(vport_id));
vport_id.port_name = npiv_wwpn; vport_id.port_name = npiv_wwpn;
vport_id.node_name = npiv_wwnn; vport_id.node_name = npiv_wwnn;
...@@ -1675,7 +1750,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, ...@@ -1675,7 +1750,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
npiv_vha = (struct scsi_qla_host *)vport->dd_data; npiv_vha = (struct scsi_qla_host *)vport->dd_data;
npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
lport->qla_vha = npiv_vha; lport->qla_vha = npiv_vha;
scsi_host_get(npiv_vha->host); scsi_host_get(npiv_vha->host);
return 0; return 0;
} }
...@@ -1714,8 +1788,6 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport( ...@@ -1714,8 +1788,6 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
} }
lport->lport_npiv_wwpn = npiv_wwpn; lport->lport_npiv_wwpn = npiv_wwpn;
lport->lport_npiv_wwnn = npiv_wwnn; lport->lport_npiv_wwnn = npiv_wwnn;
tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
ret = tcm_qla2xxx_init_lport(lport); ret = tcm_qla2xxx_init_lport(lport);
...@@ -1824,7 +1896,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { ...@@ -1824,7 +1896,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn, .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag, .tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth, .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
...@@ -1935,7 +2007,7 @@ static int tcm_qla2xxx_register_configfs(void) ...@@ -1935,7 +2007,7 @@ static int tcm_qla2xxx_register_configfs(void)
*/ */
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
tcm_qla2xxx_tpg_attrs; tcm_qla2xxx_npiv_tpg_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
......
...@@ -4,8 +4,6 @@ ...@@ -4,8 +4,6 @@
#define TCM_QLA2XXX_VERSION "v0.1" #define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */ /* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32 #define TCM_QLA2XXX_NAMELEN 32
/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
#define TCM_QLA2XXX_NPIV_NAMELEN 66
#include "qla_target.h" #include "qla_target.h"
...@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg { ...@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg_attrib tpg_attrib; struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */ /* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg; struct se_portal_group se_tpg;
/* Items for dealing with configfs_depend_item */
struct completion tpg_base_comp;
struct work_struct tpg_base_work;
}; };
struct tcm_qla2xxx_fc_loopid { struct tcm_qla2xxx_fc_loopid {
...@@ -62,8 +63,6 @@ struct tcm_qla2xxx_lport { ...@@ -62,8 +63,6 @@ struct tcm_qla2xxx_lport {
char lport_name[TCM_QLA2XXX_NAMELEN]; char lport_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted naa WWPN for VPD page 83 etc */ /* ASCII formatted naa WWPN for VPD page 83 etc */
char lport_naa_name[TCM_QLA2XXX_NAMELEN]; char lport_naa_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */ /* map for fc_port pointers in 24-bit FC Port ID space */
struct btree_head32 lport_fcport_map; struct btree_head32 lport_fcport_map;
/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */ /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
......
...@@ -1074,7 +1074,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, ...@@ -1074,7 +1074,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
struct scatterlist *psg; struct scatterlist *psg;
void *paddr, *addr; void *paddr, *addr;
unsigned int i, len, left; unsigned int i, len, left;
unsigned int offset = 0; unsigned int offset = sg_off;
left = sectors * dev->prot_length; left = sectors * dev->prot_length;
...@@ -1084,11 +1084,10 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, ...@@ -1084,11 +1084,10 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
if (offset >= sg->length) { if (offset >= sg->length) {
sg = sg_next(sg); sg = sg_next(sg);
offset = 0; offset = 0;
sg_off = sg->offset;
} }
paddr = kmap_atomic(sg_page(psg)) + psg->offset; paddr = kmap_atomic(sg_page(psg)) + psg->offset;
addr = kmap_atomic(sg_page(sg)) + sg_off; addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
if (read) if (read)
memcpy(paddr, addr, len); memcpy(paddr, addr, len);
...@@ -1163,7 +1162,7 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, ...@@ -1163,7 +1162,7 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt; struct se_dif_v1_tuple *sdt;
struct scatterlist *dsg; struct scatterlist *dsg, *psg = sg;
sector_t sector = start; sector_t sector = start;
void *daddr, *paddr; void *daddr, *paddr;
int i, j, offset = sg_off; int i, j, offset = sg_off;
...@@ -1171,14 +1170,14 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, ...@@ -1171,14 +1170,14 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
paddr = kmap_atomic(sg_page(sg)) + sg->offset; paddr = kmap_atomic(sg_page(psg)) + sg->offset;
for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
if (offset >= sg->length) { if (offset >= psg->length) {
kunmap_atomic(paddr); kunmap_atomic(paddr);
sg = sg_next(sg); psg = sg_next(psg);
paddr = kmap_atomic(sg_page(sg)) + sg->offset; paddr = kmap_atomic(sg_page(psg)) + psg->offset;
offset = 0; offset = 0;
} }
......
...@@ -1601,6 +1601,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, ...@@ -1601,6 +1601,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY: case TCM_CHECK_CONDITION_NOT_READY:
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
break; break;
case TCM_OUT_OF_RESOURCES: case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
......
...@@ -1001,6 +1001,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) ...@@ -1001,6 +1001,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
break; break;
} }
/* virtio-scsi spec requires byte 0 of the lun to be 1 */
if (unlikely(v_req.lun[0] != 1)) {
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
}
/* Extract the tpgt */ /* Extract the tpgt */
target = v_req.lun[1]; target = v_req.lun[1];
tpg = ACCESS_ONCE(vs_tpg[target]); tpg = ACCESS_ONCE(vs_tpg[target]);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment