Commit a46171d0 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "Here are the target-pending fixes queued for v3.18-rc6.

  The highlights include:

   - target-core OOPs fix with tcm_qla2xxx + vxworks FC initiators +
     zero length SCSI commands having a transfer direction set.  (Roland
     + Craig Watson)

   - vhost-scsi OOPs fix to explicitly prevent WWPN endpoint configfs
     group removal while qemu still has an active reference.  (Paolo +
     nab)

   - ib_srpt fix for RDMA hardware with lower srp_sq_size limits.
     (Bart)

   - two ib_isert work-arounds for running on ocrdma hardware (Or + Sagi
     + Chris)

   - iscsi-target discovery portal typo + SPC-3 PR Preempt SA key
     matching fix (Steve)"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  IB/isert: Adjust CQ size to HW limits
  target: return CONFLICT only when SA key unmatched
  iser-target: Handle DEVICE_REMOVAL event on network portal listener correctly
  ib_isert: Add max_send_sge=2 minimum for control PDU responses
  srp-target: Retry when QP creation fails with ENOMEM
  iscsi-target: return the correct port in SendTargets
  vhost-scsi: Take configfs group dependency during VHOST_SCSI_SET_ENDPOINT
  target: Don't call TFO->write_pending if data_length == 0
parents 4ec69c7e b1a5ad00
...@@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, ...@@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
/* /*
* FIXME: Use devattr.max_sge - 2 for max_send_sge as * FIXME: Use devattr.max_sge - 2 for max_send_sge as
* work-around for RDMA_READ.. * work-around for RDMA_READs with ConnectX-2.
*
* Also, still make sure to have at least two SGEs for
* outgoing control PDU responses.
*/ */
attr.cap.max_send_sge = device->dev_attr.max_sge - 2; attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
isert_conn->max_sge = attr.cap.max_send_sge; isert_conn->max_sge = attr.cap.max_send_sge;
attr.cap.max_recv_sge = 1; attr.cap.max_recv_sge = 1;
...@@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device) ...@@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device)
struct isert_cq_desc *cq_desc; struct isert_cq_desc *cq_desc;
struct ib_device_attr *dev_attr; struct ib_device_attr *dev_attr;
int ret = 0, i, j; int ret = 0, i, j;
int max_rx_cqe, max_tx_cqe;
dev_attr = &device->dev_attr; dev_attr = &device->dev_attr;
ret = isert_query_device(ib_dev, dev_attr); ret = isert_query_device(ib_dev, dev_attr);
if (ret) if (ret)
return ret; return ret;
max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
/* asign function handlers */ /* asign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
...@@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device) ...@@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
isert_cq_rx_callback, isert_cq_rx_callback,
isert_cq_event_callback, isert_cq_event_callback,
(void *)&cq_desc[i], (void *)&cq_desc[i],
ISER_MAX_RX_CQ_LEN, i); max_rx_cqe, i);
if (IS_ERR(device->dev_rx_cq[i])) { if (IS_ERR(device->dev_rx_cq[i])) {
ret = PTR_ERR(device->dev_rx_cq[i]); ret = PTR_ERR(device->dev_rx_cq[i]);
device->dev_rx_cq[i] = NULL; device->dev_rx_cq[i] = NULL;
...@@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device) ...@@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device)
isert_cq_tx_callback, isert_cq_tx_callback,
isert_cq_event_callback, isert_cq_event_callback,
(void *)&cq_desc[i], (void *)&cq_desc[i],
ISER_MAX_TX_CQ_LEN, i); max_tx_cqe, i);
if (IS_ERR(device->dev_tx_cq[i])) { if (IS_ERR(device->dev_tx_cq[i])) {
ret = PTR_ERR(device->dev_tx_cq[i]); ret = PTR_ERR(device->dev_tx_cq[i]);
device->dev_tx_cq[i] = NULL; device->dev_tx_cq[i] = NULL;
...@@ -803,14 +810,25 @@ isert_disconnect_work(struct work_struct *work) ...@@ -803,14 +810,25 @@ isert_disconnect_work(struct work_struct *work)
complete(&isert_conn->conn_wait); complete(&isert_conn->conn_wait);
} }
static void static int
isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{ {
struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; struct isert_conn *isert_conn;
if (!cma_id->qp) {
struct isert_np *isert_np = cma_id->context;
isert_np->np_cm_id = NULL;
return -1;
}
isert_conn = (struct isert_conn *)cma_id->context;
isert_conn->disconnect = disconnect; isert_conn->disconnect = disconnect;
INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
schedule_work(&isert_conn->conn_logout_work); schedule_work(&isert_conn->conn_logout_work);
return 0;
} }
static int static int
...@@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ...@@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
switch (event->event) { switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST: case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event); ret = isert_connect_request(cma_id, event);
if (ret)
pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
event->event, ret);
break; break;
case RDMA_CM_EVENT_ESTABLISHED: case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id); isert_connected_handler(cma_id);
...@@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ...@@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
disconnect = true; disconnect = true;
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
isert_disconnected_handler(cma_id, disconnect); ret = isert_disconnected_handler(cma_id, disconnect);
break; break;
case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR:
default: default:
...@@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ...@@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
break; break;
} }
if (ret != 0) {
pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
event->event, ret);
dump_stack();
}
return ret; return ret;
} }
...@@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np) ...@@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np)
{ {
struct isert_np *isert_np = (struct isert_np *)np->np_context; struct isert_np *isert_np = (struct isert_np *)np->np_context;
rdma_destroy_id(isert_np->np_cm_id); if (isert_np->np_cm_id)
rdma_destroy_id(isert_np->np_cm_id);
np->np_context = NULL; np->np_context = NULL;
kfree(isert_np); kfree(isert_np);
......
...@@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ...@@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
if (!qp_init) if (!qp_init)
goto out; goto out;
retry:
ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
ch->rq_size + srp_sq_size, 0); ch->rq_size + srp_sq_size, 0);
if (IS_ERR(ch->cq)) { if (IS_ERR(ch->cq)) {
...@@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ...@@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
ch->qp = ib_create_qp(sdev->pd, qp_init); ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) { if (IS_ERR(ch->qp)) {
ret = PTR_ERR(ch->qp); ret = PTR_ERR(ch->qp);
if (ret == -ENOMEM) {
srp_sq_size /= 2;
if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
ib_destroy_cq(ch->cq);
goto retry;
}
}
printk(KERN_ERR "failed to create_qp ret= %d\n", ret); printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
goto err_destroy_cq; goto err_destroy_cq;
} }
......
...@@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, ...@@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
len = sprintf(buf, "TargetAddress=" len = sprintf(buf, "TargetAddress="
"%s:%hu,%hu", "%s:%hu,%hu",
inaddr_any ? conn->local_ip : np->np_ip, inaddr_any ? conn->local_ip : np->np_ip,
inaddr_any ? conn->local_port : np->np_port, np->np_port,
tpg->tpgt); tpg->tpgt);
len += 1; len += 1;
......
...@@ -2738,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, ...@@ -2738,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr; struct t10_reservation *pr_tmpl = &dev->t10_pr;
u32 pr_res_mapped_lun = 0; u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0; int all_reg = 0, calling_it_nexus = 0;
bool sa_res_key_unmatched = sa_res_key != 0;
int prh_type = 0, prh_scope = 0; int prh_type = 0, prh_scope = 0;
if (!se_sess) if (!se_sess)
...@@ -2813,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, ...@@ -2813,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if (!all_reg) { if (!all_reg) {
if (pr_reg->pr_res_key != sa_res_key) if (pr_reg->pr_res_key != sa_res_key)
continue; continue;
sa_res_key_unmatched = false;
calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
pr_reg_nacl = pr_reg->pr_reg_nacl; pr_reg_nacl = pr_reg->pr_reg_nacl;
...@@ -2820,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, ...@@ -2820,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
__core_scsi3_free_registration(dev, pr_reg, __core_scsi3_free_registration(dev, pr_reg,
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, calling_it_nexus); NULL, calling_it_nexus);
released_regs++;
} else { } else {
/* /*
* Case for any existing all registrants type * Case for any existing all registrants type
...@@ -2838,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, ...@@ -2838,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if ((sa_res_key) && if ((sa_res_key) &&
(pr_reg->pr_res_key != sa_res_key)) (pr_reg->pr_res_key != sa_res_key))
continue; continue;
sa_res_key_unmatched = false;
calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
if (calling_it_nexus) if (calling_it_nexus)
...@@ -2848,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, ...@@ -2848,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
__core_scsi3_free_registration(dev, pr_reg, __core_scsi3_free_registration(dev, pr_reg,
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, 0); NULL, 0);
released_regs++;
} }
if (!calling_it_nexus) if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl, core_scsi3_ua_allocate(pr_reg_nacl,
...@@ -2863,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, ...@@ -2863,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* registered reservation key, then the device server shall * registered reservation key, then the device server shall
* complete the command with RESERVATION CONFLICT status. * complete the command with RESERVATION CONFLICT status.
*/ */
if (!released_regs) { if (sa_res_key_unmatched) {
spin_unlock(&dev->dev_reservation_lock); spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n); core_scsi3_put_pr_reg(pr_reg_n);
return TCM_RESERVATION_CONFLICT; return TCM_RESERVATION_CONFLICT;
......
...@@ -2292,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd) ...@@ -2292,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
* and let it call back once the write buffers are ready. * and let it call back once the write buffers are ready.
*/ */
target_add_to_state_list(cmd); target_add_to_state_list(cmd);
if (cmd->data_direction != DMA_TO_DEVICE) { if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
target_execute_cmd(cmd); target_execute_cmd(cmd);
return 0; return 0;
} }
......
...@@ -1312,6 +1312,7 @@ static int ...@@ -1312,6 +1312,7 @@ static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs, vhost_scsi_set_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t) struct vhost_scsi_target *t)
{ {
struct se_portal_group *se_tpg;
struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tpg; struct tcm_vhost_tpg *tpg;
struct tcm_vhost_tpg **vs_tpg; struct tcm_vhost_tpg **vs_tpg;
...@@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ...@@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
ret = -EEXIST; ret = -EEXIST;
goto out; goto out;
} }
/*
* In order to ensure individual vhost-scsi configfs
* groups cannot be removed while in use by vhost ioctl,
* go ahead and take an explicit se_tpg->tpg_group.cg_item
* dependency now.
*/
se_tpg = &tpg->se_tpg;
ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
&se_tpg->tpg_group.cg_item);
if (ret) {
pr_warn("configfs_depend_item() failed: %d\n", ret);
kfree(vs_tpg);
mutex_unlock(&tpg->tv_tpg_mutex);
goto out;
}
tpg->tv_tpg_vhost_count++; tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs; tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg; vs_tpg[tpg->tport_tpgt] = tpg;
...@@ -1401,6 +1417,7 @@ static int ...@@ -1401,6 +1417,7 @@ static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t) struct vhost_scsi_target *t)
{ {
struct se_portal_group *se_tpg;
struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tpg; struct tcm_vhost_tpg *tpg;
struct vhost_virtqueue *vq; struct vhost_virtqueue *vq;
...@@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, ...@@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
vs->vs_tpg[target] = NULL; vs->vs_tpg[target] = NULL;
match = true; match = true;
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
/*
* Release se_tpg->tpg_group.cg_item configfs dependency now
* to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
*/
se_tpg = &tpg->se_tpg;
configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
&se_tpg->tpg_group.cg_item);
} }
if (match) { if (match) {
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment