Commit ace48471 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: P_Key change event handling
  IB/mthca: Fix modify QP error path
  IPoIB: Fix network interface "RUNNING" status
  IB/mthca: Fix indentation
  IB/mthca: Fix uninitialized variable in mthca_alloc_qp()
  IB/mthca: Check SRQ limit in modify SRQ operation
  IB/mthca: Check that SRQ WQE size does not exceed device's max value
  IB/mthca: Check that sgid_index and path_mtu are valid in modify_qp
  IB/srp: Use a fake scatterlist for non-SG SCSI commands
  IPoIB: Pass correct pointer when flushing child interfaces
parents 0c505273 7a343d4c
@@ -483,13 +483,20 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask)
 	return err;
 }
 
-static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+			  struct mthca_qp_path *path)
 {
 	path->g_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid        = cpu_to_be16(ah->dlid);
 	path->static_rate = !!ah->static_rate;
 
 	if (ah->ah_flags & IB_AH_GRH) {
+		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
+				  ah->grh.sgid_index, dev->limits.gid_table_len-1);
+			return -1;
+		}
+
 		path->g_mylmc   |= 1 << 7;
 		path->mgid_index = ah->grh.sgid_index;
 		path->hop_limit  = ah->grh.hop_limit;
@@ -500,6 +507,8 @@ static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
 		memcpy(path->rgid, ah->grh.dgid.raw, 16);
 	} else
 		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+
+	return 0;
 }
 
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
@@ -592,8 +601,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (qp->transport == MLX || qp->transport == UD)
 		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
-	else if (attr_mask & IB_QP_PATH_MTU)
+	else if (attr_mask & IB_QP_PATH_MTU) {
+		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+			mthca_dbg(dev, "path MTU (%u) is invalid\n",
+				  attr->path_mtu);
+			return -EINVAL;
+		}
 		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+	}
 
 	if (mthca_is_memfree(dev)) {
 		if (qp->rq.max)
@@ -642,7 +657,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
+		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
+			return -EINVAL;
+
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
 	}
 
@@ -664,7 +681,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 			return -EINVAL;
 		}
 
-		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
+			return -EINVAL;
+
 		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
 							      attr->alt_port_num << 24);
 		qp_context->alt_path.ackto = attr->alt_timeout << 3;
@@ -758,21 +777,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
 			      mailbox, sqd_event, &status);
+	if (err)
+		goto out;
 	if (status) {
 		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
 			   cur_state, new_state, status);
 		err = -EINVAL;
+		goto out;
 	}
 
-	if (!err) {
-		qp->state = new_state;
-		if (attr_mask & IB_QP_ACCESS_FLAGS)
-			qp->atomic_rd_en = attr->qp_access_flags;
-		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-			qp->resp_depth = attr->max_dest_rd_atomic;
-	}
-
-	mthca_free_mailbox(dev, mailbox);
+	qp->state = new_state;
+	if (attr_mask & IB_QP_ACCESS_FLAGS)
+		qp->atomic_rd_en = attr->qp_access_flags;
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		qp->resp_depth = attr->max_dest_rd_atomic;
 
 	if (is_sqp(dev, qp))
 		store_attrs(to_msqp(qp), attr, attr_mask);
@@ -797,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	 * If we moved a kernel QP to RESET, clean up all old CQ
 	 * entries and reinitialize the QP.
 	 */
-	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
+	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
 		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
@@ -816,6 +834,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		}
 	}
 
+out:
+	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -1177,10 +1197,6 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 {
 	int err;
 
-	err = mthca_set_qp_size(dev, cap, pd, qp);
-	if (err)
-		return err;
-
 	switch (type) {
 	case IB_QPT_RC: qp->transport = RC; break;
 	case IB_QPT_UC: qp->transport = UC; break;
@@ -1188,6 +1204,10 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 	default: return -EINVAL;
 	}
 
+	err = mthca_set_qp_size(dev, cap, pd, qp);
+	if (err)
+		return err;
+
 	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
 	if (qp->qpn == -1)
 		return -ENOMEM;
@@ -1220,6 +1240,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
 	int err;
 
+	sqp->qp.transport = MLX;
 	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
 	if (err)
 		return err;
......
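The mthca_qp.c hunks above do two things: mthca_path_set() now validates sgid_index before programming an address path, and mthca_modify_qp() funnels every failure after the mailbox allocation through a single out: label so the mailbox is freed exactly once on all paths. A minimal, self-contained sketch of that goto-out cleanup pattern follows; the mailbox helpers and modify_hw_qp() are hypothetical stand-ins, not the mthca API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mailbox { char buf[64]; };

static struct mailbox *alloc_mailbox(void) { return malloc(sizeof(struct mailbox)); }
static void free_mailbox(struct mailbox *m) { free(m); }

/* Stand-in for the firmware command: returns 0 and sets *status. */
static int modify_hw_qp(struct mailbox *m, unsigned char *status)
{
	(void) m;
	*status = 0;
	return 0;
}

static int modify_qp(void)
{
	struct mailbox *mailbox;
	unsigned char status;
	int err;

	mailbox = alloc_mailbox();
	if (!mailbox)
		return -ENOMEM;

	err = modify_hw_qp(mailbox, &status);
	if (err)
		goto out;		/* posting the command itself failed */
	if (status) {
		err = -EINVAL;		/* firmware rejected the transition */
		goto out;
	}

	/* Commit software state only after the command fully succeeded. */

out:
	free_mailbox(mailbox);		/* single cleanup point for every path */
	return err;
}

int main(void)
{
	printf("modify_qp() = %d\n", modify_qp());
	return 0;
}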
@@ -205,6 +205,10 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	ds = max(64UL,
 		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
 				    srq->max_gs * sizeof (struct mthca_data_seg)));
+
+	if (ds > dev->limits.max_desc_sz)
+		return -EINVAL;
+
 	srq->wqe_shift = long_log2(ds);
 
 	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
@@ -354,6 +358,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		return -EINVAL;
 
 	if (attr_mask & IB_SRQ_LIMIT) {
+		if (attr->srq_limit > srq->max)
+			return -EINVAL;
+
 		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
 		if (ret)
 			return ret;
......
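The mthca_srq.c hunks size an SRQ work-request descriptor as the next-segment header plus one data segment per scatter/gather entry, rounded up to a power of two with a 64-byte floor, and now reject descriptors larger than the device's max_desc_sz. A small sketch of that arithmetic, assuming the 16-byte mthca_next_seg/mthca_data_seg layouts and a purely illustrative max_desc_sz of 1008:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;
	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	const unsigned long next_seg = 16, data_seg = 16;	/* segment sizes */
	const unsigned long max_desc_sz = 1008;			/* example device limit */

	for (unsigned int max_gs = 1; max_gs <= 64; max_gs *= 4) {
		unsigned long ds = roundup_pow_of_two(next_seg + max_gs * data_seg);
		if (ds < 64)
			ds = 64;	/* 64-byte floor, as in max(64UL, ...) */
		printf("max_gs=%2u -> ds=%4lu %s\n", max_gs, ds,
		       ds > max_desc_sz ? "(rejected: -EINVAL)" : "(ok)");
	}
	return 0;
}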
@@ -72,13 +72,14 @@ enum {
 	IPOIB_MAX_MCAST_QUEUE     = 3,
 
 	IPOIB_FLAG_OPER_UP        = 0,
-	IPOIB_FLAG_ADMIN_UP       = 1,
-	IPOIB_PKEY_ASSIGNED       = 2,
-	IPOIB_PKEY_STOP           = 3,
-	IPOIB_FLAG_SUBINTERFACE   = 4,
-	IPOIB_MCAST_RUN           = 5,
-	IPOIB_STOP_REAPER         = 6,
-	IPOIB_MCAST_STARTED       = 7,
+	IPOIB_FLAG_INITIALIZED    = 1,
+	IPOIB_FLAG_ADMIN_UP       = 2,
+	IPOIB_PKEY_ASSIGNED       = 3,
+	IPOIB_PKEY_STOP           = 4,
+	IPOIB_FLAG_SUBINTERFACE   = 5,
+	IPOIB_MCAST_RUN           = 6,
+	IPOIB_STOP_REAPER         = 7,
+	IPOIB_MCAST_STARTED       = 8,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
......
@@ -423,13 +423,33 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
 
+	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
 	return 0;
 }
 
+static void ipoib_pkey_dev_check_presence(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	u16 pkey_index = 0;
+
+	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+	else
+		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+}
+
 int ipoib_ib_dev_up(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	ipoib_pkey_dev_check_presence(dev);
+
+	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+		ipoib_dbg(priv, "PKEY is not assigned.\n");
+		return 0;
+	}
+
 	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 
 	return ipoib_mcast_start_thread(dev);
@@ -483,6 +503,8 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 	struct ipoib_tx_buf *tx_req;
 	int i;
 
+	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
 	/*
 	 * Move our QP to the error state and then reinitialize in
 	 * when all work requests have completed or have been flushed.
@@ -587,8 +609,15 @@ void ipoib_ib_dev_flush(void *_dev)
 	struct net_device *dev = (struct net_device *)_dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
 
-	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
 		return;
+	}
+
+	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
+		return;
+	}
 
 	ipoib_dbg(priv, "flushing\n");
 
@@ -605,7 +634,7 @@ void ipoib_ib_dev_flush(void *_dev)
 
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
-		ipoib_ib_dev_flush(&cpriv->dev);
+		ipoib_ib_dev_flush(cpriv->dev);
 
 	mutex_unlock(&priv->vlan_mutex);
 }
@@ -632,17 +661,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 	 * change async notification is available.
 	 */
 
-static void ipoib_pkey_dev_check_presence(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	u16 pkey_index = 0;
-
-	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
-		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-	else
-		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-}
-
 void ipoib_pkey_poll(void *dev_ptr)
 {
 	struct net_device *dev = dev_ptr;
......
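The IPoIB hunks above tie the interface's RUNNING status to an actual P_Key: ipoib_ib_dev_up() now consults the port's P_Key table via ib_find_cached_pkey() and only sets IPOIB_FLAG_OPER_UP when the P_Key is present, and ipoib_ib_dev_flush() bails out unless the device made it through open (IPOIB_FLAG_INITIALIZED). A userspace sketch of that flag gating, with find_pkey() as a stand-in for ib_find_cached_pkey() and plain bit operations in place of the kernel's set_bit()/clear_bit()/test_bit():

#include <stdio.h>

enum {
	IPOIB_FLAG_OPER_UP     = 0,
	IPOIB_FLAG_INITIALIZED = 1,
	IPOIB_FLAG_ADMIN_UP    = 2,
	IPOIB_PKEY_ASSIGNED    = 3,
};

struct ipoib_dev { unsigned long flags; unsigned short pkey; };

static void set_bit_(int n, unsigned long *f)   { *f |=  (1UL << n); }
static void clear_bit_(int n, unsigned long *f) { *f &= ~(1UL << n); }
static int  test_bit_(int n, unsigned long *f)  { return !!(*f & (1UL << n)); }

/* Stand-in for ib_find_cached_pkey(): 0 on success, nonzero if absent. */
static int find_pkey(unsigned short pkey) { return pkey != 0xffff; }

static int dev_up(struct ipoib_dev *priv)
{
	/* Mirror of ipoib_pkey_dev_check_presence(). */
	if (find_pkey(priv->pkey))
		clear_bit_(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit_(IPOIB_PKEY_ASSIGNED, &priv->flags);

	if (!test_bit_(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		printf("PKEY is not assigned - staying !RUNNING\n");
		return 0;
	}

	set_bit_(IPOIB_FLAG_OPER_UP, &priv->flags);	/* now report RUNNING */
	return 0;
}

int main(void)
{
	struct ipoib_dev priv = { 0, 0x1234 };	/* P_Key not in the table */
	dev_up(&priv);
	priv.pkey = 0xffff;			/* default P_Key present */
	dev_up(&priv);
	printf("OPER_UP=%d\n", test_bit_(IPOIB_FLAG_OPER_UP, &priv.flags));
	return 0;
}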
@@ -736,6 +736,11 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
+		return;
+	}
+
 	queue_work(ipoib_workqueue, &priv->restart_task);
 }
......
@@ -251,10 +251,12 @@ void ipoib_event(struct ib_event_handler *handler,
 	struct ipoib_dev_priv *priv =
 		container_of(handler, struct ipoib_dev_priv, event_handler);
 
-	if (record->event == IB_EVENT_PORT_ACTIVE ||
+	if (record->event == IB_EVENT_PORT_ERR    ||
+	    record->event == IB_EVENT_PKEY_CHANGE ||
+	    record->event == IB_EVENT_PORT_ACTIVE ||
 	    record->event == IB_EVENT_LID_CHANGE  ||
 	    record->event == IB_EVENT_SM_CHANGE) {
-		ipoib_dbg(priv, "Port active event\n");
+		ipoib_dbg(priv, "Port state change event\n");
 		queue_work(ipoib_workqueue, &priv->flush_task);
 	}
 }
@@ -503,8 +503,10 @@ static int srp_reconnect_target(struct srp_target_port *target)
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			struct srp_request *req)
 {
+	struct scatterlist *scat;
 	struct srp_cmd *cmd = req->cmd->buf;
-	int len;
+	int len, nents, count;
+	int i;
 	u8 fmt;
 
 	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
@@ -517,15 +519,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		return -EINVAL;
 	}
 
-	if (scmnd->use_sg) {
-		struct scatterlist *scat = scmnd->request_buffer;
-		int n;
-		int i;
-
-		n = dma_map_sg(target->srp_host->dev->dma_device,
-			       scat, scmnd->use_sg, scmnd->sc_data_direction);
+	/*
+	 * This handling of non-SG commands can be killed when the
+	 * SCSI midlayer no longer generates non-SG commands.
+	 */
+	if (likely(scmnd->use_sg)) {
+		nents = scmnd->use_sg;
+		scat  = scmnd->request_buffer;
+	} else {
+		nents = 1;
+		scat  = &req->fake_sg;
+		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
+	}
 
-	if (n == 1) {
+	count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
+			   scmnd->sc_data_direction);
+
+	if (count == 1) {
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		fmt = SRP_DATA_DESC_DIRECT;
@@ -543,9 +553,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		fmt = SRP_DATA_DESC_INDIRECT;
 
 		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
-			cmd->data_out_desc_cnt = n;
+			cmd->data_out_desc_cnt = count;
 		else
-			cmd->data_in_desc_cnt = n;
+			cmd->data_in_desc_cnt = count;
 
 		buf->table_desc.va = cpu_to_be64(req->cmd->dma +
 						 sizeof *cmd +
@@ -553,9 +563,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->table_desc.key =
 			cpu_to_be32(target->srp_host->mr->rkey);
 		buf->table_desc.len =
-			cpu_to_be32(n * sizeof (struct srp_direct_buf));
+			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
-		for (i = 0; i < n; ++i) {
+		for (i = 0; i < count; ++i) {
 			buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
 			buf->desc_list[i].key =
 				cpu_to_be32(target->srp_host->mr->rkey);
@@ -568,31 +578,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		len = sizeof (struct srp_cmd) +
 			sizeof (struct srp_indirect_buf) +
-			n * sizeof (struct srp_direct_buf);
-		}
-	} else {
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
-		dma_addr_t dma;
-
-		dma = dma_map_single(target->srp_host->dev->dma_device,
-				     scmnd->request_buffer, scmnd->request_bufflen,
-				     scmnd->sc_data_direction);
-
-		if (dma_mapping_error(dma)) {
-			printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
-			       scmnd->request_buffer, (int) scmnd->request_bufflen,
-			       scmnd->sc_data_direction);
-			return -EINVAL;
-		}
-
-		pci_unmap_addr_set(req, direct_mapping, dma);
-
-		buf->va  = cpu_to_be64(dma);
-		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
-		buf->len = cpu_to_be32(scmnd->request_bufflen);
-
-		fmt = SRP_DATA_DESC_DIRECT;
-		len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
+			count * sizeof (struct srp_direct_buf);
 	}
 
 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -600,7 +586,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	else
 		cmd->buf_fmt = fmt;
 
-
 	return len;
 }
 
@@ -608,19 +593,27 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			   struct srp_target_port *target,
 			   struct srp_request *req)
 {
+	struct scatterlist *scat;
+	int nents;
+
 	if (!scmnd->request_buffer ||
 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 		return;
 
-	if (scmnd->use_sg)
-		dma_unmap_sg(target->srp_host->dev->dma_device,
-			     (struct scatterlist *) scmnd->request_buffer,
-			     scmnd->use_sg, scmnd->sc_data_direction);
-	else
-		dma_unmap_single(target->srp_host->dev->dma_device,
-				 pci_unmap_addr(req, direct_mapping),
-				 scmnd->request_bufflen,
-				 scmnd->sc_data_direction);
+	/*
+	 * This handling of non-SG commands can be killed when the
+	 * SCSI midlayer no longer generates non-SG commands.
+	 */
+	if (likely(scmnd->use_sg)) {
+		nents = scmnd->use_sg;
+		scat  = (struct scatterlist *) scmnd->request_buffer;
+	} else {
+		nents = 1;
+		scat  = &req->fake_sg;
+	}
+
+	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+		     scmnd->sc_data_direction);
 }
......
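The ib_srp.c rewrite above removes the separate dma_map_single() path: a non-SG command (use_sg == 0) is wrapped in a one-entry scatterlist kept in the request (req->fake_sg, built with sg_init_one()), so a single dma_map_sg()/dma_unmap_sg() path serves both cases. A userspace model of that normalization; the struct layouts here are simplified stand-ins, not the kernel's:

#include <stdio.h>

struct scatterlist { void *buf; unsigned int len; };
struct scsi_cmnd   { unsigned int use_sg; void *request_buffer;
		     unsigned int request_bufflen; };
struct srp_request { struct scatterlist fake_sg; };

/* Simplified analogue of sg_init_one(): describe one flat buffer. */
static void sg_init_one(struct scatterlist *sg, void *buf, unsigned int len)
{
	sg->buf = buf;
	sg->len = len;
}

/* Normalize any command to a (scat, nents) pair, as srp_map_data now does. */
static struct scatterlist *cmd_to_sg(struct scsi_cmnd *scmnd,
				     struct srp_request *req, int *nents)
{
	if (scmnd->use_sg) {			/* already a scatterlist */
		*nents = scmnd->use_sg;
		return scmnd->request_buffer;
	}
	*nents = 1;				/* wrap the flat buffer */
	sg_init_one(&req->fake_sg, scmnd->request_buffer,
		    scmnd->request_bufflen);
	return &req->fake_sg;
}

int main(void)
{
	char payload[512];
	struct scsi_cmnd scmnd = { 0, payload, sizeof payload };
	struct srp_request req;
	int nents;

	struct scatterlist *scat = cmd_to_sg(&scmnd, &req, &nents);
	printf("nents=%d len=%u\n", nents, scat->len);	/* nents=1 len=512 */
	return 0;
}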
@@ -38,6 +38,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
@@ -94,7 +95,11 @@ struct srp_request {
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
 	struct srp_iu	       *tsk_mgmt;
-	DECLARE_PCI_UNMAP_ADDR(direct_mapping)
+	/*
+	 * Fake scatterlist used when scmnd->use_sg==0.  Can be killed
+	 * when the SCSI midlayer no longer generates non-SG commands.
+	 */
+	struct scatterlist	fake_sg;
 	struct completion	done;
 	short			next;
 	u8			cmd_done;
......