Commit f8457d58 authored by Doug Ledford

Merge branch 'bart-srpt-for-next' into k.o/wip/dl-for-next

Merging in 12 patch series from Bart that required changes in the
current for-rc branch in order to apply cleanly.
Signed-off-by: Doug Ledford <dledford@redhat.com>
parents c4b76d8d 2d67017c
@@ -293,7 +293,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 }
 #endif
 
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
......
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
 	return 0;
 }
 
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
 {
 	struct ib_device *device;
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
 	return NULL;
 }
 
+/*
+ * The caller must release the reference by calling put_device()
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+	struct ib_device *device;
+
+	down_read(&lists_rwsem);
+	device = __ib_device_get_by_index(index);
+	if (device)
+		get_device(&device->dev);
+	up_read(&lists_rwsem);
+
+	return device;
+}
+
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
 	struct ib_device *device;
......
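The helper pair above implements "lookup and pin": __ib_device_get_by_index() is only safe while lists_rwsem is held, which is why it becomes static, while the new ib_device_get_by_index() wraps it with a read lock plus get_device() so the returned device cannot be freed behind the caller's back; every successful call must be balanced with put_device(). A minimal userspace analogue of the pattern, using pthreads and C11 atomics (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

#define MAX_DEVICES 16

struct device {
	atomic_int refcount;
	int index;
};

static struct device *devices[MAX_DEVICES];	/* the registry */
static pthread_rwlock_t lists_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Unlocked lookup: only valid while lists_lock is held. */
static struct device *__device_get_by_index(int index)
{
	for (size_t i = 0; i < MAX_DEVICES; i++)
		if (devices[i] && devices[i]->index == index)
			return devices[i];
	return NULL;
}

/* Locked lookup that pins the result; balance with device_put(). */
struct device *device_get_by_index(int index)
{
	struct device *dev;

	pthread_rwlock_rdlock(&lists_lock);
	dev = __device_get_by_index(index);
	if (dev)
		atomic_fetch_add(&dev->refcount, 1);	/* get_device() analogue */
	pthread_rwlock_unlock(&lists_lock);
	return dev;
}

void device_put(struct device *dev)
{
	atomic_fetch_sub(&dev->refcount, 1);	/* put_device() analogue */
}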
@@ -150,27 +150,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
 
-	device = __ib_device_get_by_index(index);
+	device = ib_device_get_by_index(index);
 	if (!device)
 		return -EINVAL;
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!msg)
-		return -ENOMEM;
+	if (!msg) {
+		err = -ENOMEM;
+		goto err;
+	}
 
 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 			0, 0);
 
 	err = fill_dev_info(msg, device);
-	if (err) {
-		nlmsg_free(msg);
-		return err;
-	}
+	if (err)
+		goto err_free;
 
 	nlmsg_end(msg, nlh);
 
+	put_device(&device->dev);
 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+	nlmsg_free(msg);
+err:
+	put_device(&device->dev);
+	return err;
 }
 
 static int _nldev_get_dumpit(struct ib_device *device,
@@ -228,31 +235,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return -EINVAL;
 
 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-	device = __ib_device_get_by_index(index);
+	device = ib_device_get_by_index(index);
 	if (!device)
 		return -EINVAL;
 
 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
-	if (!rdma_is_port_valid(device, port))
-		return -EINVAL;
+	if (!rdma_is_port_valid(device, port)) {
+		err = -EINVAL;
+		goto err;
+	}
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!msg)
-		return -ENOMEM;
+	if (!msg) {
+		err = -ENOMEM;
+		goto err;
+	}
 
 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 			0, 0);
 
 	err = fill_port_info(msg, device, port);
-	if (err) {
-		nlmsg_free(msg);
-		return err;
-	}
+	if (err)
+		goto err_free;
 
 	nlmsg_end(msg, nlh);
+	put_device(&device->dev);
 
 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+	nlmsg_free(msg);
+err:
+	put_device(&device->dev);
+	return err;
 }
 
 static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -273,7 +289,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 		return -EINVAL;
 
 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-	device = __ib_device_get_by_index(ifindex);
+	device = ib_device_get_by_index(ifindex);
 	if (!device)
 		return -EINVAL;
@@ -307,7 +323,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 		nlmsg_end(skb, nlh);
 	}
 
-out:	cb->args[0] = idx;
+out:
+	put_device(&device->dev);
+	cb->args[0] = idx;
 	return skb->len;
 }
......
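Both doit handlers are restructured so that every failure path funnels through the err_free/err labels, and the reference taken by ib_device_get_by_index() is dropped exactly once whether the handler fails before or after allocating the reply. A compressed, self-contained sketch of the same unwind idiom (acquire_dev(), fill_msg() and friends are stand-ins, not kernel functions):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct device { int refs; };
struct msg { char buf[64]; };

static struct device dev0;

static struct device *acquire_dev(int index)	/* ib_device_get_by_index() analogue */
{
	if (index != 0)
		return NULL;
	dev0.refs++;
	return &dev0;
}

static void release_dev(struct device *dev)	/* put_device() analogue */
{
	dev->refs--;
}

static int fill_msg(struct msg *msg, int index)
{
	return snprintf(msg->buf, sizeof(msg->buf), "dev %d", index) < 0 ? -EIO : 0;
}

static int handler(int index)
{
	struct device *dev;
	struct msg *msg;
	int err;

	dev = acquire_dev(index);
	if (!dev)
		return -EINVAL;		/* nothing acquired yet: plain return */

	msg = malloc(sizeof(*msg));
	if (!msg) {
		err = -ENOMEM;
		goto err;		/* unwind the device reference only */
	}

	err = fill_msg(msg, index);
	if (err)
		goto err_free;		/* unwind the message, then the reference */

	puts(msg->buf);
	free(msg);
	release_dev(dev);		/* the success path drops the reference too */
	return 0;

err_free:
	free(msg);
err:
	release_dev(dev);
	return err;
}

int main(void)
{
	return handler(0) ? EXIT_FAILURE : EXIT_SUCCESS;
}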
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 		goto err_free_mr;
 
 	mr->max_pages = max_num_sg;
-
 	err = mlx4_mr_enable(dev->dev, &mr->mmr);
 	if (err)
 		goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 	return &mr->ibmr;
 
 err_free_pl:
+	mr->ibmr.device = pd->device;
 	mlx4_free_priv_pages(mr);
 err_free_mr:
 	(void) mlx4_mr_free(dev->dev, &mr->mmr);
......
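The mlx4 fix works because mlx4_free_priv_pages() reaches through mr->ibmr.device to find the DMA device for the unmap, yet that field is normally filled in by the IB core only after the driver's alloc routine returns successfully; reaching err_free_pl before the fix therefore dereferenced a NULL pointer. The bug class in miniature (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct dev { const char *name; };
struct mr  { struct dev *device; char *pages; };

/* Cleanup helper that needs mr->device, like mlx4_free_priv_pages()
 * needs mr->ibmr.device for the DMA unmap. */
static void free_priv_pages(struct mr *mr)
{
	printf("freeing pages on %s\n", mr->device->name);
	free(mr->pages);
}

static struct mr *alloc_mr(struct dev *dev, int enable_fails)
{
	struct mr *mr = calloc(1, sizeof(*mr));

	if (!mr)
		return NULL;
	mr->pages = malloc(4096);
	if (!mr->pages)
		goto err_free_mr;

	if (enable_fails) {
		/* The fix: set the field the cleanup helper depends on
		 * before taking the error exit. */
		mr->device = dev;
		goto err_free_pages;
	}

	mr->device = dev;
	return mr;

err_free_pages:
	free_priv_pages(mr);	/* crashes on mr->device == NULL without the fix */
err_free_mr:
	free(mr);
	return NULL;
}

int main(void)
{
	struct dev d = { .name = "mlx4_0" };
	struct mr *mr = alloc_mr(&d, 0);	/* success path */

	if (mr) {
		free(mr->pages);
		free(mr);
	}
	alloc_mr(&d, 1);	/* error path exercises the cleanup helper */
	return 0;
}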
@@ -926,8 +926,8 @@ static int path_rec_start(struct net_device *dev,
 	return 0;
 }
 
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
-			   struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+					  struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 	struct rdma_netdev *rn = netdev_priv(dev);
@@ -941,7 +941,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 		spin_unlock_irqrestore(&priv->lock, flags);
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
-		return;
+		return NULL;
 	}
 
+	/* To avoid race condition, make sure that the
+	 * neigh will be added only once.
+	 */
+	if (unlikely(!list_empty(&neigh->list))) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return neigh;
+	}
+
 	path = __path_find(dev, daddr + 4);
@@ -980,7 +988,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
 						       IPOIB_QPN(daddr));
 			ipoib_neigh_put(neigh);
-			return;
+			return NULL;
 		}
 	} else {
 		neigh->ah = NULL;
@@ -997,7 +1005,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 	ipoib_neigh_put(neigh);
-	return;
+	return NULL;
 
 err_path:
 	ipoib_neigh_free(neigh);
@@ -1007,6 +1015,8 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 	spin_unlock_irqrestore(&priv->lock, flags);
 	ipoib_neigh_put(neigh);
+
+	return NULL;
 }
 
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1127,8 +1137,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	case htons(ETH_P_TIPC):
 		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
 		if (unlikely(!neigh)) {
-			neigh_add_path(skb, phdr->hwaddr, dev);
-			return NETDEV_TX_OK;
+			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+			if (likely(!neigh))
+				return NETDEV_TX_OK;
 		}
 		break;
 	case htons(ETH_P_ARP):
......
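neigh_add_path() now returns the neigh when it detects, under priv->lock, that a concurrent caller already put it on a list, and ipoib_start_xmit() keeps transmitting with that neigh instead of dropping the packet; the !list_empty() test taken under the lock is what makes the insertion happen exactly once. The ipoib_mcast_send() hunk below applies the same guard to the multicast neigh list. A compact userspace rendering of the check-before-insert idiom (pthread-based, names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct neigh {
	struct neigh *next;
	bool on_list;		/* stand-in for !list_empty(&neigh->list) */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct neigh *neigh_list;

/*
 * Add the neigh to the list exactly once. Returns the neigh if another
 * thread won the race (the caller should just use it), or NULL if we
 * inserted it ourselves, mirroring the new neigh_add_path() contract.
 */
struct neigh *neigh_add_once(struct neigh *neigh)
{
	pthread_mutex_lock(&lock);
	if (neigh->on_list) {			/* lost the race */
		pthread_mutex_unlock(&lock);
		return neigh;
	}
	neigh->next = neigh_list;		/* won the race: single insertion */
	neigh_list = neigh;
	neigh->on_list = true;
	pthread_mutex_unlock(&lock);
	return NULL;
}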
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 	spin_lock_irqsave(&priv->lock, flags);
 	if (!neigh) {
 		neigh = ipoib_neigh_alloc(daddr, dev);
-		if (neigh) {
+		/* Make sure that the neigh will be added only
+		 * once to mcast list.
+		 */
+		if (neigh && list_empty(&neigh->list)) {
 			kref_get(&mcast->ah->ref);
 			neigh->ah	= mcast->ah;
 			list_add_tail(&neigh->list, &mcast->neigh_list);
......
@@ -114,7 +114,7 @@ enum {
 	MIN_SRPT_SQ_SIZE = 16,
 	DEF_SRPT_SQ_SIZE = 4096,
-	SRPT_RQ_SIZE = 128,
+	MAX_SRPT_RQ_SIZE = 128,
 	MIN_SRPT_SRQ_SIZE = 4,
 	DEFAULT_SRPT_SRQ_SIZE = 4095,
 	MAX_SRPT_SRQ_SIZE = 65535,
@@ -134,7 +134,7 @@ enum {
 };
 
 /**
- * enum srpt_command_state - SCSI command state managed by SRPT.
+ * enum srpt_command_state - SCSI command state managed by SRPT
  * @SRPT_STATE_NEW:       New command arrived and is being processed.
  * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
  *                        for data arrival.
@@ -158,7 +158,8 @@ enum srpt_command_state {
 };
 
 /**
- * struct srpt_ioctx - Shared SRPT I/O context information.
+ * struct srpt_ioctx - shared SRPT I/O context information
+ * @cqe:   Completion queue element.
  * @buf:   Pointer to the buffer.
  * @dma:   DMA address of the buffer.
  * @index: Index of the I/O context in its ioctx_ring array.
@@ -171,7 +172,7 @@ struct srpt_ioctx {
 };
 
 /**
- * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * struct srpt_recv_ioctx - SRPT receive I/O context
  * @ioctx:     See above.
  * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
  */
@@ -187,13 +188,20 @@ struct srpt_rw_ctx {
 };
 
 /**
- * struct srpt_send_ioctx - SRPT send I/O context.
+ * struct srpt_send_ioctx - SRPT send I/O context
  * @ioctx:       See above.
  * @ch:          Channel pointer.
  * @spinlock:    Protects 'state'.
+ * @s_rw_ctx:    @rw_ctxs points here if only a single rw_ctx is needed.
+ * @rw_ctxs:     RDMA read/write contexts.
+ * @rdma_cqe:    RDMA completion queue element.
+ * @free_list:   Node in srpt_rdma_ch.free_list.
  * @state:       I/O context state.
  * @cmd:         Target core command data structure.
- * @sense_data:  SCSI sense data.
+ * @n_rdma:      Number of work requests needed to transfer this ioctx.
+ * @n_rw_ctx:    Size of rw_ctxs array.
+ * @queue_status_only: Send a SCSI status back to the initiator but no data.
+ * @sense_data:  Sense data to be sent to the initiator.
  */
 struct srpt_send_ioctx {
 	struct srpt_ioctx	ioctx;
@@ -204,10 +212,8 @@ struct srpt_send_ioctx {
 	struct ib_cqe		rdma_cqe;
 	struct list_head	free_list;
 	spinlock_t		spinlock;
 	enum srpt_command_state	state;
 	struct se_cmd		cmd;
-	struct completion	tx_done;
-
 	u8			n_rdma;
 	u8			n_rw_ctx;
 	bool			queue_status_only;
@@ -215,7 +221,7 @@
 };
 
 /**
- * enum rdma_ch_state - SRP channel state.
+ * enum rdma_ch_state - SRP channel state
  * @CH_CONNECTING:    QP is in RTR state; waiting for RTU.
  * @CH_LIVE:          QP is in RTS state.
  * @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
@@ -233,12 +239,14 @@ enum rdma_ch_state {
 };
 
 /**
- * struct srpt_rdma_ch - RDMA channel.
+ * struct srpt_rdma_ch - RDMA channel
  * @cm_id:          IB CM ID associated with the channel.
  * @qp:             IB queue pair used for communicating over this channel.
  * @cq:             IB completion queue for this channel.
+ * @zw_cqe:         Zero-length write CQE.
+ * @kref:           kref for this channel.
  * @rq_size:        IB receive queue size.
- * @rsp_size        IB response message size in bytes.
+ * @max_rsp_size:   Maximum size of an RSP response message in bytes.
  * @sq_wr_avail:    number of work requests available in the send queue.
  * @sport:          pointer to the information of the HCA port used by this
  *                  channel.
@@ -270,7 +278,7 @@ struct srpt_rdma_ch {
 	struct ib_cqe		zw_cqe;
 	struct kref		kref;
 	int			rq_size;
-	u32			rsp_size;
+	u32			max_rsp_size;
 	atomic_t		sq_wr_avail;
 	struct srpt_port	*sport;
 	u8			i_port_id[16];
@@ -293,7 +301,7 @@ struct srpt_rdma_ch {
 };
 
 /**
- * struct srpt_port_attib - Attributes for SRPT port
+ * struct srpt_port_attib - attributes for SRPT port
 * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
 * @srp_max_rsp_size:  Maximum size of SRP response messages in bytes.
 * @srp_sq_size:       Shared receive queue (SRQ) size.
@@ -307,7 +315,7 @@ struct srpt_port_attrib {
 };
 
 /**
- * struct srpt_port - Information associated by SRPT with a single IB port.
+ * struct srpt_port - information associated by SRPT with a single IB port
 * @sdev:      backpointer to the HCA information.
 * @mad_agent: per-port management datagram processing information.
 * @enabled:   Whether or not this target port is enabled.
@@ -323,7 +331,7 @@ struct srpt_port_attrib {
 * @port_guid_wwn: WWN associated with target port GUID.
 * @port_gid_tpg:  TPG associated with target port GID.
 * @port_gid_wwn:  WWN associated with target port GID.
- * @port_acl_list: Head of the list with all node ACLs for this port.
+ * @port_attrib:   Port attributes that can be accessed through configfs.
 */
 struct srpt_port {
 	struct srpt_device	*sdev;
@@ -344,7 +352,7 @@ struct srpt_port {
 };
 
 /**
- * struct srpt_device - Information associated by SRPT with a single HCA.
+ * struct srpt_device - information associated by SRPT with a single HCA
 * @device:    Backpointer to the struct ib_device managed by the IB core.
 * @pd:        IB protection domain.
 * @lkey:      L_Key (local key) with write access to all local memory.
......
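The srpt.h hunks above are largely kernel-doc hygiene: short descriptions lose their trailing period, every structure member gains an @member: line (scripts/kernel-doc warns about undocumented members), and stale entries such as @rsp_size and @port_acl_list are replaced by the current fields. For reference, the shape being converged on, shown with a hypothetical struct:

/**
 * struct example_ctx - per-connection bookkeeping for an example driver
 * @id:       Numeric identifier assigned at connect time.
 * @refcount: Number of outstanding users of this context.
 * @name:     Human-readable label, NUL-terminated.
 */
struct example_ctx {
	int	id;
	int	refcount;
	char	name[32];
};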