Commit 941c8726 authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull final RDMA changes from Roland Dreier:
 - Fix IPoIB to stop using unsafe linkage between networking neighbour
   layer and private path database.
 - Small fixes for bugs found by Fengguang Wu's automated builds.

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Use a private hash table for path lookup in xmit path
  IB/qib: Fix size of cc_supported_table_entries
  RDMA/ucma: Convert open-coded equivalent to memdup_user()
  RDMA/ocrdma: Fix check of GSI CQs
  RDMA/cma: Use PTR_RET rather than if (IS_ERR(...)) + PTR_ERR
parents 8762541f 1da9b6b4
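
The IPoIB rework summarized above is the bulk of the diff that follows. In rough terms, the old driver stashed a pointer to its private ipoib_neigh structure inside the core networking struct neighbour, directly after the hardware address, which tied IPoIB's path state to objects the network stack can free or reuse on its own schedule. The new code keeps its own RCU-protected, reference-counted table keyed by the 20-byte hardware address. The two models, condensed from the ipoib.h hunk further down for orientation:

/* Old model (removed): the private pointer lives inside the core
 * struct neighbour, immediately after the hardware address in neigh->ha. */
static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
{
	return (void *) neigh + ALIGN(offsetof(struct neighbour, ha) +
				      INFINIBAND_ALEN, sizeof(void *));
}

/* New model (added): lookups go through a driver-private hash table and
 * pin the entry with a reference count. */
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);

static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
	if (atomic_dec_and_test(&neigh->refcnt))
		ipoib_neigh_dtor(neigh);
}
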
@@ -3064,10 +3064,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
id_priv->id.port_num, &rec,
comp_mask, GFP_KERNEL,
cma_ib_mc_handler, mc);
if (IS_ERR(mc->multicast.ib))
return PTR_ERR(mc->multicast.ib);
return 0;
return PTR_RET(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
......
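
The cma.c hunk above is the PTR_RET conversion named in the commit list. For reference, PTR_RET() from include/linux/err.h is, in essence, the open-coded check it replaces (paraphrased):

static inline int __must_check PTR_RET(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}
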
@@ -1002,23 +1002,18 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
optval = kmalloc(cmd.optlen, GFP_KERNEL);
if (!optval) {
ret = -ENOMEM;
goto out1;
}
if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
cmd.optlen)) {
ret = -EFAULT;
goto out2;
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
cmd.optlen);
if (IS_ERR(optval)) {
ret = PTR_ERR(optval);
goto out;
}
ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
cmd.optlen);
out2:
kfree(optval);
out1:
out:
ucma_put_ctx(ctx);
return ret;
}
......
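
The ucma.c hunk replaces an open-coded kmalloc() + copy_from_user() pair with memdup_user(), which is why a single IS_ERR() check and one "out" label are now enough. A simplified sketch of what memdup_user() does (the real helper lives in mm/util.c; details here are approximate):

void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
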
@@ -893,7 +893,9 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
/* verify consumer QPs are not trying to use GSI QP's CQ */
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
(dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq))) {
(dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
(dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
(dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
__func__, dev->id);
return -EINVAL;
......
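
The ocrdma hunk fixes a copy-and-paste bug: the removed line compared dev->gsi_sqcq against the send CQ twice, so a consumer QP could still be created on the GSI receive CQ. The replacement checks all four send/receive combinations. A hypothetical helper (not part of the patch) that would express the same check more compactly:

/* Hypothetical helper, not in the driver: true if cq is one of the two
 * CQs reserved for the GSI QP. */
static bool ocrdma_is_gsi_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
	return cq == dev->gsi_sqcq || cq == dev->gsi_rqcq;
}

	/* ...which would reduce the fixed condition to: */
	if (ocrdma_is_gsi_cq(dev, get_ocrdma_cq(attrs->send_cq)) ||
	    ocrdma_is_gsi_cq(dev, get_ocrdma_cq(attrs->recv_cq))) {
		ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			   __func__, dev->id);
		return -EINVAL;
	}
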
@@ -656,6 +656,11 @@ struct qib_pportdata {
/* 16 congestion entries with each entry corresponding to a SL */
struct ib_cc_congestion_entry_shadow *congestion_entries;
/* Maximum number of congestion control entries that the agent expects
* the manager to send.
*/
u16 cc_supported_table_entries;
/* Total number of congestion control table entries */
u16 total_cct_entry;
@@ -667,11 +672,6 @@ struct qib_pportdata {
/* CA's max number of 64 entry units in the congestion control table */
u8 cc_max_table_entries;
/* Maximum number of congestion control entries that the agent expects
* the manager to send.
*/
u8 cc_supported_table_entries;
};
/* Observers. Not to be taken lightly, possibly not to ship. */
......
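
The qib hunk moves cc_supported_table_entries next to the other congestion-control counters and, more importantly, widens it from u8 to u16. The neighbouring comment describes cc_max_table_entries as a count of 64-entry units, so the number of individual entries the agent advertises scales well past 255 and silently truncates in a u8. Illustrative arithmetic only; the concrete values are hypothetical:

	u8  cc_max_table_entries = 18;              /* 18 units of 64 entries */
	u8  old_count = cc_max_table_entries * 64;  /* 1152 truncates to 128  */
	u16 new_count = cc_max_table_entries * 64;  /* 1152 fits              */
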
@@ -92,6 +92,8 @@ enum {
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -260,6 +262,20 @@ struct ipoib_ethtool_st {
u16 max_coalesced_frames;
};
struct ipoib_neigh_hash {
struct ipoib_neigh __rcu **buckets;
struct rcu_head rcu;
u32 mask;
u32 size;
};
struct ipoib_neigh_table {
struct ipoib_neigh_hash __rcu *htbl;
rwlock_t rwlock;
atomic_t entries;
struct completion flushed;
};
/*
* Device private locking: network stack tx_lock protects members used
* in TX fast path, lock protects everything else. lock nests inside
@@ -279,6 +295,8 @@ struct ipoib_dev_priv {
struct rb_root path_tree;
struct list_head path_list;
struct ipoib_neigh_table ntbl;
struct ipoib_mcast *broadcast;
struct list_head multicast_list;
struct rb_root multicast_tree;
@@ -291,7 +309,7 @@ struct ipoib_dev_priv {
struct work_struct flush_heavy;
struct work_struct restart_task;
struct delayed_work ah_reap_task;
struct delayed_work neigh_reap_task;
struct ib_device *ca;
u8 port;
u16 pkey;
@@ -377,13 +395,16 @@ struct ipoib_neigh {
#ifdef CONFIG_INFINIBAND_IPOIB_CM
struct ipoib_cm_tx *cm;
#endif
union ib_gid dgid;
u8 daddr[INFINIBAND_ALEN];
struct sk_buff_head queue;
struct neighbour *neighbour;
struct net_device *dev;
struct list_head list;
struct ipoib_neigh __rcu *hnext;
struct rcu_head rcu;
atomic_t refcnt;
unsigned long alive;
};
#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
@@ -394,21 +415,17 @@ static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
}
/*
* We stash a pointer to our private neighbour information after our
* hardware address in neigh->ha. The ALIGN() expression here makes
* sure that this pointer is stored aligned so that an unaligned
* load is not needed to dereference it.
*/
static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
return (void*) neigh + ALIGN(offsetof(struct neighbour, ha) +
INFINIBAND_ALEN, sizeof(void *));
if (atomic_dec_and_test(&neigh->refcnt))
ipoib_neigh_dtor(neigh);
}
struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh,
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
struct net_device *dev);
void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
void ipoib_neigh_free(struct ipoib_neigh *neigh);
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid);
extern struct workqueue_struct *ipoib_workqueue;
@@ -425,7 +442,6 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
{
kref_put(&ah->ref, ipoib_free_ah);
}
int ipoib_open(struct net_device *dev);
int ipoib_add_pkey_attr(struct net_device *dev);
int ipoib_add_umcast_attr(struct net_device *dev);
@@ -455,7 +471,7 @@ void ipoib_dev_cleanup(struct net_device *dev);
void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_carrier_on_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
@@ -517,10 +533,10 @@ static inline int ipoib_cm_admin_enabled(struct net_device *dev)
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
static inline int ipoib_cm_enabled(struct net_device *dev, struct neighbour *n)
static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
return IPOIB_CM_SUPPORTED(n->ha) &&
return IPOIB_CM_SUPPORTED(hwaddr) &&
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
@@ -575,7 +591,7 @@ static inline int ipoib_cm_admin_enabled(struct net_device *dev)
{
return 0;
}
static inline int ipoib_cm_enabled(struct net_device *dev, struct neighbour *n)
static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
{
return 0;
......
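
Taken together, the ipoib.h hunks define the replacement for the neigh->ha pointer trick: a per-device ipoib_neigh_table holding an RCU-managed ipoib_neigh_hash of reference-counted ipoib_neigh entries keyed by the 20-byte hardware address, plus the ipoib_neigh_get()/ipoib_neigh_put() pair used on the xmit path. A rough sketch of how a lookup can work against these structures; the hash function and bucket walk shown here are assumptions for illustration, not the actual ipoib_main.c implementation:

#include <linux/jhash.h>

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash;

	rcu_read_lock();
	htbl = rcu_dereference(priv->ntbl.htbl);
	if (!htbl)
		goto out;

	hash = jhash(daddr, INFINIBAND_ALEN, 0) & htbl->mask;
	for (neigh = rcu_dereference(htbl->buckets[hash]);
	     neigh;
	     neigh = rcu_dereference(neigh->hnext)) {
		if (!memcmp(daddr, neigh->daddr, INFINIBAND_ALEN)) {
			/* Pin the entry so it outlives the RCU read section */
			if (!atomic_inc_not_zero(&neigh->refcnt))
				neigh = NULL;	/* entry is being torn down */
			break;
		}
	}
out:
	rcu_read_unlock();
	return neigh;
}
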
@@ -811,9 +811,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
ipoib_neigh_free(neigh);
tx->neigh = NULL;
}
@@ -1230,9 +1228,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
ipoib_neigh_free(neigh);
tx->neigh = NULL;
}
@@ -1279,7 +1275,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
list_move(&tx->list, &priv->cm.reap_list);
queue_work(ipoib_workqueue, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->dgid.raw);
tx->neigh->daddr + 4);
tx->neigh = NULL;
}
}
@@ -1304,7 +1300,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
qpn = IPOIB_QPN(neigh->neighbour->ha);
qpn = IPOIB_QPN(neigh->daddr);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1320,9 +1316,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
ipoib_neigh_free(neigh);
}
list_del(&p->list);
kfree(p);
......
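
The ipoib_cm.c hunks follow from the new addressing: connected-mode code now reads everything it needs from the 20-byte hardware address stored in ipoib_neigh (daddr) rather than from struct neighbour, and the explicit ipoib_put_ah() before each free disappears, with the AH reference dropped instead when the entry itself is torn down. The daddr layout that makes IPOIB_QPN(neigh->daddr) and the "daddr + 4" GID pointer work, inferred from the existing IPOIB_QPN() macro and shown as an illustrative struct (the driver really uses a plain u8 array of INFINIBAND_ALEN, i.e. 20, bytes):

struct ipoib_hw_addr_layout {
	u8 flags_qpn[4];	/* flag bits in the first byte, 24-bit QPN after */
	u8 gid[16];		/* destination GID, hence "daddr + 4" above      */
};
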
This diff is collapsed.
@@ -69,28 +69,13 @@ struct ipoib_mcast_iter {
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh, *tmp;
int tx_dropped = 0;
ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
mcast->mcmember.mgid.raw);
spin_lock_irq(&priv->lock);
list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
/*
* It's safe to call ipoib_put_ah() inside priv->lock
* here, because we know that mcast->ah will always
* hold one more reference, so ipoib_put_ah() will
* never do more than decrement the ref count.
*/
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
}
spin_unlock_irq(&priv->lock);
/* remove all neigh connected to this mcast */
ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);
if (mcast->ah)
ipoib_put_ah(mcast->ah);
@@ -655,17 +640,12 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
return 0;
}
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct dst_entry *dst = skb_dst(skb);
struct ipoib_mcast *mcast;
struct neighbour *n;
unsigned long flags;
n = NULL;
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
void *mgid = daddr + 4;
spin_lock_irqsave(&priv->lock, flags);
@@ -721,28 +701,29 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
if (n) {
if (!*to_ipoib_neigh(n)) {
struct ipoib_neigh *neigh;
neigh = ipoib_neigh_alloc(n, skb->dev);
spin_unlock_irqrestore(&priv->lock, flags);
neigh = ipoib_neigh_get(dev, daddr);
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
spin_unlock_irqrestore(&priv->lock, flags);
neigh = ipoib_neigh_alloc(daddr, dev);
spin_lock_irqsave(&priv->lock, flags);
if (neigh) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list,
&mcast->neigh_list);
}
list_add_tail(&neigh->list, &mcast->neigh_list);
}
neigh_release(n);
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
if (neigh)
ipoib_neigh_put(neigh);
return;
}
unlock:
if (n)
neigh_release(n);
spin_unlock_irqrestore(&priv->lock, flags);
}
......
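
Finally, the multicast hunks put the new API on the send path: ipoib_mcast_free() delegates cleanup to ipoib_del_neighs_by_gid(), and ipoib_mcast_send() receives the 20-byte daddr directly instead of digging a struct neighbour out of the skb's dst. Stripped of the lock drop/reacquire dance, the hunk above boils down to this get-or-allocate pattern (condensed from the diff, error paths omitted):

	neigh = ipoib_neigh_get(dev, daddr);
	if (!neigh) {
		neigh = ipoib_neigh_alloc(daddr, dev);
		if (neigh) {
			kref_get(&mcast->ah->ref);
			neigh->ah = mcast->ah;
			list_add_tail(&neigh->list, &mcast->neigh_list);
		}
	}

	ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	if (neigh)
		ipoib_neigh_put(neigh);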