Commit 899aba89 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Fix FIXME in rxe_udp_encap_recv()

rxe_udp_encap_recv() drops the reference to rxe->ib_dev taken by
rxe_get_dev_from_net() which should be held until each received skb is
freed. This patch moves the calls to ib_device_put() to each place a
received skb is freed. It also takes references to the ib_device for each
cloned skb created to process received multicast packets.

Fixes: 4c173f59 ("RDMA/rxe: Use ib_device_get_by_netdev() instead of open coding")
Link: https://lore.kernel.org/r/20210128233318.2591-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 5120bf0a
...@@ -515,6 +515,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify) ...@@ -515,6 +515,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->resp_pkts))) { while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp); rxe_drop_ref(qp);
kfree_skb(skb); kfree_skb(skb);
ib_device_put(qp->ibqp.device);
} }
while ((wqe = queue_head(qp->sq.queue))) { while ((wqe = queue_head(qp->sq.queue))) {
...@@ -527,6 +528,17 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify) ...@@ -527,6 +528,17 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
} }
} }
/*
 * Free a received skb and drop the references that were taken on its
 * behalf while it was queued: the qp reference (dropped via
 * rxe_drop_ref()) and the ib_device reference taken when the skb was
 * received in rxe_udp_encap_recv() -- this helper exists so every
 * completer exit path releases all three in one place.
 */
static void free_pkt(struct rxe_pkt_info *pkt)
{
struct sk_buff *skb = PKT_TO_SKB(pkt);
struct rxe_qp *qp = pkt->qp;
/* read the device pointer before rxe_drop_ref(qp) may release the qp */
struct ib_device *dev = qp->ibqp.device;
kfree_skb(skb);
rxe_drop_ref(qp);
ib_device_put(dev);
}
int rxe_completer(void *arg) int rxe_completer(void *arg)
{ {
struct rxe_qp *qp = (struct rxe_qp *)arg; struct rxe_qp *qp = (struct rxe_qp *)arg;
...@@ -624,11 +636,8 @@ int rxe_completer(void *arg) ...@@ -624,11 +636,8 @@ int rxe_completer(void *arg)
break; break;
case COMPST_DONE: case COMPST_DONE:
if (pkt) { if (pkt)
rxe_drop_ref(pkt->qp); free_pkt(pkt);
kfree_skb(skb);
skb = NULL;
}
goto done; goto done;
case COMPST_EXIT: case COMPST_EXIT:
...@@ -671,12 +680,8 @@ int rxe_completer(void *arg) ...@@ -671,12 +680,8 @@ int rxe_completer(void *arg)
*/ */
if (qp->comp.started_retry && if (qp->comp.started_retry &&
!qp->comp.timeout_retry) { !qp->comp.timeout_retry) {
if (pkt) { if (pkt)
rxe_drop_ref(pkt->qp); free_pkt(pkt);
kfree_skb(skb);
skb = NULL;
}
goto done; goto done;
} }
...@@ -699,13 +704,8 @@ int rxe_completer(void *arg) ...@@ -699,13 +704,8 @@ int rxe_completer(void *arg)
qp->comp.started_retry = 1; qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 0); rxe_run_task(&qp->req.task, 0);
} }
if (pkt)
if (pkt) { free_pkt(pkt);
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
skb = NULL;
}
goto done; goto done;
} else { } else {
...@@ -726,9 +726,7 @@ int rxe_completer(void *arg) ...@@ -726,9 +726,7 @@ int rxe_completer(void *arg)
mod_timer(&qp->rnr_nak_timer, mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt) jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK)); & ~AETH_TYPE_MASK));
rxe_drop_ref(pkt->qp); free_pkt(pkt);
kfree_skb(skb);
skb = NULL;
goto exit; goto exit;
} else { } else {
rxe_counter_inc(rxe, rxe_counter_inc(rxe,
...@@ -742,13 +740,8 @@ int rxe_completer(void *arg) ...@@ -742,13 +740,8 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS); WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe); do_complete(qp, wqe);
rxe_qp_error(qp); rxe_qp_error(qp);
if (pkt)
if (pkt) { free_pkt(pkt);
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
skb = NULL;
}
goto exit; goto exit;
} }
} }
......
...@@ -152,10 +152,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev, ...@@ -152,10 +152,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{ {
struct udphdr *udph; struct udphdr *udph;
struct rxe_dev *rxe;
struct net_device *ndev = skb->dev; struct net_device *ndev = skb->dev;
struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
/* takes a reference on rxe->ib_dev
* drop when skb is freed
*/
rxe = rxe_get_dev_from_net(ndev);
if (!rxe) if (!rxe)
goto drop; goto drop;
...@@ -174,12 +178,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ...@@ -174,12 +178,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
rxe_rcv(skb); rxe_rcv(skb);
/*
* FIXME: this is in the wrong place, it needs to be done when pkt is
* destroyed
*/
ib_device_put(&rxe->ib_dev);
return 0; return 0;
drop: drop:
kfree_skb(skb); kfree_skb(skb);
......
...@@ -274,6 +274,10 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) ...@@ -274,6 +274,10 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
*/ */
if (mce->qp_list.next != &mcg->qp_list) { if (mce->qp_list.next != &mcg->qp_list) {
per_qp_skb = skb_clone(skb, GFP_ATOMIC); per_qp_skb = skb_clone(skb, GFP_ATOMIC);
if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
kfree_skb(per_qp_skb);
continue;
}
} else { } else {
per_qp_skb = skb; per_qp_skb = skb;
/* show we have consumed the skb */ /* show we have consumed the skb */
...@@ -296,6 +300,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) ...@@ -296,6 +300,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
err1: err1:
/* free skb if not consumed */ /* free skb if not consumed */
kfree_skb(skb); kfree_skb(skb);
ib_device_put(&rxe->ib_dev);
} }
/** /**
...@@ -405,4 +410,5 @@ void rxe_rcv(struct sk_buff *skb) ...@@ -405,4 +410,5 @@ void rxe_rcv(struct sk_buff *skb)
rxe_drop_ref(pkt->qp); rxe_drop_ref(pkt->qp);
kfree_skb(skb); kfree_skb(skb);
ib_device_put(&rxe->ib_dev);
} }
...@@ -99,6 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp, ...@@ -99,6 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
while ((skb = skb_dequeue(&qp->req_pkts))) { while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp); rxe_drop_ref(qp);
kfree_skb(skb); kfree_skb(skb);
ib_device_put(qp->ibqp.device);
} }
/* go drain recv wr queue */ /* go drain recv wr queue */
...@@ -1012,6 +1013,7 @@ static enum resp_states cleanup(struct rxe_qp *qp, ...@@ -1012,6 +1013,7 @@ static enum resp_states cleanup(struct rxe_qp *qp,
skb = skb_dequeue(&qp->req_pkts); skb = skb_dequeue(&qp->req_pkts);
rxe_drop_ref(qp); rxe_drop_ref(qp);
kfree_skb(skb); kfree_skb(skb);
ib_device_put(qp->ibqp.device);
} }
if (qp->resp.mr) { if (qp->resp.mr) {
...@@ -1176,6 +1178,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) ...@@ -1176,6 +1178,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->req_pkts))) { while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp); rxe_drop_ref(qp);
kfree_skb(skb); kfree_skb(skb);
ib_device_put(qp->ibqp.device);
} }
if (notify) if (notify)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment