Commit de85d99e authored by Herbert Xu, committed by David S. Miller

netpoll: Fix RCU usage

The use of RCU in netpoll is incorrect in a number of places:

1) The initial setting is lacking a write barrier.
2) The synchronize_rcu is in the wrong place.
3) Read barriers are missing.
4) Some places are even missing rcu_read_lock.
5) npinfo is zeroed after freeing.

This patch fixes those issues.  As most users are in BH context,
this also converts the RCU usage to the BH variant.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 36655042
@@ -57,12 +57,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 #ifdef CONFIG_NETPOLL
 static inline bool netpoll_rx(struct sk_buff *skb)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
+	struct netpoll_info *npinfo;
 	unsigned long flags;
 	bool ret = false;
 
+	rcu_read_lock_bh();
+	npinfo = rcu_dereference(skb->dev->npinfo);
+
 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
-		return false;
+		goto out;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	/* check rx_flags again with the lock held */
@@ -70,12 +73,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
 		ret = true;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
+out:
+	rcu_read_unlock_bh();
 	return ret;
 }
 
 static inline int netpoll_rx_on(struct sk_buff *skb)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
+	struct netpoll_info *npinfo = rcu_dereference(skb->dev->npinfo);
 
 	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
 }
@@ -91,7 +96,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
 
-	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev && dev->npinfo) {
 		spin_lock(&napi->poll_lock);
 		napi->poll_owner = smp_processor_id();
@@ -108,7 +112,6 @@ static inline void netpoll_poll_unlock(void *have)
 		napi->poll_owner = -1;
 		spin_unlock(&napi->poll_lock);
 	}
-	rcu_read_unlock();
 }
 #else
...
@@ -261,6 +261,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	unsigned long tries;
 	struct net_device *dev = np->dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
+	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo = np->dev->npinfo;
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -810,10 +811,7 @@ int netpoll_setup(struct netpoll *np)
 	refill_skbs();
 
 	/* last thing to do is link it to the net device structure */
-	ndev->npinfo = npinfo;
-
-	/* avoid racing with NAPI reading npinfo */
-	synchronize_rcu();
+	rcu_assign_pointer(ndev->npinfo, npinfo);
 
 	return 0;
@@ -857,6 +855,16 @@ void netpoll_cleanup(struct netpoll *np)
 	if (atomic_dec_and_test(&npinfo->refcnt)) {
 		const struct net_device_ops *ops;
 
+		ops = np->dev->netdev_ops;
+		if (ops->ndo_netpoll_cleanup)
+			ops->ndo_netpoll_cleanup(np->dev);
+
+		rcu_assign_pointer(np->dev->npinfo, NULL);
+
+		/* avoid racing with NAPI reading npinfo */
+		synchronize_rcu_bh();
+
 		skb_queue_purge(&npinfo->arp_tx);
 		skb_queue_purge(&npinfo->txq);
 		cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -864,10 +872,6 @@ void netpoll_cleanup(struct netpoll *np)
 		/* clean after last, unfinished work */
 		__skb_queue_purge(&npinfo->txq);
 		kfree(npinfo);
-
-		ops = np->dev->netdev_ops;
-		if (ops->ndo_netpoll_cleanup)
-			ops->ndo_netpoll_cleanup(np->dev);
-
-		np->dev->npinfo = NULL;
 	}
 }
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment