Commit 8fdd95ec authored by Herbert Xu's avatar Herbert Xu Committed by David S. Miller

netpoll: Allow netpoll_setup/cleanup recursion

This patch adds the functions __netpoll_setup/__netpoll_cleanup
which are designed to be called recursively through ndo_netpoll_setup.

They must be called with RTNL held, and the caller must initialise
np->dev and ensure that it has a valid reference count.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4247e161
...@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np); ...@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
/* Public netpoll API (include/linux/netpoll.h).
 *
 * The double-underscore variants (__netpoll_setup/__netpoll_cleanup) may be
 * called recursively through ndo_netpoll_setup; per the commit message they
 * must be called with RTNL held, and the caller must initialise np->dev and
 * hold a valid reference on it.  The plain variants acquire RTNL and manage
 * the device reference themselves.
 */
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
......
...@@ -693,15 +693,78 @@ int netpoll_parse_options(struct netpoll *np, char *opt) ...@@ -693,15 +693,78 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
return -1; return -1;
} }
int netpoll_setup(struct netpoll *np) int __netpoll_setup(struct netpoll *np)
{ {
struct net_device *ndev = NULL; struct net_device *ndev = np->dev;
struct in_device *in_dev;
struct netpoll_info *npinfo; struct netpoll_info *npinfo;
const struct net_device_ops *ops; const struct net_device_ops *ops;
unsigned long flags; unsigned long flags;
int err; int err;
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
err = -ENOTSUPP;
goto out;
}
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
goto out;
}
npinfo->rx_flags = 0;
INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
ops = np->dev->netdev_ops;
if (ops->ndo_netpoll_setup) {
err = ops->ndo_netpoll_setup(ndev, npinfo);
if (err)
goto free_npinfo;
}
} else {
npinfo = ndev->npinfo;
atomic_inc(&npinfo->refcnt);
}
npinfo->netpoll = np;
if (np->rx_hook) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_flags |= NETPOLL_RX_ENABLED;
list_add_tail(&np->rx, &npinfo->rx_np);
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
/* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo);
rtnl_unlock();
return 0;
free_npinfo:
kfree(npinfo);
out:
return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
struct net_device *ndev = NULL;
struct in_device *in_dev;
int err;
if (np->dev_name) if (np->dev_name)
ndev = dev_get_by_name(&init_net, np->dev_name); ndev = dev_get_by_name(&init_net, np->dev_name);
if (!ndev) { if (!ndev) {
...@@ -774,61 +837,14 @@ int netpoll_setup(struct netpoll *np) ...@@ -774,61 +837,14 @@ int netpoll_setup(struct netpoll *np)
refill_skbs(); refill_skbs();
rtnl_lock(); rtnl_lock();
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || err = __netpoll_setup(np);
!ndev->netdev_ops->ndo_poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
err = -ENOTSUPP;
goto unlock;
}
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
goto unlock;
}
npinfo->rx_flags = 0;
INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
ops = np->dev->netdev_ops;
if (ops->ndo_netpoll_setup) {
err = ops->ndo_netpoll_setup(ndev, npinfo);
if (err)
goto free_npinfo;
}
} else {
npinfo = ndev->npinfo;
atomic_inc(&npinfo->refcnt);
}
npinfo->netpoll = np;
if (np->rx_hook) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_flags |= NETPOLL_RX_ENABLED;
list_add_tail(&np->rx, &npinfo->rx_np);
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
/* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo);
rtnl_unlock(); rtnl_unlock();
if (err)
goto put;
return 0; return 0;
free_npinfo:
kfree(npinfo);
unlock:
rtnl_unlock();
put: put:
dev_put(ndev); dev_put(ndev);
return err; return err;
...@@ -841,40 +857,32 @@ static int __init netpoll_init(void) ...@@ -841,40 +857,32 @@ static int __init netpoll_init(void)
} }
core_initcall(netpoll_init); core_initcall(netpoll_init);
void netpoll_cleanup(struct netpoll *np) void __netpoll_cleanup(struct netpoll *np)
{ {
struct netpoll_info *npinfo; struct netpoll_info *npinfo;
unsigned long flags; unsigned long flags;
int free = 0;
if (!np->dev) npinfo = np->dev->npinfo;
if (!npinfo)
return; return;
rtnl_lock(); if (!list_empty(&npinfo->rx_np)) {
npinfo = np->dev->npinfo; spin_lock_irqsave(&npinfo->rx_lock, flags);
if (npinfo) { list_del(&np->rx);
if (!list_empty(&npinfo->rx_np)) { if (list_empty(&npinfo->rx_np))
spin_lock_irqsave(&npinfo->rx_lock, flags); npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
list_del(&np->rx); spin_unlock_irqrestore(&npinfo->rx_lock, flags);
if (list_empty(&npinfo->rx_np)) }
npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
free = atomic_dec_and_test(&npinfo->refcnt); if (atomic_dec_and_test(&npinfo->refcnt)) {
if (free) { const struct net_device_ops *ops;
const struct net_device_ops *ops;
ops = np->dev->netdev_ops; ops = np->dev->netdev_ops;
if (ops->ndo_netpoll_cleanup) if (ops->ndo_netpoll_cleanup)
ops->ndo_netpoll_cleanup(np->dev); ops->ndo_netpoll_cleanup(np->dev);
rcu_assign_pointer(np->dev->npinfo, NULL); rcu_assign_pointer(np->dev->npinfo, NULL);
}
}
rtnl_unlock();
if (free) {
/* avoid racing with NAPI reading npinfo */ /* avoid racing with NAPI reading npinfo */
synchronize_rcu_bh(); synchronize_rcu_bh();
...@@ -886,9 +894,19 @@ void netpoll_cleanup(struct netpoll *np) ...@@ -886,9 +894,19 @@ void netpoll_cleanup(struct netpoll *np)
__skb_queue_purge(&npinfo->txq); __skb_queue_purge(&npinfo->txq);
kfree(npinfo); kfree(npinfo);
} }
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
/*
 * netpoll_cleanup - detach a netpoll instance from its device.
 *
 * Public counterpart of __netpoll_cleanup: takes RTNL itself, then releases
 * the device reference acquired by netpoll_setup and clears np->dev.
 * Safe to call when setup never completed (np->dev == NULL).
 */
void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment