Commit 726ce70e authored by Herbert Xu, committed by David S. Miller

net: Move napi polling code out of net_rx_action

This patch creates a new function napi_poll and moves the napi
polling code from net_rx_action into it.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0d164491
...@@ -4557,6 +4557,59 @@ void netif_napi_del(struct napi_struct *napi) ...@@ -4557,6 +4557,59 @@ void netif_napi_del(struct napi_struct *napi)
} }
EXPORT_SYMBOL(netif_napi_del); EXPORT_SYMBOL(netif_napi_del);
/*
 * napi_poll - run one NAPI instance's ->poll() and decide its fate.
 * @n:      NAPI context to service; must currently be on a poll list.
 * @repoll: list to which @n is re-queued if it exhausted its weight
 *          and still needs servicing.
 *
 * Returns the amount of work done (0..n->weight); the caller
 * (net_rx_action) subtracts this from its softirq budget.
 *
 * Factored out of net_rx_action by this commit; behavior is unchanged.
 */
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	/* opaque cookie from netpoll_poll_lock(), passed back to unlock */
	void *have;
	int work, weight;

	/* Detach from the caller's local list before polling; we re-add
	 * to @repoll below only if the weight was fully consumed.
	 */
	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n);
	}

	/* A driver reporting more work than its weight is a driver bug */
	WARN_ON_ONCE(work > weight);

	/* work < weight means the driver called napi_complete() itself
	 * and we no longer own the instance — nothing more to do.
	 */
	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Weight exhausted and not completed: queue for another pass */
	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static void net_rx_action(struct softirq_action *h) static void net_rx_action(struct softirq_action *h)
{ {
struct softnet_data *sd = this_cpu_ptr(&softnet_data); struct softnet_data *sd = this_cpu_ptr(&softnet_data);
...@@ -4564,7 +4617,6 @@ static void net_rx_action(struct softirq_action *h) ...@@ -4564,7 +4617,6 @@ static void net_rx_action(struct softirq_action *h)
int budget = netdev_budget; int budget = netdev_budget;
LIST_HEAD(list); LIST_HEAD(list);
LIST_HEAD(repoll); LIST_HEAD(repoll);
void *have;
local_irq_disable(); local_irq_disable();
list_splice_init(&sd->poll_list, &list); list_splice_init(&sd->poll_list, &list);
...@@ -4572,7 +4624,6 @@ static void net_rx_action(struct softirq_action *h) ...@@ -4572,7 +4624,6 @@ static void net_rx_action(struct softirq_action *h)
while (!list_empty(&list)) { while (!list_empty(&list)) {
struct napi_struct *n; struct napi_struct *n;
int work, weight;
/* If softirq window is exhausted then punt. /* If softirq window is exhausted then punt.
* Allow this to run for 2 jiffies since which will allow * Allow this to run for 2 jiffies since which will allow
...@@ -4583,48 +4634,7 @@ static void net_rx_action(struct softirq_action *h) ...@@ -4583,48 +4634,7 @@ static void net_rx_action(struct softirq_action *h)
n = list_first_entry(&list, struct napi_struct, poll_list); n = list_first_entry(&list, struct napi_struct, poll_list);
list_del_init(&n->poll_list); budget -= napi_poll(n, &repoll);
have = netpoll_poll_lock(n);
weight = n->weight;
/* This NAPI_STATE_SCHED test is for avoiding a race
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
* accidentally calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
work = n->poll(n, weight);
trace_napi_poll(n);
}
WARN_ON_ONCE(work > weight);
budget -= work;
/* Drivers must not modify the NAPI state if they
* consume the entire weight. In such cases this code
* still "owns" the NAPI instance and therefore can
* move the instance around on the list at-will.
*/
if (unlikely(work == weight)) {
if (unlikely(napi_disable_pending(n))) {
napi_complete(n);
} else {
if (n->gro_list) {
/* flush too old packets
* If HZ < 1000, flush all packets.
*/
napi_gro_flush(n, HZ >= 1000);
}
list_add_tail(&n->poll_list, &repoll);
}
}
netpoll_poll_unlock(have);
} }
if (!sd_has_rps_ipi_waiting(sd) && if (!sd_has_rps_ipi_waiting(sd) &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment