Commit 726ce70e authored by Herbert Xu, committed by David S. Miller

net: Move napi polling code out of net_rx_action

This patch creates a new function napi_poll and moves the napi
polling code from net_rx_action into it.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0d164491
net/core/dev.c

@@ -4557,32 +4557,11 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-static void net_rx_action(struct softirq_action *h)
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 {
-	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-	unsigned long time_limit = jiffies + 2;
-	int budget = netdev_budget;
-	LIST_HEAD(list);
-	LIST_HEAD(repoll);
 	void *have;
-
-	local_irq_disable();
-	list_splice_init(&sd->poll_list, &list);
-	local_irq_enable();
-
-	while (!list_empty(&list)) {
-		struct napi_struct *n;
 	int work, weight;
 
-		/* If softirq window is exhausted then punt.
-		 * Allow this to run for 2 jiffies since which will allow
-		 * an average latency of 1.5/HZ.
-		 */
-		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
-			goto softnet_break;
-
-		n = list_first_entry(&list, struct napi_struct, poll_list);
 	list_del_init(&n->poll_list);
 
 	have = netpoll_poll_lock(n);
@@ -4603,28 +4582,59 @@ static void net_rx_action(struct softirq_action *h)
 	WARN_ON_ONCE(work > weight);
 
-		budget -= work;
+	if (likely(work < weight))
+		goto out_unlock;
 
 	/* Drivers must not modify the NAPI state if they
 	 * consume the entire weight.  In such cases this code
 	 * still "owns" the NAPI instance and therefore can
 	 * move the instance around on the list at-will.
 	 */
-		if (unlikely(work == weight)) {
 	if (unlikely(napi_disable_pending(n))) {
 		napi_complete(n);
-			} else {
+		goto out_unlock;
+	}
 
 	if (n->gro_list) {
 		/* flush too old packets
 		 * If HZ < 1000, flush all packets.
 		 */
 		napi_gro_flush(n, HZ >= 1000);
 	}
-				list_add_tail(&n->poll_list, &repoll);
-			}
-		}
 
+	list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
 	netpoll_poll_unlock(have);
+
+	return work;
+}
+
+static void net_rx_action(struct softirq_action *h)
+{
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+	unsigned long time_limit = jiffies + 2;
+	int budget = netdev_budget;
+	LIST_HEAD(list);
+	LIST_HEAD(repoll);
+
+	local_irq_disable();
+	list_splice_init(&sd->poll_list, &list);
+	local_irq_enable();
+
+	while (!list_empty(&list)) {
+		struct napi_struct *n;
+
+		/* If softirq window is exhausted then punt.
+		 * Allow this to run for 2 jiffies since which will allow
+		 * an average latency of 1.5/HZ.
+		 */
+		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
+			goto softnet_break;
+
+		n = list_first_entry(&list, struct napi_struct, poll_list);
+		budget -= napi_poll(n, &repoll);
 	}
 
 	if (!sd_has_rps_ipi_waiting(sd) &&
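The shape of the refactoring is easiest to see outside the kernel. Below is a minimal, self-contained userspace sketch of the same split, illustrative only and not the kernel code: the struct fake_napi type, the fixed-size arrays standing in for the kernel's linked lists, and the WEIGHT/NET_BUDGET constants are invented for the example. napi_poll() drains one instance up to its weight and reports the work done, requeueing the instance when the full weight was consumed; net_rx_action() owns the shared budget and simply charges each call's return value against it, mirroring the patch's "budget -= napi_poll(n, &repoll)" line. The kernel's 2-jiffy time limit and the irq/netpoll locking are omitted.

#include <stdio.h>

#define NDEV       3
#define WEIGHT     4    /* per-instance quota per poll, like n->weight      */
#define NET_BUDGET 10   /* shared quota per softirq run, like netdev_budget */

struct fake_napi {
	const char *name;
	int backlog;        /* packets still queued on this instance */
};

/* Sketch of napi_poll(): process up to WEIGHT packets for one instance and
 * return the amount of work done.  If the full weight was used, the instance
 * goes on the caller's repoll list so it gets polled again later.
 */
static int napi_poll(struct fake_napi *n, struct fake_napi **repoll, int *repoll_cnt)
{
	int work = n->backlog < WEIGHT ? n->backlog : WEIGHT;

	n->backlog -= work;

	if (work == WEIGHT)
		repoll[(*repoll_cnt)++] = n;   /* still busy: poll again later */

	return work;
}

/* Sketch of net_rx_action(): walk the poll list, charging each instance's
 * work against the shared budget, and stop early once the budget is gone.
 */
static void net_rx_action(struct fake_napi **list, int cnt)
{
	struct fake_napi *repoll[NDEV];
	int repoll_cnt = 0;
	int budget = NET_BUDGET;
	int i;

	for (i = 0; i < cnt; i++) {
		if (budget <= 0) {
			printf("budget exhausted with %d instance(s) pending\n",
			       cnt - i + repoll_cnt);
			return;
		}
		budget -= napi_poll(list[i], repoll, &repoll_cnt);
		printf("polled %-4s  budget left %d\n", list[i]->name, budget);
	}
	printf("%d instance(s) queued for repoll\n", repoll_cnt);
}

int main(void)
{
	struct fake_napi devs[NDEV] = {
		{ "eth0", 2 }, { "eth1", 7 }, { "eth2", 5 },
	};
	struct fake_napi *list[NDEV] = { &devs[0], &devs[1], &devs[2] };

	net_rx_action(list, NDEV);
	return 0;
}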