Commit 02d62e86 authored by Eric Dumazet, committed by David S. Miller

net: un-inline sk_busy_loop()

There is really little gain from inlining this big function.
We'll soon make it even bigger in following patches.

This means we no longer need to export napi_by_id()
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5865316c
...@@ -460,15 +460,6 @@ static inline void napi_complete(struct napi_struct *n) ...@@ -460,15 +460,6 @@ static inline void napi_complete(struct napi_struct *n)
return napi_complete_done(n, 0); return napi_complete_done(n, 0);
} }
/**
* napi_by_id - lookup a NAPI by napi_id
* @napi_id: hashed napi_id
*
* lookup @napi_id in napi_hash table
* must be called under rcu_read_lock()
*/
struct napi_struct *napi_by_id(unsigned int napi_id);
/** /**
* napi_hash_add - add a NAPI to global hashtable * napi_hash_add - add a NAPI to global hashtable
* @napi: napi context * @napi: napi context
......
...@@ -72,50 +72,7 @@ static inline bool busy_loop_timeout(unsigned long end_time) ...@@ -72,50 +72,7 @@ static inline bool busy_loop_timeout(unsigned long end_time)
return time_after(now, end_time); return time_after(now, end_time);
} }
/* when used in sock_poll() nonblock is known at compile time to be true bool sk_busy_loop(struct sock *sk, int nonblock);
* so the loop and end_time will be optimized out
*/
/*
 * sk_busy_loop - busy-poll the NAPI context associated with @sk
 * @sk: socket whose napi_id selects the NAPI instance to poll
 * @nonblock: when non-zero, poll at most one pass (no time budget)
 *
 * Returns true if the socket receive queue is non-empty on exit,
 * false otherwise (including when no NAPI context or driver
 * ndo_busy_poll handler is available).
 */
static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
/* Compute the poll deadline only for the blocking case; for
 * nonblock callers the loop below runs at most once and end_time
 * is never consulted.
 */
unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
const struct net_device_ops *ops;
struct napi_struct *napi;
int rc = false;
/*
 * rcu read lock for napi hash
 * bh so we don't race with net_rx_action
 */
rcu_read_lock_bh();
napi = napi_by_id(sk->sk_napi_id);
if (!napi)
goto out;
/* The device must provide a busy-poll handler; otherwise bail out. */
ops = napi->dev->netdev_ops;
if (!ops->ndo_busy_poll)
goto out;
do {
rc = ops->ndo_busy_poll(napi);
if (rc == LL_FLUSH_FAILED)
break; /* permanent failure */
if (rc > 0)
/* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk),
LINUX_MIB_BUSYPOLLRXPACKETS, rc);
cpu_relax();
/* Keep polling only while blocking, nothing has been queued,
 * no reschedule is pending, and the time budget remains.
 */
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time));
/* Report whether data is now available, regardless of poll rc. */
rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
rcu_read_unlock_bh();
return rc;
}
/* used in the NIC receive handler to mark the skb */ /* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb, static inline void skb_mark_napi_id(struct sk_buff *skb,
......
...@@ -96,6 +96,7 @@ ...@@ -96,6 +96,7 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/stat.h> #include <linux/stat.h>
#include <net/dst.h> #include <net/dst.h>
...@@ -4663,7 +4664,7 @@ void napi_complete_done(struct napi_struct *n, int work_done) ...@@ -4663,7 +4664,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
EXPORT_SYMBOL(napi_complete_done); EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */ /* must be called under rcu_read_lock(), as we dont take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id) static struct napi_struct *napi_by_id(unsigned int napi_id)
{ {
unsigned int hash = napi_id % HASH_SIZE(napi_hash); unsigned int hash = napi_id % HASH_SIZE(napi_hash);
struct napi_struct *napi; struct napi_struct *napi;
...@@ -4674,7 +4675,52 @@ struct napi_struct *napi_by_id(unsigned int napi_id) ...@@ -4674,7 +4675,52 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
return NULL; return NULL;
} }
EXPORT_SYMBOL_GPL(napi_by_id);
#if defined(CONFIG_NET_RX_BUSY_POLL)
/*
 * sk_busy_loop - busy-poll the NAPI context associated with @sk
 * @sk: socket whose napi_id selects the NAPI instance to poll
 * @nonblock: when non-zero, poll at most one pass (no time budget)
 *
 * Out-of-line version (moved here from net/busy_poll.h); the inline
 * gained little and the function is expected to grow. Returns true
 * if the socket receive queue is non-empty on exit, false otherwise
 * (including when no NAPI context or ndo_busy_poll handler exists).
 */
bool sk_busy_loop(struct sock *sk, int nonblock)
{
/* Compute the poll deadline only for the blocking case; for
 * nonblock callers the loop below runs at most once and end_time
 * is never consulted.
 */
unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
const struct net_device_ops *ops;
struct napi_struct *napi;
int rc = false;
/*
 * rcu read lock for napi hash
 * bh so we don't race with net_rx_action
 */
rcu_read_lock_bh();
napi = napi_by_id(sk->sk_napi_id);
if (!napi)
goto out;
/* The device must provide a busy-poll handler; otherwise bail out. */
ops = napi->dev->netdev_ops;
if (!ops->ndo_busy_poll)
goto out;
do {
rc = ops->ndo_busy_poll(napi);
if (rc == LL_FLUSH_FAILED)
break; /* permanent failure */
if (rc > 0)
/* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk),
LINUX_MIB_BUSYPOLLRXPACKETS, rc);
cpu_relax();
/* Keep polling only while blocking, nothing has been queued,
 * no reschedule is pending, and the time budget remains.
 */
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time));
/* Report whether data is now available, regardless of poll rc. */
rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
rcu_read_unlock_bh();
return rc;
}
/* Exported: header callers (sock_poll paths) now call out of line. */
EXPORT_SYMBOL(sk_busy_loop);
#endif /* CONFIG_NET_RX_BUSY_POLL */
void napi_hash_add(struct napi_struct *napi) void napi_hash_add(struct napi_struct *napi)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment