Commit 89c4b442 authored by Eric Dumazet, committed by David S. Miller

netpoll: more efficient locking

Callers of netpoll_poll_lock() own NAPI_STATE_SCHED

Callers of netpoll_poll_unlock() have BH blocked between the clearing
of NAPI_STATE_SCHED and the release of poll_lock.

We can therefore avoid the spinlock, which sees no contention, and use
cmpxchg() on poll_owner, which we need to set anyway.

This removes a possible lockdep violation seen after the cited commit,
since sk_busy_loop() re-enables BH before calling busy_poll_stop().

Fixes: 217f6974 ("net: busy-poll: allow preemption in sk_busy_loop()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1629dd4f
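
The scheme treats poll_owner itself as the lock: -1 means unowned, and a
CPU id marks the owner. Below is a minimal userspace sketch of that
protocol, using C11 atomics as stand-ins for the kernel's cmpxchg(),
cpu_relax() and smp_store_release(); the owner_lock()/owner_trylock()/
owner_unlock() names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

static atomic_int poll_owner = -1;	/* -1: unowned, else the owner's id */

/* Spin until our id is installed: what netpoll_poll_lock() does. */
static void owner_lock(int me)
{
	int expected = -1;

	while (!atomic_compare_exchange_strong_explicit(
			&poll_owner, &expected, me,
			memory_order_acquire, memory_order_relaxed)) {
		expected = -1;	/* a failed CAS wrote the current owner back */
		sched_yield();	/* stand-in for cpu_relax() */
	}
}

/* One non-blocking attempt: what the cmpxchg() in poll_napi() does. */
static bool owner_trylock(int me)
{
	int expected = -1;

	return atomic_compare_exchange_strong_explicit(
			&poll_owner, &expected, me,
			memory_order_acquire, memory_order_relaxed);
}

/* Release store: what smp_store_release() does in netpoll_poll_unlock(). */
static void owner_unlock(void)
{
	atomic_store_explicit(&poll_owner, -1, memory_order_release);
}

The release on unlock orders everything done while holding the lock
before the -1 becomes visible, so another CPU that later wins the
cmpxchg() cannot observe stale state from the previous owner.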
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -316,7 +316,6 @@ struct napi_struct {
 	unsigned int		gro_count;
 	int			(*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
-	spinlock_t		poll_lock;
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
...
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 	struct net_device *dev = napi->dev;
 
 	if (dev && dev->npinfo) {
-		spin_lock(&napi->poll_lock);
-		napi->poll_owner = smp_processor_id();
+		int owner = smp_processor_id();
+
+		while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+			cpu_relax();
+
 		return napi;
 	}
 	return NULL;
@@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have)
 {
 	struct napi_struct *napi = have;
 
-	if (napi) {
-		napi->poll_owner = -1;
-		spin_unlock(&napi->poll_lock);
-	}
+	if (napi)
+		smp_store_release(&napi->poll_owner, -1);
 }
 
 static inline bool netpoll_tx_running(struct net_device *dev)
...
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5143,7 +5143,6 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	list_add(&napi->dev_list, &dev->napi_list);
 	napi->dev = dev;
 #ifdef CONFIG_NETPOLL
-	spin_lock_init(&napi->poll_lock);
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
...
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -171,12 +171,12 @@ static void poll_one_napi(struct napi_struct *napi)
 static void poll_napi(struct net_device *dev)
 {
 	struct napi_struct *napi;
+	int cpu = smp_processor_id();
 
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		if (napi->poll_owner != smp_processor_id() &&
-		    spin_trylock(&napi->poll_lock)) {
+		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
 			poll_one_napi(napi);
-			spin_unlock(&napi->poll_lock);
+			smp_store_release(&napi->poll_owner, -1);
 		}
 	}
 }
...
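
For context, the NAPI poll path brackets the driver's poll callback
with these helpers, so poll_owner is held across the whole poll. A
simplified sketch of such a caller, modelled on napi_poll() in
net/core/dev.c (budget and rescheduling logic omitted):

	void *have;
	int work;

	/* The caller already owns NAPI_STATE_SCHED for this napi. */
	have = netpoll_poll_lock(n);
	work = n->poll(n, weight);	/* driver's poll callback */
	netpoll_poll_unlock(have);

Since every such caller owns NAPI_STATE_SCHED, the only party that can
race for poll_owner is netpoll's poll_napi(), and it makes only a
single trylock-style cmpxchg() attempt, which is why the full spinlock
(and its lockdep bookkeeping) can be dropped.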