Commit fbd9a2ce authored by Sebastian Andrzej Siewior, committed by David S. Miller

net: Add lockdep asserts to ____napi_schedule().

____napi_schedule() needs to be invoked with interrupts disabled because
of __raise_softirq_irqoff() (in order not to corrupt the per-CPU list).
____napi_schedule() also needs to be invoked from interrupt context so
that the raised softirq is processed when the interrupt context is left.
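For context, the regular entry point already satisfies the first
condition by disabling interrupts around the call; a condensed sketch
of __napi_schedule() (paraphrased from net/core/dev.c, details may
differ across kernel versions):

	void __napi_schedule(struct napi_struct *n)
	{
		unsigned long flags;

		/* interrupts off: safe to touch the per-CPU list */
		local_irq_save(flags);
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
		local_irq_restore(flags);
	}

Whether the second condition holds depends on the context of the
caller, which is what the new assert catches.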

Add lockdep asserts for both conditions.
Since this is the second time the irq/softirq check is needed, provide
a generic lockdep_assert_softirq_will_run() which is used by both
callers.
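A hypothetical illustration (not part of this commit) of the contexts
the new assert accepts; example_caller() is made up for this sketch:

	static void example_caller(void)
	{
		/* Plain process context: hardirq_count() and
		 * softirq_count() are both zero, so the assert fires.
		 */
		lockdep_assert_softirq_will_run();

		/* BH-disabled section: softirq_count() != 0 and any
		 * pending softirq runs at the matching
		 * local_bh_enable(), so the assert passes.
		 */
		local_bh_disable();
		lockdep_assert_softirq_will_run();
		local_bh_enable();
	}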
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d96657dc
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -329,6 +329,12 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_assert_none_held_once()		\
 	lockdep_assert_once(!current->lockdep_depth)
 
+/*
+ * Ensure that softirq is handled within the callchain and not delayed and
+ * handled by chance.
+ */
+#define lockdep_assert_softirq_will_run()		\
+	lockdep_assert_once(hardirq_count() | softirq_count())
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
@@ -414,6 +420,7 @@ extern int lockdep_is_held(const void *);
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 #define lockdep_assert_none_held_once()	do { } while (0)
+#define lockdep_assert_softirq_will_run()	do { } while (0)
 
 #define lockdep_recursing(tsk)			(0)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4265,6 +4265,9 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 {
 	struct task_struct *thread;
 
+	lockdep_assert_softirq_will_run();
+	lockdep_assert_irqs_disabled();
+
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
 		/* Paired with smp_mb__before_atomic() in
 		 * napi_enable()/dev_set_threaded().
@@ -4872,7 +4875,7 @@ int __netif_rx(struct sk_buff *skb)
 {
 	int ret;
 
-	lockdep_assert_once(hardirq_count() | softirq_count());
+	lockdep_assert_softirq_will_run();
 
 	trace_netif_rx_entry(skb);
 	ret = netif_rx_internal(skb);