Commit 2a916f2f authored by David Miller, committed by Alexei Starovoitov

bpf: Use migrate_disable/enable in array macros and cgroup/lirc code.

Replace the preemption disable/enable with migrate_disable/enable() to
reflect the actual requirement and to allow PREEMPT_RT to substitute it
with an actual migration disable mechanism which does not disable
preemption.

This includes the code paths that go via __bpf_prog_run_save_cb().
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.998293311@linutronix.de
parent 02ad0596
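
For context: in this patch series, migrate_disable()/migrate_enable() are defined so that non-PREEMPT_RT kernels see no behavioral change, while PREEMPT_RT gets a real migration-disable that leaves preemption on. A minimal sketch of that mapping (simplified, not the verbatim include/linux/preempt.h code):

#ifndef CONFIG_PREEMPT_RT
/* !PREEMPT_RT: migrate_disable() maps to preempt_disable(), so the
 * substitution in the diff below is behaviorally a no-op on mainline
 * preemption models. */
#define migrate_disable()	preempt_disable()
#define migrate_enable()	preempt_enable()
#else
/* PREEMPT_RT: pin the task to its current CPU but keep it preemptible,
 * which is all the BPF run-array and cgroup paths actually require. */
void migrate_disable(void);
void migrate_enable(void);
#endif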
@@ -885,7 +885,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
 		struct bpf_prog *_prog;			\
 		struct bpf_prog_array *_array;		\
 		u32 _ret = 1;				\
-		preempt_disable();			\
+		migrate_disable();			\
 		rcu_read_lock();			\
 		_array = rcu_dereference(array);	\
 		if (unlikely(check_non_null && !_array))\
@@ -898,7 +898,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
 		}					\
 _out:							\
 		rcu_read_unlock();			\
-		preempt_enable();			\
+		migrate_enable();			\
 		_ret;					\
 	 })
@@ -932,7 +932,7 @@ _out:							\
 		u32 ret;				\
 		u32 _ret = 1;				\
 		u32 _cn = 0;				\
-		preempt_disable();			\
+		migrate_disable();			\
 		rcu_read_lock();			\
 		_array = rcu_dereference(array);	\
 		_item = &_array->items[0];		\
@@ -944,7 +944,7 @@ _out:							\
 			_item++;			\
 		}					\
 		rcu_read_unlock();			\
-		preempt_enable();			\
+		migrate_enable();			\
 		if (_ret)				\
 			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
 		else					\
...
@@ -677,6 +677,7 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 	return qdisc_skb_cb(skb)->data;
 }
 
+/* Must be invoked with migration disabled */
 static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
 					 struct sk_buff *skb)
 {
@@ -702,9 +703,9 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 {
 	u32 res;
 
-	preempt_disable();
+	migrate_disable();
 	res = __bpf_prog_run_save_cb(prog, skb);
-	preempt_enable();
+	migrate_enable();
 	return res;
 }
...
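
The comment added to __bpf_prog_run_save_cb() records the calling convention that results from this change. A short illustrative sketch of the two call paths (names such as cgrp, type, and skb are stand-ins; the real array call site lives in kernel/bpf/cgroup.c):

/* Path 1: via the run-array macro. BPF_PROG_RUN_ARRAY() now disables
 * migration itself, so __bpf_prog_run_save_cb() is entered with
 * migration already disabled, as its new comment requires. */
ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
			 __bpf_prog_run_save_cb);

/* Path 2: standalone callers. The bpf_prog_run_save_cb() wrapper
 * brackets the raw helper with migrate_disable()/migrate_enable()
 * on the caller's behalf. */
ret = bpf_prog_run_save_cb(prog, skb);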