Commit 1b46b921 authored by Linus Torvalds

Merge tag 's390-5.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Vasily Gorbik:

 - Disable preemption tracing in the percpu macros, since the lockdep
   code itself now uses percpu variables and tracing there causes
   recursion.

 - Fix kernel-space 4-level paging, broken by the recent vmem rework.

* tag 's390-5.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/vmem: fix vmem_add_range for 4-level paging
  s390: don't trace preemption in percpu macros
parents c8b5563a bffc2f7a
@@ -29,7 +29,7 @@
         typedef typeof(pcp) pcp_op_T__; \
         pcp_op_T__ old__, new__, prev__; \
         pcp_op_T__ *ptr__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         ptr__ = raw_cpu_ptr(&(pcp)); \
         prev__ = *ptr__; \
         do { \
@@ -37,7 +37,7 @@
                 new__ = old__ op (val); \
                 prev__ = cmpxchg(ptr__, old__, new__); \
         } while (prev__ != old__); \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
         new__; \
 })
@@ -68,7 +68,7 @@
         typedef typeof(pcp) pcp_op_T__; \
         pcp_op_T__ val__ = (val); \
         pcp_op_T__ old__, *ptr__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         ptr__ = raw_cpu_ptr(&(pcp)); \
         if (__builtin_constant_p(val__) && \
             ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
@@ -84,7 +84,7 @@
                 : [val__] "d" (val__) \
                 : "cc"); \
         } \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
 }
 #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
@@ -95,14 +95,14 @@
         typedef typeof(pcp) pcp_op_T__; \
         pcp_op_T__ val__ = (val); \
         pcp_op_T__ old__, *ptr__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         ptr__ = raw_cpu_ptr(&(pcp)); \
         asm volatile( \
                 op " %[old__],%[val__],%[ptr__]\n" \
                 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
                 : [val__] "d" (val__) \
                 : "cc"); \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
         old__ + val__; \
 })
@@ -114,14 +114,14 @@
         typedef typeof(pcp) pcp_op_T__; \
         pcp_op_T__ val__ = (val); \
         pcp_op_T__ old__, *ptr__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         ptr__ = raw_cpu_ptr(&(pcp)); \
         asm volatile( \
                 op " %[old__],%[val__],%[ptr__]\n" \
                 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
                 : [val__] "d" (val__) \
                 : "cc"); \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
 }
 #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
@@ -136,10 +136,10 @@
         typedef typeof(pcp) pcp_op_T__; \
         pcp_op_T__ ret__; \
         pcp_op_T__ *ptr__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         ptr__ = raw_cpu_ptr(&(pcp)); \
         ret__ = cmpxchg(ptr__, oval, nval); \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
         ret__; \
 })
@@ -152,10 +152,10 @@
 ({ \
         typeof(pcp) *ptr__; \
         typeof(pcp) ret__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         ptr__ = raw_cpu_ptr(&(pcp)); \
         ret__ = xchg(ptr__, nval); \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
         ret__; \
 })
@@ -171,11 +171,11 @@
         typeof(pcp1) *p1__; \
         typeof(pcp2) *p2__; \
         int ret__; \
-        preempt_disable(); \
+        preempt_disable_notrace(); \
         p1__ = raw_cpu_ptr(&(pcp1)); \
         p2__ = raw_cpu_ptr(&(pcp2)); \
         ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
-        preempt_enable(); \
+        preempt_enable_notrace(); \
         ret__; \
 })
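All of the hunks above (the this_cpu_*() helpers in the s390 percpu header) make the same substitution: the traced preempt_disable()/preempt_enable() pair becomes the _notrace pair. The reason is the cycle described in the pull text: the macros disable preemption, a traced preempt_disable() calls into the tracing/lockdep machinery, and that machinery now updates per-CPU data through these very macros. The stand-alone user-space sketch below only models that cycle; model_this_cpu_add(), model_preempt_disable(), trace_hook() and the use_notrace flag are invented names for illustration, not kernel interfaces.

#include <stdio.h>

static long percpu_counter;     /* stands in for a per-CPU variable */
static int use_notrace;         /* 0: macros use preempt_disable(), 1: the _notrace variant */
static int depth;               /* demo-only cap; the real recursion has no such guard */

static void model_this_cpu_add(long val);

/* Stands in for the tracer/lockdep hook, which itself updates per-CPU data
 * through the same macros. */
static void trace_hook(void)
{
        model_this_cpu_add(1);
}

static void model_preempt_disable(void)
{
        if (!use_notrace) {
                if (depth >= 5) {
                        puts("would recurse forever without this demo cap");
                        return;
                }
                depth++;
                trace_hook();   /* a traced preempt_disable() enters the tracer */
                depth--;
        }
        /* preempt_disable_notrace() only adjusts the preempt count (omitted). */
}

/* Shape of the s390 this_cpu_add_*() macros: disable preemption, update, enable. */
static void model_this_cpu_add(long val)
{
        model_preempt_disable();
        percpu_counter += val;
        /* matching model_preempt_enable() omitted for brevity */
}

int main(void)
{
        use_notrace = 0;
        model_this_cpu_add(1);  /* macro -> tracer -> macro -> ... */

        use_notrace = 1;
        model_this_cpu_add(1);  /* the notrace variant never calls the hook */

        printf("counter = %ld\n", percpu_counter);
        return 0;
}

With use_notrace = 0 the first call re-enters itself through the hook until the demo cap trips; with use_notrace = 1 the hook is never taken, which is what switching the macros to preempt_disable_notrace()/preempt_enable_notrace() achieves. The remaining hunk below is the 4-level paging fix.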
@@ -402,6 +402,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
                         pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                         if (!pud)
                                 goto out;
+                        p4d_populate(&init_mm, p4d, pud);
                 }
                 ret = modify_pud_table(p4d, addr, next, add, direct);
                 if (ret)
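The single added line above is the whole vmem fix: modify_p4d_table() allocates a fresh pud table when it finds an empty p4d entry, but the freshly allocated table was never written back into that p4d entry, so the rest of the walk could not see it and ranges needing four paging levels could not be mapped; p4d_populate(&init_mm, p4d, pud) restores the link. The toy program below reproduces the same class of bug with a made-up two-level table (toy_map() and struct upper/lower are invented names, not kernel API): allocating the lower-level table achieves nothing unless the parent entry is made to point at it.

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct lower { long entry[ENTRIES]; };
struct upper { struct lower *entry[ENTRIES]; };

/* Map "val" at (hi, lo).  link_parent mirrors the presence of the
 * p4d_populate() call: without it the new lower table stays unreachable. */
static int toy_map(struct upper *top, int hi, int lo, long val, int link_parent)
{
        if (!top->entry[hi]) {
                struct lower *tbl = calloc(1, sizeof(*tbl));

                if (!tbl)
                        return -1;
                if (link_parent)
                        top->entry[hi] = tbl;   /* the fix: populate the parent entry */
                else
                        free(tbl);              /* modelled bug: table never linked */
        }
        if (!top->entry[hi])
                return -1;                      /* walk fails, parent entry still empty */
        top->entry[hi]->entry[lo] = val;
        return 0;
}

int main(void)
{
        struct upper top = { { NULL } };

        printf("without populate: %d\n", toy_map(&top, 0, 1, 42, 0));   /* -1 */
        printf("with populate:    %d\n", toy_map(&top, 0, 1, 42, 1));   /*  0 */
        return 0;
}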