Commit 670bcd79 authored by Linus Torvalds

Merge tag 'powerpc-5.7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - One important fix for a bug in the way we find the cache-line size
   from the device tree, which was leading to the wrong size being
   reported to userspace on some platforms.

 - A fix for 8xx STRICT_KERNEL_RWX which was leaving TLB entries around
   leading to a window at boot when the strict mapping wasn't enforced.

 - A fix to enable our KUAP (kernel user access prevention) debugging on
   PPC32.

 - A build fix for clang in lib/mpi.

Thanks to: Chris Packham, Christophe Leroy, Nathan Chancellor, Qian Cai.

* tag 'powerpc-5.7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  lib/mpi: Fix building for powerpc with clang
  powerpc/mm: Fix CONFIG_PPC_KUAP_DEBUG on PPC32
  powerpc/8xx: Fix STRICT_KERNEL_RWX startup test failure
  powerpc/setup_64: Set cache-line-size based on cache-block-size
parents 58792882 5990cdee
@@ -732,7 +732,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
-	kuap_check r2, r4
+	kuap_check r2, r0
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
...
@@ -534,6 +534,8 @@ static bool __init parse_cache_info(struct device_node *np,
 	lsizep = of_get_property(np, propnames[3], NULL);
 	if (bsizep == NULL)
 		bsizep = lsizep;
+	if (lsizep == NULL)
+		lsizep = bsizep;
 	if (lsizep != NULL)
 		lsize = be32_to_cpu(*lsizep);
 	if (bsizep != NULL)
...
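For context, the hunk above makes the cache parsing fall back in both directions: when the device tree provides only one of "cache-block-size" and "cache-line-size", the missing value now follows the one that is present, instead of leaving a wrong line size to be reported to userspace. Below is a minimal, stand-alone C sketch of that fallback logic; it is illustrative only (user-space code, hypothetical function name parse_cache_sizes, host-endian values where the kernel would use be32_to_cpu()).

#include <stdint.h>
#include <stdio.h>

/* Two-way fallback between block size and line size, mirroring the
 * pointer fallback added in the hunk above. */
static void parse_cache_sizes(const uint32_t *bsizep, const uint32_t *lsizep,
                              uint32_t *bsize, uint32_t *lsize)
{
        if (bsizep == NULL)             /* no block size: use the line size */
                bsizep = lsizep;
        if (lsizep == NULL)             /* no line size: use the block size (the new case) */
                lsizep = bsizep;
        if (lsizep != NULL)
                *lsize = *lsizep;
        if (bsizep != NULL)
                *bsize = *bsizep;
}

int main(void)
{
        uint32_t block = 128;           /* device tree with only cache-block-size */
        uint32_t bsize = 0, lsize = 0;

        parse_cache_sizes(&block, NULL, &bsize, &lsize);
        printf("block=%u line=%u\n", bsize, lsize);     /* prints: block=128 line=128 */
        return 0;
}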
@@ -185,6 +185,7 @@ void mmu_mark_initmem_nx(void)
 			mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
 		}
 	}
+	_tlbil_all();
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
@@ -199,6 +200,8 @@ void mmu_mark_rodata_ro(void)
 		      ~(LARGE_PAGE_SIZE_8M - 1)));
 	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
 
+	_tlbil_all();
+
 	/* Update page tables for PTDUMP and BDI */
 	mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
 	mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
...
@@ -397,7 +397,7 @@ config PPC_KUAP
 
 config PPC_KUAP_DEBUG
 	bool "Extra debugging for Kernel Userspace Access Protection"
-	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC_32)
+	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
 	help
 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
 	  If you're unsure, say N.
...
@@ -722,22 +722,22 @@ do { \
 do { \
 	if (__builtin_constant_p(bh) && (bh) == 0) \
 		__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "%r" ((USItype)(ah)), \
 			"%r" ((USItype)(al)), \
 			"rI" ((USItype)(bl))); \
 	else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
 		__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "%r" ((USItype)(ah)), \
 			"%r" ((USItype)(al)), \
 			"rI" ((USItype)(bl))); \
 	else \
 		__asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "%r" ((USItype)(ah)), \
 			"r" ((USItype)(bh)), \
 			"%r" ((USItype)(al)), \
@@ -747,36 +747,36 @@ do { \
 do { \
 	if (__builtin_constant_p(ah) && (ah) == 0) \
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "r" ((USItype)(bh)), \
 			"rI" ((USItype)(al)), \
 			"r" ((USItype)(bl))); \
 	else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "r" ((USItype)(bh)), \
 			"rI" ((USItype)(al)), \
 			"r" ((USItype)(bl))); \
 	else if (__builtin_constant_p(bh) && (bh) == 0) \
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "r" ((USItype)(ah)), \
 			"rI" ((USItype)(al)), \
 			"r" ((USItype)(bl))); \
 	else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "r" ((USItype)(ah)), \
 			"rI" ((USItype)(al)), \
 			"r" ((USItype)(bl))); \
 	else \
 		__asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
-		: "=r" ((USItype)(sh)), \
-			"=&r" ((USItype)(sl)) \
+		: "=r" (sh), \
+			"=&r" (sl) \
 		: "r" ((USItype)(ah)), \
 			"r" ((USItype)(bh)), \
 			"rI" ((USItype)(al)), \
@@ -787,7 +787,7 @@ do { \
 do { \
 	USItype __m0 = (m0), __m1 = (m1); \
 	__asm__ ("mulhwu %0,%1,%2" \
-	: "=r" ((USItype) ph) \
+	: "=r" (ph) \
 	: "%r" (__m0), \
 		"r" (__m1)); \
 	(pl) = __m0 * __m1; \
...
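The lib/mpi change above works around clang rejecting type casts on inline-assembly output operands: an output operand must be a plain lvalue, and a cast expression is not one, while casts on input operands remain fine. Here is a minimal powerpc-only sketch of the accepted form (illustrative only, not part of the patch; copy_reg is a made-up helper):

typedef unsigned int USItype;

/* Builds with both gcc and clang when targeting powerpc: the output
 * operand is a plain lvalue.  Writing "=r" ((USItype)(out)) instead is
 * tolerated by gcc but rejected by clang, which is exactly the cast the
 * hunks above drop from the "=r"/"=&r" outputs. */
static inline USItype copy_reg(USItype in)
{
        USItype out;

        /* "mr" is the powerpc register move instruction. */
        __asm__ ("mr %0,%1" : "=r" (out) : "r" ((USItype)(in)));
        return out;
}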