Commit 671e148d authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "A few arm64 fixes that came in during the merge window for -rc1.

  The main thing is restoring the pointer authentication hwcaps, which
  disappeared during some recent refactoring.

   - Fix regression in CPU erratum workaround when disabling the MMU

   - Fix detection of pointer authentication hwcaps

   - Avoid writeable, executable ELF sections in vmlinux"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: lds: move .got section out of .text
  arm64: kernel: remove SHF_WRITE|SHF_EXECINSTR from .idmap.text
  arm64: cpufeature: Fix pointer auth hwcaps
  arm64: Fix label placement in record_mmu_state()
parents 611c9d88 0fddb79b
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <asm/virt.h> #include <asm/virt.h>
.text .text
.pushsection .idmap.text, "awx" .pushsection .idmap.text, "a"
/* /*
* cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) * cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2)
......
...@@ -2676,26 +2676,26 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -2676,26 +2676,26 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#ifdef CONFIG_ARM64_PTR_AUTH #ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{ {
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth) HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth)
}, },
{ {
ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth) HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth)
}, },
{ {
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth) HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth)
}, },
{}, {},
}; };
static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
{ {
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP) HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP)
}, },
{ {
ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP) HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP)
}, },
{ {
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP) HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP)
}, },
{}, {},
}; };
......
...@@ -150,8 +150,8 @@ CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f ) ...@@ -150,8 +150,8 @@ CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f )
pre_disable_mmu_workaround pre_disable_mmu_workaround
msr sctlr_el2, x19 msr sctlr_el2, x19
b 3f b 3f
pre_disable_mmu_workaround 2: pre_disable_mmu_workaround
2: msr sctlr_el1, x19 msr sctlr_el1, x19
3: isb 3: isb
mov x19, xzr mov x19, xzr
ret ret
......
...@@ -97,7 +97,7 @@ SYM_FUNC_START(__cpu_suspend_enter) ...@@ -97,7 +97,7 @@ SYM_FUNC_START(__cpu_suspend_enter)
ret ret
SYM_FUNC_END(__cpu_suspend_enter) SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "a"
SYM_CODE_START(cpu_resume) SYM_CODE_START(cpu_resume)
mov x0, xzr mov x0, xzr
bl init_kernel_el bl init_kernel_el
......
...@@ -181,18 +181,8 @@ SECTIONS ...@@ -181,18 +181,8 @@ SECTIONS
KPROBES_TEXT KPROBES_TEXT
HYPERVISOR_TEXT HYPERVISOR_TEXT
*(.gnu.warning) *(.gnu.warning)
. = ALIGN(16);
*(.got) /* Global offset table */
} }
/*
* Make sure that the .got.plt is either completely empty or it
* contains only the lazy dispatch entries.
*/
.got.plt : { *(.got.plt) }
ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
"Unexpected GOT/PLT entries detected!")
. = ALIGN(SEGMENT_ALIGN); . = ALIGN(SEGMENT_ALIGN);
_etext = .; /* End of text section */ _etext = .; /* End of text section */
...@@ -201,6 +191,15 @@ SECTIONS ...@@ -201,6 +191,15 @@ SECTIONS
HYPERVISOR_DATA_SECTIONS HYPERVISOR_DATA_SECTIONS
.got : { *(.got) }
/*
* Make sure that the .got.plt is either completely empty or it
* contains only the lazy dispatch entries.
*/
.got.plt : { *(.got.plt) }
ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
"Unexpected GOT/PLT entries detected!")
/* code sections that are never executed via the kernel mapping */ /* code sections that are never executed via the kernel mapping */
.rodata.text : { .rodata.text : {
TRAMP_TEXT TRAMP_TEXT
......
...@@ -167,7 +167,7 @@ alternative_else_nop_endif ...@@ -167,7 +167,7 @@ alternative_else_nop_endif
SYM_FUNC_END(cpu_do_resume) SYM_FUNC_END(cpu_do_resume)
#endif #endif
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "a"
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
adrp \tmp1, reserved_pg_dir adrp \tmp1, reserved_pg_dir
...@@ -201,7 +201,7 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1) ...@@ -201,7 +201,7 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1)
#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) #define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "a"
.macro kpti_mk_tbl_ng, type, num_entries .macro kpti_mk_tbl_ng, type, num_entries
add end_\type\()p, cur_\type\()p, #\num_entries * 8 add end_\type\()p, cur_\type\()p, #\num_entries * 8
...@@ -400,7 +400,7 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings) ...@@ -400,7 +400,7 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
* Output: * Output:
* Return in x0 the value of the SCTLR_EL1 register. * Return in x0 the value of the SCTLR_EL1 register.
*/ */
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup) SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB tlbi vmalle1 // Invalidate local TLB
dsb nsh dsb nsh
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment