Commit 89fa0be0 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

 - Fix double-evaluation of 'pte' macro argument when using 52-bit PAs

 - Fix signedness of some MTE prctl PR_* constants

 - Fix kmemleak memory usage by skipping early pgtable allocations

 - Fix printing of CPU feature register strings

 - Remove redundant -nostdlib linker flag for vDSO binaries

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: pgtable: make __pte_to_phys/__phys_to_pte_val inline functions
  arm64: Track no early_pgtable_alloc() for kmemleak
  arm64: mte: change PR_MTE_TCF_NONE back into an unsigned long
  arm64: vdso: remove -nostdlib compiler flag
  arm64: arm64_ftr_reg->name may not be a human-readable string
parents 3f55f177 c7c386fb

arch/arm/mm/kasan_init.c
@@ -32,7 +32,7 @@ pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 static __init void *kasan_alloc_block(size_t size)
 {
 	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
-				      MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE);
+				      MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
 }
 
 static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,

arch/arm64/include/asm/pgtable.h
@@ -67,9 +67,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  * page table entry, taking care of 52-bit addresses.
  */
 #ifdef CONFIG_ARM64_PA_BITS_52
-#define __pte_to_phys(pte)	\
-	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
-#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
+static inline phys_addr_t __pte_to_phys(pte_t pte)
+{
+	return (pte_val(pte) & PTE_ADDR_LOW) |
+		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
+}
+static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
+{
+	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
+}
 #else
 #define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
 #define __phys_to_pte_val(phys)	(phys)
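
The macros replaced above evaluate their 'pte' argument twice, which is the double-evaluation problem the first fix in this pull targets. Below is a minimal user-space sketch of the hazard; load_pte() and the masks are illustrative stand-ins, not the kernel's PTE_ADDR_LOW/PTE_ADDR_HIGH definitions.

#include <stdio.h>

static int evaluations;

/* Stand-in for an expression with a side effect -- here it just
 * counts, but think of it as re-reading a live page-table entry. */
static unsigned long load_pte(void)
{
	evaluations++;
	return 0x0000ffffdead0000UL;
}

/* Macro in the style of the old 52-bit __pte_to_phys(): 'pte'
 * appears twice, so the argument is evaluated twice (masks are
 * illustrative, not the kernel's). */
#define PTE_TO_PHYS(pte) \
	(((pte) & 0x0000ffffffff0000UL) | (((pte) & 0xf000UL) << 36))

int main(void)
{
	unsigned long phys = PTE_TO_PHYS(load_pte());

	/* Prints 2: the "single" load happened twice. If the entry
	 * can change between the two reads, the low and high halves
	 * may tear; the inline function in the fix evaluates once. */
	printf("phys=%#lx, argument evaluated %d times\n",
	       phys, evaluations);
	return 0;
}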

arch/arm64/kernel/cpufeature.c
@@ -573,15 +573,19 @@ static const struct arm64_ftr_bits ftr_raz[] = {
 	ARM64_FTR_END,
 };
 
-#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) {		\
+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) {	\
 		.sys_id = id,					\
 		.reg = &(struct arm64_ftr_reg){			\
-			.name = #id,				\
+			.name = id_str,				\
 			.override = (ovr),			\
 			.ftr_bits = &((table)[0]),		\
 	}}
 
-#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr)	\
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
+
+#define ARM64_FTR_REG(id, table)		\
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
 
 struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
 struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
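
The extra __ARM64_FTR_REG_OVERRIDE level works because '#' stringizes an argument as written only when it is applied directly to the macro's own parameter; an argument forwarded through another macro without '#' is fully expanded first. A standalone sketch of that ordering, with a made-up register encoding standing in for the kernel's sys_reg():

#include <stdio.h>

/* Made-up numeric encoding; the real SYS_* IDs expand via sys_reg(). */
#define SYS_ID_EXAMPLE_EL1	((3 << 19) | (7 << 8) | (1 << 5))

/* '#' on the parameter itself: the argument is stringized as the
 * caller wrote it, before any macro expansion. */
#define STR_DIRECT(id)		#id

/* Forwarding through a second macro without '#': the argument is
 * fully expanded first, so the encoding gets stringized instead. */
#define STR_INDIRECT(id)	STR_DIRECT(id)

int main(void)
{
	/* "SYS_ID_EXAMPLE_EL1" -- readable, like the fixed
	 * ARM64_FTR_REG(), which stringizes at the outermost level. */
	puts(STR_DIRECT(SYS_ID_EXAMPLE_EL1));

	/* "((3 << 19) | (7 << 8) | (1 << 5))" -- the unreadable name
	 * the old one-level macros could end up recording. */
	puts(STR_INDIRECT(SYS_ID_EXAMPLE_EL1));
	return 0;
}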

arch/arm64/kernel/vdso/Makefile
@@ -23,7 +23,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 # potential future proofing if we end up with internal calls to the exported
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv	\
+ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv	\
 	     -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18

arch/arm64/kernel/vdso32/Makefile
@@ -102,7 +102,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__
 
 # From arm vDSO Makefile
 VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
 VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id=sha1
+VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1
 
 # Borrow vdsomunge.c from the arm vDSO

arch/arm64/mm/kasan_init.c
@@ -36,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 {
 	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
 					 __pa(MAX_DMA_ADDRESS),
-					 MEMBLOCK_ALLOC_KASAN, node);
+					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
 	if (!p)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
 		      __func__, PAGE_SIZE, PAGE_SIZE, node,
@@ -49,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
 {
 	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
 					     __pa(MAX_DMA_ADDRESS),
-					     MEMBLOCK_ALLOC_KASAN, node);
+					     MEMBLOCK_ALLOC_NOLEAKTRACE,
+					     node);
 	if (!p)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
 		      __func__, PAGE_SIZE, PAGE_SIZE, node,

arch/arm64/mm/mmu.c
@@ -96,7 +96,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
 	phys_addr_t phys;
 	void *ptr;
 
-	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
+					 MEMBLOCK_ALLOC_NOLEAKTRACE);
 	if (!phys)
 		panic("Failed to allocate page table page\n");
 

include/linux/memblock.h
@@ -389,7 +389,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 /* Flags for memblock allocation APIs */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
-#define MEMBLOCK_ALLOC_KASAN		1
+#define MEMBLOCK_ALLOC_NOLEAKTRACE	1
 
 /* We are using top down, so it is safe to use 0 here */
 #define MEMBLOCK_LOW_LIMIT 0

include/uapi/linux/prctl.h
@@ -235,7 +235,7 @@ struct prctl_mm_map {
 #define PR_GET_TAGGED_ADDR_CTRL		56
 # define PR_TAGGED_ADDR_ENABLE		(1UL << 0)
 /* MTE tag check fault modes */
-# define PR_MTE_TCF_NONE		0
+# define PR_MTE_TCF_NONE		0UL
 # define PR_MTE_TCF_SYNC		(1UL << 1)
 # define PR_MTE_TCF_ASYNC		(1UL << 2)
 # define PR_MTE_TCF_MASK		(PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
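
The sibling PR_MTE_TCF_* constants are unsigned long, so a plain int 0 left the set with mixed types. The sketch below shows one way such a mismatch can bite; it illustrates the type difference, not necessarily the exact regression that motivated the change.

#include <stdio.h>

#define TCF_NONE_OLD	0	/* int: the regressing definition */
#define TCF_NONE_NEW	0UL	/* unsigned long: this fix */

int main(void)
{
	/* _Generic reports the type each constant actually has. */
	puts(_Generic(TCF_NONE_OLD, int: "old is int",
				    unsigned long: "old is unsigned long"));
	puts(_Generic(TCF_NONE_NEW, int: "new is int",
				    unsigned long: "new is unsigned long"));

	/* printf("%lu", TCF_NONE_OLD) would pass an int where unsigned
	 * long is expected -- formally undefined behaviour; the 0UL
	 * spelling keeps all PR_MTE_TCF_* values interchangeable. */
	printf("%lu\n", TCF_NONE_NEW);
	return 0;
}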

mm/memblock.c
@@ -287,7 +287,7 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 {
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
-	    end == MEMBLOCK_ALLOC_KASAN)
+	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
 		end = memblock.current_limit;
 
 	/* avoid allocating the first page */
@@ -1387,8 +1387,11 @@ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 		return 0;
 
 done:
-	/* Skip kmemleak for kasan_init() due to high volume. */
-	if (end != MEMBLOCK_ALLOC_KASAN)
+	/*
+	 * Skip kmemleak for those places like kasan_init() and
+	 * early_pgtable_alloc() due to high volume.
+	 */
+	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
 		/*
 		 * The min_count is set to 0 so that memblock allocated
 		 * blocks are never reported as leaks. This is because many
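
Reading the two hunks together: callers pass MEMBLOCK_ALLOC_NOLEAKTRACE in the 'end' parameter slot, memblock_find_in_range_node() pumps that sentinel up to the real limit in its local copy, and memblock_alloc_range_nid() still sees the original value at done:, where it skips leak-tracker registration. A toy user-space model of that flow, with invented sizes and a printf() standing in for kmemleak_alloc_phys():

#include <stdio.h>

typedef unsigned long long phys_addr_t;

/* Sentinel 'end' values, mirroring include/linux/memblock.h. */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

static phys_addr_t current_limit = 0x100000000ULL;	/* invented limit */

/* Models memblock_find_in_range_node(): sentinels are pumped up to
 * the real limit in a local copy of 'end'. */
static phys_addr_t toy_find_in_range(phys_addr_t size, phys_addr_t end)
{
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = current_limit;
	return end - size;	/* pretend a top-down fit was found */
}

/* Models memblock_alloc_range_nid(): its own 'end' still holds the
 * caller's sentinel, so the NOLEAKTRACE check works even though the
 * range search used the pumped-up limit. */
static phys_addr_t toy_alloc_range_nid(phys_addr_t size, phys_addr_t end)
{
	phys_addr_t found = toy_find_in_range(size, end);

	if (found && end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		printf("kmemleak tracks %#llx (+%#llx)\n", found, size);
	return found;
}

int main(void)
{
	toy_alloc_range_nid(0x1000, MEMBLOCK_ALLOC_ACCESSIBLE);	/* tracked */
	toy_alloc_range_nid(0x1000, MEMBLOCK_ALLOC_NOLEAKTRACE);	/* silent */
	return 0;
}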

tools/include/uapi/linux/prctl.h
@@ -235,7 +235,7 @@ struct prctl_mm_map {
 #define PR_GET_TAGGED_ADDR_CTRL		56
 # define PR_TAGGED_ADDR_ENABLE		(1UL << 0)
 /* MTE tag check fault modes */
-# define PR_MTE_TCF_NONE		0
+# define PR_MTE_TCF_NONE		0UL
 # define PR_MTE_TCF_SYNC		(1UL << 1)
 # define PR_MTE_TCF_ASYNC		(1UL << 2)
 # define PR_MTE_TCF_MASK		(PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)