Commit 84bc1993 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Unfortunately, we still have a number of outstanding issues so there
  will be more fixes to come, but this lot are a good start.

   - Fix handling of watchpoints triggered by uaccess routines

   - Fix initialisation of gigantic pages for CMA buffers

   - Raise minimum clang version for BTI to avoid miscompilation

   - Fix data race in SVE vector length configuration code

   - Ensure address tags are ignored in kern_addr_valid()

   - Dump register state on fatal BTI exception

   - kexec_file() cleanup to use struct_size() macro"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hw_breakpoint: Don't invoke overflow handler on uaccess watchpoints
  arm64: kexec_file: Use struct_size() in kmalloc()
  arm64: mm: reserve hugetlb CMA after numa_init
  arm64: bti: Require clang >= 10.0.1 for in-kernel BTI support
  arm64: sve: Fix build failure when ARM64_SVE=y and SYSCTL=n
  arm64: pgtable: Clear the GP bit for non-executable kernel pages
  arm64: mm: reset address tag set by kasan sw tagging
  arm64: traps: Dump registers prior to panic() in bad_mode()
  arm64/sve: Eliminate data races on sve_default_vl
  docs/arm64: Fix typo'd #define in sve.rst
  arm64: remove TEXT_OFFSET randomization
parents 98b76994 24ebec25
Documentation/arm64/sve.rst
@@ -186,7 +186,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
     flags:

-	PR_SVE_SET_VL_INHERIT
+	PR_SVE_VL_INHERIT

	    Inherit the current vector length across execve().  Otherwise, the
	    vector length is reset to the system default at execve().  (See
@@ -247,7 +247,7 @@ prctl(PR_SVE_GET_VL)
     The following flag may be OR-ed into the result:

-	PR_SVE_SET_VL_INHERIT
+	PR_SVE_VL_INHERIT

	    Vector length will be inherited across execve().
@@ -393,7 +393,7 @@ The regset data starts with struct user_sve_header, containing:
 * At every execve() call, the new vector length of the new process is set to
   the system default vector length, unless

-    * PR_SVE_SET_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
+    * PR_SVE_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
       calling thread, or

     * a deferred vector length change is pending, established via the
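For context, a minimal userspace sketch of the documented interface this typo fix concerns: PR_SVE_VL_INHERIT (not PR_SVE_SET_VL_INHERIT) is the flag actually defined in <linux/prctl.h>. The example below is illustrative and assumes an SVE-capable kernel; error handling is minimal.

/* Sketch: request a 32-byte vector length and keep it across execve().
 * PR_SVE_* constants come from <linux/prctl.h>. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0)
		perror("PR_SVE_SET_VL");

	int ret = prctl(PR_SVE_GET_VL);
	if (ret >= 0)
		printf("vl=%d inherit=%d\n", ret & PR_SVE_VL_LEN_MASK,
		       !!(ret & PR_SVE_VL_INHERIT));
	return 0;
}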
arch/arm64/Kconfig
@@ -1630,6 +1630,8 @@ config ARM64_BTI_KERNEL
	depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
	depends on !CC_IS_GCC || GCC_VERSION >= 100100
+	# https://reviews.llvm.org/rGb8ae3fdfa579dbf366b1bb1cbfdbf8c51db7fa55
+	depends on !CC_IS_CLANG || CLANG_VERSION >= 100001
	depends on !(CC_IS_CLANG && GCOV_KERNEL)
	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
	help
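The thresholds use the kernel's compiler version encoding of major * 10000 + minor * 100 + patch, so 100001 means clang 10.0.1 and 100100 means GCC 10.1.0. A one-line sketch of the encoding:

/* Sketch of the Kconfig version encoding: 10.0.1 -> 100001 */
static inline int cc_version(int major, int minor, int patch)
{
	return major * 10000 + minor * 100 + patch;
}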
arch/arm64/Kconfig.debug
@@ -8,21 +8,6 @@ config PID_IN_CONTEXTIDR
	  instructions during context switch. Say Y here only if you are
	  planning to use hardware trace tools with this kernel.

-config ARM64_RANDOMIZE_TEXT_OFFSET
-	bool "Randomize TEXT_OFFSET at build time"
-	help
-	  Say Y here if you want the image load offset (AKA TEXT_OFFSET)
-	  of the kernel to be randomized at build-time. When selected,
-	  this option will cause TEXT_OFFSET to be randomized upon any
-	  build of the kernel, and the offset will be reflected in the
-	  text_offset field of the resulting Image. This can be used to
-	  fuzz-test bootloaders which respect text_offset.
-
-	  This option is intended for bootloader and/or kernel testing
-	  only. Bootloaders must make no assumptions regarding the value
-	  of TEXT_OFFSET and platforms must not require a specific
-	  value.
-
 config DEBUG_EFI
	depends on EFI && DEBUG_INFO
	bool "UEFI debugging"
arch/arm64/Makefile
@@ -121,13 +121,7 @@ endif
 head-y		:= arch/arm64/kernel/head.o

 # The byte offset of the kernel image in RAM from the start of RAM.
-ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
-		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
-		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
-else
 TEXT_OFFSET := 0x0
-endif

 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
 KASAN_SHADOW_SCALE_SHIFT := 4
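For the record, the removed awk one-liner picked a random, page-aligned TEXT_OFFSET in [0, 2 MiB). A rough C equivalent of what it computed, illustrative only:

#include <stdint.h>
#include <stdlib.h>

/* Random page-aligned offset below 2 MiB, as the awk script computed:
 * int(npages * rand()) * page_size. */
static uint64_t random_text_offset(unsigned int page_shift)
{
	uint64_t npages = (2 * 1024 * 1024) >> page_shift;

	return ((uint64_t)rand() % npages) << page_shift;
}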
arch/arm64/include/asm/pgtable.h
@@ -416,7 +416,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

 #define pgprot_nx(prot) \
-	__pgprot_modify(prot, 0, PTE_PXN)
+	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

 /*
  * Mark the prot value as uncacheable and unbufferable.
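The fix matters because, with BTI, executable kernel mappings may carry the GP (guarded page) bit; making a mapping non-executable must clear it too, so BTI semantics do not linger on data pages. An illustrative expansion of the macro, with the Armv8.5 descriptor bit positions (GP is bit 50, PXN is bit 53; PTE_MAYBE_GP is PTE_GP when BTI is configured, 0 otherwise):

#include <stdint.h>

#define PTE_GP	(UINT64_C(1) << 50)	/* guarded page (BTI) */
#define PTE_PXN	(UINT64_C(1) << 53)	/* privileged execute-never */

/* __pgprot_modify(prot, mask, bits): clear mask, then set bits */
static inline uint64_t pgprot_nx_sketch(uint64_t prot)
{
	return (prot & ~PTE_GP) | PTE_PXN;
}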
arch/arm64/kernel/fpsimd.c
@@ -12,6 +12,7 @@
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}

 #ifdef CONFIG_ARM64_SVE

+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
@@ -338,13 +349,13 @@ static unsigned int find_supported_vector_length(unsigned int vl)
	return sve_vl_from_vq(__bit_to_vq(bit));
 }

-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
 {
	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
@@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
	if (!sve_vl_valid(vl))
		return -EINVAL;

-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
	return 0;
 }
@@ -383,9 +394,9 @@ static int __init sve_sysctl_init(void)
	return 0;
 }

-#else /* ! CONFIG_SYSCTL */
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 static int __init sve_sysctl_init(void) { return 0; }
-#endif /* ! CONFIG_SYSCTL */
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */

 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
@@ -868,7 +879,7 @@ void __init sve_setup(void)
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
		      SVE_VQ_MAX);
@@ -889,7 +900,7 @@ void __init sve_setup(void)
	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl < sve_max_vl)
@@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void)
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
-		 * early hardware probing is complete, so sve_default_vl
+		 * early hardware probing is complete, so __sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
-			current->thread.sve_vl_onexec : sve_default_vl;
+			current->thread.sve_vl_onexec : get_sve_default_vl();

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;
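The pattern here is the standard cure for a data race: the sysctl handler can write the default VL while another CPU reads it in fpsimd_flush_thread(), so all accesses now go through READ_ONCE()/WRITE_ONCE() (hence the new <linux/compiler.h> include), which stops the compiler from tearing, fusing, or re-reading the plain int. A distilled sketch of the idiom:

#include <linux/compiler.h>

static int __shared_cfg = -1;	/* written by sysctl, read on exec path */

static int get_shared_cfg(void)
{
	return READ_ONCE(__shared_cfg);	/* single, non-torn load */
}

static void set_shared_cfg(int val)
{
	WRITE_ONCE(__shared_cfg, val);	/* single, non-torn store */
}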
arch/arm64/kernel/hw_breakpoint.c
@@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
	return 0;
 }

+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+			     struct pt_regs *regs)
+{
+	int step = is_default_overflow_handler(wp);
+	struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+
+	info->trigger = addr;
+
+	/*
+	 * If we triggered a user watchpoint from a uaccess routine, then
+	 * handle the stepping ourselves since userspace really can't help
+	 * us with this.
+	 */
+	if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+		step = 1;
+	else
+		perf_bp_event(wp, regs);
+
+	return step;
+}
+
 static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
 {
@@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
	u64 val;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
-	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
@@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
		if (dist != 0)
			continue;

-		info = counter_arch_bp(wp);
-		info->trigger = addr;
-		perf_bp_event(wp, regs);
-
-		/* Do we need to handle the stepping? */
-		if (is_default_overflow_handler(wp))
-			step = 1;
+		step = watchpoint_report(wp, addr, regs);
	}
-	if (min_dist > 0 && min_dist != -1) {
-		/* No exact match found. */
-		wp = slots[closest_match];
-		info = counter_arch_bp(wp);
-		info->trigger = addr;
-		perf_bp_event(wp, regs);

-		/* Do we need to handle the stepping? */
-		if (is_default_overflow_handler(wp))
-			step = 1;
-	}
+	/* No exact match found? */
+	if (min_dist > 0 && min_dist != -1)
+		step = watchpoint_report(slots[closest_match], addr, regs);

	rcu_read_unlock();

	if (!step)
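A hypothetical scenario that reaches the new code path: a debugger arms a watchpoint on a tracee buffer (via the NT_ARM_HW_WATCH regset), and the tracee then passes that same buffer to a syscall. The kernel's uaccess write fires the EL0 watchpoint with !user_mode(regs), so the kernel must single-step over the access itself rather than invoke the overflow handler. Tracee side, names illustrative:

#include <unistd.h>

/* The kernel's copy_to_user() into watched_buf is what triggers the
 * EL0 watchpoint from kernel (uaccess) context. */
static ssize_t touch_watched_buffer(int fd, char *watched_buf, size_t len)
{
	return read(fd, watched_buf, len);
}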
arch/arm64/kernel/machine_kexec_file.c
@@ -219,8 +219,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
			       MEMBLOCK_NONE, &start, &end, NULL)
		nr_ranges++;

-	cmem = kmalloc(sizeof(struct crash_mem) +
-			sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;
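struct_size(p, member, n), from <linux/overflow.h>, computes sizeof(*p) plus n trailing elements of the flexible array member, saturating to SIZE_MAX on overflow so the subsequent kmalloc() fails cleanly instead of under-allocating. A sketch against a crash_mem layout mirroring the one in <linux/kexec.h>:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct crash_mem_range {
	u64 start;
	u64 end;
};

struct crash_mem {
	unsigned int max_nr_ranges;
	unsigned int nr_ranges;
	struct crash_mem_range ranges[];	/* flexible array member */
};

/* struct_size(cmem, ranges, n)
 *	== sizeof(struct crash_mem) + n * sizeof(struct crash_mem_range),
 * except that an overflowing n yields SIZE_MAX, making kmalloc() fail. */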
arch/arm64/kernel/traps.c
@@ -813,6 +813,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

+	__show_regs(regs);
	local_daif_mask();
	panic("bad mode");
 }
arch/arm64/mm/init.c
@@ -404,11 +404,6 @@ void __init arm64_memblock_init(void)
	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma32_phys_limit);
-
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-#endif
 }

 void __init bootmem_init(void)
@@ -424,6 +419,16 @@ void __init bootmem_init(void)
	min_low_pfn = min;

	arm64_numa_init();
+
+	/*
+	 * must be done after arm64_numa_init() which calls numa_init() to
+	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
+	 * while allocating required CMA size across online nodes.
+	 */
+#ifdef CONFIG_ARM64_4K_PAGES
+	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+#endif
+
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
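The ordering constraint exists because hugetlb_cma_reserve() splits the requested CMA size across the nodes in node_online_map, which only arm64_numa_init() populates; called earlier, the walk would see no (or wrong) online nodes. A simplified sketch of that per-node split, not the actual mm/hugetlb.c code:

#include <linux/nodemask.h>
#include <linux/printk.h>

/* Illustrative only: divide a CMA request evenly over online nodes. */
static void __init per_node_cma_split(unsigned long total_size)
{
	unsigned long per_node = total_size / num_online_nodes();
	int nid;

	for_each_online_node(nid)
		pr_info("node %d: would reserve %lu bytes\n", nid, per_node);
}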
arch/arm64/mm/mmu.c
@@ -723,6 +723,7 @@ int kern_addr_valid(unsigned long addr)
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

+	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;
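Under CONFIG_KASAN_SW_TAGS the top byte of a kernel pointer carries a KASAN tag, which would make the VA_BITS sign check above reject a perfectly valid address. Resetting the tag amounts to a sign-extension from bit 55 (always set for kernel addresses), which rewrites the top byte to 0xff. A standalone sketch of the operation, not the kernel's implementation (which goes through a sign-extension helper):

#include <stdint.h>

/* Sketch: shift the tag byte out, then arithmetic-shift back in; since
 * bit 55 is set for kernel addresses, the top byte becomes 0xff. */
static inline uint64_t reset_kasan_tag(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}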