Commit df45da57 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "ACPI:

   - Improve error reporting when failing to manage SDEI on AGDI device
     removal

  Assembly routines:

   - Improve register constraints so that the compiler can make use of
     the zero register instead of moving an immediate #0 into a GPR

   - Allow the compiler to allocate the registers used for CAS
     instructions

  CPU features and system registers:

   - Cleanups to the way in which CPU features are identified from the
     ID register fields

   - Extend system register definition generation to handle Enum types
     when defining shared register fields

   - Generate definitions for new _EL2 registers and add new fields for
     ID_AA64PFR1_EL1

   - Allow SVE to be disabled separately from SME on the kernel
     command-line

  Tracing:

   - Support for "direct calls" in ftrace, which enables BPF tracing for
     arm64

  Kdump:

   - Don't bother unmapping the crashkernel from the linear mapping,
     which then allows us to use huge (block) mappings and reduce TLB
     pressure when a crashkernel is loaded.

  Memory management:

   - Try again to remove data cache invalidation from the coherent DMA
     allocation path

   - Simplify the fixmap code by mapping at page granularity

   - Allow the kfence pool to be allocated early, preventing the rest of
     the linear mapping from being forced to page granularity

  Perf and PMU:

   - Move CPU PMU code out to drivers/perf/ where it can be reused by
     the 32-bit ARM architecture when running on ARMv8 CPUs

   - Fix race between CPU PMU probing and pKVM host de-privilege

   - Add support for Apple M2 CPU PMU

   - Adjust the generic PERF_COUNT_HW_BRANCH_INSTRUCTIONS event
     dynamically, depending on what the CPU actually supports

   - Minor fixes and cleanups to system PMU drivers

  Stack tracing:

   - Use the XPACLRI instruction to strip PAC from pointers, rather than
     rolling our own function in C

   - Remove redundant PAC removal for toolchains that handle this in
     their builtins

   - Make backtracing more resilient in the face of instrumentation

  Miscellaneous:

   - Fix single-step with KGDB

   - Remove harmless warning when 'nokaslr' is passed on the kernel
     command-line

   - Minor fixes and cleanups across the board"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (72 commits)
  KVM: arm64: Ensure CPU PMU probes before pKVM host de-privilege
  arm64: kexec: include reboot.h
  arm64: delete dead code in this_cpu_set_vectors()
  arm64/cpufeature: Use helper macro to specify ID register for capabilites
  drivers/perf: hisi: add NULL check for name
  drivers/perf: hisi: Remove redundant initialized of pmu->name
  arm64/cpufeature: Consistently use symbolic constants for min_field_value
  arm64/cpufeature: Pull out helper for CPUID register definitions
  arm64/sysreg: Convert HFGITR_EL2 to automatic generation
  ACPI: AGDI: Improve error reporting for problems during .remove()
  arm64: kernel: Fix kernel warning when nokaslr is passed to commandline
  perf/arm-cmn: Fix port detection for CMN-700
  arm64: kgdb: Set PSTATE.SS to 1 to re-enable single-step
  arm64: move PAC masks to <asm/pointer_auth.h>
  arm64: use XPACLRI to strip PAC
  arm64: avoid redundant PAC stripping in __builtin_return_address()
  arm64/sme: Fix some comments of ARM SME
  arm64/signal: Alloc tpidr2 sigframe after checking system_supports_tpidr2()
  arm64/signal: Use system_supports_tpidr2() to check TPIDR2
  arm64/idreg: Don't disable SME when disabling SVE
  ...
parents 53b5e72b eeb3557c
@@ -20,6 +20,8 @@ properties:
   items:
     - enum:
         - apm,potenza-pmu
+        - apple,avalanche-pmu
+        - apple,blizzard-pmu
        - apple,firestorm-pmu
        - apple,icestorm-pmu
        - arm,armv8-pmuv3 # Only for s/w models
...
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2012 ARM Ltd.
*/
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
#include <asm/cp15.h>
#include <asm/cputype.h>
#define PMCCNTR __ACCESS_CP15_64(0, c9)
#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
#define RETURN_READ_PMEVCNTRN(n) \
return read_sysreg(PMEVCNTR##n)
static unsigned long read_pmevcntrn(int n)
{
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
return 0;
}
#define WRITE_PMEVCNTRN(n) \
write_sysreg(val, PMEVCNTR##n)
static void write_pmevcntrn(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
#define WRITE_PMEVTYPERN(n) \
write_sysreg(val, PMEVTYPER##n)
static void write_pmevtypern(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
static inline unsigned long read_pmmir(void)
{
return read_sysreg(PMMIR);
}
static inline u32 read_pmuver(void)
{
/* PMUVers is not a signed field */
u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
return (dfr0 >> 24) & 0xf;
}
static inline void write_pmcr(u32 val)
{
write_sysreg(val, PMCR);
}
static inline u32 read_pmcr(void)
{
return read_sysreg(PMCR);
}
static inline void write_pmselr(u32 val)
{
write_sysreg(val, PMSELR);
}
static inline void write_pmccntr(u64 val)
{
write_sysreg(val, PMCCNTR);
}
static inline u64 read_pmccntr(void)
{
return read_sysreg(PMCCNTR);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, PMXEVCNTR);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(PMXEVCNTR);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, PMXEVTYPER);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, PMCNTENSET);
}
static inline void write_pmcntenclr(u32 val)
{
write_sysreg(val, PMCNTENCLR);
}
static inline void write_pmintenset(u32 val)
{
write_sysreg(val, PMINTENSET);
}
static inline void write_pmintenclr(u32 val)
{
write_sysreg(val, PMINTENCLR);
}
static inline void write_pmccfiltr(u32 val)
{
write_sysreg(val, PMCCFILTR);
}
static inline void write_pmovsclr(u32 val)
{
write_sysreg(val, PMOVSR);
}
static inline u32 read_pmovsclr(void)
{
return read_sysreg(PMOVSR);
}
static inline void write_pmuserenr(u32 val)
{
write_sysreg(val, PMUSERENR);
}
static inline u32 read_pmceid0(void)
{
return read_sysreg(PMCEID0);
}
static inline u32 read_pmceid1(void)
{
return read_sysreg(PMCEID1);
}
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
return false;
}
/* PMU Version in DFR Register */
#define ARMV8_PMU_DFR_VER_NI 0
#define ARMV8_PMU_DFR_VER_V3P4 0x5
#define ARMV8_PMU_DFR_VER_V3P5 0x6
#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
static inline bool pmuv3_implemented(int pmuver)
{
return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
pmuver == ARMV8_PMU_DFR_VER_NI);
}
static inline bool is_pmuv3p4(int pmuver)
{
return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
}
static inline bool is_pmuv3p5(int pmuver)
{
return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
}
#endif
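
The read_pmevcntrn()/write_pmevcntrn() helpers above rely on PMEVN_SWITCH() to turn a runtime counter index into the fixed register name that the sysreg accessors require. As a hedged sketch of how that macro family works (condensed to four counters for brevity; the real macro in the shared arm_pmuv3 code covers 0..30, and exact spelling may differ slightly from this illustration):

	/* Sketch: expand one switch case per counter, since sysreg
	 * accessors need a compile-time register name. */
	#define PMEVN_CASE(n, case_macro) \
		case n: case_macro(n); break

	#define PMEVN_SWITCH(x, case_macro)			\
		do {						\
			switch (x) {				\
			PMEVN_CASE(0, case_macro);		\
			PMEVN_CASE(1, case_macro);		\
			PMEVN_CASE(2, case_macro);		\
			PMEVN_CASE(3, case_macro);		\
			/* ... up to PMEVN_CASE(30, ...) */	\
			default:				\
				WARN(1, "Invalid PMEV* index\n"); \
			}					\
		} while (0)
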
@@ -403,7 +403,7 @@ config CPU_V6K
 	select CPU_THUMB_CAPABLE
 	select CPU_TLB_V6 if MMU

-# ARMv7
+# ARMv7 and ARMv8 architectures
 config CPU_V7
 	bool
 	select CPU_32v6K
...
@@ -186,6 +186,10 @@ config ARM64
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+		if $(cc-option,-fpatchable-function-entry=2)
+	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+		if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
 	select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
 		if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
 		    !CC_OPTIMIZE_FOR_SIZE)
@@ -363,6 +367,20 @@ config ARCH_PROC_KCORE_TEXT
 config BROKEN_GAS_INST
 	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)

+config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
+	bool
+	# Clang's __builtin_return_address() strips the PAC since 12.0.0
+	# https://reviews.llvm.org/D75044
+	default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
+	# GCC's __builtin_return_address() strips the PAC since 11.1.0,
+	# and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
+	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
+	default y if CC_IS_GCC && (GCC_VERSION >= 110100)
+	default y if CC_IS_GCC && (GCC_VERSION >= 100200) && (GCC_VERSION < 110000)
+	default y if CC_IS_GCC && (GCC_VERSION >= 90400) && (GCC_VERSION < 100000)
+	default y if CC_IS_GCC && (GCC_VERSION >= 80500) && (GCC_VERSION < 90000)
+	default n
+
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN_GENERIC || KASAN_SW_TAGS
...
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2012 ARM Ltd.
*/
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#define RETURN_READ_PMEVCNTRN(n) \
return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
return 0;
}
#define WRITE_PMEVCNTRN(n) \
write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
#define WRITE_PMEVTYPERN(n) \
write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
static inline unsigned long read_pmmir(void)
{
return read_cpuid(PMMIR_EL1);
}
static inline u32 read_pmuver(void)
{
u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
return cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_EL1_PMUVer_SHIFT);
}
static inline void write_pmcr(u32 val)
{
write_sysreg(val, pmcr_el0);
}
static inline u32 read_pmcr(void)
{
return read_sysreg(pmcr_el0);
}
static inline void write_pmselr(u32 val)
{
write_sysreg(val, pmselr_el0);
}
static inline void write_pmccntr(u64 val)
{
write_sysreg(val, pmccntr_el0);
}
static inline u64 read_pmccntr(void)
{
return read_sysreg(pmccntr_el0);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, pmxevcntr_el0);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(pmxevcntr_el0);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, pmxevtyper_el0);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, pmcntenset_el0);
}
static inline void write_pmcntenclr(u32 val)
{
write_sysreg(val, pmcntenclr_el0);
}
static inline void write_pmintenset(u32 val)
{
write_sysreg(val, pmintenset_el1);
}
static inline void write_pmintenclr(u32 val)
{
write_sysreg(val, pmintenclr_el1);
}
static inline void write_pmccfiltr(u32 val)
{
write_sysreg(val, pmccfiltr_el0);
}
static inline void write_pmovsclr(u32 val)
{
write_sysreg(val, pmovsclr_el0);
}
static inline u32 read_pmovsclr(void)
{
return read_sysreg(pmovsclr_el0);
}
static inline void write_pmuserenr(u32 val)
{
write_sysreg(val, pmuserenr_el0);
}
static inline u32 read_pmceid0(void)
{
return read_sysreg(pmceid0_el0);
}
static inline u32 read_pmceid1(void)
{
return read_sysreg(pmceid1_el0);
}
static inline bool pmuv3_implemented(int pmuver)
{
return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
}
static inline bool is_pmuv3p4(int pmuver)
{
return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
}
static inline bool is_pmuv3p5(int pmuver)
{
return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
}
#endif
@@ -251,22 +251,15 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,	\
 					      u##sz old,	\
 					      u##sz new)	\
 {								\
-	register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
-	register u##sz x1 asm ("x1") = old;			\
-	register u##sz x2 asm ("x2") = new;			\
-	unsigned long tmp;					\
-								\
 	asm volatile(						\
 	__LSE_PREAMBLE						\
-	"	mov	%" #w "[tmp], %" #w "[old]\n"		\
-	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
-	"	mov	%" #w "[ret], %" #w "[tmp]"		\
-	: [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr),		\
-	  [tmp] "=&r" (tmp)					\
-	: [old] "r" (x1), [new] "r" (x2)			\
+	"	cas" #mb #sfx "	%" #w "[old], %" #w "[new], %[v]\n" \
+	: [v] "+Q" (*(u##sz *)ptr),				\
+	  [old] "+r" (old)					\
+	: [new] "rZ" (new)					\
 	: cl);							\
 								\
-	return x0;						\
+	return old;						\
 }

 __CMPXCHG_CASE(w, b,     ,  8,   )
...
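
The rewrite above drops the fixed x0-x2 register variables and the scratch temporary: the compiler now allocates the CAS operands itself, and the "rZ" constraint lets a constant-zero 'new' value be encoded as xzr. A minimal freestanding sketch of the same pattern (illustrative user-space code, not the kernel macro; assumes an ARMv8.1-A toolchain):

	#include <stdint.h>

	/* Compare-and-swap with compiler-allocated registers, modeled on
	 * the new __lse__cmpxchg_case_*() above. Build with
	 * -march=armv8.1-a so the LSE "cas" instruction is accepted. */
	static inline uint64_t cas64(volatile uint64_t *ptr,
				     uint64_t old, uint64_t new)
	{
		asm volatile("cas %x[old], %x[new], %[v]\n"
			     : [v] "+Q" (*ptr), [old] "+r" (old)
			     : [new] "rZ" (new)	/* new == 0 can use xzr */
			     : "memory");
		return old;	/* CAS writes the observed value into 'old' */
	}
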
@@ -131,25 +131,25 @@ do {							\
 	case 1:						\
 		asm volatile ("stlrb %w1, %0"		\
 				: "=Q" (*__p)		\
-				: "r" (*(__u8 *)__u.__c) \
+				: "rZ" (*(__u8 *)__u.__c) \
 				: "memory");		\
 		break;					\
 	case 2:						\
 		asm volatile ("stlrh %w1, %0"		\
 				: "=Q" (*__p)		\
-				: "r" (*(__u16 *)__u.__c) \
+				: "rZ" (*(__u16 *)__u.__c) \
 				: "memory");		\
 		break;					\
 	case 4:						\
 		asm volatile ("stlr %w1, %0"		\
 				: "=Q" (*__p)		\
-				: "r" (*(__u32 *)__u.__c) \
+				: "rZ" (*(__u32 *)__u.__c) \
 				: "memory");		\
 		break;					\
 	case 8:						\
-		asm volatile ("stlr %1, %0"		\
+		asm volatile ("stlr %x1, %0"		\
 				: "=Q" (*__p)		\
-				: "r" (*(__u64 *)__u.__c) \
+				: "rZ" (*(__u64 *)__u.__c) \
 				: "memory");		\
 		break;					\
 	}						\
...
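
The switch from "r" to "rZ" (plus the explicit %x1 in the 8-byte case) means a store-release of constant zero no longer needs a 'mov xN, #0' into a scratch GPR. A small sketch of the effect (illustrative, not kernel code):

	#include <stdint.h>

	/* With "rZ", storing 0 lets the compiler emit "stlr xzr, [x0]"
	 * directly instead of "mov x1, #0; stlr x1, [x0]". */
	static inline void store_release_u64(uint64_t *p, uint64_t v)
	{
		asm volatile("stlr %x1, %0" : "=Q" (*p) : "rZ" (v) : "memory");
	}

	void clear_flag(uint64_t *flag)
	{
		store_release_u64(flag, 0);	/* single stlr xzr */
	}
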
@@ -83,10 +83,6 @@ struct compat_statfs {
 	int		f_spare[4];
 };

-#define COMPAT_RLIM_INFINITY		0xffffffff
-#define COMPAT_OFF_T_MAX		0x7fffffff
-
 #define compat_user_stack_pointer()	(user_stack_pointer(task_pt_regs(current)))
 #define COMPAT_MINSIGSTKSZ		2048
...
@@ -8,19 +8,33 @@
 #define ARM64_ASM_PREAMBLE
 #endif

-/*
- * The EL0/EL1 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
-#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
+#define xpaclri(ptr)							\
+({									\
+	register unsigned long __xpaclri_ptr asm("x30") = (ptr);	\
+									\
+	asm(								\
+	ARM64_ASM_PREAMBLE						\
+	"	hint	#7\n"						\
+	: "+r" (__xpaclri_ptr));					\
+									\
+	__xpaclri_ptr;							\
+})

-/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
-#define ptrauth_clear_pac(ptr)						\
-	((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :	\
-			       (ptr & ~ptrauth_user_pac_mask()))
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_strip_kernel_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_kernel_insn_pac(ptr)	(ptr)
+#endif
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define ptrauth_strip_user_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_user_insn_pac(ptr)	(ptr)
+#endif

+#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
 #define __builtin_return_address(val)					\
-	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+	(void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
+#endif

 #endif /* __ASM_COMPILER_H */
@@ -104,6 +104,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
 void kernel_enable_single_step(struct pt_regs *regs);
 void kernel_disable_single_step(void);
 int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);

 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 int reinstall_suspended_bps(struct pt_regs *regs);
...
@@ -17,6 +17,7 @@

 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/sizes.h>
 #include <asm/boot.h>
 #include <asm/page.h>
@@ -36,17 +37,13 @@ enum fixed_addresses {
 	FIX_HOLE,

 	/*
-	 * Reserve a virtual window for the FDT that is 2 MB larger than the
-	 * maximum supported size, and put it at the top of the fixmap region.
-	 * The additional space ensures that any FDT that does not exceed
-	 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
-	 * 2 MB alignment boundaries.
-	 *
-	 * Keep this at the top so it remains 2 MB aligned.
+	 * Reserve a virtual window for the FDT that is a page bigger than the
+	 * maximum supported size. The additional space ensures that any FDT
+	 * that does not exceed MAX_FDT_SIZE can be mapped regardless of
+	 * whether it crosses any page boundary.
 	 */
-#define FIX_FDT_SIZE		(MAX_FDT_SIZE + SZ_2M)
 	FIX_FDT_END,
-	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+	FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,

 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
@@ -97,10 +94,13 @@ enum fixed_addresses {
 #define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_TOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_TOT_START	(FIXADDR_TOP - FIXADDR_TOT_SIZE)

 #define FIXMAP_PAGE_IO	__pgprot(PROT_DEVICE_nGnRE)

 void __init early_fixmap_init(void);
+void __init fixmap_copy(pgd_t *pgdir);

 #define __early_set_fixmap __set_fixmap
...
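
Worked numbers for the new FIX_FDT sizing, as a standalone sketch (assumed values: MAX_FDT_SIZE is 2 MiB as in asm/boot.h, pages are 4 KiB):

	/* One extra page covers an FDT that starts mid-page and therefore
	 * crosses one more page boundary: 512 + 1 = 513 fixmap slots. */
	#define SZ_4K		0x1000UL
	#define SZ_2M		0x200000UL
	#define MAX_FDT_SIZE	SZ_2M
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	_Static_assert(DIV_ROUND_UP(MAX_FDT_SIZE, SZ_4K) + 1 == 513,
		       "FDT fixmap window size");
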
@@ -70,10 +70,19 @@ struct ftrace_ops;

 #define arch_ftrace_get_regs(regs) NULL

+/*
+ * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
+ * stack alignment
+ */
 struct ftrace_regs {
 	/* x0 - x8 */
 	unsigned long regs[9];

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	unsigned long direct_tramp;
+#else
 	unsigned long __unused;
+#endif

 	unsigned long fp;
 	unsigned long lr;
@@ -136,6 +145,19 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs);
 #define ftrace_graph_func ftrace_graph_func

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
+						 unsigned long addr)
+{
+	/*
+	 * The ftrace trampoline will return to this address instead of the
+	 * instrumented function.
+	 */
+	fregs->direct_tramp = addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 #endif

 #define ftrace_return_address(n) return_address(n)
...
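
For orientation, this is what a direct-call user looks like: a condensed, hedged sketch modeled on samples/ftrace/ftrace-direct.c as extended by this series. The ops-based register_ftrace_direct()/unregister_ftrace_direct() signatures are assumed from this point in the series, and the trampoline follows the x9/x30 convention visible in the entry-ftrace.S hunk further down; treat it as a sketch, not a reference implementation.

	#include <linux/module.h>
	#include <linux/ftrace.h>
	#include <linux/sched.h>

	extern void my_tramp(void);

	static void my_direct_func(struct task_struct *p)
	{
		trace_printk("waking up %s-%d\n", p->comm, p->pid);
	}

	/* arm64 trampoline: ftrace_caller arrives with the traced
	 * function's LR in x9, so preserve x0/x9/x30, call the handler,
	 * and return to the traced function via "ret x9". */
	asm ("	.pushsection	.text, \"ax\", @progbits\n"
	     "	.type	my_tramp, @function\n"
	     "	.globl	my_tramp\n"
	     "my_tramp:\n"
	     "	bti	c\n"
	     "	sub	sp, sp, #32\n"
	     "	stp	x9, x30, [sp]\n"
	     "	str	x0, [sp, #16]\n"
	     "	bl	my_direct_func\n"
	     "	ldp	x30, x9, [sp]\n"
	     "	ldr	x0, [sp, #16]\n"
	     "	add	sp, sp, #32\n"
	     "	ret	x9\n"
	     "	.size	my_tramp, .-my_tramp\n"
	     "	.popsection\n");

	static struct ftrace_ops direct;

	static int __init my_init(void)
	{
		ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
		return register_ftrace_direct(&direct, (unsigned long)my_tramp);
	}

	static void __exit my_exit(void)
	{
		unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
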
@@ -59,8 +59,11 @@
 #define EARLY_KASLR	(0)
 #endif

+#define SPAN_NR_ENTRIES(vstart, vend, shift) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
+
 #define EARLY_ENTRIES(vstart, vend, shift, add) \
-	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
+	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))

 #define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
...
@@ -102,12 +102,6 @@ void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
 int machine_kexec_post_load(struct kimage *image);
 #define machine_kexec_post_load machine_kexec_post_load

-void arch_kexec_protect_crashkres(void);
-#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
-
-void arch_kexec_unprotect_crashkres(void);
-#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
-
 #endif

 #define ARCH_HAS_KIMAGE_ARCH
...
@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	return true;
 }

+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+	return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
 #endif /* __ASM_KFENCE_H */
@@ -374,11 +374,6 @@ static inline void *phys_to_virt(phys_addr_t x)
 })

 void dump_mem_limit(void);

-static inline bool defer_reserve_crashkernel(void)
-{
-	return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
-}
-
 #endif /* !ASSEMBLY */

 /*
...
@@ -65,6 +65,8 @@ extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
+extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot, bool page_mappings_only);
...
...
@@ -10,6 +10,13 @@
 #include <asm/memory.h>
 #include <asm/sysreg.h>

+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
+
 #define PR_PAC_ENABLED_KEYS_MASK \
 	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
@@ -97,11 +104,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
 				    unsigned long enabled);
 extern int ptrauth_get_enabled_keys(struct task_struct *tsk);

-static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
-{
-	return ptrauth_clear_pac(ptr);
-}
-
 static __always_inline void ptrauth_enable(void)
 {
 	if (!system_supports_address_auth())
@@ -133,7 +135,6 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_set_enabled_keys(tsk, keys, enabled)	(-EINVAL)
 #define ptrauth_get_enabled_keys(tsk)	(-EINVAL)
-#define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_suspend_exit()
 #define ptrauth_thread_init_user()
 #define ptrauth_thread_switch_user(tsk)
...
@@ -419,9 +419,6 @@
 #define SYS_MDCR_EL2		sys_reg(3, 4, 1, 1, 1)
 #define SYS_CPTR_EL2		sys_reg(3, 4, 1, 1, 2)
 #define SYS_HSTR_EL2		sys_reg(3, 4, 1, 1, 3)
-#define SYS_HFGRTR_EL2		sys_reg(3, 4, 1, 1, 4)
-#define SYS_HFGWTR_EL2		sys_reg(3, 4, 1, 1, 5)
-#define SYS_HFGITR_EL2		sys_reg(3, 4, 1, 1, 6)
 #define SYS_HACR_EL2		sys_reg(3, 4, 1, 1, 7)

 #define SYS_TTBR0_EL2		sys_reg(3, 4, 2, 0, 0)
@@ -758,12 +755,6 @@
 #define ICH_VTR_TDS_SHIFT	19
 #define ICH_VTR_TDS_MASK	(1 << ICH_VTR_TDS_SHIFT)

-/* HFG[WR]TR_EL2 bit definitions */
-#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT	55
-#define HFGxTR_EL2_nTPIDR2_EL0_MASK	BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT)
-#define HFGxTR_EL2_nSMPRI_EL1_SHIFT	54
-#define HFGxTR_EL2_nSMPRI_EL1_MASK	BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT)
-
 #define ARM64_FEATURE_FIELD_BITS	4

 /* Defined for compatibility only, do not add new users. */
...
@@ -237,7 +237,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 	"1:	" load "	" reg "1, [%2]\n"			\
 	"2:\n"								\
 	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
-	: "+r" (err), "=&r" (x)						\
+	: "+r" (err), "=r" (x)						\
 	: "r" (addr))

 #define __raw_get_mem(ldr, x, ptr, err, type)				\
@@ -327,7 +327,7 @@ do {									\
 	"2:\n"								\
 	_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0)			\
 	: "+r" (err)							\
-	: "r" (x), "r" (addr))
+	: "rZ" (x), "r" (addr))

 #define __raw_put_mem(str, x, ptr, err, type)				\
 do {									\
@@ -449,8 +449,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);

 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-struct page;
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
...
@@ -45,7 +45,6 @@ obj-$(CONFIG_FUNCTION_TRACER)		+= ftrace.o entry-ftrace.o
 obj-$(CONFIG_MODULES)			+= module.o
 obj-$(CONFIG_ARM64_MODULE_PLTS)		+= module-plts.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)		+= perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
 obj-$(CONFIG_CPU_IDLE)			+= cpuidle.o
...
@@ -420,14 +420,14 @@ static DEFINE_MUTEX(insn_emulation_mutex);

 static void enable_insn_hw_mode(void *data)
 {
-	struct insn_emulation *insn = (struct insn_emulation *)data;
+	struct insn_emulation *insn = data;

 	if (insn->set_hw_mode)
 		insn->set_hw_mode(true);
 }

 static void disable_insn_hw_mode(void *data)
 {
-	struct insn_emulation *insn = (struct insn_emulation *)data;
+	struct insn_emulation *insn = data;

 	if (insn->set_hw_mode)
 		insn->set_hw_mode(false);
 }
...
@@ -93,6 +93,9 @@ int main(void)
   DEFINE(FREGS_LR,		offsetof(struct ftrace_regs, lr));
   DEFINE(FREGS_SP,		offsetof(struct ftrace_regs, sp));
   DEFINE(FREGS_PC,		offsetof(struct ftrace_regs, pc));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+  DEFINE(FREGS_DIRECT_TRAMP,	offsetof(struct ftrace_regs, direct_tramp));
+#endif
   DEFINE(FREGS_SIZE,		sizeof(struct ftrace_regs));
   BLANK();
 #endif
@@ -197,6 +200,9 @@ int main(void)
 #endif
 #ifdef CONFIG_FUNCTION_TRACER
   DEFINE(FTRACE_OPS_FUNC,	offsetof(struct ftrace_ops, func));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+  DEFINE(FTRACE_OPS_DIRECT_CALL,	offsetof(struct ftrace_ops, direct_call));
+#endif
 #endif
   return 0;
 }
...
@@ -8,6 +8,7 @@
 #include <asm/cpufeature.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
+#include <asm/pointer_auth.h>

 static inline u64 get_tcr_el1_t1sz(void);
...
@@ -438,6 +438,11 @@ int kernel_active_single_step(void)
 }
 NOKPROBE_SYMBOL(kernel_active_single_step);

+void kernel_rewind_single_step(struct pt_regs *regs)
+{
+	set_regs_spsr_ss(regs);
+}
+
 /* ptrace API */
 void user_enable_single_step(struct task_struct *task)
 {
...
@@ -36,6 +36,31 @@
 SYM_CODE_START(ftrace_caller)
 	bti	c

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+	/*
+	 * The literal pointer to the ops is at an 8-byte aligned boundary
+	 * which is either 12 or 16 bytes before the BL instruction in the call
+	 * site. See ftrace_call_adjust() for details.
+	 *
+	 * Therefore here the LR points at `literal + 16` or `literal + 20`,
+	 * and we can find the address of the literal in either case by
+	 * aligning to an 8-byte boundary and subtracting 16. We do the
+	 * alignment first as this allows us to fold the subtraction into the
+	 * LDR.
+	 */
+	bic	x11, x30, 0x7
+	ldr	x11, [x11, #-(4 * AARCH64_INSN_SIZE)]	// op
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	/*
+	 * If the op has a direct call, handle it immediately without
+	 * saving/restoring registers.
+	 */
+	ldr	x17, [x11, #FTRACE_OPS_DIRECT_CALL]	// op->direct_call
+	cbnz	x17, ftrace_caller_direct
+#endif
+#endif
+
 	/* Save original SP */
 	mov	x10, sp
@@ -49,6 +74,10 @@ SYM_CODE_START(ftrace_caller)
 	stp	x6, x7, [sp, #FREGS_X6]
 	str	x8, [sp, #FREGS_X8]

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	str	xzr, [sp, #FREGS_DIRECT_TRAMP]
+#endif
+
 	/* Save the callsite's FP, LR, SP */
 	str	x29, [sp, #FREGS_FP]
 	str	x9, [sp, #FREGS_LR]
@@ -71,20 +100,7 @@ SYM_CODE_START(ftrace_caller)
 	mov	x3, sp				// regs

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
-	/*
-	 * The literal pointer to the ops is at an 8-byte aligned boundary
-	 * which is either 12 or 16 bytes before the BL instruction in the call
-	 * site. See ftrace_call_adjust() for details.
-	 *
-	 * Therefore here the LR points at `literal + 16` or `literal + 20`,
-	 * and we can find the address of the literal in either case by
-	 * aligning to an 8-byte boundary and subtracting 16. We do the
-	 * alignment first as this allows us to fold the subtraction into the
-	 * LDR.
-	 */
-	bic	x2, x30, 0x7
-	ldr	x2, [x2, #-16]			// op
+	mov	x2, x11				// op

 	ldr	x4, [x2, #FTRACE_OPS_FUNC]	// op->func
 	blr	x4				// op->func(ip, parent_ip, op, regs)
@@ -107,8 +123,15 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	ldp	x6, x7, [sp, #FREGS_X6]
 	ldr	x8, [sp, #FREGS_X8]

-	/* Restore the callsite's FP, LR, PC */
+	/* Restore the callsite's FP */
 	ldr	x29, [sp, #FREGS_FP]
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	ldr	x17, [sp, #FREGS_DIRECT_TRAMP]
+	cbnz	x17, ftrace_caller_direct_late
+#endif
+
+	/* Restore the callsite's LR and PC */
 	ldr	x30, [sp, #FREGS_LR]
 	ldr	x9, [sp, #FREGS_PC]
@@ -116,8 +139,45 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	add	sp, sp, #FREGS_SIZE + 32
 	ret	x9

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_INNER_LABEL(ftrace_caller_direct_late, SYM_L_LOCAL)
+	/*
+	 * Head to a direct trampoline in x17 after having run other tracers.
+	 * The ftrace_regs are live, and x0-x8 and FP have been restored. The
+	 * LR, PC, and SP have not been restored.
+	 */
+
+	/*
+	 * Restore the callsite's LR and PC matching the trampoline calling
+	 * convention.
+	 */
+	ldr	x9, [sp, #FREGS_LR]
+	ldr	x30, [sp, #FREGS_PC]
+
+	/* Restore the callsite's SP */
+	add	sp, sp, #FREGS_SIZE + 32
+
+SYM_INNER_LABEL(ftrace_caller_direct, SYM_L_LOCAL)
+	/*
+	 * Head to a direct trampoline in x17.
+	 *
+	 * We use `BR X17` as this can safely land on a `BTI C` or `PACIASP` in
+	 * the trampoline, and will not unbalance any return stack.
+	 */
+	br	x17
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 SYM_CODE_END(ftrace_caller)

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_CODE_START(ftrace_stub_direct_tramp)
+	bti	c
+	mov	x10, x30
+	mov	x30, x9
+	ret	x10
+SYM_CODE_END(ftrace_stub_direct_tramp)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

 /*
...
@@ -299,7 +299,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
 /*
  * TIF_SME controls whether a task can use SME without trapping while
  * in userspace, when TIF_SME is set then we must have storage
- * alocated in sve_state and sme_state to store the contents of both ZA
+ * allocated in sve_state and sme_state to store the contents of both ZA
  * and the SVE registers for both streaming and non-streaming modes.
  *
  * If both SVCR.ZA and SVCR.SM are disabled then at any point we
@@ -1477,7 +1477,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
  *
  * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
  * would have disabled the SME access trap for userspace during
- * ret_to_user, making an SVE access trap impossible in that case.
+ * ret_to_user, making an SME access trap impossible in that case.
  */
 void do_sme_acc(unsigned long esr, struct pt_regs *regs)
 {
...
@@ -195,15 +195,22 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ftrace_modify_code(pc, 0, new, false);
 }

-static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
+static struct plt_entry *get_ftrace_plt(struct module *mod)
 {
 #ifdef CONFIG_ARM64_MODULE_PLTS
 	struct plt_entry *plt = mod->arch.ftrace_trampolines;

-	if (addr == FTRACE_ADDR)
-		return &plt[FTRACE_PLT_IDX];
-#endif
+	return &plt[FTRACE_PLT_IDX];
+#else
 	return NULL;
+#endif
+}
+
+static bool reachable_by_bl(unsigned long addr, unsigned long pc)
+{
+	long offset = (long)addr - (long)pc;
+
+	return offset >= -SZ_128M && offset < SZ_128M;
 }

 /*
@@ -220,14 +227,21 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
 				      unsigned long *addr)
 {
 	unsigned long pc = rec->ip;
-	long offset = (long)*addr - (long)pc;
 	struct plt_entry *plt;

+	/*
+	 * If a custom trampoline is unreachable, rely on the ftrace_caller
+	 * trampoline which knows how to indirectly reach that trampoline
+	 * through ops->direct_call.
+	 */
+	if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc))
+		*addr = FTRACE_ADDR;
+
 	/*
 	 * When the target is within range of the 'BL' instruction, use 'addr'
 	 * as-is and branch to that directly.
 	 */
-	if (offset >= -SZ_128M && offset < SZ_128M)
+	if (reachable_by_bl(*addr, pc))
 		return true;

 	/*
@@ -256,7 +270,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
 	if (WARN_ON(!mod))
 		return false;

-	plt = get_ftrace_plt(mod, *addr);
+	plt = get_ftrace_plt(mod);
 	if (!plt) {
 		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
 		return false;
@@ -330,12 +344,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
 {
-	if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller))
+	unsigned long pc = rec->ip;
+	u32 old, new;
+	int ret;
+
+	ret = ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
+	if (ret)
+		return ret;
+
+	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
 		return -EINVAL;
-	if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller))
+	if (!ftrace_find_callable_addr(rec, NULL, &addr))
 		return -EINVAL;

-	return ftrace_rec_update_ops(rec);
+	old = aarch64_insn_gen_branch_imm(pc, old_addr,
+					  AARCH64_INSN_BRANCH_LINK);
+	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+	return ftrace_modify_code(pc, old, new, true);
 }
 #endif
...
@@ -167,7 +167,7 @@ static const struct {
 } aliases[] __initconst = {
 	{ "kvm-arm.mode=nvhe",		"id_aa64mmfr1.vh=0" },
 	{ "kvm-arm.mode=protected",	"id_aa64mmfr1.vh=0" },
-	{ "arm64.nosve",		"id_aa64pfr0.sve=0 id_aa64pfr1.sme=0" },
+	{ "arm64.nosve",		"id_aa64pfr0.sve=0" },
 	{ "arm64.nosme",		"id_aa64pfr1.sme=0" },
 	{ "arm64.nobti",		"id_aa64pfr1.bt=0" },
 	{ "arm64.nopauth",
@@ -178,6 +178,13 @@ static const struct {
 	{ "nokaslr",			"kaslr.disabled=1" },
 };

+static int __init parse_nokaslr(char *unused)
+{
+	/* nokaslr param handling is done by early cpufeature code */
+	return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
 static int __init find_field(const char *cmdline,
 			     const struct ftr_set_desc *reg, int f, u64 *v)
 {
...
@@ -224,6 +224,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
 		 */
 		if (!kernel_active_single_step())
 			kernel_enable_single_step(linux_regs);
+		else
+			kernel_rewind_single_step(linux_regs);
 		err = 0;
 		break;
 	default:
...
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/kexec.h>
 #include <linux/page-flags.h>
+#include <linux/reboot.h>
 #include <linux/set_memory.h>
 #include <linux/smp.h>
@@ -102,7 +103,7 @@ static void kexec_segment_flush(const struct kimage *kimage)
 /* Allocates pages for kexec page table */
 static void *kexec_page_alloc(void *arg)
 {
-	struct kimage *kimage = (struct kimage *)arg;
+	struct kimage *kimage = arg;
 	struct page *page = kimage_alloc_control_pages(kimage, 0);
 	void *vaddr = NULL;
@@ -268,26 +269,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
 	pr_info("Starting crashdump kernel...\n");
 }

-void arch_kexec_protect_crashkres(void)
-{
-	int i;
-
-	for (i = 0; i < kexec_crash_image->nr_segments; i++)
-		set_memory_valid(
-			__phys_to_virt(kexec_crash_image->segment[i].mem),
-			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
-}
-
-void arch_kexec_unprotect_crashkres(void)
-{
-	int i;
-
-	for (i = 0; i < kexec_crash_image->nr_segments; i++)
-		set_memory_valid(
-			__phys_to_virt(kexec_crash_image->segment[i].mem),
-			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * To preserve the crash dump kernel image, the relevant memory segments
...
@@ -38,7 +38,7 @@ user_backtrace(struct frame_tail __user *tail,
 	if (err)
 		return NULL;

-	lr = ptrauth_strip_insn_pac(buftail.lr);
+	lr = ptrauth_strip_user_insn_pac(buftail.lr);

 	perf_callchain_store(entry, lr);
...
@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs)

 	if (!user_mode(regs)) {
 		printk("pc : %pS\n", (void *)regs->pc);
-		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
 	} else {
 		printk("pc : %016llx\n", regs->pc);
 		printk("lr : %016llx\n", lr);
...
@@ -966,9 +966,6 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
 {
 	const char *v = arm64_get_bp_hardening_vector(slot);

-	if (slot < 0)
-		return;
-
 	__this_cpu_write(this_cpu_vector, v);

 	/*
...
@@ -651,7 +651,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
 			break;

 		case TPIDR2_MAGIC:
-			if (!system_supports_sme())
+			if (!system_supports_tpidr2())
 				goto invalid;

 			if (user->tpidr2)
@@ -802,7 +802,7 @@ static int restore_sigframe(struct pt_regs *regs,
 		err = restore_fpsimd_context(&user);
 	}

-	if (err == 0 && system_supports_sme() && user.tpidr2)
+	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
 		err = restore_tpidr2_context(&user);

 	if (err == 0 && system_supports_sme() && user.za)
@@ -893,6 +893,13 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 			return err;
 	}

+	if (system_supports_tpidr2()) {
+		err = sigframe_alloc(user, &user->tpidr2_offset,
+				     sizeof(struct tpidr2_context));
+		if (err)
+			return err;
+	}
+
 	if (system_supports_sme()) {
 		unsigned int vl;
 		unsigned int vq = 0;
@@ -902,11 +909,6 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 		else
 			vl = task_get_sme_vl(current);

-		err = sigframe_alloc(user, &user->tpidr2_offset,
-				     sizeof(struct tpidr2_context));
-		if (err)
-			return err;
-
 		if (thread_za_enabled(&current->thread))
 			vq = sve_vq_from_vl(vl);
@@ -974,7 +976,7 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
 	}

 	/* TPIDR2 if supported */
-	if (system_supports_sme() && err == 0) {
+	if (system_supports_tpidr2() && err == 0) {
 		struct tpidr2_context __user *tpidr2_ctx =
 			apply_user_offset(user, user->tpidr2_offset);
 		err |= preserve_tpidr2_context(tpidr2_ctx);
...
...@@ -25,7 +25,8 @@ ...@@ -25,7 +25,8 @@
* *
* The regs must be on a stack currently owned by the calling task. * The regs must be on a stack currently owned by the calling task.
*/ */
static __always_inline void unwind_init_from_regs(struct unwind_state *state, static __always_inline void
unwind_init_from_regs(struct unwind_state *state,
struct pt_regs *regs) struct pt_regs *regs)
{ {
unwind_init_common(state, current); unwind_init_common(state, current);
...@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state, ...@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
* *
* The function which invokes this must be noinline. * The function which invokes this must be noinline.
*/ */
static __always_inline void unwind_init_from_caller(struct unwind_state *state) static __always_inline void
unwind_init_from_caller(struct unwind_state *state)
{ {
unwind_init_common(state, current); unwind_init_common(state, current);
...@@ -60,7 +62,8 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state) ...@@ -60,7 +62,8 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
* duration of the unwind, or the unwind will be bogus. It is never valid to * duration of the unwind, or the unwind will be bogus. It is never valid to
* call this for the current task. * call this for the current task.
*/ */
static __always_inline void unwind_init_from_task(struct unwind_state *state, static __always_inline void
unwind_init_from_task(struct unwind_state *state,
struct task_struct *task) struct task_struct *task)
{ {
unwind_init_common(state, task); unwind_init_common(state, task);
...@@ -69,6 +72,32 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state, ...@@ -69,6 +72,32 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
state->pc = thread_saved_pc(task); state->pc = thread_saved_pc(task);
} }
static __always_inline int
unwind_recover_return_address(struct unwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (state->task->ret_stack &&
(state->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
(void *)state->fp);
if (WARN_ON_ONCE(state->pc == orig_pc))
return -EINVAL;
state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
if (is_kretprobe_trampoline(state->pc)) {
state->pc = kretprobe_find_ret_addr(state->task,
(void *)state->fp,
&state->kr_cur);
}
#endif /* CONFIG_KRETPROBES */
return 0;
}
/* /*
* Unwind from one frame record (A) to the next frame record (B). * Unwind from one frame record (A) to the next frame record (B).
* *
...@@ -76,7 +105,8 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state, ...@@ -76,7 +105,8 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
* records (e.g. a cycle), determined based on the location and fp value of A * records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B. * and the location (but not the fp value) of B.
*/ */
static int notrace unwind_next(struct unwind_state *state) static __always_inline int
unwind_next(struct unwind_state *state)
{ {
struct task_struct *tsk = state->task; struct task_struct *tsk = state->task;
unsigned long fp = state->fp; unsigned long fp = state->fp;
...@@ -90,37 +120,18 @@ static int notrace unwind_next(struct unwind_state *state) ...@@ -90,37 +120,18 @@ static int notrace unwind_next(struct unwind_state *state)
if (err) if (err)
return err; return err;
state->pc = ptrauth_strip_insn_pac(state->pc); state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(state->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
(void *)state->fp);
if (WARN_ON_ONCE(state->pc == orig_pc))
return -EINVAL;
state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
if (is_kretprobe_trampoline(state->pc))
state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif
return 0; return unwind_recover_return_address(state);
} }
NOKPROBE_SYMBOL(unwind_next);
static void notrace unwind(struct unwind_state *state, static __always_inline void
stack_trace_consume_fn consume_entry, void *cookie) unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
void *cookie)
{ {
if (unwind_recover_return_address(state))
return;
while (1) { while (1) {
int ret; int ret;
...@@ -131,40 +142,6 @@ static void notrace unwind(struct unwind_state *state, ...@@ -131,40 +142,6 @@ static void notrace unwind(struct unwind_state *state,
break; break;
} }
} }
NOKPROBE_SYMBOL(unwind);
-static bool dump_backtrace_entry(void *arg, unsigned long where)
-{
-       char *loglvl = arg;
-       printk("%s %pSb\n", loglvl, (void *)where);
-       return true;
-}
-
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
-                   const char *loglvl)
-{
-       pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
-       if (regs && user_mode(regs))
-               return;
-
-       if (!tsk)
-               tsk = current;
-
-       if (!try_get_task_stack(tsk))
-               return;
-
-       printk("%sCall trace:\n", loglvl);
-       arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
-
-       put_task_stack(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
-{
-       dump_backtrace(NULL, tsk, loglvl);
-       barrier();
-}
/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
@@ -230,3 +207,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
        unwind(&state, consume_entry, cookie);
}
+static bool dump_backtrace_entry(void *arg, unsigned long where)
+{
+       char *loglvl = arg;
+       printk("%s %pSb\n", loglvl, (void *)where);
+       return true;
+}
+
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+                   const char *loglvl)
+{
+       pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+       if (regs && user_mode(regs))
+               return;
+
+       if (!tsk)
+               tsk = current;
+
+       if (!try_get_task_stack(tsk))
+               return;
+
+       printk("%sCall trace:\n", loglvl);
+       arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
+
+       put_task_stack(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+{
+       dump_backtrace(NULL, tsk, loglvl);
+       barrier();
+}
@@ -16,7 +16,6 @@
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
-#include <linux/kmemleak.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
@@ -46,7 +45,6 @@
#include <kvm/arm_psci.h>

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
-DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
@@ -2130,41 +2128,6 @@ static int __init init_hyp_mode(void)
        return err;
}
-static void __init _kvm_host_prot_finalize(void *arg)
-{
-       int *err = arg;
-
-       if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
-               WRITE_ONCE(*err, -EINVAL);
-}
-
-static int __init pkvm_drop_host_privileges(void)
-{
-       int ret = 0;
-
-       /*
-        * Flip the static key upfront as that may no longer be possible
-        * once the host stage 2 is installed.
-        */
-       static_branch_enable(&kvm_protected_mode_initialized);
-       on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
-       return ret;
-}
-
-static int __init finalize_hyp_mode(void)
-{
-       if (!is_protected_kvm_enabled())
-               return 0;
-
-       /*
-        * Exclude HYP sections from kmemleak so that they don't get peeked
-        * at, which would end badly once inaccessible.
-        */
-       kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
-       kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
-       return pkvm_drop_host_privileges();
-}
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
        struct kvm_vcpu *vcpu;
@@ -2282,14 +2245,6 @@ static __init int kvm_arm_init(void)
        if (err)
                goto out_hyp;

-       if (!in_hyp_mode) {
-               err = finalize_hyp_mode();
-               if (err) {
-                       kvm_err("Failed to finalize Hyp protection\n");
-                       goto out_subs;
-               }
-       }
-
        if (is_protected_kvm_enabled()) {
                kvm_info("Protected nVHE mode initialized successfully\n");
        } else if (in_hyp_mode) {
@@ -4,6 +4,8 @@
 * Author: Quentin Perret <qperret@google.com>
 */

+#include <linux/init.h>
+#include <linux/kmemleak.h>
#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
@@ -13,6 +15,8 @@
#include "hyp_constants.h"

+DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
+
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
@@ -213,3 +217,46 @@ int pkvm_init_host_vm(struct kvm *host_kvm)
        mutex_init(&host_kvm->lock);
        return 0;
}
+static void __init _kvm_host_prot_finalize(void *arg)
+{
+       int *err = arg;
+
+       if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
+               WRITE_ONCE(*err, -EINVAL);
+}
+
+static int __init pkvm_drop_host_privileges(void)
+{
+       int ret = 0;
+
+       /*
+        * Flip the static key upfront as that may no longer be possible
+        * once the host stage 2 is installed.
+        */
+       static_branch_enable(&kvm_protected_mode_initialized);
+       on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
+       return ret;
+}
+
+static int __init finalize_pkvm(void)
+{
+       int ret;
+
+       if (!is_protected_kvm_enabled())
+               return 0;
+
+       /*
+        * Exclude HYP sections from kmemleak so that they don't get peeked
+        * at, which would end badly once inaccessible.
+        */
+       kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+       kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
+
+       ret = pkvm_drop_host_privileges();
+       if (ret)
+               pr_err("Failed to finalize Hyp protection: %d\n", ret);
+
+       return ret;
+}
+device_initcall_sync(finalize_pkvm);
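Worth noting: the de-privilege step now runs as a device_initcall_sync(), which the initcall machinery orders strictly after every plain device_initcall() — so driver probes (the CPU PMU among them) complete while the host is still privileged. A minimal sketch of that ordering guarantee; both stub functions here are invented purely for illustration:

#include <linux/init.h>
#include <linux/printk.h>

static int __init probe_stage(void)
{
        pr_info("level 6: runs first, host still privileged\n");
        return 0;
}
device_initcall(probe_stage);           /* initcall level 6 */

static int __init finalize_stage(void)
{
        pr_info("level 6s: runs only after all level-6 initcalls\n");
        return 0;
}
device_initcall_sync(finalize_stage);   /* initcall level 6s */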
@@ -19,12 +19,6 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);

-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
-                           size_t len)
-{
-       memcpy_flushcache(to, page_address(page) + offset, len);
-}
-
unsigned long __copy_user_flushcache(void *to, const void __user *from,
                                     unsigned long n)
{
@@ -2,7 +2,7 @@
obj-y                           := dma-mapping.o extable.o fault.o init.o \
                                   cache.o copypage.o flush.o \
                                   ioremap.o mmap.o pgd.o mmu.o \
-                                  context.o proc.o pageattr.o
+                                  context.o proc.o pageattr.o fixmap.o
obj-$(CONFIG_HUGETLB_PAGE)      += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE)       += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS)    += ptdump_debugfs.o
@@ -36,22 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
        unsigned long start = (unsigned long)page_address(page);

-       /*
-        * The architecture only requires a clean to the PoC here in order to
-        * meet the requirements of the DMA API. However, some vendors (i.e.
-        * Qualcomm) abuse the DMA API for transferring buffers from the
-        * non-secure to the secure world, resetting the system if a non-secure
-        * access shows up after the buffer has been transferred:
-        *
-        * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
-        *
-        * Using clean+invalidate appears to make this issue less likely, but
-        * the drivers themselves still need fixing as the CPU could issue a
-        * speculative read from the buffer via the linear mapping irrespective
-        * of the cache maintenance we use. Once the drivers are fixed, we can
-        * relax this to a clean operation.
-        */
-       dcache_clean_inval_poc(start, start + size);
+       dcache_clean_poc(start, start + size);
}

#ifdef CONFIG_IOMMU_DMA
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fixmap manipulation code
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define NR_BM_PTE_TABLES \
        SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
        SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

static_assert(NR_BM_PMD_TABLES == 1);

#define __BM_TABLE_IDX(addr, shift) \
        (((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr)  __BM_TABLE_IDX(addr, PMD_SHIFT)

static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static inline pte_t *fixmap_pte(unsigned long addr)
{
        return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
}

static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
        pmd_t pmd = READ_ONCE(*pmdp);
        pte_t *ptep;

        if (pmd_none(pmd)) {
                ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
                __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
        }
}

static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
                                         unsigned long end)
{
        unsigned long next;
        pud_t pud = READ_ONCE(*pudp);
        pmd_t *pmdp;

        if (pud_none(pud))
                __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);

        pmdp = pmd_offset_kimg(pudp, addr);
        do {
                next = pmd_addr_end(addr, end);
                early_fixmap_init_pte(pmdp, addr);
        } while (pmdp++, addr = next, addr != end);
}

static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
                                         unsigned long end)
{
        p4d_t p4d = READ_ONCE(*p4dp);
        pud_t *pudp;

        if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
            p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
        }

        if (p4d_none(p4d))
                __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);

        pudp = pud_offset_kimg(p4dp, addr);
        early_fixmap_init_pmd(pudp, addr, end);
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
        unsigned long addr = FIXADDR_TOT_START;
        unsigned long end = FIXADDR_TOP;

        pgd_t *pgdp = pgd_offset_k(addr);
        p4d_t *p4dp = p4d_offset(pgdp, addr);

        early_fixmap_init_pud(p4dp, addr, end);
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}
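For context, a hedged sketch of the generic API this backs (set_fixmap()/clear_fixmap() from <asm-generic/fixmap.h> expand to __set_fixmap() above); the slot index is just one plausible arm64 example:

static void fixmap_window_demo(phys_addr_t phys)
{
        void *va;

        set_fixmap(FIX_EARLYCON_MEM_BASE, phys);        /* installs a pte above */
        va = (void *)fix_to_virt(FIX_EARLYCON_MEM_BASE);

        /* ... access the page through va ... */

        clear_fixmap(FIX_EARLYCON_MEM_BASE);            /* pte_clear + TLB flush */
}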
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        phys_addr_t dt_phys_base;
        int offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the magic and size
         * fields of the FDT header after mapping the first chunk, double check
         * here if that is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        dt_phys_base = round_down(dt_phys, PAGE_SIZE);
        offset = dt_phys % PAGE_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;

        if (offset + *size > PAGE_SIZE) {
                create_mapping_noalloc(dt_phys_base, dt_virt_base,
                                       offset + *size, prot);
        }

        return dt_virt;
}
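The expected caller is the early FDT handling in setup.c; condensed into a sketch (the function name, pgprot choice and panic message here are assumptions, not this patch's code):

static void __init setup_fdt_sketch(phys_addr_t dt_phys)
{
        int size;
        void *fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

        if (!fdt)       /* unset/misaligned address, bad magic, or oversized blob */
                panic("Invalid device tree blob at %pa\n", &dt_phys);

        /* at least 'size' bytes are mapped now, so parsing is safe */
        early_init_dt_scan(fdt);
}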
/*
 * Copy the fixmap region into a new pgdir.
 */
void __init fixmap_copy(pgd_t *pgdir)
{
        if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
                /*
                 * The fixmap falls in a separate pgd to the kernel, and doesn't
                 * live in the carveout for the swapper_pg_dir. We can simply
                 * re-use the existing dir for the fixmap.
                 */
                set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
                        READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
                pgd_t *bm_pgdp;
                p4d_t *bm_p4dp;
                pud_t *bm_pudp;

                /*
                 * The fixmap shares its top level pgd entry with the kernel
                 * mapping. This can really only occur when we are running
                 * with 16k/4 levels, so we can simply reuse the pud level
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
                bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
                bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
                pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
                pud_clear_fixmap();
        } else {
                BUG();
        }
}
@@ -61,34 +61,8 @@ EXPORT_SYMBOL(memstart_addr);
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
 * otherwise it is empty.
- *
- * Memory reservation for crash kernel either done early or deferred
- * depending on DMA memory zones configs (ZONE_DMA) --
- *
- * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
- * here instead of max_zone_phys(). This lets early reservation of
- * crash kernel memory which has a dependency on arm64_dma_phys_limit.
- * Reserving memory early for crash kernel allows linear creation of block
- * mappings (greater than page-granularity) for all the memory bank rangs.
- * In this scheme a comparatively quicker boot is observed.
- *
- * If ZONE_DMA configs are defined, crash kernel memory reservation
- * is delayed until DMA zone memory range size initialization performed in
- * zone_sizes_init(). The defer is necessary to steer clear of DMA zone
- * memory range to avoid overlap allocation. So crash kernel memory boundaries
- * are not known when mapping all bank memory ranges, which otherwise means
- * not possible to exclude crash kernel range from creating block mappings
- * so page-granularity mappings are created for the entire memory range.
- * Hence a slightly slower boot is observed.
- *
- * Note: Page-granularity mappings are necessary for crash kernel memory
- * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
 */
-#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
-#else
-phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
-#endif

/* Current arm64 boot protocol requires 2MB alignment */
#define CRASH_ALIGN                     SZ_2M
@@ -248,6 +222,8 @@ static void __init zone_sizes_init(void)
        if (!arm64_dma_phys_limit)
                arm64_dma_phys_limit = dma32_phys_limit;
#endif
+       if (!arm64_dma_phys_limit)
+               arm64_dma_phys_limit = PHYS_MASK + 1;

        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        free_area_init(max_zone_pfns);
@@ -408,9 +384,6 @@ void __init arm64_memblock_init(void)
        early_init_fdt_scan_reserved_mem();

-       if (!defer_reserve_crashkernel())
-               reserve_crashkernel();
-
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
@@ -457,7 +430,6 @@ void __init bootmem_init(void)
         * request_standard_resources() depends on crashkernel's memory being
         * reserved, so do it here.
         */
-       if (defer_reserve_crashkernel())
-               reserve_crashkernel();
+       reserve_crashkernel();

        memblock_dump_all();
@@ -11,6 +11,7 @@
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
+#include <asm/kfence.h>

struct page_change_data {
        pgprot_t        set_mask;
@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
        /*
-        * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+        * rodata_full and DEBUG_PAGEALLOC require linear map to be
         * mapped at page granularity, so that it is possible to
         * protect/unprotect single pages.
+        *
+        * KFENCE pool requires page-granular mapping if initialized late.
         */
        return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-               IS_ENABLED(CONFIG_KFENCE);
+               arm64_kfence_can_set_direct_map();
}
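The new arm64_kfence_can_set_direct_map() helper is not part of this hunk; going by the comment above, it presumably reports true only when the kfence pool missed the early carve-out, roughly along these lines (a sketch, not the verbatim helper):

/* Assumed shape: kfence_early_init standing for "the pool was reserved
 * at boot, so the linear map can keep its block mappings". */
extern bool kfence_early_init;

static inline bool arm64_kfence_can_set_direct_map(void)
{
        return !kfence_early_init;
}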
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -45,7 +45,7 @@ static struct addr_marker address_markers[] = {
        { MODULES_END,          "Modules end" },
        { VMALLOC_START,        "vmalloc() area" },
        { VMALLOC_END,          "vmalloc() end" },
-       { FIXADDR_START,        "Fixmap start" },
+       { FIXADDR_TOT_START,    "Fixmap start" },
        { FIXADDR_TOP,          "Fixmap end" },
        { PCI_IO_START,         "PCI I/O start" },
        { PCI_IO_END,           "PCI I/O end" },
@@ -4,23 +4,35 @@
#
# Usage: awk -f gen-sysreg.awk sysregs.txt

+function block_current() {
+	return __current_block[__current_block_depth];
+}
+
# Log an error and terminate
function fatal(msg) {
	print "Error at " NR ": " msg > "/dev/stderr"
+
+	printf "Current block nesting:"
+
+	for (i = 0; i <= __current_block_depth; i++) {
+		printf " " __current_block[i]
+	}
+	printf "\n"
+
	exit 1
}

-# Sanity check that the start or end of a block makes sense at this point in
-# the file. If not, produce an error and terminate.
-#
-# @this - the $Block or $EndBlock
-# @prev - the only valid block to already be in (value of @block)
-# @new - the new value of @block
-function change_block(this, prev, new) {
-	if (block != prev)
-		fatal("unexpected " this " (inside " block ")")
-
-	block = new
-}
+# Enter a new block, setting the active block to @block
+function block_push(block) {
+	__current_block[++__current_block_depth] = block
+}
+
+# Exit a block, setting the active block to the parent block
+function block_pop() {
+	if (__current_block_depth == 0)
+		fatal("error: block_pop() in root block")
+
+	__current_block_depth--;
+}
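The point of this rework: pairwise change_block(this, prev, new) transitions only allowed one level of state, whereas a real stack lets constructs nest (notably an Enum inside either a Sysreg or a SysregFields block, per the rules further down). A standalone C model of the same bookkeeping, written purely for illustration:

#include <stdio.h>
#include <stdlib.h>

static const char *stack[16] = { "Root" };      /* block_current() == stack[depth] */
static int depth;

static void block_push(const char *block)
{
        stack[++depth] = block;
}

static void block_pop(void)
{
        if (depth == 0) {       /* mirrors: fatal("error: block_pop() in root block") */
                fprintf(stderr, "block_pop() in root block\n");
                exit(1);
        }
        depth--;
}

int main(void)
{
        block_push("SysregFields");
        block_push("Enum");             /* now legal: Enum nests in SysregFields */
        block_pop();
        block_pop();
        printf("balanced, back in %s\n", stack[depth]);
        return 0;
}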
# Sanity check the number of records for a field makes sense. If not, produce
@@ -84,10 +96,14 @@ BEGIN {
	print "/* Generated file - do not edit */"
	print ""

-	block = "None"
+	__current_block_depth = 0
+	__current_block[__current_block_depth] = "Root"
}

END {
+	if (__current_block_depth != 0)
+		fatal("Missing terminator for " block_current() " block")
+
	print "#endif /* __ASM_SYSREG_DEFS_H */"
}

@@ -95,8 +111,9 @@ END {
/^$/ { next }
/^[\t ]*#/ { next }

-/^SysregFields/ {
-	change_block("SysregFields", "None", "SysregFields")
+/^SysregFields/ && block_current() == "Root" {
+	block_push("SysregFields")
	expect_fields(2)

	reg = $2
@@ -110,12 +127,10 @@ END {
	next
}

-/^EndSysregFields/ {
+/^EndSysregFields/ && block_current() == "SysregFields" {
	if (next_bit > 0)
		fatal("Unspecified bits in " reg)

-	change_block("EndSysregFields", "SysregFields", "None")
-
	define(reg "_RES0", "(" res0 ")")
	define(reg "_RES1", "(" res1 ")")
	define(reg "_UNKN", "(" unkn ")")
@@ -126,11 +141,13 @@ END {
	res1 = null
	unkn = null

+	block_pop()
	next
}

-/^Sysreg/ {
-	change_block("Sysreg", "None", "Sysreg")
+/^Sysreg/ && block_current() == "Root" {
+	block_push("Sysreg")
	expect_fields(7)

	reg = $2
@@ -160,12 +177,10 @@ END {
	next
}

-/^EndSysreg/ {
+/^EndSysreg/ && block_current() == "Sysreg" {
	if (next_bit > 0)
		fatal("Unspecified bits in " reg)

-	change_block("EndSysreg", "Sysreg", "None")
-
	if (res0 != null)
		define(reg "_RES0", "(" res0 ")")
	if (res1 != null)
@@ -185,12 +200,13 @@ END {
	res1 = null
	unkn = null

+	block_pop()
	next
}

# Currently this is effectively a comment, in future we may want to emit
# defines for the fields.
-/^Fields/ && (block == "Sysreg") {
+/^Fields/ && block_current() == "Sysreg" {
	expect_fields(2)

	if (next_bit != 63)
@@ -208,7 +224,7 @@ END {
}

-/^Res0/ && (block == "Sysreg" || block == "SysregFields") {
+/^Res0/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
	expect_fields(2)
	parse_bitdef(reg, "RES0", $2)
	field = "RES0_" msb "_" lsb
@@ -218,7 +234,7 @@ END {
	next
}

-/^Res1/ && (block == "Sysreg" || block == "SysregFields") {
+/^Res1/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
	expect_fields(2)
	parse_bitdef(reg, "RES1", $2)
	field = "RES1_" msb "_" lsb
@@ -228,7 +244,7 @@ END {
	next
}

-/^Unkn/ && (block == "Sysreg" || block == "SysregFields") {
+/^Unkn/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
	expect_fields(2)
	parse_bitdef(reg, "UNKN", $2)
	field = "UNKN_" msb "_" lsb
@@ -238,7 +254,7 @@ END {
	next
}

-/^Field/ && (block == "Sysreg" || block == "SysregFields") {
+/^Field/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
	expect_fields(3)
	field = $3
	parse_bitdef(reg, field, $2)
@@ -249,15 +265,16 @@ END {
	next
}

-/^Raz/ && (block == "Sysreg" || block == "SysregFields") {
+/^Raz/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
	expect_fields(2)
	parse_bitdef(reg, field, $2)

	next
}

-/^SignedEnum/ {
-	change_block("Enum<", "Sysreg", "Enum")
+/^SignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+	block_push("Enum")
	expect_fields(3)
	field = $3
	parse_bitdef(reg, field, $2)
@@ -268,8 +285,9 @@ END {
	next
}

-/^UnsignedEnum/ {
-	change_block("Enum<", "Sysreg", "Enum")
+/^UnsignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+	block_push("Enum")
	expect_fields(3)
	field = $3
	parse_bitdef(reg, field, $2)
@@ -280,8 +298,9 @@ END {
	next
}

-/^Enum/ {
-	change_block("Enum", "Sysreg", "Enum")
+/^Enum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+	block_push("Enum")
	expect_fields(3)
	field = $3
	parse_bitdef(reg, field, $2)
@@ -291,16 +310,18 @@ END {
	next
}

-/^EndEnum/ {
-	change_block("EndEnum", "Enum", "Sysreg")
-
+/^EndEnum/ && block_current() == "Enum" {
	field = null
	msb = null
	lsb = null

	print ""

+	block_pop()
	next
}

-/0b[01]+/ && block == "Enum" {
+/0b[01]+/ && block_current() == "Enum" {
	expect_fields(2)
	val = $1
	name = $2
@@ -879,7 +879,30 @@ EndEnum
EndSysreg

Sysreg	ID_AA64PFR1_EL1	3	0	0	4	1
-Res0	63:40
+UnsignedEnum	63:60	PFAR
+	0b0000	NI
+	0b0001	IMP
+EndEnum
+UnsignedEnum	59:56	DF2
+	0b0000	NI
+	0b0001	IMP
+EndEnum
+UnsignedEnum	55:52	MTEX
+	0b0000	MTE
+	0b0001	MTE4
+EndEnum
+UnsignedEnum	51:48	THE
+	0b0000	NI
+	0b0001	IMP
+EndEnum
+UnsignedEnum	47:44	GCS
+	0b0000	NI
+	0b0001	IMP
+EndEnum
+Enum	43:40	MTE_frac
+	0b0000	ASYNC
+	0b1111	NI
+EndEnum
UnsignedEnum	39:36	NMI
	0b0000	NI
	0b0001	IMP
@@ -1866,6 +1889,146 @@ Field	1	ZA
Field	0	SM
EndSysreg
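From a description like the new ID_AA64PFR1_EL1 fields above, gen-sysreg.awk emits shift/mask and enum-value constants into the generated header. For the PFAR field that output is roughly of this flavor (macro shapes inferred from the existing sysreg-defs conventions, so treat it as a sketch):

#define ID_AA64PFR1_EL1_PFAR_SHIFT      60
#define ID_AA64PFR1_EL1_PFAR_MASK       GENMASK_ULL(63, 60)
#define ID_AA64PFR1_EL1_PFAR_NI         UL(0b0000)
#define ID_AA64PFR1_EL1_PFAR_IMP        UL(0b0001)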
SysregFields HFGxTR_EL2
Field 63 nAMAIR2_EL1
Field 62 nMAIR2_EL1
Field 61 nS2POR_EL1
Field 60 nPOR_EL1
Field 59 nPOR_EL0
Field 58 nPIR_EL1
Field 57 nPIRE0_EL1
Field 56 nRCWMASK_EL1
Field 55 nTPIDR2_EL0
Field 54 nSMPRI_EL1
Field 53 nGCS_EL1
Field 52 nGCS_EL0
Res0 51
Field 50 nACCDATA_EL1
Field 49 ERXADDR_EL1
Field 48 EXRPFGCDN_EL1
Field 47 EXPFGCTL_EL1
Field 46 EXPFGF_EL1
Field 45 ERXMISCn_EL1
Field 44 ERXSTATUS_EL1
Field 43 ERXCTLR_EL1
Field 42 ERXFR_EL1
Field 41 ERRSELR_EL1
Field 40 ERRIDR_EL1
Field 39 ICC_IGRPENn_EL1
Field 38 VBAR_EL1
Field 37 TTBR1_EL1
Field 36 TTBR0_EL1
Field 35 TPIDR_EL0
Field 34 TPIDRRO_EL0
Field 33 TPIDR_EL1
Field 32 TCR_EL1
Field 31 SCTXNUM_EL0
Field 30 SCTXNUM_EL1
Field 29 SCTLR_EL1
Field 28 REVIDR_EL1
Field 27 PAR_EL1
Field 26 MPIDR_EL1
Field 25 MIDR_EL1
Field 24 MAIR_EL1
Field 23 LORSA_EL1
Field 22 LORN_EL1
Field 21 LORID_EL1
Field 20 LOREA_EL1
Field 19 LORC_EL1
Field 18 ISR_EL1
Field 17 FAR_EL1
Field 16 ESR_EL1
Field 15 DCZID_EL0
Field 14 CTR_EL0
Field 13 CSSELR_EL1
Field 12 CPACR_EL1
Field 11 CONTEXTIDR_EL1
Field 10 CLIDR_EL1
Field 9 CCSIDR_EL1
Field 8 APIBKey
Field 7 APIAKey
Field 6 APGAKey
Field 5 APDBKey
Field 4 APDAKey
Field 3 AMAIR_EL1
Field 2 AIDR_EL1
Field 1 AFSR1_EL1
Field 0 AFSR0_EL1
EndSysregFields
Sysreg HFGRTR_EL2 3 4 1 1 4
Fields HFGxTR_EL2
EndSysreg
Sysreg HFGWTR_EL2 3 4 1 1 5
Fields HFGxTR_EL2
EndSysreg
Sysreg HFGITR_EL2 3 4 1 1 6
Res0 63:61
Field 60 COSPRCTX
Field 59 nGCSEPP
Field 58 nGCSSTR_EL1
Field 57 nGCSPUSHM_EL1
Field 56 nBRBIALL
Field 55 nBRBINJ
Field 54 DCCVAC
Field 53 SVC_EL1
Field 52 SVC_EL0
Field 51 ERET
Field 50 CPPRCTX
Field 49 DVPRCTX
Field 48 CFPRCTX
Field 47 TLBIVAALE1
Field 46 TLBIVALE1
Field 45 TLBIVAAE1
Field 44 TLBIASIDE1
Field 43 TLBIVAE1
Field 42 TLBIVMALLE1
Field 41 TLBIRVAALE1
Field 40 TLBIRVALE1
Field 39 TLBIRVAAE1
Field 38 TLBIRVAE1
Field 37 TLBIRVAALE1IS
Field 36 TLBIRVALE1IS
Field 35 TLBIRVAAE1IS
Field 34 TLBIRVAE1IS
Field 33 TLBIVAALE1IS
Field 32 TLBIVALE1IS
Field 31 TLBIVAAE1IS
Field 30 TLBIASIDE1IS
Field 29 TLBIVAE1IS
Field 28 TLBIVMALLE1IS
Field 27 TLBIRVAALE1OS
Field 26 TLBIRVALE1OS
Field 25 TLBIRVAAE1OS
Field 24 TLBIRVAE1OS
Field 23 TLBIVAALE1OS
Field 22 TLBIVALE1OS
Field 21 TLBIVAAE1OS
Field 20 TLBIASIDE1OS
Field 19 TLBIVAE1OS
Field 18 TLBIVMALLE1OS
Field 17 ATS1E1WP
Field 16 ATS1E1RP
Field 15 ATS1E0W
Field 14 ATS1E0R
Field 13 ATS1E1W
Field 12 ATS1E1R
Field 11 DCZVA
Field 10 DCCIVAC
Field 9 DCCVADP
Field 8 DCCVAP
Field 7 DCCVAU
Field 6 DCCISW
Field 5 DCCSW
Field 4 DCISW
Field 3 DCIVAC
Field 2 ICIVAU
Field 1 ICIALLU
Field 0 ICIALLUIS
EndSysreg
Sysreg	ZCR_EL2	3	4	1	2	0
Fields	ZCR_ELx
EndSysreg
@@ -32,6 +32,11 @@ ENTRY(ftrace_stub)
	BR_EX	%r14
ENDPROC(ftrace_stub)

+SYM_CODE_START(ftrace_stub_direct_tramp)
+	lgr	%r1, %r0
+	BR_EX	%r1
+SYM_CODE_END(ftrace_stub_direct_tramp)
+
	.macro ftrace_regs_entry, allregs=0
	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller
@@ -163,6 +163,11 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
	jmp	.Lftrace_ret
SYM_CODE_END(ftrace_regs_caller)

+SYM_FUNC_START(ftrace_stub_direct_tramp)
+	RET
+SYM_FUNC_END(ftrace_stub_direct_tramp)
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
	pushl	%eax
@@ -309,6 +309,10 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)

+SYM_FUNC_START(ftrace_stub_direct_tramp)
+	CALL_DEPTH_ACCOUNT
+	RET
+SYM_FUNC_END(ftrace_stub_direct_tramp)
+
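These per-arch stubs back the reworked direct-call API in the ftrace.h hunk further down: while a direct ops is being modified or torn down, call sites are transiently pointed at ftrace_stub_direct_tramp instead of the user trampoline. Typical usage of the ops-based API, sketched after the in-tree samples — my_func and my_tramp are assumptions (a traced function and an assembly trampoline with the required calling convention):

static struct ftrace_ops direct;        /* no ->func: addr below is the trampoline */

static int __init attach_direct_call(void)
{
        int ret;

        ret = ftrace_set_filter_ip(&direct, (unsigned long)my_func, 0, 0);
        if (ret)
                return ret;

        /* one trampoline serves every location matched by the ops' filter */
        return register_ftrace_direct(&direct, (unsigned long)my_tramp);
}

static void __exit detach_direct_call(void)
{
        /* call sites pass through ftrace_stub_direct_tramp while this runs */
        unregister_ftrace_direct(&direct, (unsigned long)my_tramp,
                                 true /* free_filters */);
}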
#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -64,8 +64,11 @@ static int agdi_remove(struct platform_device *pdev)
	int err, i;

	err = sdei_event_disable(adata->sdei_event);
-	if (err)
-		return err;
+	if (err) {
+		dev_err(&pdev->dev, "Failed to disable sdei-event #%d (%pe)\n",
+			adata->sdei_event, ERR_PTR(err));
+		return 0;
+	}

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(adata->sdei_event);
@@ -75,7 +78,11 @@ static int agdi_remove(struct platform_device *pdev)
		schedule();
	}

-	return err;
+	if (err)
+		dev_err(&pdev->dev, "Failed to unregister sdei-event #%d (%pe)\n",
+			adata->sdei_event, ERR_PTR(err));
+
+	return 0;
}

static struct platform_driver agdi_driver = {
@@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

+static int sdei_hp_state;
+
struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head list;
@@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void)
{
	int err;

-	WARN_ON_ONCE(preemptible());
-
	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
@@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void)
static void _ipi_mask_cpu(void *ignored)
{
+	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}

@@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void)
{
	int err;

-	WARN_ON_ONCE(preemptible());
-
	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
@@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void)
static void _ipi_unmask_cpu(void *ignored)
{
+	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}

@@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored)
{
	int err;

+	WARN_ON_ONCE(preemptible());
+
	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
@@ -389,8 +391,6 @@ static void _local_event_enable(void *data)
	int err;
	struct sdei_crosscall_args *arg = data;

-	WARN_ON_ONCE(preemptible());
-
	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
@@ -479,8 +479,6 @@ static void _local_event_unregister(void *data)
	int err;
	struct sdei_crosscall_args *arg = data;

-	WARN_ON_ONCE(preemptible());
-
	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
@@ -561,8 +559,6 @@ static void _local_event_register(void *data)
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

-	WARN_ON(preemptible());
-
	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);
@@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
{
	int rv;

+	WARN_ON_ONCE(preemptible());
+
	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
@@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
	int err;

	/* unregister private events */
-	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+	cpuhp_remove_state(sdei_hp_state);

	err = sdei_unregister_shared();
	if (err)
@@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev)
		return err;
	}

-	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
-	if (err)
+	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
+	}
+
+	sdei_hp_state = err;
+	return 0;
}

static int sdei_device_restore(struct device *dev)
@@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
-	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

@@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev)
		goto remove_cpupm;
	}

-	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
-	if (err) {
+	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

+	sdei_hp_state = err;
+
	return 0;

remove_reboot:
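The SDEI changes above follow the standard dynamic-hotplug-state pattern: with CPUHP_AP_ONLINE_DYN the core allocates a slot and returns it as a positive value, and that returned value — not a symbolic constant — is what removal must later pass back (hence the new sdei_hp_state, and the freeze-path fix above). In outline, with invented driver names:

static int mydrv_hp_state;

static int mydrv_setup(void)
{
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                    mydrv_cpu_up, mydrv_cpu_down);

        if (ret < 0)            /* note: success is a positive state number */
                return ret;

        mydrv_hp_state = ret;   /* remember the dynamically allocated state */
        return 0;
}

static void mydrv_teardown(void)
{
        cpuhp_remove_state(mydrv_hp_state);
}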
@@ -100,6 +100,16 @@ config ARM_SMMU_V3_PMU
	  through the SMMU and allow the resulting information to be filtered
	  based on the Stream ID of the corresponding master.

+config ARM_PMUV3
+	depends on HW_PERF_EVENTS && ((ARM && CPU_V7) || ARM64)
+	bool "ARM PMUv3 support" if !ARM64
+	default ARM64
+	help
+	  Say y if you want to use the ARM performance monitor unit (PMU)
+	  version 3. The PMUv3 is the CPU performance monitors on ARMv8
+	  (aarch32 and aarch64) systems that implement the PMUv3
+	  architecture.
+
config ARM_DSU_PMU
	tristate "ARM DynamIQ Shared Unit (DSU) PMU"
	depends on ARM64
@@ -5,6 +5,7 @@ obj-$(CONFIG_ARM_CMN) += arm-cmn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
@@ -656,8 +656,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
	drw_pmu->dev = &pdev->dev;
	platform_set_drvdata(pdev, drw_pmu);

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+	drw_pmu->cfg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(drw_pmu->cfg_base))
		return PTR_ERR(drw_pmu->cfg_base);
@@ -156,10 +156,14 @@ static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
	u64 config2 = event->attr.config2;
	int i;

-	for_each_set_bit(i, (const unsigned long *)&config1, sizeof(config1))
+	for_each_set_bit(i,
+			 (const unsigned long *)&config1,
+			 BITS_PER_TYPE(config1))
		meson_ddr_set_axi_filter(event, i);

-	for_each_set_bit(i, (const unsigned long *)&config2, sizeof(config2))
+	for_each_set_bit(i,
+			 (const unsigned long *)&config2,
+			 BITS_PER_TYPE(config2))
		meson_ddr_set_axi_filter(event, i + 64);

	if (flags & PERF_EF_START)
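The bug being fixed: for_each_set_bit() takes its third argument in bits, but sizeof() yields bytes, so only bits 0-7 of each 64-bit config word were ever scanned. A small kernel-style illustration (the function is invented for demonstration):

static void axi_filter_scan_demo(void)
{
        u64 config1 = BIT_ULL(40);      /* a filter bit above the low byte */
        int i;

        /* sizeof(config1) == 8, so this scans bits 0..7: bit 40 is missed */
        for_each_set_bit(i, (const unsigned long *)&config1, sizeof(config1))
                pr_info("old: bit %d\n", i);

        /* BITS_PER_TYPE(config1) == 64: bit 40 is found */
        for_each_set_bit(i, (const unsigned long *)&config1, BITS_PER_TYPE(config1))
                pr_info("new: bit %d\n", i);
}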
@@ -559,7 +559,21 @@ static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
	return m1_pmu_init(cpu_pmu);
}

+static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->name = "apple_avalanche_pmu";
+	return m1_pmu_init(cpu_pmu);
+}
+
+static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->name = "apple_blizzard_pmu";
+	return m1_pmu_init(cpu_pmu);
+}
+
static const struct of_device_id m1_pmu_of_device_ids[] = {
+	{ .compatible = "apple,avalanche-pmu", .data = m2_pmu_avalanche_init, },
+	{ .compatible = "apple,blizzard-pmu", .data = m2_pmu_blizzard_init, },
	{ .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
	{ .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
	{ },
@@ -581,4 +595,3 @@ static struct platform_driver m1_pmu_driver = {
};

module_platform_driver(m1_pmu_driver);
-MODULE_LICENSE("GPL v2");
@@ -57,14 +57,12 @@
#define CMN_INFO_REQ_VC_NUM		GENMASK_ULL(1, 0)

/* XPs also have some local topology info which has uses too */
-#define CMN_MXP__CONNECT_INFO_P0	0x0008
-#define CMN_MXP__CONNECT_INFO_P1	0x0010
-#define CMN_MXP__CONNECT_INFO_P2	0x0028
-#define CMN_MXP__CONNECT_INFO_P3	0x0030
-#define CMN_MXP__CONNECT_INFO_P4	0x0038
-#define CMN_MXP__CONNECT_INFO_P5	0x0040
+#define CMN_MXP__CONNECT_INFO(p)	(0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE	GENMASK_ULL(4, 0)

+#define CMN_MAX_PORTS			6
+#define CI700_CONNECT_INFO_P2_5_OFFSET	0x10
+
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET			0x2000
@@ -166,7 +164,7 @@
#define CMN_EVENT_BYNODEID(event)	FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event)		FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

-#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(27, 24)
+#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL		GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL		GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
@@ -396,6 +394,25 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
	return NULL;
}

+static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
+				       const struct arm_cmn_node *xp, int port)
+{
+	int offset = CMN_MXP__CONNECT_INFO(port);
+
+	if (port >= 2) {
+		if (cmn->model & (CMN600 | CMN650))
+			return 0;
+
+		/*
+		 * CI-700 may have extra ports, but still has the
+		 * mesh_port_connect_info registers in the way.
+		 */
+		if (cmn->model == CI700)
+			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
+	}
+
+	return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
+}
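A quick standalone check of the layout this helper encodes: CMN-600/650 cannot have devices on ports 2+, CMN-700 packs its connect_info registers contiguously, and CI-700 keeps the legacy gap before port 2. A plain userspace illustration of the macro arithmetic, not driver code:

#include <stdio.h>

#define CMN_MXP__CONNECT_INFO(p)        (0x0008 + 8 * (p))
#define CI700_CONNECT_INFO_P2_5_OFFSET  0x10

int main(void)
{
        for (int p = 0; p < 6; p++)
                printf("port %d: cmn700 @ %#05x, ci700 @ %#05x\n", p,
                       CMN_MXP__CONNECT_INFO(p),
                       CMN_MXP__CONNECT_INFO(p) +
                       (p >= 2 ? CI700_CONNECT_INFO_P2_5_OFFSET : 0));
        return 0;
}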
static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
@@ -469,7 +486,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
	y = cmn->mesh_y;
	while (y--) {
		int xp_base = cmn->mesh_x * y;
-		u8 port[6][CMN_MAX_DIMENSION];
+		u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];

		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "--------+");
@@ -477,14 +494,9 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
		seq_printf(s, "\n%d |", y);
		for (x = 0; x < cmn->mesh_x; x++) {
			struct arm_cmn_node *xp = cmn->xps + xp_base + x;
-			void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;
-
-			port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
-			port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
-			port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
-			port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
-			port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
-			port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
+
+			for (p = 0; p < CMN_MAX_PORTS; p++)
+				port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);

			seq_printf(s, " XP #%-2d |", xp_base + x);
		}
@@ -1546,7 +1558,7 @@ static int arm_cmn_event_init(struct perf_event *event)
	type = CMN_EVENT_TYPE(event);
	/* DTC events (i.e. cycles) already have everything they need */
	if (type == CMN_TYPE_DTC)
-		return 0;
+		return arm_cmn_validate_group(cmn, event);

	eventid = CMN_EVENT_EVENTID(event);

	/* For watchpoints we need the actual XP node here */
@@ -2083,18 +2095,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
		 * from this, since in that case we will see at least one XP
		 * with port 2 connected, for the HN-D.
		 */
-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
-			xp_ports |= BIT(0);
-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
-			xp_ports |= BIT(1);
-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
-			xp_ports |= BIT(2);
-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
-			xp_ports |= BIT(3);
-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
-			xp_ports |= BIT(4);
-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
-			xp_ports |= BIT(5);
+		for (int p = 0; p < CMN_MAX_PORTS; p++)
+			if (arm_cmn_device_connect_info(cmn, xp, p))
+				xp_ports |= BIT(p);

		if (cmn->multi_dtm && (xp_ports & 0xc))
			arm_cmn_init_dtm(dtm++, xp, 1);
@@ -1078,12 +1078,14 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
	u32 acpi_uid;
-	struct device *cpu_dev = get_cpu_device(cpu);
-	struct acpi_device *acpi_dev = ACPI_COMPANION(cpu_dev);
+	struct device *cpu_dev;
+	struct acpi_device *acpi_dev;

+	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -ENODEV;

+	acpi_dev = ACPI_COMPANION(cpu_dev);
	while (acpi_dev) {
		if (!strcmp(acpi_device_hid(acpi_dev),
			    ACPI_PROCESSOR_CONTAINER_HID) &&
@@ -655,8 +655,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
		.attr_groups	= dmc620_pmu_attr_groups,
	};

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dmc620_pmu->base))
		return PTR_ERR(dmc620_pmu->base);
@@ -316,7 +316,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
	if (!name)
		return -ENOMEM;

-	hisi_pmu_init(cpa_pmu, name, THIS_MODULE);
+	hisi_pmu_init(cpa_pmu, THIS_MODULE);

	/* Power Management should be disabled before using CPA PMU. */
	hisi_cpa_pmu_disable_pm(cpa_pmu);
@@ -499,13 +499,6 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
	if (ret)
		return ret;

-	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
-				       &ddrc_pmu->node);
-	if (ret) {
-		dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
-		return ret;
-	}
-
	if (ddrc_pmu->identifier >= HISI_PMU_V2)
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
				      "hisi_sccl%u_ddrc%u_%u",
@@ -516,7 +509,17 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
				      "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
				      ddrc_pmu->index_id);

-	hisi_pmu_init(ddrc_pmu, name, THIS_MODULE);
+	if (!name)
+		return -ENOMEM;
+
+	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+				       &ddrc_pmu->node);
+	if (ret) {
+		dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
+		return ret;
+	}
+
+	hisi_pmu_init(ddrc_pmu, THIS_MODULE);

	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
	if (ret) {
@@ -510,6 +510,11 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
	if (ret)
		return ret;

+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
+			      hha_pmu->sccl_id, hha_pmu->index_id);
+	if (!name)
+		return -ENOMEM;
+
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				       &hha_pmu->node);
	if (ret) {
@@ -517,9 +522,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
		return ret;
	}

-	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
-			      hha_pmu->sccl_id, hha_pmu->index_id);
-	hisi_pmu_init(hha_pmu, name, THIS_MODULE);
+	hisi_pmu_init(hha_pmu, THIS_MODULE);

	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
	if (ret) {
@@ -544,6 +544,11 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
	if (ret)
		return ret;

+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
+			      l3c_pmu->sccl_id, l3c_pmu->ccl_id);
+	if (!name)
+		return -ENOMEM;
+
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
				       &l3c_pmu->node);
	if (ret) {
@@ -551,13 +556,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
		return ret;
	}

-	/*
-	 * CCL_ID is used to identify the L3C in the same SCCL which was
-	 * used _UID by mistake.
-	 */
-	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
-			      l3c_pmu->sccl_id, l3c_pmu->ccl_id);
-	hisi_pmu_init(l3c_pmu, name, THIS_MODULE);
+	hisi_pmu_init(l3c_pmu, THIS_MODULE);

	ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
	if (ret) {
@@ -412,7 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
		return ret;
	}

-	hisi_pmu_init(pa_pmu, name, THIS_MODULE);
+	hisi_pmu_init(pa_pmu, THIS_MODULE);

	ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
	if (ret) {
		dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
@@ -531,12 +531,10 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);

-void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
-		   struct module *module)
+void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
	struct pmu *pmu = &hisi_pmu->pmu;

-	pmu->name = name;
	pmu->module = module;
	pmu->task_ctx_nr = perf_invalid_context;
	pmu->event_init = hisi_uncore_pmu_event_init;
@@ -121,6 +121,5 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
			     struct platform_device *pdev);
-void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
-		   struct module *module);
+void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module);

#endif /* __HISI_UNCORE_PMU_H__ */
@@ -445,7 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
		return ret;
	}

-	hisi_pmu_init(sllc_pmu, name, THIS_MODULE);
+	hisi_pmu_init(sllc_pmu, THIS_MODULE);

	ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
	if (ret) {
@@ -763,8 +763,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

-	memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
+	l3pmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
	if (IS_ERR(l3pmu->regs))
		return PTR_ERR(l3pmu->regs);
@@ -8,7 +8,7 @@
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
-#include <asm/perf_event.h>
+#include <linux/perf/arm_pmuv3.h>

#define ARMV8_PMU_CYCLE_IDX	(ARMV8_PMU_MAX_COUNTERS - 1)
@@ -163,7 +163,6 @@ enum cpuhp_state {
	CPUHP_AP_PERF_X86_CSTATE_STARTING,
	CPUHP_AP_PERF_XTENSA_STARTING,
	CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
-	CPUHP_AP_ARM_SDEI_STARTING,
	CPUHP_AP_ARM_VFP_STARTING,
	CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
	CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
......
@@ -241,6 +241,12 @@ enum {
 	FTRACE_OPS_FL_DIRECT		= BIT(17),
 };

+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define FTRACE_OPS_FL_SAVE_ARGS		FTRACE_OPS_FL_SAVE_REGS
+#else
+#define FTRACE_OPS_FL_SAVE_ARGS		0
+#endif
+
 /*
  * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
  * to a ftrace_ops. Note, the requests may fail.
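The new define lets direct-call users request "the arguments ftrace already
saves" portably: on architectures with DYNAMIC_FTRACE_WITH_ARGS the arguments
are always available and the flag collapses to zero, elsewhere it falls back
to full pt_regs saving. A condensed sketch of how the core combines it (based
on the MULTI_FLAGS helper in kernel/trace/ftrace.c):

	/* Flags a direct-call ftrace_ops gets: direct dispatch, plus
	 * SAVE_REGS only where args are not implicitly available. */
	#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)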
@@ -321,6 +327,9 @@ struct ftrace_ops {
 	unsigned long			trampoline_size;
 	struct list_head		list;
 	ftrace_ops_func_t		ops_func;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	unsigned long			direct_call;
+#endif
 #endif
 };
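With one trampoline per ftrace_ops, the destination can be cached on the ops
itself instead of being looked up per-ip. A condensed sketch of the idea (the
real core code does more bookkeeping):

	/* On registration, remember the trampoline for this ops ... */
	ops->direct_call = addr;

	/* ... so a generic handler can redirect without a hash lookup: */
	static void call_direct_funcs(unsigned long ip, unsigned long pip,
				      struct ftrace_ops *ops,
				      struct ftrace_regs *fregs)
	{
		arch_ftrace_set_direct_caller(fregs, ops->direct_call);
	}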
@@ -397,64 +406,36 @@ struct ftrace_func_entry {
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

 extern int ftrace_direct_func_count;
-int register_ftrace_direct(unsigned long ip, unsigned long addr);
-int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
-int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
-struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
-int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
-				struct dyn_ftrace *rec,
-				unsigned long old_addr,
-				unsigned long new_addr);
 unsigned long ftrace_find_rec_direct(unsigned long ip);
-int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
-int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
-int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
-int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr);
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+			     bool free_filters);
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+
+void ftrace_stub_direct_tramp(void);

 #else
 struct ftrace_ops;
 # define ftrace_direct_func_count 0
-static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
-{
-	return -ENOTSUPP;
-}
-static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
-{
-	return -ENOTSUPP;
-}
-static inline int modify_ftrace_direct(unsigned long ip,
-				       unsigned long old_addr, unsigned long new_addr)
-{
-	return -ENOTSUPP;
-}
-static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
-{
-	return NULL;
-}
-static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
-					      struct dyn_ftrace *rec,
-					      unsigned long old_addr,
-					      unsigned long new_addr)
-{
-	return -ENODEV;
-}
 static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
 {
 	return 0;
 }
-static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
 	return -ENODEV;
 }
-static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+					   bool free_filters)
 {
 	return -ENODEV;
 }
-static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
 	return -ENODEV;
 }
-static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
+static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
 {
 	return -ENODEV;
 }
......
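For callers, migrating from the removed per-ip API to the ops-based one
follows a fixed pattern; a minimal hedged sketch (my_func and my_tramp are
placeholders, the pattern matches the samples updated below):

	static struct ftrace_ops direct;

	/* was: register_ftrace_direct((unsigned long)my_func,
	 *                             (unsigned long)my_tramp); */
	ftrace_set_filter_ip(&direct, (unsigned long)my_func, 0, 0);
	ret = register_ftrace_direct(&direct, (unsigned long)my_tramp);

	/* was: unregister_ftrace_direct(ip, addr); the new free_filters
	 * argument also drops the ops' filter hash, which is why the
	 * explicit ftrace_free_filter() calls disappear from the samples. */
	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);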
@@ -45,8 +45,8 @@ static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd
 	lockdep_assert_held_once(&tr->mutex);

 	/* Instead of updating the trampoline here, we propagate
-	 * -EAGAIN to register_ftrace_direct_multi(). Then we can
-	 * retry register_ftrace_direct_multi() after updating the
+	 * -EAGAIN to register_ftrace_direct(). Then we can
+	 * retry register_ftrace_direct() after updating the
 	 * trampoline.
 	 */
 	if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
@@ -198,7 +198,7 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
 	int ret;

 	if (tr->func.ftrace_managed)
-		ret = unregister_ftrace_direct_multi(tr->fops, (long)old_addr);
+		ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
 	else
 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
@@ -215,9 +215,9 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
 	if (tr->func.ftrace_managed) {
 		if (lock_direct_mutex)
-			ret = modify_ftrace_direct_multi(tr->fops, (long)new_addr);
+			ret = modify_ftrace_direct(tr->fops, (long)new_addr);
 		else
-			ret = modify_ftrace_direct_multi_nolock(tr->fops, (long)new_addr);
+			ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
 	} else {
 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
 	}
@@ -243,7 +243,7 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 	if (tr->func.ftrace_managed) {
 		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
-		ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
+		ret = register_ftrace_direct(tr->fops, (long)new_addr);
 	} else {
 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
 	}
......
@@ -257,7 +257,7 @@ config DYNAMIC_FTRACE_WITH_REGS
 config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 	def_bool y
-	depends on DYNAMIC_FTRACE_WITH_REGS
+	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
 	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

 config DYNAMIC_FTRACE_WITH_CALL_OPS
......
@@ -785,14 +785,7 @@ static struct fgraph_ops fgraph_ops __initdata = {
 };

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-#ifndef CALL_DEPTH_ACCOUNT
-#define CALL_DEPTH_ACCOUNT ""
-#endif
-
-noinline __noclone static void trace_direct_tramp(void)
-{
-	asm(CALL_DEPTH_ACCOUNT);
-}
+static struct ftrace_ops direct;
 #endif

 /*
@@ -870,8 +863,9 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	 * Register direct function together with graph tracer
 	 * and make sure we get graph trace.
 	 */
-	ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
-				     (unsigned long) trace_direct_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
+	ret = register_ftrace_direct(&direct,
+				     (unsigned long)ftrace_stub_direct_tramp);
 	if (ret)
 		goto out;
@@ -891,8 +885,9 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	unregister_ftrace_graph(&fgraph_ops);

-	ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
-				       (unsigned long) trace_direct_tramp);
+	ret = unregister_ftrace_direct(&direct,
+				       (unsigned long)ftrace_stub_direct_tramp,
+				       true);
 	if (ret)
 		goto out;
......
@@ -818,6 +818,10 @@ void __init kfence_alloc_pool(void)
 	if (!kfence_sample_interval)
 		return;

+	/* if the pool has already been initialized by arch, skip the below. */
+	if (__kfence_pool)
+		return;
+
 	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

 	if (!__kfence_pool)
......
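This guard is what lets an architecture claim the pool before the generic
path runs, early enough that the linear map can keep block mappings. A hedged
sketch of the arch-side counterpart (the function name is illustrative, not
the exact arm64 code):

	/* Called from early arch mm setup, before kfence_alloc_pool(). */
	static void __init arch_kfence_pool_early_init(void)
	{
		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
		/* kfence_alloc_pool() later sees __kfence_pool set and returns. */
	}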
@@ -38,7 +38,7 @@ config SAMPLE_FTRACE_DIRECT
 	  that hooks to wake_up_process and prints the parameters.

 config SAMPLE_FTRACE_DIRECT_MULTI
-	tristate "Build register_ftrace_direct_multi() example"
+	tristate "Build register_ftrace_direct() on multiple ips example"
 	depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
 	depends on HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	help
......
@@ -96,6 +96,8 @@ asm (
 #endif /* CONFIG_S390 */

+static struct ftrace_ops direct;
+
 static unsigned long my_tramp = (unsigned long)my_tramp1;
 static unsigned long tramps[2] = {
 	(unsigned long)my_tramp1,
@@ -114,7 +116,7 @@ static int simple_thread(void *arg)
 		if (ret)
 			continue;
 		t ^= 1;
-		ret = modify_ftrace_direct(my_ip, my_tramp, tramps[t]);
+		ret = modify_ftrace_direct(&direct, tramps[t]);
 		if (!ret)
 			my_tramp = tramps[t];
 		WARN_ON_ONCE(ret);
@@ -129,7 +131,9 @@ static int __init ftrace_direct_init(void)
 {
 	int ret;

-	ret = register_ftrace_direct(my_ip, my_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long) my_ip, 0, 0);
+	ret = register_ftrace_direct(&direct, my_tramp);
+
 	if (!ret)
 		simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
 	return ret;
@@ -138,7 +142,7 @@ static int __init ftrace_direct_init(void)
 static void __exit ftrace_direct_exit(void)
 {
 	kthread_stop(simple_tsk);
-	unregister_ftrace_direct(my_ip, my_tramp);
+	unregister_ftrace_direct(&direct, my_tramp, true);
 }

 module_init(ftrace_direct_init);
......
@@ -123,7 +123,7 @@ static int simple_thread(void *arg)
 		if (ret)
 			continue;
 		t ^= 1;
-		ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+		ret = modify_ftrace_direct(&direct, tramps[t]);
 		if (!ret)
 			my_tramp = tramps[t];
 		WARN_ON_ONCE(ret);
@@ -141,7 +141,7 @@ static int __init ftrace_direct_multi_init(void)
 	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
 	ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);

-	ret = register_ftrace_direct_multi(&direct, my_tramp);
+	ret = register_ftrace_direct(&direct, my_tramp);

 	if (!ret)
 		simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
@@ -151,13 +151,12 @@ static int __init ftrace_direct_multi_init(void)
 static void __exit ftrace_direct_multi_exit(void)
 {
 	kthread_stop(simple_tsk);
-	unregister_ftrace_direct_multi(&direct, my_tramp);
-	ftrace_free_filter(&direct);
+	unregister_ftrace_direct(&direct, my_tramp, true);
 }

 module_init(ftrace_direct_multi_init);
 module_exit(ftrace_direct_multi_exit);
 MODULE_AUTHOR("Jiri Olsa");
-MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct()");
 MODULE_LICENSE("GPL");
@@ -73,13 +73,12 @@ static int __init ftrace_direct_multi_init(void)
 	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
 	ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);

-	return register_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
+	return register_ftrace_direct(&direct, (unsigned long) my_tramp);
 }

 static void __exit ftrace_direct_multi_exit(void)
 {
-	unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
-	ftrace_free_filter(&direct);
+	unregister_ftrace_direct(&direct, (unsigned long) my_tramp, true);
 }

 module_init(ftrace_direct_multi_init);
......
@@ -70,16 +70,18 @@ asm (
 #endif /* CONFIG_S390 */

+static struct ftrace_ops direct;
+
 static int __init ftrace_direct_init(void)
 {
-	return register_ftrace_direct((unsigned long)handle_mm_fault,
-				      (unsigned long)my_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long) handle_mm_fault, 0, 0);
+
+	return register_ftrace_direct(&direct, (unsigned long) my_tramp);
 }

 static void __exit ftrace_direct_exit(void)
 {
-	unregister_ftrace_direct((unsigned long)handle_mm_fault,
-				 (unsigned long)my_tramp);
+	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
 }

 module_init(ftrace_direct_init);
......
@@ -63,16 +63,18 @@ asm (
 #endif /* CONFIG_S390 */

+static struct ftrace_ops direct;
+
 static int __init ftrace_direct_init(void)
 {
-	return register_ftrace_direct((unsigned long)wake_up_process,
-				      (unsigned long)my_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+
+	return register_ftrace_direct(&direct, (unsigned long) my_tramp);
 }

 static void __exit ftrace_direct_exit(void)
 {
-	unregister_ftrace_direct((unsigned long)wake_up_process,
-				 (unsigned long)my_tramp);
+	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
 }

 module_init(ftrace_direct_init);
......