Commit 4cb1fc6f authored by Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - update unwinder to cope with module PLTs

 - enable UBSAN on ARM

 - improve kernel fault message

 - update UEFI runtime page tables dump

 - avoid clang-generated __aeabi_uldivmod calls in NWFPE code

 - disable FIQs on CPU shutdown paths

 - update XOR register usage

 - a number of build updates (using .arch, thread pointer, removal of
   lazy evaluation in Makefile)

 - conversion of stacktrace code to stackwalk

 - findbit assembly updates

 - hwcap feature updates for ARMv8 CPUs

 - instruction dump updates for big-endian platforms

 - support for function error injection

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (31 commits)
  ARM: 9279/1: support function error injection
  ARM: 9277/1: Make the dumped instructions consistent with the disassembled ones
  ARM: 9276/1: Refactor dump_instr()
  ARM: 9275/1: Drop '-mthumb' from AFLAGS_ISA
  ARM: 9274/1: Add hwcap for Speculative Store Bypassing Safe
  ARM: 9273/1: Add hwcap for Speculation Barrier(SB)
  ARM: 9272/1: vfp: Add hwcap for FEAT_AA32I8MM
  ARM: 9271/1: vfp: Add hwcap for FEAT_AA32BF16
  ARM: 9270/1: vfp: Add hwcap for FEAT_FHM
  ARM: 9269/1: vfp: Add hwcap for FEAT_DotProd
  ARM: 9268/1: vfp: Add hwcap FPHP and ASIMDHP for FEAT_FP16
  ARM: 9267/1: Define Armv8 registers in AArch32 state
  ARM: findbit: add unwinder information
  ARM: findbit: operate by words
  ARM: findbit: convert to macros
  ARM: findbit: provide more efficient ARMv7 implementation
  ARM: findbit: document ARMv5 bit offset calculation
  ARM: 9259/1: stacktrace: Convert stacktrace to generic ARCH_STACKWALK
  ARM: 9258/1: stacktrace: Make stack walk callback consistent with generic code
  ARM: 9265/1: pass -march= only to compiler
  ...
parents 740afa4d aaa4dd1b
......@@ -17,6 +17,7 @@ config ARM
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_MEMORY
select ARCH_STACKWALK
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
......@@ -27,6 +28,7 @@ config ARM
select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_KEEP_MEMBLOCK
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
......@@ -95,6 +97,7 @@ config ARM
select HAVE_EXIT_THREAD
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
......
......@@ -60,47 +60,54 @@ endif
KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
# This selects which instruction set is used.
arch-$(CONFIG_CPU_32v7M) :=-march=armv7-m
arch-$(CONFIG_CPU_32v7) :=-march=armv7-a
arch-$(CONFIG_CPU_32v6) :=-march=armv6
# Only override the compiler option if ARMv6. The ARMv6K extensions are
# always available in ARMv7
ifeq ($(CONFIG_CPU_32v6),y)
arch-$(CONFIG_CPU_32v6K) :=-march=armv6k
endif
arch-$(CONFIG_CPU_32v5) :=-march=armv5te
arch-$(CONFIG_CPU_32v4T) :=-march=armv4t
arch-$(CONFIG_CPU_32v4) :=-march=armv4
arch-$(CONFIG_CPU_32v3) :=-march=armv3m
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
# testing for a specific architecture or later rather impossible.
arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m
arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 -march=armv7-a
arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 -march=armv6
cpp-$(CONFIG_CPU_32v7M) :=-D__LINUX_ARM_ARCH__=7
cpp-$(CONFIG_CPU_32v7) :=-D__LINUX_ARM_ARCH__=7
cpp-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6
# Only override the compiler option if ARMv6. The ARMv6K extensions are
# always available in ARMv7
ifeq ($(CONFIG_CPU_32v6),y)
arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 -march=armv6k
cpp-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6
endif
arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 -march=armv5te
arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t
arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4
arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3m
# Evaluate arch cc-option calls now
arch-y := $(arch-y)
cpp-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5
cpp-$(CONFIG_CPU_32v4T) :=-D__LINUX_ARM_ARCH__=4
cpp-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4
cpp-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3
# This selects how we optimise for the processor.
tune-$(CONFIG_CPU_ARM7TDMI) =-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM740T) =-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM9TDMI) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM940T) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM946E) =-mtune=arm9e
tune-$(CONFIG_CPU_ARM920T) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM922T) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM925T) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM926T) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_FA526) =-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) =-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) =-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) =-mtune=xscale
tune-$(CONFIG_CPU_XSC3) =-mtune=xscale
tune-$(CONFIG_CPU_FEROCEON) =-mtune=xscale
tune-$(CONFIG_CPU_V6) =-mtune=arm1136j-s
tune-$(CONFIG_CPU_V6K) =-mtune=arm1136j-s
# Evaluate tune cc-option calls now
tune-y := $(tune-y)
tune-$(CONFIG_CPU_ARM7TDMI) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM9TDMI) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM940T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM946E) :=-mtune=arm9e
tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_FA526) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) :=-mtune=xscale
tune-$(CONFIG_CPU_XSC3) :=-mtune=xscale
tune-$(CONFIG_CPU_FEROCEON) :=-mtune=xscale
tune-$(CONFIG_CPU_V6) :=-mtune=arm1136j-s
tune-$(CONFIG_CPU_V6K) :=-mtune=arm1136j-s
ifeq ($(CONFIG_AEABI),y)
CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp
......@@ -117,23 +124,25 @@ CFLAGS_ABI += -meabi gnu
endif
ifeq ($(CONFIG_CURRENT_POINTER_IN_TPIDRURO),y)
CFLAGS_ABI += -mtp=cp15
KBUILD_CFLAGS += -mtp=cp15
endif
# Accept old syntax despite ".syntax unified"
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
CFLAGS_ISA :=-mthumb -Wa,-mimplicit-it=always $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
CFLAGS_ISA :=-Wa,-mimplicit-it=always $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb -D__thumb2__=2
CFLAGS_ISA +=-mthumb
else
CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA)
endif
# Need -Uarm for gcc < 3.x
KBUILD_CPPFLAGS +=$(cpp-y)
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__
......
......@@ -27,6 +27,7 @@ KASAN_SANITIZE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
#
# Architecture dependencies
......@@ -163,4 +164,3 @@ $(obj)/piggy_data: $(obj)/../Image FORCE
$(obj)/piggy.o: $(obj)/piggy_data
CFLAGS_font.o := -Dstatic=
AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
......@@ -13,7 +13,5 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_CPU_V7) += secure_cntvoff.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
......@@ -15,6 +15,8 @@
#include "vlock.h"
.arch armv7-a
.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif
......
......@@ -12,6 +12,8 @@
#include <linux/linkage.h>
#include "vlock.h"
.arch armv7-a
/* Select different code if voting flags can fit in a single word. */
#if VLOCK_VOTING_SIZE > 4
#define FEW(x...)
......
......@@ -761,6 +761,12 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.endif
.endm
.if __LINUX_ARM_ARCH__ < 6
.set .Lrev_l_uses_tmp, 1
.else
.set .Lrev_l_uses_tmp, 0
.endif
/*
* bl_r - branch and link to register
*
......
......@@ -25,6 +25,8 @@
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
#define CPUID_EXT_ISAR6 0x7c
#define CPUID_EXT_PFR2 0x90
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
......@@ -40,6 +42,8 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
#define CPUID_EXT_ISAR6 "c2, 7"
#define CPUID_EXT_PFR2 "c3, 4"
#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
......
......@@ -37,6 +37,11 @@ struct mod_arch_specific {
struct module;
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
#ifdef CONFIG_ARM_MODULE_PLTS
bool in_module_plt(unsigned long loc);
#else
static inline bool in_module_plt(unsigned long loc) { return false; }
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
......
......@@ -21,6 +21,7 @@ struct ptdump_info {
void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
#define EFI_RUNTIME_MAP_END SZ_1G
void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
......
......@@ -163,6 +163,10 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->ARM_r0 = rc;
}
/*
* Update ITSTATE after normal execution of an IT block instruction.
......
......@@ -44,7 +44,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
bool (*fn)(void *, unsigned long), void *data);
extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
......
......@@ -87,6 +87,12 @@
#define MVFR0_DP_BIT (8)
#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
/* MVFR1 bits */
#define MVFR1_ASIMDHP_BIT (20)
#define MVFR1_ASIMDHP_MASK (0xf << MVFR1_ASIMDHP_BIT)
#define MVFR1_FPHP_BIT (24)
#define MVFR1_FPHP_MASK (0xf << MVFR1_FPHP_BIT)
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
#define VFPOPDESC_LENGTH_MASK (0x07 << VFPOPDESC_LENGTH_BIT)
......
......@@ -51,7 +51,7 @@ xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
register unsigned int a4 __asm__("r7");
register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
......@@ -73,7 +73,7 @@ xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
register unsigned int a4 __asm__("r7");
register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
......
......@@ -28,6 +28,12 @@
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)
#define HWCAP_FPHP (1 << 22)
#define HWCAP_ASIMDHP (1 << 23)
#define HWCAP_ASIMDDP (1 << 24)
#define HWCAP_ASIMDFHM (1 << 25)
#define HWCAP_ASIMDBF16 (1 << 26)
#define HWCAP_I8MM (1 << 27)
/*
* HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
......@@ -37,5 +43,7 @@
#define HWCAP2_SHA1 (1 << 2)
#define HWCAP2_SHA2 (1 << 3)
#define HWCAP2_CRC32 (1 << 4)
#define HWCAP2_SB (1 << 5)
#define HWCAP2_SSBS (1 << 6)
#endif /* _UAPI__ASMARM_HWCAP_H */
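The new bits reach userspace through the ELF auxiliary vector. A minimal sketch of probing them, assuming glibc's getauxval() and the HWCAP2_* values from the header above:

/* Hedged userspace sketch: test the new AT_HWCAP2 bits. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SB
#define HWCAP2_SB	(1 << 5)	/* matches the uapi values above */
#define HWCAP2_SSBS	(1 << 6)
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("sb: %d ssbs: %d\n",
	       !!(hwcap2 & HWCAP2_SB), !!(hwcap2 & HWCAP2_SSBS));
	return 0;
}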
......@@ -70,7 +70,6 @@ obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_OF) += devtree.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
......@@ -99,7 +98,6 @@ CFLAGS_head-inflate-data.o := $(call cc-option,-Wframe-larger-than=10240)
obj-$(CONFIG_XIP_DEFLATED_DATA) += head-inflate-data.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
ifeq ($(CONFIG_ARM_PSCI),y)
obj-$(CONFIG_SMP) += psci_smp.o
endif
......
......@@ -9,6 +9,8 @@
#include <asm/assembler.h>
#include <asm/virt.h>
.arch armv7-a
#ifndef ZIMAGE
/*
* For the kernel proper, we need to find out the CPU boot mode long after
......
......@@ -77,6 +77,8 @@ static void machine_crash_nonpanic_core(void *unused)
{
struct pt_regs regs;
local_fiq_disable();
crash_setup_regs(&regs, get_irq_regs());
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
......
......@@ -284,3 +284,17 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
return 0;
}
bool in_module_plt(unsigned long loc)
{
struct module *mod;
bool ret;
preempt_disable();
mod = __module_text_address(loc);
ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
preempt_enable();
return ret;
}
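The containment test above relies on unsigned wraparound: loc - base < size holds exactly when loc lies in [base, base + size), so a single compare checks both bounds. A standalone sketch of the idiom (in_range() is illustrative, not kernel API):

/* True iff x is in [base, base + size); x - base wraps to a huge
 * value when x < base, so one unsigned compare covers both ends. */
static inline bool in_range(u32 x, u32 base, u32 size)
{
	return x - base < size;
}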
......@@ -81,13 +81,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
* whilst unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
static int
callchain_trace(struct stackframe *fr,
void *data)
static bool
callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, fr->pc);
return 0;
perf_callchain_store(entry, pc);
return true;
}
void
......
......@@ -16,17 +16,17 @@ struct return_address_data {
void *addr;
};
static int save_return_addr(struct stackframe *frame, void *d)
static bool save_return_addr(void *d, unsigned long pc)
{
struct return_address_data *data = d;
if (!data->level) {
data->addr = (void *)frame->pc;
data->addr = (void *)pc;
return 1;
return false;
} else {
--data->level;
return 0;
return true;
}
}
......
......@@ -450,6 +450,8 @@ static void __init cpuid_init_hwcaps(void)
{
int block;
u32 isar5;
u32 isar6;
u32 pfr2;
if (cpu_architecture() < CPU_ARCH_ARMv7)
return;
......@@ -485,6 +487,18 @@ static void __init cpuid_init_hwcaps(void)
block = cpuid_feature_extract_field(isar5, 16);
if (block >= 1)
elf_hwcap2 |= HWCAP2_CRC32;
/* Check for Speculation barrier instruction */
isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
block = cpuid_feature_extract_field(isar6, 12);
if (block >= 1)
elf_hwcap2 |= HWCAP2_SB;
/* Check for Speculative Store Bypassing control */
pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
block = cpuid_feature_extract_field(pfr2, 4);
if (block >= 1)
elf_hwcap2 |= HWCAP2_SSBS;
}
static void __init elf_hwcap_fixup(void)
......@@ -1249,6 +1263,12 @@ static const char *hwcap_str[] = {
"vfpd32",
"lpae",
"evtstrm",
"fphp",
"asimdhp",
"asimddp",
"asimdfhm",
"asimdbf16",
"i8mm",
NULL
};
......@@ -1258,6 +1278,8 @@ static const char *hwcap2_str[] = {
"sha1",
"sha2",
"crc32",
"sb",
"ssbs",
NULL
};
......
......@@ -600,6 +600,8 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
*/
static void ipi_cpu_stop(unsigned int cpu)
{
local_fiq_disable();
if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
......@@ -609,9 +611,6 @@ static void ipi_cpu_stop(unsigned int cpu)
set_cpu_online(cpu, false);
local_fiq_disable();
local_irq_disable();
while (1) {
cpu_relax();
wfe();
......
......@@ -127,12 +127,12 @@ int notrace unwind_frame(struct stackframe *frame)
#endif
void notrace walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
bool (*fn)(void *, unsigned long), void *data)
{
while (1) {
int ret;
if (fn(frame, data))
if (!fn(data, frame->pc))
break;
ret = unwind_frame(frame);
if (ret < 0)
......@@ -142,41 +142,32 @@ void notrace walk_stackframe(struct stackframe *frame,
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
struct stack_trace *trace;
unsigned int no_sched_functions;
unsigned int skip;
};
static int save_trace(struct stackframe *frame, void *d)
static void start_stack_trace(struct stackframe *frame, struct task_struct *task,
unsigned long fp, unsigned long sp,
unsigned long lr, unsigned long pc)
{
struct stack_trace_data *data = d;
struct stack_trace *trace = data->trace;
unsigned long addr = frame->pc;
if (data->no_sched_functions && in_sched_functions(addr))
return 0;
if (data->skip) {
data->skip--;
return 0;
}
trace->entries[trace->nr_entries++] = addr;
return trace->nr_entries >= trace->max_entries;
frame->fp = fp;
frame->sp = sp;
frame->lr = lr;
frame->pc = pc;
#ifdef CONFIG_KRETPROBES
frame->kr_cur = NULL;
frame->tsk = task;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
frame->ex_frame = in_entry_text(frame->pc);
#endif
}
/* This must be noinline to so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
struct stack_trace *trace, unsigned int nosched)
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
data.no_sched_functions = nosched;
if (tsk != current) {
if (regs) {
start_stack_trace(&frame, NULL, regs->ARM_fp, regs->ARM_sp,
regs->ARM_lr, regs->ARM_pc);
} else if (task != current) {
#ifdef CONFIG_SMP
/*
* What guarantees do we have here that 'tsk' is not
......@@ -185,64 +176,22 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
*/
return;
#else
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(tsk);
start_stack_trace(&frame, task, thread_saved_fp(task),
thread_saved_sp(task), 0,
thread_saved_pc(task));
#endif
} else {
/* We don't want this function nor the caller */
data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
here:
frame.pc = (unsigned long)&&here;
start_stack_trace(&frame, task,
(unsigned long)__builtin_frame_address(0),
current_stack_pointer,
(unsigned long)__builtin_return_address(0),
(unsigned long)&&here);
/* skip this function */
if (unwind_frame(&frame))
return;
}
#ifdef CONFIG_KRETPROBES
frame.kr_cur = NULL;
frame.tsk = tsk;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
frame.ex_frame = false;
#endif
walk_stackframe(&frame, save_trace, &data);
}
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
data.no_sched_functions = 0;
frame.fp = regs->ARM_fp;
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
#ifdef CONFIG_KRETPROBES
frame.kr_cur = NULL;
frame.tsk = current;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
frame.ex_frame = in_entry_text(frame.pc);
#endif
walk_stackframe(&frame, save_trace, &data);
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL(save_stack_trace_tsk);
void save_stack_trace(struct stack_trace *trace)
{
__save_stack_trace(current, trace, 0);
walk_stackframe(&frame, consume_entry, cookie);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
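With ARCH_STACKWALK selected, callers no longer fill in a struct stack_trace; they pass arch_stack_walk() a stack_trace_consume_fn that returns true to keep walking. A minimal sketch of a consumer, assuming the generic <linux/stacktrace.h> interface (depth_cookie and dump_depth() are hypothetical):

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

struct depth_cookie {			/* hypothetical */
	unsigned int nr;
};

static bool count_entry(void *cookie, unsigned long pc)
{
	struct depth_cookie *c = cookie;

	c->nr++;
	return c->nr < 16;	/* false stops the walk, as in walk_stackframe() */
}

static void dump_depth(void)		/* hypothetical */
{
	struct depth_cookie c = { 0 };

	arch_stack_walk(count_entry, &c, current, NULL);
	pr_info("walked %u frames\n", c.nr);
}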
......@@ -34,6 +34,7 @@
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
".arch armv7-a\n" \
"0: ldrex"B" %2, [%3]\n" \
"1: strex"B" %0, %1, [%3]\n" \
" cmp %0, #0\n" \
......
......@@ -178,19 +178,22 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (!user_mode(regs)) {
if (thumb) {
u16 val16;
bad = get_kernel_nofault(val16, &((u16 *)addr)[i]);
val = val16;
} else {
bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
}
if (thumb) {
u16 tmp;
if (user_mode(regs))
bad = get_user(tmp, &((u16 __user *)addr)[i]);
else
bad = get_kernel_nofault(tmp, &((u16 *)addr)[i]);
val = __mem_to_opcode_thumb16(tmp);
} else {
if (thumb)
bad = get_user(val, &((u16 *)addr)[i]);
if (user_mode(regs))
bad = get_user(val, &((u32 __user *)addr)[i]);
else
bad = get_user(val, &((u32 *)addr)[i]);
bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
val = __mem_to_opcode_arm(val);
}
if (!bad)
......
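The refactor funnels every fetched value through __mem_to_opcode_thumb16()/__mem_to_opcode_arm() because on a BE8 kernel data accesses are big-endian while instructions remain little-endian: a raw load yields a byte-swapped value that no longer matches the disassembly. A hedged illustration (fetch_arm_insn() is not kernel API; the helpers come from asm/opcodes.h and are no-ops on little-endian builds):

#include <asm/opcodes.h>

static u32 fetch_arm_insn(const u32 *p)
{
	u32 raw = *p;				/* data-endian view */

	return __mem_to_opcode_arm(raw);	/* canonical instruction value */
}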
......@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/module.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
......@@ -395,8 +396,18 @@ int unwind_frame(struct stackframe *frame)
idx = unwind_find_idx(frame->pc);
if (!idx) {
if (frame->pc && kernel_text_address(frame->pc))
if (frame->pc && kernel_text_address(frame->pc)) {
if (in_module_plt(frame->pc) && frame->pc != frame->lr) {
/*
* Quoting Ard: Veneers only set PC using a
* PC+immediate LDR, and so they don't affect
* the state of the stack or the register file
*/
frame->pc = frame->lr;
return URC_OK;
}
pr_warn("unwind: Index not found %08lx\n", frame->pc);
}
return -URC_FAILURE;
}
......
......@@ -36,10 +36,6 @@ else
lib-y += io-readsw-armv4.o io-writesw-armv4.o
endif
ifeq ($(CONFIG_ARCH_RPC),y)
AFLAGS_delay-loop.o += -march=armv4
endif
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
......@@ -48,3 +44,5 @@ ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
CFLAGS_xor-neon.o += $(NEON_FLAGS)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
endif
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
......@@ -8,6 +8,10 @@
#include <asm/assembler.h>
#include <asm/delay.h>
#ifdef CONFIG_ARCH_RPC
.arch armv4
#endif
.text
.LC0: .word loops_per_jiffy
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/error-injection.h>
#include <linux/kprobes.h>
void override_function_with_return(struct pt_regs *regs)
{
instruction_pointer_set(regs, regs->ARM_lr);
}
NOKPROBE_SYMBOL(override_function_with_return);
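Together with the regs_set_return_value() helper added to ptrace.h above, this gives the generic fail_function framework everything it needs: injection rewrites PC to LR and plants the chosen return value in r0. A hedged sketch of opting a function in (my_hw_init() is hypothetical; ALLOW_ERROR_INJECTION() is the generic error-injection macro):

#include <linux/errno.h>
#include <linux/error-injection.h>

static noinline int my_hw_init(void)	/* hypothetical */
{
	/* real initialisation work here */
	return 0;
}
/* Declare that my_hw_init() may have an errno forced as its return value. */
ALLOW_ERROR_INJECTION(my_hw_init, ERRNO);

With CONFIG_FAIL_FUNCTION enabled, the failure is then driven from debugfs, e.g. writing my_hw_init to /sys/kernel/debug/fail_function/inject and an errno to the function's retval file.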
......@@ -12,182 +12,128 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.text
/*
* Purpose : Find a 'zero' bit
* Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
*/
ENTRY(_find_first_zero_bit_le)
teq r1, #0
beq 3f
mov r2, #0
1:
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
* Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
*/
ENTRY(_find_next_zero_bit_le)
cmp r2, r1
bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
#ifdef __ARMEB__
#define SWAB_ENDIAN le
#else
#define SWAB_ENDIAN be
#endif
/*
* Purpose : Find a 'one' bit
* Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
*/
ENTRY(_find_first_bit_le)
teq r1, #0
.macro find_first, endian, set, name
ENTRY(_find_first_\name\()bit_\endian)
UNWIND( .fnstart)
teq r1, #0
beq 3f
mov r2, #0
1:
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
1: ldr r3, [r0], #4
.ifeq \set
mvns r3, r3 @ invert/test bits
.else
movs r3, r3 @ test bits
.endif
.ifc \endian, SWAB_ENDIAN
bne .L_found_swab
.else
bne .L_found @ found the bit?
.endif
add r2, r2, #32 @ next index
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
3: mov r0, r1 @ no more bits
ret lr
ENDPROC(_find_first_bit_le)
UNWIND( .fnend)
ENDPROC(_find_first_\name\()bit_\endian)
.endm
/*
* Purpose : Find next 'one' bit
* Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
*/
ENTRY(_find_next_bit_le)
.macro find_next, endian, set, name
ENTRY(_find_next_\name\()bit_\endian)
UNWIND( .fnstart)
cmp r2, r1
bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
mov ip, r2, lsr #5 @ word index
add r0, r0, ip, lsl #2
ands ip, r2, #31 @ bit position
beq 1b
ldr r3, [r0], #4
.ifeq \set
mvn r3, r3 @ invert bits
.endif
.ifc \endian, SWAB_ENDIAN
rev_l r3, ip
.if .Lrev_l_uses_tmp
@ we need to recompute ip because rev_l will have overwritten
@ it.
and ip, r2, #31 @ bit position
.endif
.endif
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
orr r2, r2, #31 @ no zero bits
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
UNWIND( .fnend)
ENDPROC(_find_next_\name\()bit_\endian)
.endm
#ifdef __ARMEB__
.macro find_bit, endian, set, name
find_first \endian, \set, \name
find_next \endian, \set, \name
.endm
ENTRY(_find_first_zero_bit_be)
teq r1, #0
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
/* _find_first_zero_bit_le and _find_next_zero_bit_le */
find_bit le, 0, zero_
ENTRY(_find_next_zero_bit_be)
cmp r2, r1
bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
/* _find_first_bit_le and _find_next_bit_le */
find_bit le, 1
ENTRY(_find_first_bit_be)
teq r1, #0
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
#ifdef __ARMEB__
ENTRY(_find_next_bit_be)
cmp r2, r1
bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
/* _find_first_zero_bit_be and _find_next_zero_bit_be */
find_bit be, 0, zero_
/* _find_first_bit_be and _find_next_bit_be */
find_bit be, 1
#endif
/*
* One or more bits in the LSB of r3 are assumed to be set.
*/
.L_found_swab:
UNWIND( .fnstart)
rev_l r3, ip
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
#if __LINUX_ARM_ARCH__ >= 7
rbit r3, r3 @ reverse bits
clz r3, r3 @ count high zero bits
add r0, r2, r3 @ add offset of first set bit
#elif __LINUX_ARM_ARCH__ >= 5
rsb r0, r3, #0
and r3, r3, r0
clz r3, r3
rsb r3, r3, #31
add r0, r2, r3
and r3, r3, r0 @ mask out lowest bit set
clz r3, r3 @ count high zero bits
rsb r3, r3, #31 @ offset of first set bit
add r0, r2, r3 @ add offset of first set bit
#else
tst r3, #0x0f
mov ip, #~0
tst r3, ip, lsr #16 @ test bits 0-15
addeq r2, r2, #16
moveq r3, r3, lsr #16
tst r3, #0x00ff
addeq r2, r2, #8
moveq r3, r3, lsr #8
tst r3, #0x000f
addeq r2, r2, #4
movne r3, r3, lsl #4
tst r3, #0x30
moveq r3, r3, lsr #4
tst r3, #0x0003
addeq r2, r2, #2
movne r3, r3, lsl #2
tst r3, #0x40
moveq r3, r3, lsr #2
tst r3, #0x0001
addeq r2, r2, #1
mov r0, r2
#endif
cmp r1, r0 @ Clamp to maxbit
movlo r0, r1
ret lr
UNWIND( .fnend)
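The rewritten routines scan a 32-bit word per iteration instead of a byte, then locate the bit with rbit+clz on ARMv7. A C sketch of the little-endian find_next_zero_bit loop the macros expand to (illustrative only; the kernel implementation is the assembly above):

/* Word-at-a-time scan mirroring the macro-generated assembly: invert the
 * word so a zero bit reads as one, shift off bits below 'off', then count
 * trailing zeros (the rbit+clz pair on ARMv7). */
static unsigned int find_next_zero_bit_sketch(const unsigned long *addr,
					      unsigned int maxbit,
					      unsigned int off)
{
	while (off < maxbit) {
		unsigned long w = ~addr[off / 32] >> (off % 32);

		if (w) {
			off += __builtin_ctzl(w);
			return off < maxbit ? off : maxbit; /* clamp, as movlo does */
		}
		off = (off | 31) + 1;	/* advance to the next word boundary */
	}
	return maxbit;
}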
......@@ -14,9 +14,6 @@ obj-$(CONFIG_SOC_SAMV7) += samv7.o
# Power Management
obj-$(CONFIG_ATMEL_PM) += pm.o pm_suspend.o
ifeq ($(CONFIG_CPU_V7),y)
AFLAGS_pm_suspend.o := -march=armv7-a
endif
ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
endif
......
......@@ -12,6 +12,10 @@
#include "pm.h"
#include "pm_data-offsets.h"
#ifdef CONFIG_CPU_V7
.arch armv7-a
#endif
#define SRAMC_SELF_FRESH_ACTIVE 0x01
#define SRAMC_SELF_FRESH_EXIT 0x00
......
......@@ -34,7 +34,6 @@ obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_IMX7D_CA7)$(CONFIG_SOC_LS1021A),)
AFLAGS_headsmp.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
endif
......@@ -48,12 +47,10 @@ obj-$(CONFIG_SOC_IMX7D_CM4) += mach-imx7d-cm4.o
obj-$(CONFIG_SOC_IMX7ULP) += mach-imx7ulp.o pm-imx7ulp.o
ifeq ($(CONFIG_SUSPEND),y)
AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
endif
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
......
......@@ -8,6 +8,8 @@
#include <linux/init.h>
#include <asm/assembler.h>
.arch armv7-a
diag_reg_offset:
.word g_diag_reg - .
......
......@@ -9,6 +9,8 @@
#include <asm/hardware/cache-l2x0.h>
#include "hardware.h"
.arch armv7-a
/*
* The following code must assume it is running from physical address
* where absolute virtual addresses to the data section have to be
......
......@@ -9,6 +9,8 @@
#include <asm/hardware/cache-l2x0.h>
#include "hardware.h"
.arch armv7-a
/*
* ==================== low level suspend ====================
*
......
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -I$(srctree)/arch/arm/plat-orion/include
AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
CFLAGS_pmsu.o := -march=armv7-a
obj-$(CONFIG_MACH_MVEBU_ANY) += system-controller.o mvebu-soc-id.o
ifeq ($(CONFIG_MACH_MVEBU_V7),y)
......
......@@ -20,6 +20,7 @@
#include <asm/assembler.h>
#include <asm/cp15.h>
.arch armv7-a
.text
/*
* Returns the coherency base address in r1 (r0 is untouched), or 0 if
......
......@@ -291,6 +291,7 @@ int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
/* Test the CR_C bit and set it if it was cleared */
asm volatile(
".arch armv7-a\n\t"
"mrc p15, 0, r0, c1, c0, 0 \n\t"
"tst r0, %0 \n\t"
"orreq r0, r0, #(1 << 2) \n\t"
......
# SPDX-License-Identifier: GPL-2.0-only
AFLAGS_headsmp.o += -march=armv7-a
obj-$(CONFIG_ARCH_WPCM450) += wpcm450.o
obj-$(CONFIG_ARCH_NPCM7XX) += npcm7xx.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
......@@ -6,6 +6,8 @@
#include <linux/init.h>
#include <asm/assembler.h>
.arch armv7-a
/*
* The boot ROM does not start secondary CPUs in SVC mode, so we need to do that
* here.
......
# SPDX-License-Identifier: GPL-2.0
asflags-y += -march=armv7-a
obj-y += io.o
obj-y += irq.o
obj-y += pm.o
......
......@@ -19,6 +19,8 @@
#define PMC_SCRATCH41 0x140
.arch armv7-a
#ifdef CONFIG_PM_SLEEP
/*
* tegra_resume
......
......@@ -47,6 +47,8 @@
#define PLLM_STORE_MASK (1 << 1)
#define PLLP_STORE_MASK (1 << 2)
.arch armv7-a
.macro test_pll_state, rd, test_mask
ldr \rd, tegra_pll_state
tst \rd, #\test_mask
......
......@@ -78,6 +78,8 @@
#define PLLX_STORE_MASK (1 << 4)
#define PLLM_PMC_STORE_MASK (1 << 5)
.arch armv7-a
.macro emc_device_mask, rd, base
ldr \rd, [\base, #EMC_ADR_CFG]
tst \rd, #0x1
......
......@@ -22,6 +22,8 @@
#define CLK_RESET_CCLK_BURST 0x20
#define CLK_RESET_CCLK_DIVIDER 0x24
.arch armv7-a
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PM_SLEEP)
/*
* tegra_disable_clean_inv_dcache
......
......@@ -33,9 +33,6 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o
AFLAGS_abort-ev6.o :=-Wa,-march=armv6k
AFLAGS_abort-ev7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
......@@ -49,10 +46,6 @@ obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o
AFLAGS_cache-v6.o :=-Wa,-march=armv6
AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
AFLAGS_cache-v7m.o :=-Wa,-march=armv7-m
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
obj-$(CONFIG_CPU_COPY_FEROCEON) += copypage-feroceon.o
......@@ -62,8 +55,6 @@ obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o
obj-$(CONFIG_CPU_COPY_FA) += copypage-fa.o
CFLAGS_copypage-feroceon.o := -march=armv5te
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
......@@ -72,9 +63,6 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
AFLAGS_tlb-v6.o :=-Wa,-march=armv6
AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
obj-$(CONFIG_CPU_ARM740T) += proc-arm740.o
......@@ -101,9 +89,6 @@ obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
obj-$(CONFIG_CPU_V7M) += proc-v7m.o
AFLAGS_proc-v6.o :=-Wa,-march=armv6
AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
......
......@@ -16,6 +16,7 @@
* abort here if the I-TLB and D-TLB aren't seeing the same
* picture. Unfortunately, this does happen. We live with it.
*/
.arch armv6k
.align 5
ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
......
......@@ -12,6 +12,7 @@
*
* Purpose : obtain information about current aborted instruction.
*/
.arch armv7-a
.align 5
ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
......
......@@ -19,6 +19,8 @@
#define D_CACHE_LINE_SIZE 32
#define BTB_FLUSH_SIZE 8
.arch armv6
/*
* v6_flush_icache_all()
*
......
......@@ -16,6 +16,8 @@
#include "proc-macros.S"
.arch armv7-a
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
.globl icache_size
.data
......
......@@ -18,6 +18,8 @@
#include "proc-macros.S"
.arch armv7-m
/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
movw \rt, #:lower16:BASEADDR_V7M_SCB + \reg
......
......@@ -15,6 +15,7 @@ static void feroceon_copy_user_page(void *kto, const void *kfrom)
int tmp;
asm volatile ("\
.arch armv5te \n\
1: ldmia %1!, {r2 - r7, ip, lr} \n\
pld [%1, #0] \n\
pld [%1, #32] \n\
......
......@@ -124,8 +124,9 @@ static void die_kernel_fault(const char *msg, struct mm_struct *mm,
{
bust_spinlocks(1);
pr_alert("8<--- cut here ---\n");
pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
msg, addr);
pr_alert("Unable to handle kernel %s at virtual address %08lx when %s\n",
msg, addr, fsr & FSR_LNX_PF ? "execute" :
fsr & FSR_WRITE ? "write" : "read");
show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
......
......@@ -32,6 +32,8 @@
#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
.arch armv6
ENTRY(cpu_v6_proc_init)
ret lr
......
......@@ -24,6 +24,8 @@
#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
.arch armv7-a
/*
* cpu_v7_switch_mm(pgd_phys, tsk)
*
......
......@@ -24,6 +24,8 @@
#include "proc-v7-2level.S"
#endif
.arch armv7-a
ENTRY(cpu_v7_proc_init)
ret lr
ENDPROC(cpu_v7_proc_init)
......
......@@ -17,6 +17,8 @@
#define HARVARD_TLB
.arch armv6
/*
* v6wbi_flush_user_tlb_range(start, end, vma)
*
......
......@@ -16,6 +16,8 @@
#include <asm/tlbflush.h>
#include "proc-macros.S"
.arch armv7-a
/*
* v7wbi_flush_user_tlb_range(start, end, vma)
*
......
......@@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \
entry.o
nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o
# Try really hard to avoid generating calls to __aeabi_uldivmod() from
# float64_rem() due to loop elision.
ifdef CONFIG_CC_IS_CLANG
CFLAGS_softfloat.o += -mllvm -replexitval=never
endif
......@@ -37,6 +37,7 @@ endif
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
......
......@@ -774,6 +774,7 @@ static int __init vfp_init(void)
{
unsigned int vfpsid;
unsigned int cpu_arch = cpu_architecture();
unsigned int isar6;
/*
* Enable the access to the VFP on all online CPUs so the
......@@ -831,7 +832,38 @@ static int __init vfp_init(void)
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
if (((fmrx(MVFR1) & MVFR1_ASIMDHP_MASK) >> MVFR1_ASIMDHP_BIT) == 0x2)
elf_hwcap |= HWCAP_ASIMDHP;
if (((fmrx(MVFR1) & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT) == 0x3)
elf_hwcap |= HWCAP_FPHP;
}
/*
* Check for the presence of Advanced SIMD Dot Product
* instructions.
*/
isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
if (cpuid_feature_extract_field(isar6, 4) == 0x1)
elf_hwcap |= HWCAP_ASIMDDP;
/*
* Check for the presence of Advanced SIMD Floating point
* half-precision multiplication instructions.
*/
if (cpuid_feature_extract_field(isar6, 8) == 0x1)
elf_hwcap |= HWCAP_ASIMDFHM;
/*
* Check for the presence of Advanced SIMD Bfloat16
* floating point instructions.
*/
if (cpuid_feature_extract_field(isar6, 20) == 0x1)
elf_hwcap |= HWCAP_ASIMDBF16;
/*
* Check for the presence of Advanced SIMD and floating point
* Int8 matrix multiplication instructions.
*/
if (cpuid_feature_extract_field(isar6, 24) == 0x1)
elf_hwcap |= HWCAP_I8MM;
/* Extract the architecture version on pre-cpuid scheme */
} else {
if (vfpsid & FPSID_NODOUBLE) {
......
......@@ -23,6 +23,7 @@ struct ptdump_info {
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_PTDUMP_DEBUGFS
#define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64
void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
......
......@@ -421,12 +421,6 @@ static int amba_pm_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops amba_pm = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
.freeze = pm_generic_freeze,
.thaw = pm_generic_thaw,
.poweroff = pm_generic_poweroff,
.restore = pm_generic_restore,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
......
......@@ -25,14 +25,14 @@
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
#if defined(CONFIG_PTDUMP_DEBUGFS) || defined(CONFIG_ARM_PTDUMP_DEBUGFS)
#include <asm/ptdump.h>
static struct ptdump_info efi_ptdump_info = {
.mm = &efi_mm,
.markers = (struct addr_marker[]){
{ 0, "UEFI runtime start" },
{ DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
{ EFI_RUNTIME_MAP_END, "UEFI runtime end" },
{ -1, NULL }
},
.base_addr = 0,
......
......@@ -33,8 +33,6 @@ obj-$(CONFIG_FPGA_DFL_EMIF) += dfl-emif.o
ti-emif-sram-objs := ti-emif-pm.o ti-emif-sram-pm.o
AFLAGS_ti-emif-sram-pm.o :=-Wa,-march=armv7-a
$(obj)/ti-emif-sram-pm.o: $(obj)/ti-emif-asm-offsets.h
$(obj)/ti-emif-asm-offsets.h: $(obj)/emif-asm-offsets.s FORCE
......
......@@ -28,6 +28,7 @@
.arm
.align 3
.arch armv7-a
ENTRY(ti_emif_sram)
......
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ARM) += s2-arm.o pm-arm.o
AFLAGS_s2-arm.o := -march=armv7-a
obj-$(CONFIG_BMIPS_GENERIC) += s2-mips.o s3-mips.o pm-mips.o
......@@ -8,6 +8,7 @@
#include "pm.h"
.arch armv7-a
.text
.align 3
......