Commit ac347a06 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:
 "Mostly PMU fixes and a reworking of the pseudo-NMI disabling on broken
  MediaTek firmware:

   - Move the MediaTek GIC quirk handling from irqchip to core. Before
     the merge window, commit 44bd78dd ("irqchip/gic-v3: Disable
     pseudo NMIs on MediaTek devices w/ firmware issues") temporarily
     addressed this issue; it is now fixed at a deeper level in the arch
     code

   - Reject events meant for other PMUs in the CoreSight PMU driver,
     otherwise some of the core PMU events would disappear

   - Fix the Armv8 PMUv3 driver to not truncate 64-bit registers, which
     made some events invisible (see the truncation sketch below)

   - Remove duplicate declaration of __arm64_sys##name following the
     patch to avoid prototype warning for syscalls

   - Typos in the elf_hwcap documentation"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/syscall: Remove duplicate declaration
  Revert "arm64: smp: avoid NMI IPIs with broken MediaTek FW"
  arm64: Move MediaTek GIC quirk handling from irqchip to core
  arm64/arm: arm_pmuv3: perf: Don't truncate 64-bit registers
  perf: arm_cspmu: Reject events meant for other PMUs
  Documentation/arm64: Fix typos in elf_hwcaps
parents e1d809b3 f8612805
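As background for the "Don't truncate 64-bit registers" fix below, here is a minimal, self-contained C sketch (illustrative names only, not kernel code) of the underlying problem: an accessor declared with a 32-bit return type silently drops the upper half of a 64-bit register value, which is what widening read_pmcr(), write_pmccfiltr() and the read_pmceid*() helpers to u64 avoids.

#include <stdint.h>
#include <stdio.h>

/* A 64-bit "register" whose upper half carries real information. */
static uint64_t fake_reg = (1ULL << 32) | 0x1;

/* Narrow accessor: the implicit conversion discards bits [63:32]. */
static uint32_t read_reg_narrow(void)
{
        return fake_reg;
}

/* Widened accessor: the full 64-bit value is preserved. */
static uint64_t read_reg_wide(void)
{
        return fake_reg;
}

int main(void)
{
        printf("narrow: 0x%llx\n", (unsigned long long)read_reg_narrow()); /* 0x1 */
        printf("wide:   0x%llx\n", (unsigned long long)read_reg_wide());   /* 0x100000001 */
        return 0;
}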
@@ -174,7 +174,7 @@ HWCAP2_DCPODP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
HWCAP2_SVE2
Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0001.
HWCAP2_SVEAES
Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
@@ -222,7 +222,7 @@ HWCAP2_RNG
Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
HWCAP2_BTI
Functionality implied by ID_AA64PFR0_EL1.BT == 0b0001.
Functionality implied by ID_AA64PFR1_EL1.BT == 0b0001.
HWCAP2_MTE
Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described
@@ -232,7 +232,7 @@ HWCAP2_ECV
Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.
HWCAP2_AFP
Functionality implied by ID_AA64MFR1_EL1.AFP == 0b0001.
Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001.
HWCAP2_RPRES
Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.
@@ -23,6 +23,8 @@
#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
@@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
return read_sysreg(PMCCNTR);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, PMXEVCNTR);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(PMXEVCNTR);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, PMXEVTYPER);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, PMCNTENSET);
@@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
write_sysreg(val, PMUSERENR);
}
static inline u32 read_pmceid0(void)
{
return read_sysreg(PMCEID0);
}
static inline u32 read_pmceid1(void)
{
return read_sysreg(PMCEID1);
}
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
@@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
/* PMU Version in DFR Register */
#define ARMV8_PMU_DFR_VER_NI 0
#define ARMV8_PMU_DFR_VER_V3P1 0x4
#define ARMV8_PMU_DFR_VER_V3P4 0x5
#define ARMV8_PMU_DFR_VER_V3P5 0x6
#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
@@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver)
        return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
}

static inline u64 read_pmceid0(void)
{
        u64 val = read_sysreg(PMCEID0);

        if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
                val |= (u64)read_sysreg(PMCEID2) << 32;

        return val;
}

static inline u64 read_pmceid1(void)
{
        u64 val = read_sysreg(PMCEID1);

        if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
                val |= (u64)read_sysreg(PMCEID3) << 32;

        return val;
}
#endif
@@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
ID_AA64DFR0_EL1_PMUVer_SHIFT);
}
static inline void write_pmcr(u32 val)
static inline void write_pmcr(u64 val)
{
write_sysreg(val, pmcr_el0);
}
static inline u32 read_pmcr(void)
static inline u64 read_pmcr(void)
{
return read_sysreg(pmcr_el0);
}
@@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
return read_sysreg(pmccntr_el0);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, pmxevcntr_el0);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(pmxevcntr_el0);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, pmxevtyper_el0);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, pmcntenset_el0);
@@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
write_sysreg(val, pmintenclr_el1);
}
static inline void write_pmccfiltr(u32 val)
static inline void write_pmccfiltr(u64 val)
{
write_sysreg(val, pmccfiltr_el0);
}
@@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
write_sysreg(val, pmuserenr_el0);
}
static inline u32 read_pmceid0(void)
static inline u64 read_pmceid0(void)
{
return read_sysreg(pmceid0_el0);
}
static inline u32 read_pmceid1(void)
static inline u64 read_pmceid1(void)
{
return read_sysreg(pmceid1_el0);
}
@@ -54,7 +54,6 @@
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
@@ -999,6 +999,37 @@ static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool enable_pseudo_nmi;

static int __init early_enable_pseudo_nmi(char *p)
{
        return kstrtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);

static __init void detect_system_supports_pseudo_nmi(void)
{
        struct device_node *np;

        if (!enable_pseudo_nmi)
                return;

        /*
         * Detect broken MediaTek firmware that doesn't properly save and
         * restore GIC priorities.
         */
        np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
        if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
                pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
                enable_pseudo_nmi = false;
        }
        of_node_put(np);
}
#else /* CONFIG_ARM64_PSEUDO_NMI */
static inline void detect_system_supports_pseudo_nmi(void) { }
#endif
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
/* Before we start using the tables, make sure it is sorted */
@@ -1057,6 +1088,13 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
         */
        init_cpucap_indirect_list();

        /*
         * Detect broken pseudo-NMI. Must be called _before_ the call to
         * setup_boot_cpu_capabilities() since it interacts with
         * can_use_gic_priorities().
         */
        detect_system_supports_pseudo_nmi();

        /*
         * Detect and enable early CPU capabilities based on the boot CPU,
         * after we have initialised the CPU feature infrastructure.
@@ -2085,14 +2123,6 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
#endif /* CONFIG_ARM64_E0PD */
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool enable_pseudo_nmi;
static int __init early_enable_pseudo_nmi(char *p)
{
return kstrtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
int scope)
{
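For reference, the early_param() relocated above keeps the existing opt-in switch unchanged, so pseudo-NMIs are still requested explicitly on the kernel command line:

        irqchip.gicv3_pseudo_nmi=1

detect_system_supports_pseudo_nmi() then only clears that request again when the "mediatek,broken-save-restore-fw" property is present on the GICv3 node.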
@@ -965,10 +965,7 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
{
DECLARE_STATIC_KEY_FALSE(supports_pseudo_nmis);
if (!system_uses_irq_prio_masking() ||
!static_branch_likely(&supports_pseudo_nmis))
if (!system_uses_irq_prio_masking())
return false;
switch (ipi) {
@@ -39,8 +39,7 @@
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
@@ -106,7 +105,7 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
* - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
* interrupt.
*/
DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);
@@ -1779,15 +1778,6 @@ static bool gic_enable_quirk_msm8996(void *data)
return true;
}
static bool gic_enable_quirk_mtk_gicr(void *data)
{
struct gic_chip_data *d = data;
d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
return true;
}
static bool gic_enable_quirk_cavium_38539(void *data)
{
struct gic_chip_data *d = data;
@@ -1888,11 +1878,6 @@ static const struct gic_quirk gic_quirks[] = {
.compatible = "asr,asr8601-gic-v3",
.init = gic_enable_quirk_asr8601,
},
{
.desc = "GICv3: Mediatek Chromebook GICR save problem",
.property = "mediatek,broken-save-restore-fw",
.init = gic_enable_quirk_mtk_gicr,
},
{
.desc = "GICv3: HIP06 erratum 161010803",
.iidr = 0x0204043b,
@@ -1959,11 +1944,6 @@ static void gic_enable_nmi_support(void)
if (!gic_prio_masking_enabled())
return;
if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
pr_warn("Skipping NMI enable due to firmware issues\n");
return;
}
rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
sizeof(*rdist_nmi_refs), GFP_KERNEL);
if (!rdist_nmi_refs)
@@ -676,6 +676,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
        cspmu = to_arm_cspmu(event->pmu);

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * Following other "uncore" PMUs, we do not support sampling mode or
         * attach to a task (per-process mode).
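The check added above follows the standard perf convention for system ("uncore") PMUs: an event whose attr.type does not match the PMU's dynamically assigned type is rejected with -ENOENT, so the perf core can offer it to the PMU it was actually meant for instead of failing outright. A hedged sketch of that shape follows; the function name is illustrative and the sampling check is assumed from the comment above, not copied from the driver.

#include <linux/perf_event.h>

/* Illustrative only; not the arm_cspmu driver's actual event_init. */
static int example_uncore_pmu_event_init(struct perf_event *event)
{
        /* Not ours: let the core try the PMU this event was meant for. */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* Uncore PMUs count system-wide and cannot sample per task. */
        if (is_sampling_event(event))
                return -EOPNOTSUPP;

        return 0;
}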
@@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
static inline u32 armv8pmu_pmcr_read(void)
static inline u64 armv8pmu_pmcr_read(void)
{
return read_pmcr();
}
static inline void armv8pmu_pmcr_write(u32 val)
static inline void armv8pmu_pmcr_write(u64 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
@@ -957,7 +957,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
u32 pmcr;
u64 pmcr;
/* The counter and interrupt enable registers are unknown at reset. */
armv8pmu_disable_counter(U32_MAX);