Commit e66d001c authored by Greg Kroah-Hartman

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Will writes:
  "More arm64 fixes

   - Reject CHAIN PMU events when they are not part of a 64-bit counter

   - Fix WARN_ON_ONCE() that triggers for reserved regions that don't
     correspond to mapped memory"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: perf: Reject stand-alone CHAIN events for PMUv3
  arm64: Fix /proc/iomem for reserved but not memory regions
parents bab5c80b ca2b4972
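
For context on the first fix: PMUv3 counters are 32 bits wide, and the driver emulates a 64-bit counter by pairing two adjacent counters, programming the odd one with the architectural CHAIN event (0x1E) so that it counts overflows of the even one. That pairing is set up by the kernel itself when userspace requests a 64-bit counter; a CHAIN event opened on its own makes no sense. Below is a minimal sketch of a legitimate chained-counter request, assuming the driver's "long" format bit (config1:0) and raw event 0x11 (CPU_CYCLES) — illustration only, not part of this diff:

/*
 * Hedged sketch: requesting a 64-bit (chained) counter on arm64 PMUv3.
 * Assumes the driver's "long" format attribute (config1 bit 0) and raw
 * event 0x11 (CPU_CYCLES); for illustration only.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x11;	/* ARMV8_PMUV3_PERFCTR_CPU_CYCLES */
	attr.config1 = 0x1;	/* "long": ask the driver to chain two counters */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}

A stand-alone open of raw event 0x1e, by contrast, is exactly what the new filter_match hook below screens out.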
arch/arm64/kernel/perf_event.c
@@ -966,6 +966,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 	return 0;
 }
 
+static int armv8pmu_filter_match(struct perf_event *event)
+{
+	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
+	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
 static void armv8pmu_reset(void *info)
 {
 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1114,6 +1120,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->stop = armv8pmu_stop,
 	cpu_pmu->reset = armv8pmu_reset,
 	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
+	cpu_pmu->filter_match = armv8pmu_filter_match;
 
 	return 0;
 }
...
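
The filter itself is just a mask-and-compare on the event number. A standalone sketch of that logic, assuming the driver's 0xffff event-number mask and CHAIN's architectural event number 0x1E; filter_match() here is a hypothetical stand-in for armv8pmu_filter_match():

#include <stdio.h>

#define ARMV8_PMU_EVTYPE_EVENT    0xffff /* event number mask */
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E   /* odd counter counts even counter's overflow */

/* Mirrors armv8pmu_filter_match(): non-zero means "allow this event". */
static int filter_match(unsigned long config_base)
{
	unsigned long evtype = config_base & ARMV8_PMU_EVTYPE_EVENT;

	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}

int main(void)
{
	printf("event 0x11 (cycles): %s\n", filter_match(0x11) ? "allowed" : "filtered");
	printf("event 0x1e (chain):  %s\n", filter_match(0x1e) ? "allowed" : "filtered");
	return 0;
}

A zero return makes perf core treat the event as unschedulable on this PMU, so a stand-alone CHAIN event simply never counts.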
arch/arm64/kernel/setup.c
@@ -64,6 +64,9 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
 
+static int num_standard_resources;
+static struct resource *standard_resources;
+
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
 {
 	struct memblock_region *region;
 	struct resource *res;
+	unsigned long i = 0;
 
 	kernel_code.start = __pa_symbol(_text);
 	kernel_code.end = __pa_symbol(__init_begin - 1);
 	kernel_data.start = __pa_symbol(_sdata);
 	kernel_data.end = __pa_symbol(_end - 1);
 
+	num_standard_resources = memblock.memory.cnt;
+	standard_resources = alloc_bootmem_low(num_standard_resources *
+					       sizeof(*standard_resources));
+
 	for_each_memblock(memory, region) {
-		res = alloc_bootmem_low(sizeof(*res));
+		res = &standard_resources[i++];
 		if (memblock_is_nomap(region)) {
 			res->name = "reserved";
 			res->flags = IORESOURCE_MEM;
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
 
 static int __init reserve_memblock_reserved_regions(void)
 {
-	phys_addr_t start, end, roundup_end = 0;
-	struct resource *mem, *res;
-	u64 i;
-
-	for_each_reserved_mem_region(i, &start, &end) {
-		if (end <= roundup_end)
-			continue; /* done already */
-
-		start = __pfn_to_phys(PFN_DOWN(start));
-		end = __pfn_to_phys(PFN_UP(end)) - 1;
-		roundup_end = end;
-
-		res = kzalloc(sizeof(*res), GFP_ATOMIC);
-		if (WARN_ON(!res))
-			return -ENOMEM;
-		res->start = start;
-		res->end = end;
-		res->name = "reserved";
-		res->flags = IORESOURCE_MEM;
-
-		mem = request_resource_conflict(&iomem_resource, res);
-		/*
-		 * We expected memblock_reserve() regions to conflict with
-		 * memory created by request_standard_resources().
-		 */
-		if (WARN_ON_ONCE(!mem))
-			continue;
-		kfree(res);
-
-		reserve_region_with_split(mem, start, end, "reserved");
+	u64 i, j;
+
+	for (i = 0; i < num_standard_resources; ++i) {
+		struct resource *mem = &standard_resources[i];
+		phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+		if (!memblock_is_region_reserved(mem->start, mem_size))
+			continue;
+
+		for_each_reserved_mem_region(j, &r_start, &r_end) {
+			resource_size_t start, end;
+
+			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+			if (start > mem->end || end < mem->start)
+				continue;
+
+			reserve_region_with_split(mem, start, end, "reserved");
+		}
 	}
 
 	return 0;
...
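
The rewritten reserve_memblock_reserved_regions() no longer assumes that every page-rounded reserved region conflicts with a memory resource — the assumption behind the removed WARN_ON_ONCE(), which broke for regions that are reserved but not memory. Instead it walks the standard resources saved at boot and splits out only the page-rounded intersection of each reserved range. A userspace sketch of that clamping arithmetic, assuming 4 KiB pages and made-up addresses:

/*
 * Hedged sketch of the intersection step above, outside the kernel:
 * round a reserved range out to page granularity, then clamp it to a
 * resource. Assumes 4 KiB pages; addresses are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* A standard resource [mem_start, mem_end] (inclusive bounds)... */
	uint64_t mem_start = 0x80000000, mem_end = 0x8fffffff;
	/* ...and a memblock-reserved range that straddles its start. */
	uint64_t r_start = 0x7ffff800, r_end = 0x80002400;

	/* Round the reserved range out to pages, then clamp to the resource. */
	uint64_t start = max64(PFN_PHYS(PFN_DOWN(r_start)), mem_start);
	uint64_t end   = min64(PFN_PHYS(PFN_UP(r_end)) - 1, mem_end);

	if (start > mem_end || end < mem_start)
		puts("no overlap: skip this resource");
	else
		printf("split out [%#llx, %#llx] as \"reserved\"\n",
		       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

A reserved range that overlaps no memory resource now simply yields no intersection and is skipped, rather than tripping a warning.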
drivers/perf/arm_pmu.c
@@ -485,7 +485,13 @@ static int armpmu_filter_match(struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	unsigned int cpu = smp_processor_id();
-	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+	int ret;
+
+	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+	if (ret && armpmu->filter_match)
+		return armpmu->filter_match(event);
+
+	return ret;
 }
 
 static ssize_t armpmu_cpumask_show(struct device *dev,
...
include/linux/perf/arm_pmu.h
@@ -99,6 +99,7 @@ struct arm_pmu {
 	void (*stop)(struct arm_pmu *);
 	void (*reset)(void *);
 	int (*map_event)(struct perf_event *event);
+	int (*filter_match)(struct perf_event *event);
 	int num_events;
 	bool secure_access; /* 32-bit ARM only */
 #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
...