Commit 3d9d7405 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "I'd been collecting these whilst we debugged a CPU hotplug failure,
  but we ended up diagnosing that one to tglx, who has taken a fix via
  the -tip tree separately.

  We're seeing some NFS issues that we haven't gotten to the bottom of
  yet, and we've uncovered some issues with our backtracing too so there
  might be another fixes pull before we're done.

  Summary:

   - Ensure we have a guard page after the kernel image in vmalloc

   - Fix incorrect prefetch stride in copy_page

   - Ensure irqs are disabled in die()

   - Fix for event group validation in QCOM L2 PMU driver

   - Fix requesting of PMU IRQs on AMD Seattle

   - Minor cleanups and fixes"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mmu: Place guard page after mapping of kernel image
  drivers/perf: arm_pmu: Request PMU SPIs with IRQF_PER_CPU
  arm64: sysreg: Fix unprotected macro argument in write_sysreg
  perf: qcom_l2: fix column exclusion check
  arm64/lib: copy_page: use consistent prefetch stride
  arm64/numa: Drop duplicate message
  perf: Convert to using %pOF instead of full_name
  arm64: Convert to using %pOF instead of full_name
  arm64: traps: disable irq in die()
  arm64: atomics: Remove '&' from '+&' asm constraint in lse atomics
  arm64: uaccess: Remove redundant __force from addr cast in __range_ok
parents 080012ba 92bbd16e
@@ -133,6 +133,7 @@ static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
 static struct arm_pmu_platdata db8500_pmu_platdata = {
 	.handle_irq = db8500_pmu_handler,
+	.irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
 };
 
 static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
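Context for this hunk: on DB8500 the PMU interrupt is not a true per-CPU line (it is relayed between cores by db8500_pmu_handler above), so the board must opt out of the IRQF_PERCPU default that the PMU core gains later in this diff. A sketch of how the override reaches the driver, assuming the usual auxdata wiring in the lookup table that begins just below (the compatible string here is illustrative, not quoted from the file):

	/* Attach the platdata to the PMU platform device so that
	 * armpmu_request_irq() picks up .irq_flags instead of its new
	 * IRQF_PERCPU default (see the drivers/perf hunk further down).
	 */
	OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),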
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	"	sub	x30, x30, %[ret]\n"
 	"	cbnz	x30, 1b\n"
 	"2:")
-	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+	: [ret] "+r" (x0), [v] "+Q" (v->counter)
 	:
 	: __LL_SC_CLOBBERS, "cc", "memory");
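For readers unfamiliar with the constraint syntax, a minimal sketch (hypothetical helper, not kernel code): '&' marks an earlyclobber output, one written before the inputs are consumed, which is meaningless for a '+' read-write operand because its input and output are by definition the same register.

	/* The "+r" tie below already forces input and output into one
	 * register; "+&r" cannot change the allocation and only
	 * over-constrains the compiler. AArch64-only example.
	 */
	static inline long add_one(long v)
	{
		asm("add %x0, %x0, #1"	/* %0 is both read and written */
		    : "+r" (v));
		return v;
	}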
@@ -492,7 +492,7 @@ asm(
  * the "%x0" template means XZR.
  */
 #define write_sysreg(v, r) do {					\
-	u64 __val = (u64)v;					\
+	u64 __val = (u64)(v);					\
 	asm volatile("msr " __stringify(r) ", %x0"		\
 		     : : "rZ" (__val));				\
 } while (0)

@@ -508,7 +508,7 @@ asm(
 })
 
 #define write_sysreg_s(v, r) do {					\
-	u64 __val = (u64)v;						\
+	u64 __val = (u64)(v);						\
 	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
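Why the extra parentheses matter: a cast binds more tightly than binary operators, so an unparenthesized macro argument is misgrouped, with the cast applied to the first operand rather than to the whole expression. A reduced example (macro and function names hypothetical):

	#include <stdint.h>
	typedef uint64_t u64;

	#define CAST_BAD(v)	((u64)v)
	#define CAST_GOOD(v)	((u64)(v))

	u64 demo(int v)
	{
		u64 a = CAST_BAD(v >> 4);	/* ((u64)v >> 4): widen first, logical shift */
		u64 b = CAST_GOOD(v >> 4);	/* ((u64)(v >> 4)): arithmetic shift, then widen */
		return a - b;			/* nonzero whenever v is negative */
	}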
@@ -69,7 +69,7 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define __range_ok(addr, size)						\
 ({									\
-	unsigned long __addr = (unsigned long __force)(addr);		\
+	unsigned long __addr = (unsigned long)(addr);			\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
 	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
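Why __force was redundant here: sparse's address-space checking applies to pointer-to-pointer conversions; casting a __user pointer to a plain integer never drops an address space, so no override is needed. A reduced sketch using the kernel's own annotations:

	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force	__attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	unsigned long to_val(void __user *uptr)
	{
		return (unsigned long)uptr;	/* integer cast: no warning, no __force */
	}

	void *to_kptr(void __user *uptr)
	{
		return (void __force *)uptr;	/* pointer cast: __force required */
	}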
@@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu)
 			 * Don't warn spuriously.
 			 */
 			if (cpu != 0)
-				pr_err("%s: missing enable-method property\n",
-					dn->full_name);
+				pr_err("%pOF: missing enable-method property\n",
+					dn);
 		}
 	} else {
 		enable_method = acpi_get_enable_method(cpu);
@@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
 	 */
 	cell = of_get_property(dn, "reg", NULL);
 	if (!cell) {
-		pr_err("%s: missing reg property\n", dn->full_name);
+		pr_err("%pOF: missing reg property\n", dn);
 		return INVALID_HWID;
 	}

@@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
 	 * Non affinity bits must be set to 0 in the DT
 	 */
 	if (hwid & ~MPIDR_HWID_BITMASK) {
-		pr_err("%s: invalid reg property\n", dn->full_name);
+		pr_err("%pOF: invalid reg property\n", dn);
 		return INVALID_HWID;
 	}
 	return hwid;

@@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void)
 			goto next;
 
 		if (is_mpidr_duplicate(cpu_count, hwid)) {
-			pr_err("%s: duplicate cpu reg properties in the DT\n",
-				dn->full_name);
+			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+				dn);
 			goto next;
 		}

@@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void)
 		 */
 		if (hwid == cpu_logical_map(0)) {
 			if (bootcpu_valid) {
-				pr_err("%s: duplicate boot cpu reg property in DT\n",
-					dn->full_name);
+				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+					dn);
 				goto next;
 			}
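The %pOF conversions in this and the following hunks all follow one pattern, worth spelling out once: %pOF is the printk format extension that prints a struct device_node's full path, removing the need to dereference ->full_name (this series prepares for the device tree core no longer storing the full path string per node). A minimal usage sketch, with an illustrative node path:

	struct device_node *dn = of_find_node_by_path("/cpus/cpu@0");

	if (dn) {
		pr_err("%pOF: missing reg property\n", dn);	/* prints "/cpus/cpu@0: ..." */
		of_node_put(dn);
	}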
@@ -45,7 +45,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 		}
 	}
 
-	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+	pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
 
 	of_node_put(cpu_node);
 	return -1;

@@ -71,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
 				cpu_topology[cpu].core_id = core_id;
 				cpu_topology[cpu].thread_id = i;
 			} else {
-				pr_err("%s: Can't get CPU for thread\n",
-				       t->full_name);
+				pr_err("%pOF: Can't get CPU for thread\n",
+				       t);
 				of_node_put(t);
 				return -EINVAL;
 			}

@@ -84,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id,
 	cpu = get_cpu_for_node(core);
 	if (cpu >= 0) {
 		if (!leaf) {
-			pr_err("%s: Core has both threads and CPU\n",
-			       core->full_name);
+			pr_err("%pOF: Core has both threads and CPU\n",
+			       core);
 			return -EINVAL;
 		}
 
 		cpu_topology[cpu].cluster_id = cluster_id;
 		cpu_topology[cpu].core_id = core_id;
 	} else if (leaf) {
-		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+		pr_err("%pOF: Can't get CPU for leaf core\n", core);
 		return -EINVAL;
 	}

@@ -137,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
 			has_cores = true;
 
 			if (depth == 0) {
-				pr_err("%s: cpu-map children should be clusters\n",
-				       c->full_name);
+				pr_err("%pOF: cpu-map children should be clusters\n",
+				       c);
 				of_node_put(c);
 				return -EINVAL;
 			}

@@ -146,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
 			if (leaf) {
 				ret = parse_core(c, cluster_id, core_id++);
 			} else {
-				pr_err("%s: Non-leaf cluster with core %s\n",
-				       cluster->full_name, name);
+				pr_err("%pOF: Non-leaf cluster with core %s\n",
+				       cluster, name);
 				ret = -EINVAL;
 			}

@@ -159,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
 	} while (c);
 
 	if (leaf && !has_cores)
-		pr_warn("%s: empty cluster\n", cluster->full_name);
+		pr_warn("%pOF: empty cluster\n", cluster);
 
 	if (leaf)
 		cluster_id++;
@@ -274,10 +274,12 @@ static DEFINE_RAW_SPINLOCK(die_lock);
 void die(const char *str, struct pt_regs *regs, int err)
 {
 	int ret;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&die_lock, flags);
 
 	oops_enter();
 
-	raw_spin_lock_irq(&die_lock);
 	console_verbose();
 	bust_spinlocks(1);
 	ret = __die(str, err, regs);

@@ -287,13 +289,15 @@ void die(const char *str, struct pt_regs *regs, int err)
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
-	raw_spin_unlock_irq(&die_lock);
 	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
+
+	raw_spin_unlock_irqrestore(&die_lock, flags);
+
 	if (ret != NOTIFY_STOP)
 		do_exit(SIGSEGV);
 }
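The pattern being adopted, as a sketch rather than the kernel source: die() can be reached from interrupt context, so the lock must be taken with local interrupts disabled; otherwise an exception taken while die_lock is held can re-enter die() on the same CPU and deadlock. The _irqsave variant also preserves the previous interrupt state across the non-panicking exit paths.

	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);	/* irqs off, prior state saved */
	/* ... oops reporting: must not be re-entered on this CPU ... */
	raw_spin_unlock_irqrestore(&die_lock, flags);	/* prior irq state restored */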
@@ -30,9 +30,10 @@
  */
 ENTRY(copy_page)
 alternative_if ARM64_HAS_NO_HW_PREFETCH
-	# Prefetch two cache lines ahead.
-	prfm	pldl1strm, [x1, #128]
-	prfm	pldl1strm, [x1, #256]
+	// Prefetch three cache lines ahead.
+	prfm	pldl1strm, [x1, #128]
+	prfm	pldl1strm, [x1, #256]
+	prfm	pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
 	ldp	x2, x3, [x1]

@@ -50,7 +51,7 @@ alternative_else_nop_endif
 	subs	x18, x18, #128
 
 alternative_if ARM64_HAS_NO_HW_PREFETCH
 	prfm	pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
 	stnp	x2, x3, [x0]
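A note on the stride, my reading of the surrounding code rather than text from the commit: the preamble runs while the first 128-byte block at [x1] is being loaded, so prefetching at #128, #256 and #384 pulls in the next three blocks. Inside the copy loop, x1 has already been advanced to the block being loaded by the current iteration, so the loop's [x1, #384] prefetch (shown above as unchanged context) was already three blocks ahead; the patch brings the preamble up to the same three-line stride rather than touching the loop.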
@@ -496,7 +496,7 @@ void mark_rodata_ro(void)
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 				      pgprot_t prot, struct vm_struct *vma,
-				      int flags)
+				      int flags, unsigned long vm_flags)
 {
 	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;

@@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
 			     early_pgtable_alloc, flags);
 
+	if (!(vm_flags & VM_NO_GUARD))
+		size += PAGE_SIZE;
+
 	vma->addr	= va_start;
 	vma->phys_addr	= pa_start;
 	vma->size	= size;
-	vma->flags	= VM_MAP;
+	vma->flags	= VM_MAP | vm_flags;
 	vma->caller	= __builtin_return_address(0);
 
 	vm_area_add_early(vma);

@@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
 	 * Only rodata will be remapped with different permissions later on,
 	 * all other segments are allowed to use contiguous mappings.
 	 */
-	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+			   VM_NO_GUARD);
 	map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
-			   &vmlinux_rodata, NO_CONT_MAPPINGS);
+			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
 	map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
-			   &vmlinux_inittext, 0);
+			   &vmlinux_inittext, 0, VM_NO_GUARD);
 	map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
-			   &vmlinux_initdata, 0);
+			   &vmlinux_initdata, 0, VM_NO_GUARD);
-	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
+	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
 
 	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
 		/*
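Why only the last segment keeps its guard, my reading of the hunk rather than commit text: the five image segments are mapped back-to-back, so a guard page between them would change the layout; everything up to vmlinux_initdata therefore passes VM_NO_GUARD. vmlinux_data passes 0 instead, so its vm_struct is registered one page larger than the mapping, and the early vmap bookkeeping keeps the page after the kernel image out of reach of later vmalloc allocations, restoring the guard page the summary bullet above refers to.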
@@ -208,8 +208,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
 	}
 
 	node_set(nid, numa_nodes_parsed);
-	pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n",
-		start, (end - 1), nid);
 
 	return ret;
 }

@@ -223,10 +221,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 	void *nd;
 	int tnid;
 
-	if (start_pfn < end_pfn)
-		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
-			start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
-	else
+	if (start_pfn >= end_pfn)
 		pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
 
 	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 		if (irq != other_irq) {
 			pr_warn("mismatched PPIs detected.\n");
 			err = -EINVAL;
+			goto err_out;
 		}
 	} else {
-		err = request_irq(irq, handler,
-				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+		unsigned long irq_flags;
+
+		err = irq_force_affinity(irq, cpumask_of(cpu));
+
+		if (err && num_possible_cpus() > 1) {
+			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				irq, cpu);
+			goto err_out;
+		}
+
+		if (platdata && platdata->irq_flags) {
+			irq_flags = platdata->irq_flags;
+		} else {
+			irq_flags = IRQF_PERCPU |
+				    IRQF_NOBALANCING |
+				    IRQF_NO_THREAD;
+		}
+
+		err = request_irq(irq, handler, irq_flags, "arm-pmu",
 				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 	}
 
-	if (err) {
-		pr_err("unable to request IRQ%d for ARM PMU counters\n",
-		       irq);
-		return err;
-	}
+	if (err)
+		goto err_out;
 
 	cpumask_set_cpu(cpu, &armpmu->active_irqs);
 	return 0;
+
+err_out:
+	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
+	return err;
 }
 
 int armpmu_request_irqs(struct arm_pmu *armpmu)

@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 		return 0;
 	}
-
-	if (irq_force_affinity(irq, cpumask_of(cpu)) &&
-	    num_possible_cpus() > 1) {
-		pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			irq, cpu);
-	}
 	}
 
 	return 0;
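Two behavioural details in this hunk, as I read the change: the CPU affinity is now forced before request_irq(), since an interrupt requested with IRQF_PERCPU rejects affinity changes afterwards, which is also why the equivalent irq_force_affinity() call disappears from arm_perf_starting_cpu() above; and the failure paths are funnelled through err_out, so the mismatched-PPI case now produces the same "unable to request IRQ" diagnostic as a failed request_irq().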
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 	}
 
 	if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
-		pr_warn("no interrupt-affinity property for %s, guessing.\n",
-			of_node_full_name(pdev->dev.of_node));
+		pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
+			pdev->dev.of_node);
 	}
 
 	/*

@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 	}
 
 	if (ret) {
-		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+		pr_info("%pOF: failed to probe PMU!\n", node);
 		goto out_free;
 	}

@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 out_free_irqs:
 	armpmu_free_irqs(pmu);
 out_free:
-	pr_info("%s: failed to register PMU devices!\n",
-		of_node_full_name(node));
+	pr_info("%pOF: failed to register PMU devices!\n", node);
 	armpmu_free(pmu);
 	return ret;
 }
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	}
 
 	if ((event != event->group_leader) &&
+	    !is_software_event(event->group_leader) &&
 	    (L2_EVT_GROUP(event->group_leader->attr.config) ==
 	     L2_EVT_GROUP(event->attr.config))) {
 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,

@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
 			    group_entry) {
 		if ((sibling != event) &&
+		    !is_software_event(sibling) &&
 		    (L2_EVT_GROUP(sibling->attr.config) ==
 		     L2_EVT_GROUP(event->attr.config))) {
 			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
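A sketch of the rule the two hunks implement (simplified, not the driver source): only hardware events occupy an L2 PMU column, so a software event in the group must never trigger the column-exclusion check. Without the is_software_event() tests, grouping an L2 event under, say, a context-switch software leader was rejected spuriously because the software event's attr.config was fed to L2_EVT_GROUP() as if it were a column code.

	static bool l2_events_conflict(struct perf_event *a, struct perf_event *b)
	{
		return a != b &&
		       !is_software_event(a) &&	/* software events own no column */
		       L2_EVT_GROUP(a->attr.config) == L2_EVT_GROUP(b->attr.config);
	}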
@@ -24,10 +24,14 @@
  * interrupt and passed the address of the low level handler,
  * and can be used to implement any platform specific handling
  * before or after calling it.
+ *
+ * @irq_flags: if non-zero, these flags will be passed to request_irq
+ *             when requesting interrupts for this PMU device.
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
+	unsigned long irq_flags;
 };
 
 #ifdef CONFIG_ARM_PMU
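Usage sketch (hypothetical board code, mirroring the DB8500 hunk at the top of this diff): a platform whose PMU interrupt is not a genuine per-CPU line supplies its own flags, typically leaving IRQF_PERCPU out so the line can be relayed between cores:

	static irqreturn_t my_board_pmu_handler(int irq, void *dev,
						irq_handler_t pmu_handler);

	static struct arm_pmu_platdata my_board_pmu_platdata = {
		.handle_irq	= my_board_pmu_handler,	/* optional relay/mux hook */
		.irq_flags	= IRQF_NOBALANCING | IRQF_NO_THREAD,
	};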