Commit 9343c790 authored by James Clark, committed by Will Deacon

arm: perf: Remove inlines from arm_pmuv3.c

These functions are all static and live in a single compilation unit, so the
inline keyword has no effect on the binary. The exception is when FTRACE is
enabled: three functions that were already not being inlined now get the
ftrace nops added, which allows them to be traced.
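
For background on why the keyword interacts with FTRACE at all: the kernel
redefines inline so that it also carries the notrace attribute. A sketch of
the relevant definition, roughly as it appears in
include/linux/compiler_types.h (the exact attribute list varies by compiler
and kernel version):

/*
 * In the kernel, "inline" is not the bare C keyword: among other
 * attributes it implies notrace, so an inline-marked function gets no
 * ftrace patch site (the nops at function entry) even when the
 * compiler decides not to inline it.
 */
#define inline inline __gnu_inline __inline_maybe_unused notrace

Dropping the keyword therefore also drops notrace, which is why functions
that were never being inlined anyway gain ftrace patch sites.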
Signed-off-by: James Clark <james.clark@arm.com>
Link: https://lore.kernel.org/r/20231211161331.1277825-2-james.clark@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 5ca8ab55
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -304,12 +304,12 @@ PMU_FORMAT_ATTR(rdpmc, "config1:1");
 static int sysctl_perf_user_access __read_mostly;
 
-static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+static bool armv8pmu_event_is_64bit(struct perf_event *event)
 {
 	return event->attr.config1 & 0x1;
 }
 
-static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
+static bool armv8pmu_event_want_user_access(struct perf_event *event)
 {
 	return event->attr.config1 & 0x2;
 }
 
@@ -401,7 +401,7 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
 	return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
 }
 
-static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
+static bool armv8pmu_event_has_user_read(struct perf_event *event)
 {
 	return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
 }
@@ -411,7 +411,7 @@ static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
  * except when we have allocated the 64bit cycle counter (for CPU
  * cycles event) or when user space counter access is enabled.
  */
-static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+static bool armv8pmu_event_is_chained(struct perf_event *event)
 {
 	int idx = event->hw.idx;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
@@ -432,36 +432,36 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 #define	ARMV8_IDX_TO_COUNTER(x)	\
 	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
 
-static inline u64 armv8pmu_pmcr_read(void)
+static u64 armv8pmu_pmcr_read(void)
 {
 	return read_pmcr();
 }
 
-static inline void armv8pmu_pmcr_write(u64 val)
+static void armv8pmu_pmcr_write(u64 val)
 {
 	val &= ARMV8_PMU_PMCR_MASK;
 	isb();
 	write_pmcr(val);
 }
 
-static inline int armv8pmu_has_overflowed(u32 pmovsr)
+static int armv8pmu_has_overflowed(u32 pmovsr)
 {
 	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
 }
 
-static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 {
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }
 
-static inline u64 armv8pmu_read_evcntr(int idx)
+static u64 armv8pmu_read_evcntr(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 
 	return read_pmevcntrn(counter);
 }
 
-static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+static u64 armv8pmu_read_hw_counter(struct perf_event *event)
 {
 	int idx = event->hw.idx;
 	u64 val = armv8pmu_read_evcntr(idx);
@@ -523,14 +523,14 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
 	return armv8pmu_unbias_long_counter(event, value);
 }
 
-static inline void armv8pmu_write_evcntr(int idx, u64 value)
+static void armv8pmu_write_evcntr(int idx, u64 value)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 
 	write_pmevcntrn(counter, value);
 }
 
-static inline void armv8pmu_write_hw_counter(struct perf_event *event,
-					     u64 value)
+static void armv8pmu_write_hw_counter(struct perf_event *event,
+				      u64 value)
 {
 	int idx = event->hw.idx;
@@ -556,7 +556,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
 	armv8pmu_write_hw_counter(event, value);
 }
 
-static inline void armv8pmu_write_evtype(int idx, u32 val)
+static void armv8pmu_write_evtype(int idx, u32 val)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 
@@ -564,7 +564,7 @@ static inline void armv8pmu_write_evtype(int idx, u32 val)
 	write_pmevtypern(counter, val);
 }
 
-static inline void armv8pmu_write_event_type(struct perf_event *event)
+static void armv8pmu_write_event_type(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
@@ -598,7 +598,7 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
 	return mask;
 }
 
-static inline void armv8pmu_enable_counter(u32 mask)
+static void armv8pmu_enable_counter(u32 mask)
 {
 	/*
 	 * Make sure event configuration register writes are visible before we
@@ -608,7 +608,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
 	write_pmcntenset(mask);
 }
 
-static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+static void armv8pmu_enable_event_counter(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	u32 mask = armv8pmu_event_cnten_mask(event);
@@ -620,7 +620,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 	armv8pmu_enable_counter(mask);
 }
 
-static inline void armv8pmu_disable_counter(u32 mask)
+static void armv8pmu_disable_counter(u32 mask)
 {
 	write_pmcntenclr(mask);
 	/*
@@ -630,7 +630,7 @@ static inline void armv8pmu_disable_counter(u32 mask)
 	isb();
 }
 
-static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+static void armv8pmu_disable_event_counter(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	u32 mask = armv8pmu_event_cnten_mask(event);
@@ -642,18 +642,18 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 	armv8pmu_disable_counter(mask);
 }
 
-static inline void armv8pmu_enable_intens(u32 mask)
+static void armv8pmu_enable_intens(u32 mask)
 {
 	write_pmintenset(mask);
 }
 
-static inline void armv8pmu_enable_event_irq(struct perf_event *event)
+static void armv8pmu_enable_event_irq(struct perf_event *event)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
 	armv8pmu_enable_intens(BIT(counter));
 }
 
-static inline void armv8pmu_disable_intens(u32 mask)
+static void armv8pmu_disable_intens(u32 mask)
 {
 	write_pmintenclr(mask);
 	isb();
@@ -662,13 +662,13 @@ static inline void armv8pmu_disable_intens(u32 mask)
 	isb();
 }
 
-static inline void armv8pmu_disable_event_irq(struct perf_event *event)
+static void armv8pmu_disable_event_irq(struct perf_event *event)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
 	armv8pmu_disable_intens(BIT(counter));
 }
 
-static inline u32 armv8pmu_getreset_flags(void)
+static u32 armv8pmu_getreset_flags(void)
 {
 	u32 value;
 
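
With FTRACE enabled, the effect of the patch can be checked from tracefs. A
hypothetical smoke test, assuming tracefs is mounted at /sys/kernel/tracing
and CONFIG_FUNCTION_TRACER is set:

# Helpers that previously lacked a patch site should now be listed.
grep armv8pmu /sys/kernel/tracing/available_filter_functions

# Trace the PMU helpers while a perf counter is programmed.
echo 'armv8pmu_*' > /sys/kernel/tracing/set_ftrace_filter
echo function > /sys/kernel/tracing/current_tracer
perf stat -e cycles true
head -20 /sys/kernel/tracing/trace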