Commit 7dfc8db1 authored by Suzuki K Poulose, committed by Will Deacon

arm_pmu: Tidy up clear_event_idx call backs

The armpmu uses the get_event_idx callback to allocate an event
counter for a given event; the backend marks the selected counter
as "used". However, when the event is deleted, the arm_pmu core
clears the "used" bit itself and only then invokes the
clear_event_idx callback, which splits the job between the core
code and the backend. To keep things tidy, mandate the
implementation of clear_event_idx() and add it to the existing
backends. This will be useful for adding chained event support,
where we leave the event idx maintenance to the backend.
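
For the backends converted here, the mandated callback is simply the
inverse of the used_mask bookkeeping done by get_event_idx(); a minimal
sketch of the shape each backend now provides (foopmu_clear_event_idx is
a placeholder name, the real implementations are in the hunks below):

	static void foopmu_clear_event_idx(struct pmu_hw_events *cpuc,
					   struct perf_event *event)
	{
		/* Release the counter that get_event_idx() marked as used */
		clear_bit(event->hw.idx, cpuc->used_mask);
	}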

Also, when an event is removed from the PMU, reset hw.idx to
indicate that no counter is allocated for this event, to help the
backends do better checks. This will also be used for the chained
counter support.
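
With the callback made mandatory, the core no longer touches used_mask
on removal; a sketch of how armpmu_del() reads after this change (the
local variable set-up is assumed from the surrounding arm_pmu code and
is not part of this hunk's context):

	static void
	armpmu_del(struct perf_event *event, int flags)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
		struct hw_perf_event *hwc = &event->hw;
		int idx = hwc->idx;

		armpmu_stop(event, PERF_EF_UPDATE);
		hw_events->events[idx] = NULL;
		/* The backend owns the counter bookkeeping now */
		armpmu->clear_event_idx(hw_events, event);
		perf_event_update_userpage(event);
		/* Clear the allocated counter */
		hwc->idx = -1;
	}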

Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent e2da97d3
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				     struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void armv6pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
@@ -491,6 +497,7 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = armv6pmu_read_counter;
 	cpu_pmu->write_counter = armv6pmu_write_counter;
 	cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
 	cpu_pmu->start = armv6pmu_start;
 	cpu_pmu->stop = armv6pmu_stop;
 	cpu_pmu->map_event = armv6_map_event;
@@ -541,6 +548,7 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = armv6pmu_read_counter;
 	cpu_pmu->write_counter = armv6pmu_write_counter;
 	cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
 	cpu_pmu->start = armv6pmu_start;
 	cpu_pmu->stop = armv6pmu_stop;
 	cpu_pmu->map_event = armv6mpcore_map_event;
...
@@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	return -EAGAIN;
 }
 
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				     struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 /*
  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
  */
@@ -1167,6 +1173,7 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = armv7pmu_read_counter;
 	cpu_pmu->write_counter = armv7pmu_write_counter;
 	cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
 	cpu_pmu->start = armv7pmu_start;
 	cpu_pmu->stop = armv7pmu_stop;
 	cpu_pmu->reset = armv7pmu_reset;
@@ -1637,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 	bool venum_event = EVENT_VENUM(hwc->config_base);
 	bool krait_event = EVENT_CPU(hwc->config_base);
 
+	armv7pmu_clear_event_idx(cpuc, event);
 	if (venum_event || krait_event) {
 		bit = krait_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
@@ -1966,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 	bool venum_event = EVENT_VENUM(hwc->config_base);
 	bool scorpion_event = EVENT_CPU(hwc->config_base);
 
+	armv7pmu_clear_event_idx(cpuc, event);
 	if (venum_event || scorpion_event) {
 		bit = scorpion_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
...
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
@@ -370,6 +376,7 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = xscale1pmu_read_counter;
 	cpu_pmu->write_counter = xscale1pmu_write_counter;
 	cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
 	cpu_pmu->start = xscale1pmu_start;
 	cpu_pmu->stop = xscale1pmu_stop;
 	cpu_pmu->map_event = xscale_map_event;
@@ -738,6 +745,7 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = xscale2pmu_read_counter;
 	cpu_pmu->write_counter = xscale2pmu_write_counter;
 	cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
 	cpu_pmu->start = xscale2pmu_start;
 	cpu_pmu->stop = xscale2pmu_stop;
 	cpu_pmu->map_event = xscale_map_event;
...
@@ -778,6 +778,12 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	return -EAGAIN;
 }
 
+static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				     struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 /*
  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
  */
@@ -956,6 +962,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = armv8pmu_read_counter,
 	cpu_pmu->write_counter = armv8pmu_write_counter,
 	cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
+	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
 	cpu_pmu->start = armv8pmu_start,
 	cpu_pmu->stop = armv8pmu_stop,
 	cpu_pmu->reset = armv8pmu_reset,
...
@@ -238,11 +238,10 @@ armpmu_del(struct perf_event *event, int flags)
 
 	armpmu_stop(event, PERF_EF_UPDATE);
 	hw_events->events[idx] = NULL;
-	clear_bit(idx, hw_events->used_mask);
-	if (armpmu->clear_event_idx)
-		armpmu->clear_event_idx(hw_events, event);
-
+	armpmu->clear_event_idx(hw_events, event);
 	perf_event_update_userpage(event);
+	/* Clear the allocated counter */
+	hwc->idx = -1;
 }
 
 static int
...