Commit c91e0f5d authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf_event: x86: Clean up some of the u64/long bitmask casting

We need this to be u64 for direct assignment, but the bitmask functions
all work on unsigned long, leading to cast heaven, solve this by using a
union.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100122155535.595961269@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 81269a08
...@@ -69,10 +69,11 @@ struct debug_store { ...@@ -69,10 +69,11 @@ struct debug_store {
u64 pebs_event_reset[MAX_PEBS_EVENTS]; u64 pebs_event_reset[MAX_PEBS_EVENTS];
}; };
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
struct event_constraint { struct event_constraint {
u64 idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)]; union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64[1];
};
int code; int code;
int cmask; int cmask;
}; };
...@@ -90,13 +91,14 @@ struct cpu_hw_events { ...@@ -90,13 +91,14 @@ struct cpu_hw_events {
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
}; };
#define EVENT_CONSTRAINT(c, n, m) { \ #define EVENT_CONSTRAINT(c, n, m) { \
.code = (c), \ { .idxmsk64[0] = (n) }, \
.cmask = (m), \ .code = (c), \
.idxmsk[0] = (n) } .cmask = (m), \
}
#define EVENT_CONSTRAINT_END \ #define EVENT_CONSTRAINT_END \
{ .code = 0, .cmask = 0, .idxmsk[0] = 0 } EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \ #define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->cmask; (e)++) for ((e) = (c); (e)->cmask; (e)++)
...@@ -126,8 +128,11 @@ struct x86_pmu { ...@@ -126,8 +128,11 @@ struct x86_pmu {
u64 intel_ctrl; u64 intel_ctrl;
void (*enable_bts)(u64 config); void (*enable_bts)(u64 config);
void (*disable_bts)(void); void (*disable_bts)(void);
void (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk); void (*get_event_constraints)(struct cpu_hw_events *cpuc,
void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct perf_event *event,
unsigned long *idxmsk);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
const struct event_constraint *event_constraints; const struct event_constraint *event_constraints;
}; };
...@@ -2144,14 +2149,11 @@ perf_event_nmi_handler(struct notifier_block *self, ...@@ -2144,14 +2149,11 @@ perf_event_nmi_handler(struct notifier_block *self,
return NOTIFY_STOP; return NOTIFY_STOP;
} }
static struct event_constraint bts_constraint = { static struct event_constraint bts_constraint =
.code = 0, EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
.cmask = 0,
.idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
};
static int intel_special_constraints(struct perf_event *event, static int intel_special_constraints(struct perf_event *event,
u64 *idxmsk) unsigned long *idxmsk)
{ {
unsigned int hw_event; unsigned int hw_event;
...@@ -2171,14 +2173,14 @@ static int intel_special_constraints(struct perf_event *event, ...@@ -2171,14 +2173,14 @@ static int intel_special_constraints(struct perf_event *event,
static void intel_get_event_constraints(struct cpu_hw_events *cpuc, static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event, struct perf_event *event,
u64 *idxmsk) unsigned long *idxmsk)
{ {
const struct event_constraint *c; const struct event_constraint *c;
/* /*
* cleanup bitmask * cleanup bitmask
*/ */
bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX); bitmap_zero(idxmsk, X86_PMC_IDX_MAX);
if (intel_special_constraints(event, idxmsk)) if (intel_special_constraints(event, idxmsk))
return; return;
...@@ -2186,10 +2188,7 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc, ...@@ -2186,10 +2188,7 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
if (x86_pmu.event_constraints) { if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code) { if ((event->hw.config & c->cmask) == c->code) {
bitmap_copy(idxmsk, c->idxmsk, X86_PMC_IDX_MAX);
bitmap_copy((unsigned long *)idxmsk,
(unsigned long *)c->idxmsk,
X86_PMC_IDX_MAX);
return; return;
} }
} }
...@@ -2200,10 +2199,10 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc, ...@@ -2200,10 +2199,10 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
static void amd_get_event_constraints(struct cpu_hw_events *cpuc, static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event, struct perf_event *event,
u64 *idxmsk) unsigned long *idxmsk)
{ {
/* no constraints, means supports all generic counters */ /* no constraints, means supports all generic counters */
bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events); bitmap_fill(idxmsk, x86_pmu.num_events);
} }
static int x86_event_sched_in(struct perf_event *event, static int x86_event_sched_in(struct perf_event *event,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment