Commit ed094150 authored by Michael Ellerman, committed by Paul Mackerras

[PATCH] ppc64: Simplify counting of lpevents, remove lpevent_count from paca

Currently there's a per-cpu count of lpevents processed, a per-queue (i.e.
global) total count, and a count by event type.

Replace all that with a per-cpu count by event type. We only need to add it
up in the proc code.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 74889802
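
The change relies on the usual per-cpu counter idiom: each CPU bumps its own slot of a per-type array without taking any lock, and readers fold the slots together when the totals are actually needed. Below is a minimal illustrative sketch of that pattern, not part of the patch; NUM_TYPES, example_counts, count_event() and total_events() are made-up names, while DEFINE_PER_CPU, __get_cpu_var, per_cpu and for_each_online_cpu are the real per-cpu primitives the patch uses.

#include <linux/percpu.h>
#include <linux/cpumask.h>

#define NUM_TYPES 9     /* stand-in for HvLpEvent_Type_NumTypes */

/* One counter array per CPU; the increment path needs no lock. */
static DEFINE_PER_CPU(unsigned long[NUM_TYPES], example_counts);

/* Hot path (event/interrupt context): bump only the local CPU's slot. */
static void count_event(unsigned int type)
{
        if (type < NUM_TYPES)
                __get_cpu_var(example_counts)[type]++;
}

/* Slow path (e.g. a /proc read): sum the slots across online CPUs. */
static unsigned long total_events(unsigned int type)
{
        unsigned long sum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                sum += per_cpu(example_counts, cpu)[type];

        return sum;
}

The read-side sum is not taken atomically across CPUs, which is what the FIXME in the new proc_lpevents_show() alludes to; for statistics counters that is normally acceptable, since a racing increment simply shows up in the next read.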
@@ -28,7 +28,9 @@
  */
 struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
-static char *event_types[9] = {
+DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
+
+static char *event_types[HvLpEvent_Type_NumTypes] = {
         "Hypervisor\t\t",
         "Machine Facilities\t",
         "Session Manager\t",
@@ -129,7 +131,6 @@ static void hvlpevent_clear_valid( struct HvLpEvent * event )
 void process_hvlpevents(struct pt_regs *regs)
 {
-        unsigned numIntsProcessed = 0;
         struct HvLpEvent * nextLpEvent;
 
         /* If we have recursed, just return */
@@ -144,8 +145,6 @@ void process_hvlpevents(struct pt_regs *regs)
         for (;;) {
                 nextLpEvent = get_next_hvlpevent();
                 if ( nextLpEvent ) {
-                        ++numIntsProcessed;
-                        hvlpevent_queue.xLpIntCount++;
                         /* Call appropriate handler here, passing
                          * a pointer to the LpEvent. The handler
                          * must make a copy of the LpEvent if it
@@ -160,7 +159,7 @@ void process_hvlpevents(struct pt_regs *regs)
                          * here!
                          */
                         if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-                                hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
+                                __get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
                         if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                                         lpEventHandler[nextLpEvent->xType] )
                                 lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -181,8 +180,6 @@ void process_hvlpevents(struct pt_regs *regs)
         ItLpQueueInProcess = 0;
         mb();
         clear_inUse();
-
-        get_paca()->lpevent_count += numIntsProcessed;
 }
 
 static int set_spread_lpevents(char *str)
@@ -228,20 +225,37 @@ void setup_hvlpevent_queue(void)
 static int proc_lpevents_show(struct seq_file *m, void *v)
 {
-        unsigned int i;
+        int cpu, i;
+        unsigned long sum;
+        static unsigned long cpu_totals[NR_CPUS];
+
+        /* FIXME: do we care that there's no locking here? */
+        sum = 0;
+        for_each_online_cpu(cpu) {
+                cpu_totals[cpu] = 0;
+                for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
+                        cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
+                }
+                sum += cpu_totals[cpu];
+        }
 
         seq_printf(m, "LpEventQueue 0\n");
-        seq_printf(m, "  events processed:\t%lu\n",
-                (unsigned long)hvlpevent_queue.xLpIntCount);
+        seq_printf(m, "  events processed:\t%lu\n", sum);
 
-        for (i = 0; i < 9; ++i)
-                seq_printf(m, "    %s %10lu\n", event_types[i],
-                        (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);
+        for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
+                sum = 0;
+                for_each_online_cpu(cpu) {
+                        sum += per_cpu(hvlpevent_counts, cpu)[i];
+                }
+
+                seq_printf(m, "    %s %10lu\n", event_types[i], sum);
+        }
 
         seq_printf(m, "\n  events processed by processor:\n");
-        for_each_online_cpu(i)
-                seq_printf(m, "    CPU%02d  %10u\n", i, paca[i].lpevent_count);
+        for_each_online_cpu(cpu) {
+                seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
+        }
 
         return 0;
 }
@@ -70,8 +70,6 @@ struct hvlpevent_queue {
         u8      xIndex;                 // 0x28 unique sequential index.
         u8      xSlicRsvd[3];           // 0x29-2b
         u32     xInUseWord;             // 0x2C
-        u64     xLpIntCount;            // 0x30 Total Lp Int msgs processed
-        u64     xLpIntCountByType[9];   // 0x38-0x7F Event counts by type
 };
 
 extern struct hvlpevent_queue hvlpevent_queue;
@@ -89,7 +89,6 @@ struct paca_struct {
         u64 next_jiffy_update_tb;       /* TB value for next jiffy update */
         u64 saved_r1;                   /* r1 save for RTAS calls */
         u64 saved_msr;                  /* MSR saved here by enter_rtas */
-        u32 lpevent_count;              /* lpevents processed */
         u8 proc_enabled;                /* irq soft-enable flag */
 
         /* not yet used */