Commit a6187464 authored by Michael Ellerman, committed by Paul Mackerras

[PATCH] ppc64: Rename xItLpQueue to hvlpevent_queue

The xItLpQueue is a queue of HvLpEvents that we're given by the Hypervisor.
Rename xItLpQueue to hvlpevent_queue and make the type struct hvlpevent_queue.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent ab354b63
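For readers skimming the diff below, a minimal illustrative sketch (not part of the patch) of how queue state is read after the rename, mirroring ItLpQueue_isLpIntPending() as it appears in the hunks that follow; struct and field names are taken from the diff, and the surrounding iSeries headers are assumed:

	/* illustrative sketch only; names taken from the diff below */
	struct HvLpEvent *next_event;

	/* peek at the next event slot in the renamed global queue */
	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
	if (next_event->xFlags.xValid || hvlpevent_queue.xPlicOverflowIntPending) {
		/* at least one event (or an overflow indication) is pending */
	}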
@@ -26,7 +26,7 @@
  *
  * It is written to by the hypervisor so cannot end up in the BSS.
  */
-struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));
+struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 static char *event_types[9] = {
 	"Hypervisor\t\t",
@@ -43,7 +43,7 @@ static char *event_types[9] = {
 static __inline__ int set_inUse(void)
 {
 	int t;
-	u32 * inUseP = &xItLpQueue.xInUseWord;
+	u32 * inUseP = &hvlpevent_queue.xInUseWord;
 	__asm__ __volatile__("\n\
 1:	lwarx	%0,0,%2		\n\
@@ -54,8 +54,8 @@ static __inline__ int set_inUse(void)
 	stwcx.	%0,0,%2		\n\
 	bne-	1b		\n\
 2:	eieio"
-	: "=&r" (t), "=m" (xItLpQueue.xInUseWord)
-	: "r" (inUseP), "m" (xItLpQueue.xInUseWord)
+	: "=&r" (t), "=m" (hvlpevent_queue.xInUseWord)
+	: "r" (inUseP), "m" (hvlpevent_queue.xInUseWord)
 	: "cc");
 	return t;
@@ -63,7 +63,7 @@ static __inline__ int set_inUse(void)
 static __inline__ void clear_inUse(void)
 {
-	xItLpQueue.xInUseWord = 0;
+	hvlpevent_queue.xInUseWord = 0;
 }
 /* Array of LpEvent handler functions */
@@ -73,18 +73,18 @@ unsigned long ItLpQueueInProcess = 0;
 static struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
 {
 	struct HvLpEvent * nextLpEvent =
-		(struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
+		(struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
 	if ( nextLpEvent->xFlags.xValid ) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+		hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
 				      LpEventAlign ) /
 				      LpEventAlign ) *
 				      LpEventAlign;
 		/* Wrap to beginning if no room at end */
-		if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
-			xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
+			hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
 	}
 	else
 		nextLpEvent = NULL;
@@ -101,8 +101,8 @@ int ItLpQueue_isLpIntPending(void)
 	if (smp_processor_id() >= spread_lpevents)
 		return 0;
-	next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
-	return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
+	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
 }
 static void ItLpQueue_clearValid( struct HvLpEvent * event )
@@ -145,10 +145,10 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 		nextLpEvent = ItLpQueue_getNextLpEvent();
 		if ( nextLpEvent ) {
 			/* Count events to return to caller
-			 * and count processed events in xItLpQueue
+			 * and count processed events in hvlpevent_queue
 			 */
 			++numIntsProcessed;
-			xItLpQueue.xLpIntCount++;
+			hvlpevent_queue.xLpIntCount++;
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent.  The handler
 			 * must make a copy of the LpEvent if it
@@ -163,7 +163,7 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 			 * here!
 			 */
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-				xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
+				hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
 			     lpEventHandler[nextLpEvent->xType] )
 				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -171,12 +171,12 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
 			ItLpQueue_clearValid( nextLpEvent );
-		} else if ( xItLpQueue.xPlicOverflowIntPending )
+		} else if ( hvlpevent_queue.xPlicOverflowIntPending )
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
+			HvCallEvent_getOverflowLpEvents( hvlpevent_queue.xIndex);
 		else
 			break;
 	}
@@ -224,11 +224,11 @@ void setup_hvlpevent_queue(void)
 	/* Invoke the hypervisor to initialize the event stack */
 	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
-	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
-	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
-	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
+	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
 					(LpEventStackSize - LpEventMaxSize);
-	xItLpQueue.xIndex = 0;
+	hvlpevent_queue.xIndex = 0;
 }
 static int proc_lpevents_show(struct seq_file *m, void *v)
@@ -237,11 +237,11 @@ static int proc_lpevents_show(struct seq_file *m, void *v)
 	seq_printf(m, "LpEventQueue 0\n");
 	seq_printf(m, " events processed:\t%lu\n",
-		   (unsigned long)xItLpQueue.xLpIntCount);
+		   (unsigned long)hvlpevent_queue.xLpIntCount);
 	for (i = 0; i < 9; ++i)
 		seq_printf(m, " %s %10lu\n", event_types[i],
-			   (unsigned long)xItLpQueue.xLpIntCountByType[i]);
+			   (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);
 	seq_printf(m, "\n events processed by processor:\n");
...
@@ -193,7 +193,7 @@ struct ItVpdAreas itVpdAreas = {
 		0,0,0,				/* 13 - 15 */
 		sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
 		0,0,0,0,0,0,			/* 17 - 22 */
-		sizeof(struct ItLpQueue),/* 23 length of Lp Queue */
+		sizeof(struct hvlpevent_queue),	/* 23 length of Lp Queue */
 		0,0				/* 24 - 25 */
 		},
 	.xSlicVpdAdrs = {			/* VPD addresses */
@@ -211,7 +211,7 @@ struct ItVpdAreas itVpdAreas = {
 		0,0,0,				/* 13 - 15 */
 		&xIoHriProcessorVpd,		/* 16 Proc Vpd */
 		0,0,0,0,0,0,			/* 17 - 22 */
-		&xItLpQueue,			/* 23 Lp Queue */
+		&hvlpevent_queue,		/* 23 Lp Queue */
 		0,0
 	}
 };
...
@@ -41,7 +41,7 @@ struct HvLpEvent;
 #define LpEventMaxSize		256
 #define LpEventAlign		64
-struct ItLpQueue {
+struct hvlpevent_queue {
 /*
  * The xSlicCurEventPtr is the pointer to the next event stack entry
  * that will become valid.  The OS must peek at this entry to determine
@@ -74,7 +74,7 @@ struct ItLpQueue {
 	u64	xLpIntCountByType[9];	// 0x38-0x7F Event counts by type
 };
-extern struct ItLpQueue xItLpQueue;
+extern struct hvlpevent_queue hvlpevent_queue;
 extern int ItLpQueue_isLpIntPending(void);
 extern unsigned ItLpQueue_process(struct pt_regs *);
...