Commit 09f37343 authored by Anton Blanchard

ppc64: rework idle loop, separate iSeries and pSeries

parent 3f034f4c
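
The patch drops the old idled()/cpu_idle() pair and builds cpu_idle() in two compile-time variants selected by CONFIG_PPC_ISERIES: an iSeries version that processes pending LP events and yields shared processors back to the hypervisor, and a default version that spins on the reschedule flag at low SMT priority. The sketch below is a minimal userspace model of the default loop's flag handling, not kernel code; the flag helpers, the atomics, and the bounded outer loop are illustrative stand-ins for the kernel's thread-flag and HMT primitives.

/*
 * Userspace sketch (assumed, illustrative only) of the default idle loop
 * added by this commit: test-and-clear NEED_RESCHED, advertise polling,
 * spin at "low priority" until work arrives, then schedule.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TIF_NEED_RESCHED = 1 << 0, TIF_POLLING_NRFLAG = 1 << 1 };

static atomic_uint thread_flags = TIF_NEED_RESCHED;	/* pretend work is already pending */

static bool test_and_clear_flag(unsigned int f)
{
	return atomic_fetch_and(&thread_flags, ~f) & f;
}
static void set_flag(unsigned int f)   { atomic_fetch_or(&thread_flags, f); }
static void clear_flag(unsigned int f) { atomic_fetch_and(&thread_flags, ~f); }
static bool need_resched(void)         { return atomic_load(&thread_flags) & TIF_NEED_RESCHED; }

static void HMT_low(void)    { /* would drop SMT thread priority on ppc64 */ }
static void HMT_medium(void) { /* would restore SMT thread priority */ }

static void schedule(void)
{
	clear_flag(TIF_NEED_RESCHED);
	puts("schedule(): ran pending work");
}

int main(void)
{
	for (int iter = 0; iter < 2; iter++) {	/* bounded here; endless in the kernel */
		if (!test_and_clear_flag(TIF_NEED_RESCHED)) {
			/* Nothing to do yet: poll the flag instead of sleeping. */
			set_flag(TIF_POLLING_NRFLAG);
			while (!need_resched()) {
				HMT_low();
				set_flag(TIF_NEED_RESCHED);	/* stand-in: work arrives */
			}
			HMT_medium();
			clear_flag(TIF_POLLING_NRFLAG);
		} else {
			/* Work was already pending; put the flag back, as set_need_resched() does. */
			set_flag(TIF_NEED_RESCHED);
		}
		schedule();
	}
	return 0;
}
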
 /*
- * Idle daemon for PowerPC. Idle daemon will handle any action
- * that needs to be taken when the system becomes idle.
- *
- * Written by Cort Dougan (cort@cs.nmt.edu)
+ * idle.c
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -28,8 +26,10 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/cache.h>
 #include <asm/time.h>
+#ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/LparData.h>
 #include <asm/iSeries/HvCall.h>
 #include <asm/iSeries/ItLpQueue.h>
@@ -37,99 +37,109 @@
 unsigned long maxYieldTime = 0;
 unsigned long minYieldTime = 0xffffffffffffffffUL;
 
+#ifdef CONFIG_PPC_ISERIES
 static void yield_shared_processor(void)
 {
-        struct Paca *paca;
         unsigned long tb;
         unsigned long yieldTime;
 
-        paca = (struct Paca *)mfspr(SPRG3);
-        HvCall_setEnabledInterrupts( HvCall_MaskIPI |
-                HvCall_MaskLpEvent |
-                HvCall_MaskLpProd |
-                HvCall_MaskTimeout );
+        HvCall_setEnabledInterrupts(HvCall_MaskIPI |
+                                    HvCall_MaskLpEvent |
+                                    HvCall_MaskLpProd |
+                                    HvCall_MaskTimeout);
 
         tb = get_tb();
         /* Compute future tb value when yield should expire */
-        HvCall_yieldProcessor( HvCall_YieldTimed, tb+tb_ticks_per_jiffy );
+        HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
 
         yieldTime = get_tb() - tb;
-        if ( yieldTime > maxYieldTime )
+        if (yieldTime > maxYieldTime)
                 maxYieldTime = yieldTime;
-        if ( yieldTime < minYieldTime )
+        if (yieldTime < minYieldTime)
                 minYieldTime = yieldTime;
 
-        /* The decrementer stops during the yield. Force a fake decrementer
+        /*
+         * The decrementer stops during the yield. Force a fake decrementer
          * here and let the timer_interrupt code sort out the actual time.
          */
-        paca->xLpPaca.xIntDword.xFields.xDecrInt = 1;
+        get_paca()->xLpPaca.xIntDword.xFields.xDecrInt = 1;
         process_iSeries_events();
 }
-#endif /* CONFIG_PPC_ISERIES */
 
-int idled(void)
+int cpu_idle(void)
 {
         struct Paca *paca;
         long oldval;
-#ifdef CONFIG_PPC_ISERIES
         unsigned long CTRL;
-#endif
 
-        /* endless loop with no priority at all */
-#ifdef CONFIG_PPC_ISERIES
         /* ensure iSeries run light will be out when idle */
         current->thread.flags &= ~PPC_FLAG_RUN_LIGHT;
         CTRL = mfspr(CTRLF);
         CTRL &= ~RUNLATCH;
         mtspr(CTRLT, CTRL);
-#endif
 
-        paca = (struct Paca *)mfspr(SPRG3);
-        while(1) {
-                if (need_resched())
-                        schedule();
-        }
+        paca = get_paca();
 
-        for (;;) {
-#ifdef CONFIG_PPC_ISERIES
-                if ( paca->xLpPaca.xSharedProc ) {
-                        if ( ItLpQueue_isLpIntPending( paca->lpQueuePtr ) )
+        while (1) {
+                if (paca->xLpPaca.xSharedProc) {
+                        if (ItLpQueue_isLpIntPending(paca->lpQueuePtr))
                                 process_iSeries_events();
                         if (!need_resched())
                                 yield_shared_processor();
-                }
-                else
-#endif
-                {
-                        if (!need_resched()) {
+                } else {
+                        oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+                        if (!oldval) {
                                 set_thread_flag(TIF_POLLING_NRFLAG);
-                                while (!test_thread_flag(TIF_NEED_RESCHED)) {
-#ifdef CONFIG_PPC_ISERIES
+
+                                while (!need_resched()) {
                                         HMT_medium();
                                         if (ItLpQueue_isLpIntPending(paca->lpQueuePtr))
                                                 process_iSeries_events();
-#endif
                                         HMT_low();
                                 }
+
+                                HMT_medium();
                                 clear_thread_flag(TIF_POLLING_NRFLAG);
+                        } else {
+                                set_need_resched();
                         }
                 }
-                HMT_medium();
+
                 if (need_resched())
                         schedule();
         }
+
         return 0;
 }
 
-/*
- * SMP entry into the idle task - calls the same thing as the
- * non-smp versions. -- Cort
- */
+#else /* CONFIG_PPC_ISERIES */
+
 int cpu_idle(void)
 {
-        idled();
-        return 0;
+        long oldval;
+
+        while (1) {
+                oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+                if (!oldval) {
+                        set_thread_flag(TIF_POLLING_NRFLAG);
+
+                        while (!need_resched()) {
+                                barrier();
+                                HMT_low();
+                        }
+
+                        HMT_medium();
+                        clear_thread_flag(TIF_POLLING_NRFLAG);
+                } else {
+                        set_need_resched();
+                }
+
+                schedule();
+        }
+
+        return 0;
 }
+
+#endif /* CONFIG_PPC_ISERIES */