Commit 09f37343 authored by Anton Blanchard

ppc64: rework idle loop, separate iSeries and pSeries

parent 3f034f4c
/*
* Idle daemon for PowerPC. Idle daemon will handle any action
* that needs to be taken when the system becomes idle.
*
* Written by Cort Dougan (cort@cs.nmt.edu)
* idle.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -28,8 +26,10 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/cache.h>
 #include <asm/time.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/LparData.h>
 #include <asm/iSeries/HvCall.h>
 #include <asm/iSeries/ItLpQueue.h>
@@ -37,99 +37,109 @@
 unsigned long maxYieldTime = 0;
 unsigned long minYieldTime = 0xffffffffffffffffUL;
 #ifdef CONFIG_PPC_ISERIES
 static void yield_shared_processor(void)
 {
-	struct Paca *paca;
 	unsigned long tb;
 	unsigned long yieldTime;
-	paca = (struct Paca *)mfspr(SPRG3);
-	HvCall_setEnabledInterrupts( HvCall_MaskIPI |
-				     HvCall_MaskLpEvent |
-				     HvCall_MaskLpProd |
-				     HvCall_MaskTimeout );
+	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
+				    HvCall_MaskLpEvent |
+				    HvCall_MaskLpProd |
+				    HvCall_MaskTimeout);
 	tb = get_tb();
 	/* Compute future tb value when yield should expire */
-	HvCall_yieldProcessor( HvCall_YieldTimed, tb+tb_ticks_per_jiffy );
+	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
 	yieldTime = get_tb() - tb;
-	if ( yieldTime > maxYieldTime )
+	if (yieldTime > maxYieldTime)
 		maxYieldTime = yieldTime;
-	if ( yieldTime < minYieldTime )
+	if (yieldTime < minYieldTime)
 		minYieldTime = yieldTime;
-	/* The decrementer stops during the yield. Force a fake decrementer
+	/*
+	 * The decrementer stops during the yield. Force a fake decrementer
 	 * here and let the timer_interrupt code sort out the actual time.
 	 */
-	paca->xLpPaca.xIntDword.xFields.xDecrInt = 1;
+	get_paca()->xLpPaca.xIntDword.xFields.xDecrInt = 1;
 	process_iSeries_events();
 }
-#endif /* CONFIG_PPC_ISERIES */
-int idled(void)
+int cpu_idle(void)
 {
 	struct Paca *paca;
 	long oldval;
-#ifdef CONFIG_PPC_ISERIES
 	unsigned long CTRL;
-#endif
-	/* endless loop with no priority at all */
-#ifdef CONFIG_PPC_ISERIES
 	/* ensure iSeries run light will be out when idle */
 	current->thread.flags &= ~PPC_FLAG_RUN_LIGHT;
 	CTRL = mfspr(CTRLF);
 	CTRL &= ~RUNLATCH;
 	mtspr(CTRLT, CTRL);
-#endif
-	paca = (struct Paca *)mfspr(SPRG3);
-	while(1) {
-		if (need_resched())
-			schedule();
-	}
 	paca = get_paca();
-	for (;;) {
-#ifdef CONFIG_PPC_ISERIES
-		if ( paca->xLpPaca.xSharedProc ) {
-			if ( ItLpQueue_isLpIntPending( paca->lpQueuePtr ) )
+	while (1) {
+		if (paca->xLpPaca.xSharedProc) {
+			if (ItLpQueue_isLpIntPending(paca->lpQueuePtr))
 				process_iSeries_events();
 			if (!need_resched())
 				yield_shared_processor();
-		}
-		else
-#endif
-		{
-			if (!need_resched()) {
+		} else {
+			oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+			if (!oldval) {
 				set_thread_flag(TIF_POLLING_NRFLAG);
-				while (!test_thread_flag(TIF_NEED_RESCHED)) {
-#ifdef CONFIG_PPC_ISERIES
+				while (!need_resched()) {
 					HMT_medium();
 					if (ItLpQueue_isLpIntPending(paca->lpQueuePtr))
 						process_iSeries_events();
-#endif
 					HMT_low();
 				}
 				HMT_medium();
 				clear_thread_flag(TIF_POLLING_NRFLAG);
+			} else {
+				set_need_resched();
 			}
 		}
 		HMT_medium();
 		if (need_resched())
 			schedule();
 	}
 	return 0;
 }
-/*
- * SMP entry into the idle task - calls the same thing as the
- * non-smp versions. -- Cort
- */
+#else /* CONFIG_PPC_ISERIES */
 int cpu_idle(void)
 {
-	idled();
-	return 0;
+	long oldval;
+	while (1) {
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+			while (!need_resched()) {
+				barrier();
+				HMT_low();
+			}
+			HMT_medium();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+		schedule();
+	}
+	return 0;
 }
+#endif /* CONFIG_PPC_ISERIES */