Commit e4a884cc authored by Gautham R. Shenoy's avatar Gautham R. Shenoy Committed by Michael Ellerman

powerpc: Move idle_loop_prolog()/epilog() functions to header file

Currently prior to entering an idle state on a Linux Guest, the
pseries cpuidle driver implements idle_loop_prolog() and
idle_loop_epilog() functions which ensure that idle_purr is correctly
computed, and the hypervisor is informed that the CPU cycles have been
donated.

These prolog and epilog functions are also required in the default
idle call, i.e pseries_lpar_idle(). Hence move these accessor
functions to a common header file and call them from
pseries_lpar_idle(). Since the existing header files such as
asm/processor.h have enough clutter, create a new header file
asm/idle.h. Finally rename idle_loop_prolog() and idle_loop_epilog()
to pseries_idle_prolog() and pseries_idle_epilog() as they are only
relevant on pseries guests.
Signed-off-by: default avatarGautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1586249263-14048-2-git-send-email-ego@linux.vnet.ibm.com
parent 45591da7
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_IDLE_H
#define _ASM_POWERPC_IDLE_H
#include <asm/runlatch.h>
#include <asm/paca.h>
#ifdef CONFIG_PPC_PSERIES
/*
 * pseries_idle_prolog - bookkeeping done just before a pseries guest CPU
 * enters an idle state.
 * @in_purr: out-parameter; receives a snapshot of the PURR so the matching
 *           pseries_idle_epilog() can account the cycles spent idle.
 *
 * NOTE(review): must be paired with pseries_idle_epilog(), which reads the
 * snapshot and clears the lppaca idle flag set here.
 */
static inline void pseries_idle_prolog(unsigned long *in_purr)
{
	/* Turn the runlatch off before idling (cleared again by the epilog). */
	ppc64_runlatch_off();
	/* Snapshot the Processor Utilization of Resources Register (PURR). */
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}
/*
 * pseries_idle_epilog - bookkeeping done right after a pseries guest CPU
 * leaves an idle state.
 * @in_purr: the PURR snapshot taken by the matching pseries_idle_prolog().
 *
 * Adds the PURR cycles spent idle to the lppaca wait_state_cycles counter
 * (stored big-endian in the VPA), tells the hypervisor we are no longer
 * idle, and turns the runlatch back on.
 */
static inline void pseries_idle_epilog(unsigned long in_purr)
{
	u64 total_wait = be64_to_cpu(get_lppaca()->wait_state_cycles) +
			 (mfspr(SPRN_PURR) - in_purr);

	get_lppaca()->wait_state_cycles = cpu_to_be64(total_wait);
	get_lppaca()->idle = 0;
	ppc64_runlatch_on();
}
#endif /* CONFIG_PPC_PSERIES */
#endif
...@@ -68,6 +68,7 @@ ...@@ -68,6 +68,7 @@
#include <asm/isa-bridge.h> #include <asm/isa-bridge.h>
#include <asm/security_features.h> #include <asm/security_features.h>
#include <asm/asm-const.h> #include <asm/asm-const.h>
#include <asm/idle.h>
#include <asm/swiotlb.h> #include <asm/swiotlb.h>
#include <asm/svm.h> #include <asm/svm.h>
...@@ -319,6 +320,8 @@ machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache); ...@@ -319,6 +320,8 @@ machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
static void pseries_lpar_idle(void) static void pseries_lpar_idle(void)
{ {
unsigned long in_purr;
/* /*
* Default handler to go into low thread priority and possibly * Default handler to go into low thread priority and possibly
* low power mode by ceding processor to hypervisor * low power mode by ceding processor to hypervisor
...@@ -328,7 +331,7 @@ static void pseries_lpar_idle(void) ...@@ -328,7 +331,7 @@ static void pseries_lpar_idle(void)
return; return;
/* Indicate to hypervisor that we are idle. */ /* Indicate to hypervisor that we are idle. */
get_lppaca()->idle = 1; pseries_idle_prolog(&in_purr);
/* /*
* Yield the processor to the hypervisor. We return if * Yield the processor to the hypervisor. We return if
...@@ -339,7 +342,7 @@ static void pseries_lpar_idle(void) ...@@ -339,7 +342,7 @@ static void pseries_lpar_idle(void)
*/ */
cede_processor(); cede_processor();
get_lppaca()->idle = 0; pseries_idle_epilog(in_purr);
} }
/* /*
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/firmware.h> #include <asm/firmware.h>
#include <asm/runlatch.h> #include <asm/runlatch.h>
#include <asm/idle.h>
#include <asm/plpar_wrappers.h> #include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = { struct cpuidle_driver pseries_idle_driver = {
...@@ -31,29 +32,6 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly; ...@@ -31,29 +32,6 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly; static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly; static bool snooze_timeout_en __read_mostly;
static inline void idle_loop_prolog(unsigned long *in_purr)
{
ppc64_runlatch_off();
*in_purr = mfspr(SPRN_PURR);
/*
* Indicate to the HV that we are idle. Now would be
* a good time to find other work to dispatch.
*/
get_lppaca()->idle = 1;
}
static inline void idle_loop_epilog(unsigned long in_purr)
{
u64 wait_cycles;
wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
wait_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
get_lppaca()->idle = 0;
ppc64_runlatch_on();
}
static int snooze_loop(struct cpuidle_device *dev, static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv,
int index) int index)
...@@ -63,7 +41,7 @@ static int snooze_loop(struct cpuidle_device *dev, ...@@ -63,7 +41,7 @@ static int snooze_loop(struct cpuidle_device *dev,
set_thread_flag(TIF_POLLING_NRFLAG); set_thread_flag(TIF_POLLING_NRFLAG);
idle_loop_prolog(&in_purr); pseries_idle_prolog(&in_purr);
local_irq_enable(); local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout; snooze_exit_time = get_tb() + snooze_timeout;
...@@ -87,7 +65,7 @@ static int snooze_loop(struct cpuidle_device *dev, ...@@ -87,7 +65,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_disable(); local_irq_disable();
idle_loop_epilog(in_purr); pseries_idle_epilog(in_purr);
return index; return index;
} }
...@@ -115,7 +93,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, ...@@ -115,7 +93,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
{ {
unsigned long in_purr; unsigned long in_purr;
idle_loop_prolog(&in_purr); pseries_idle_prolog(&in_purr);
get_lppaca()->donate_dedicated_cpu = 1; get_lppaca()->donate_dedicated_cpu = 1;
HMT_medium(); HMT_medium();
...@@ -124,7 +102,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, ...@@ -124,7 +102,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
local_irq_disable(); local_irq_disable();
get_lppaca()->donate_dedicated_cpu = 0; get_lppaca()->donate_dedicated_cpu = 0;
idle_loop_epilog(in_purr); pseries_idle_epilog(in_purr);
return index; return index;
} }
...@@ -135,7 +113,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, ...@@ -135,7 +113,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
{ {
unsigned long in_purr; unsigned long in_purr;
idle_loop_prolog(&in_purr); pseries_idle_prolog(&in_purr);
/* /*
* Yield the processor to the hypervisor. We return if * Yield the processor to the hypervisor. We return if
...@@ -147,7 +125,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, ...@@ -147,7 +125,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor(); check_and_cede_processor();
local_irq_disable(); local_irq_disable();
idle_loop_epilog(in_purr); pseries_idle_epilog(in_purr);
return index; return index;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment