Commit e8824890 authored by Thomas Gleixner's avatar Thomas Gleixner

x86/delay: Preparatory code cleanup

The naming conventions in the delay code are confusing at best.

All delay variants use a loops argument and/or variable which originates
from the original delay_loop() implementation. But all variants except
delay_loop() are based on TSC cycles.

Rename the argument to cycles and make it type u64 to avoid these weird
expansions to u64 in the functions.

Rename MWAITX_MAX_LOOPS to MWAITX_MAX_WAIT_CYCLES for the same reason
and fixup the comment of delay_mwaitx() as well.

Mark the delay_fn function pointer __ro_after_init and fixup the comment
for it.

No functional change and preparation for the upcoming TPAUSE based delay
variant.

[ Kyung Min Park: Added __init to use_tsc_delay() ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Kyung Min Park <kyung.min.park@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/1587757076-30337-2-git-send-email-kyung.min.park@intel.com
parent 3c40cdb0
...@@ -3,8 +3,9 @@ ...@@ -3,8 +3,9 @@
#define _ASM_X86_DELAY_H #define _ASM_X86_DELAY_H
#include <asm-generic/delay.h> #include <asm-generic/delay.h>
#include <linux/init.h>
void use_tsc_delay(void); void __init use_tsc_delay(void);
void use_mwaitx_delay(void); void use_mwaitx_delay(void);
#endif /* _ASM_X86_DELAY_H */ #endif /* _ASM_X86_DELAY_H */
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1 #define MWAIT_ECX_INTERRUPT_BREAK 0x1
#define MWAITX_ECX_TIMER_ENABLE BIT(1) #define MWAITX_ECX_TIMER_ENABLE BIT(1)
#define MWAITX_MAX_LOOPS ((u32)-1) #define MWAITX_MAX_WAIT_CYCLES UINT_MAX
#define MWAITX_DISABLE_CSTATES 0xf0 #define MWAITX_DISABLE_CSTATES 0xf0
u32 get_umwait_control_msr(void); u32 get_umwait_control_msr(void);
......
...@@ -27,9 +27,19 @@ ...@@ -27,9 +27,19 @@
# include <asm/smp.h> # include <asm/smp.h>
#endif #endif
static void delay_loop(u64 __loops);
/*
* Calibration and selection of the delay mechanism happens only once
* during boot.
*/
static void (*delay_fn)(u64) __ro_after_init = delay_loop;
/* simple loop based delay: */ /* simple loop based delay: */
static void delay_loop(unsigned long loops) static void delay_loop(u64 __loops)
{ {
unsigned long loops = (unsigned long)__loops;
asm volatile( asm volatile(
" test %0,%0 \n" " test %0,%0 \n"
" jz 3f \n" " jz 3f \n"
...@@ -49,9 +59,9 @@ static void delay_loop(unsigned long loops) ...@@ -49,9 +59,9 @@ static void delay_loop(unsigned long loops)
} }
/* TSC based delay: */ /* TSC based delay: */
static void delay_tsc(unsigned long __loops) static void delay_tsc(u64 cycles)
{ {
u64 bclock, now, loops = __loops; u64 bclock, now;
int cpu; int cpu;
preempt_disable(); preempt_disable();
...@@ -59,7 +69,7 @@ static void delay_tsc(unsigned long __loops) ...@@ -59,7 +69,7 @@ static void delay_tsc(unsigned long __loops)
bclock = rdtsc_ordered(); bclock = rdtsc_ordered();
for (;;) { for (;;) {
now = rdtsc_ordered(); now = rdtsc_ordered();
if ((now - bclock) >= loops) if ((now - bclock) >= cycles)
break; break;
/* Allow RT tasks to run */ /* Allow RT tasks to run */
...@@ -77,7 +87,7 @@ static void delay_tsc(unsigned long __loops) ...@@ -77,7 +87,7 @@ static void delay_tsc(unsigned long __loops)
* counter for this CPU. * counter for this CPU.
*/ */
if (unlikely(cpu != smp_processor_id())) { if (unlikely(cpu != smp_processor_id())) {
loops -= (now - bclock); cycles -= (now - bclock);
cpu = smp_processor_id(); cpu = smp_processor_id();
bclock = rdtsc_ordered(); bclock = rdtsc_ordered();
} }
...@@ -87,24 +97,24 @@ static void delay_tsc(unsigned long __loops) ...@@ -87,24 +97,24 @@ static void delay_tsc(unsigned long __loops)
/* /*
* On some AMD platforms, MWAITX has a configurable 32-bit timer, that * On some AMD platforms, MWAITX has a configurable 32-bit timer, that
* counts with TSC frequency. The input value is the loop of the * counts with TSC frequency. The input value is the number of TSC cycles
* counter, it will exit when the timer expires. * to wait. MWAITX will also exit when the timer expires.
*/ */
static void delay_mwaitx(unsigned long __loops) static void delay_mwaitx(u64 cycles)
{ {
u64 start, end, delay, loops = __loops; u64 start, end, delay;
/* /*
* Timer value of 0 causes MWAITX to wait indefinitely, unless there * Timer value of 0 causes MWAITX to wait indefinitely, unless there
* is a store on the memory monitored by MONITORX. * is a store on the memory monitored by MONITORX.
*/ */
if (loops == 0) if (!cycles)
return; return;
start = rdtsc_ordered(); start = rdtsc_ordered();
for (;;) { for (;;) {
delay = min_t(u64, MWAITX_MAX_LOOPS, loops); delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
/* /*
* Use cpu_tss_rw as a cacheline-aligned, seldomly * Use cpu_tss_rw as a cacheline-aligned, seldomly
...@@ -121,22 +131,15 @@ static void delay_mwaitx(unsigned long __loops) ...@@ -121,22 +131,15 @@ static void delay_mwaitx(unsigned long __loops)
end = rdtsc_ordered(); end = rdtsc_ordered();
if (loops <= end - start) if (cycles <= end - start)
break; break;
loops -= end - start; cycles -= end - start;
start = end; start = end;
} }
} }
/* void __init use_tsc_delay(void)
* Since we calibrate only once at boot, this
* function should be set once at boot and not changed
*/
static void (*delay_fn)(unsigned long) = delay_loop;
void use_tsc_delay(void)
{ {
if (delay_fn == delay_loop) if (delay_fn == delay_loop)
delay_fn = delay_tsc; delay_fn = delay_tsc;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment