Commit 87be28aa authored by Andy Lutomirski, committed by Ingo Molnar

x86/asm/tsc: Replace rdtscll() with native_read_tsc()

Now that the ->read_tsc() paravirt hook is gone, rdtscll() is
just a wrapper around native_read_tsc(). Unwrap it.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/d2449ae62c1b1fb90195bcfb19ef4a35883a04dc.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9261e050
...@@ -82,7 +82,7 @@ static unsigned long get_random_long(void) ...@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
if (has_cpuflag(X86_FEATURE_TSC)) { if (has_cpuflag(X86_FEATURE_TSC)) {
debug_putstr(" RDTSC"); debug_putstr(" RDTSC");
rdtscll(raw); raw = native_read_tsc();
random ^= raw; random ^= raw;
use_i8254 = false; use_i8254 = false;
......
...@@ -192,9 +192,6 @@ do { \ ...@@ -192,9 +192,6 @@ do { \
#define rdtscl(low) \ #define rdtscl(low) \
((low) = (u32)native_read_tsc()) ((low) = (u32)native_read_tsc())
#define rdtscll(val) \
((val) = native_read_tsc())
#define rdtscp(low, high, aux) \ #define rdtscp(low, high, aux) \
do { \ do { \
unsigned long long _val = native_read_tscp(&(aux)); \ unsigned long long _val = native_read_tscp(&(aux)); \
......
...@@ -21,15 +21,12 @@ extern void disable_TSC(void); ...@@ -21,15 +21,12 @@ extern void disable_TSC(void);
static inline cycles_t get_cycles(void) static inline cycles_t get_cycles(void)
{ {
unsigned long long ret = 0;
#ifndef CONFIG_X86_TSC #ifndef CONFIG_X86_TSC
if (!cpu_has_tsc) if (!cpu_has_tsc)
return 0; return 0;
#endif #endif
rdtscll(ret);
return ret; return native_read_tsc();
} }
extern void tsc_init(void); extern void tsc_init(void);
......
...@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void) ...@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
/* Verify whether apbt counter works */ /* Verify whether apbt counter works */
t1 = dw_apb_clocksource_read(clocksource_apbt); t1 = dw_apb_clocksource_read(clocksource_apbt);
rdtscll(start); start = native_read_tsc();
/* /*
* We don't know the TSC frequency yet, but waiting for * We don't know the TSC frequency yet, but waiting for
...@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void) ...@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
*/ */
do { do {
rep_nop(); rep_nop();
rdtscll(now); now = native_read_tsc();
} while ((now - start) < 200000UL); } while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */ /* APBT is the only always on clocksource, it has to work! */
......
...@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta, ...@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
{ {
u64 tsc; u64 tsc;
rdtscll(tsc); tsc = native_read_tsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0; return 0;
} }
...@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev) ...@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
unsigned long pm = acpi_pm_read_early(); unsigned long pm = acpi_pm_read_early();
if (cpu_has_tsc) if (cpu_has_tsc)
rdtscll(tsc); tsc = native_read_tsc();
switch (lapic_cal_loops++) { switch (lapic_cal_loops++) {
case 0: case 0:
...@@ -1209,7 +1209,7 @@ void setup_local_APIC(void) ...@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
long long max_loops = cpu_khz ? cpu_khz : 1000000; long long max_loops = cpu_khz ? cpu_khz : 1000000;
if (cpu_has_tsc) if (cpu_has_tsc)
rdtscll(tsc); tsc = native_read_tsc();
if (disable_apic) { if (disable_apic) {
disable_ioapic_support(); disable_ioapic_support();
...@@ -1293,7 +1293,7 @@ void setup_local_APIC(void) ...@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
} }
if (queued) { if (queued) {
if (cpu_has_tsc && cpu_khz) { if (cpu_has_tsc && cpu_khz) {
rdtscll(ntsc); ntsc = native_read_tsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc); max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else } else
max_loops--; max_loops--;
......
...@@ -125,7 +125,7 @@ void mce_setup(struct mce *m) ...@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
{ {
memset(m, 0, sizeof(struct mce)); memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id(); m->cpu = m->extcpu = smp_processor_id();
rdtscll(m->tsc); m->tsc = native_read_tsc();
/* We hope get_seconds stays lockless */ /* We hope get_seconds stays lockless */
m->time = get_seconds(); m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor; m->cpuvendor = boot_cpu_data.x86_vendor;
...@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data) ...@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
{ {
unsigned long *cpu_tsc = (unsigned long *)data; unsigned long *cpu_tsc = (unsigned long *)data;
rdtscll(cpu_tsc[smp_processor_id()]); cpu_tsc[smp_processor_id()] = native_read_tsc();
} }
static int mce_apei_read_done; static int mce_apei_read_done;
......
...@@ -110,7 +110,7 @@ static void init_espfix_random(void) ...@@ -110,7 +110,7 @@ static void init_espfix_random(void)
*/ */
if (!arch_get_random_long(&rand)) { if (!arch_get_random_long(&rand)) {
/* The constant is an arbitrary large prime */ /* The constant is an arbitrary large prime */
rdtscll(rand); rand = native_read_tsc();
rand *= 0xc345c6b72fd16123UL; rand *= 0xc345c6b72fd16123UL;
} }
......
...@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void) ...@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
/* Verify whether hpet counter works */ /* Verify whether hpet counter works */
t1 = hpet_readl(HPET_COUNTER); t1 = hpet_readl(HPET_COUNTER);
rdtscll(start); start = native_read_tsc();
/* /*
* We don't know the TSC frequency yet, but waiting for * We don't know the TSC frequency yet, but waiting for
...@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void) ...@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
*/ */
do { do {
rep_nop(); rep_nop();
rdtscll(now); now = native_read_tsc();
} while ((now - start) < 200000UL); } while ((now - start) < 200000UL);
if (t1 == hpet_readl(HPET_COUNTER)) { if (t1 == hpet_readl(HPET_COUNTER)) {
......
...@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void) ...@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
u64 ret; u64 ret;
rdtsc_barrier(); rdtsc_barrier();
rdtscll(ret); ret = native_read_tsc();
return ret; return ret;
} }
...@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) ...@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
data = cyc2ns_write_begin(cpu); data = cyc2ns_write_begin(cpu);
rdtscll(tsc_now); tsc_now = native_read_tsc();
ns_now = cycles_2_ns(tsc_now); ns_now = cycles_2_ns(tsc_now);
/* /*
...@@ -290,7 +290,7 @@ u64 native_sched_clock(void) ...@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
} }
/* read the Time Stamp Counter: */ /* read the Time Stamp Counter: */
rdtscll(tsc_now); tsc_now = native_read_tsc();
/* return the value in ns */ /* return the value in ns */
return cycles_2_ns(tsc_now); return cycles_2_ns(tsc_now);
......
...@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void) ...@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
{ {
u64 host_tsc, tsc_offset; u64 host_tsc, tsc_offset;
rdtscll(host_tsc); host_tsc = native_read_tsc();
tsc_offset = vmcs_read64(TSC_OFFSET); tsc_offset = vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset; return host_tsc + tsc_offset;
} }
......
...@@ -100,7 +100,7 @@ void use_tsc_delay(void) ...@@ -100,7 +100,7 @@ void use_tsc_delay(void)
int read_current_timer(unsigned long *timer_val) int read_current_timer(unsigned long *timer_val)
{ {
if (delay_fn == delay_tsc) { if (delay_fn == delay_tsc) {
rdtscll(*timer_val); *timer_val = native_read_tsc();
return 0; return 0;
} }
return -1; return -1;
......
...@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio, ...@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
/* check result for the last window */ /* check result for the last window */
msr_now = pkg_state_counter(); msr_now = pkg_state_counter();
rdtscll(tsc_now); tsc_now = native_read_tsc();
/* calculate pkg cstate vs tsc ratio */ /* calculate pkg cstate vs tsc ratio */
if (!msr_last || !tsc_last) if (!msr_last || !tsc_last)
...@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy) ...@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
u64 val64; u64 val64;
msr_now = pkg_state_counter(); msr_now = pkg_state_counter();
rdtscll(tsc_now); tsc_now = native_read_tsc();
jiffies_now = jiffies; jiffies_now = jiffies;
/* calculate pkg cstate vs tsc ratio */ /* calculate pkg cstate vs tsc ratio */
......
...@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void) ...@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
printk(KERN_DEBUG "start--> \n"); printk(KERN_DEBUG "start--> \n");
then = read_pmtmr(); then = read_pmtmr();
rdtscll(then_tsc); then_tsc = native_read_tsc();
for (i=0;i<20;i++) { for (i=0;i<20;i++) {
mdelay(100); mdelay(100);
now = read_pmtmr(); now = read_pmtmr();
rdtscll(now_tsc); now_tsc = native_read_tsc();
diff = (now - then) & 0xFFFFFF; diff = (now - then) & 0xFFFFFF;
diff_tsc = now_tsc - then_tsc; diff_tsc = now_tsc - then_tsc;
printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc); printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment