Commit 4ea1636b authored by Andy Lutomirski, committed by Ingo Molnar

x86/asm/tsc: Rename native_read_tsc() to rdtsc()

Now that there is no paravirt TSC, the "native" is
inappropriate. The function does RDTSC, so give it the obvious
name: rdtsc().
Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/fd43e16281991f096c1e4d21574d9e1402c62d39.1434501121.git.luto@kernel.org
[ Ported it to v4.2-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fe47ae6e
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
 	if (has_cpuflag(X86_FEATURE_TSC)) {
 		debug_putstr(" RDTSC");
-		raw = native_read_tsc();
+		raw = rdtsc();
 		random ^= raw;
 		use_i8254 = false;
...
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)native_read_tsc();
+	ret = (cycle_t)rdtsc();
 	last = gtod->cycle_last;
...
@@ -109,7 +109,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
-static __always_inline unsigned long long native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
 {
 	DECLARE_ARGS(val, low, high);
...
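The speculation caveat in the new comment is why several callers below pair the read with rdtsc_barrier(). A minimal user-space sketch of the distinction; rdtsc_raw() and rdtsc_fenced() are illustrative names, not kernel API, and LFENCE stands in for rdtsc_barrier():

#include <stdint.h>
#include <stdio.h>

/* User-space analogue of the kernel's rdtsc(): a raw, unordered read. */
static inline uint64_t rdtsc_raw(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Analogue of the rdtsc_barrier(); rdtsc(); pairing used by callers
 * that need ordering: LFENCE keeps RDTSC from executing ahead of
 * earlier instructions. */
static inline uint64_t rdtsc_fenced(void)
{
	asm volatile("lfence" ::: "memory");
	return rdtsc_raw();
}

int main(void)
{
	uint64_t t1 = rdtsc_fenced();
	uint64_t t2 = rdtsc_fenced();

	printf("delta: %llu cycles\n", (unsigned long long)(t2 - t1));
	return 0;
}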
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = native_read_tsc() - src->tsc_timestamp;
+	u64 delta = rdtsc() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
...
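For context, pvclock_get_nsec_offset() feeds that delta into pvclock_scale_delta(), which scales cycles to nanoseconds with a shift plus a 32-bit fixed-point multiplier. A self-contained sketch of that scaling, assuming the conventional pvclock semantics (the helper name is illustrative):

#include <stdint.h>

/* Sketch of pvclock-style scaling: shift the cycle delta, then apply a
 * 32-bit fractional multiplier, keeping the high 64 bits of the 96-bit
 * product. Assumes the usual pvclock ABI semantics. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}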
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	canary += tsc + (tsc << 32UL);
 	current->stack_canary = canary;
...
@@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
 		return 0;
 #endif
-	return native_read_tsc();
+	return rdtsc();
 }
 extern void tsc_init(void);
...
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
 	/* Verify whether apbt counter works */
 	t1 = dw_apb_clocksource_read(clocksource_apbt);
-	start = native_read_tsc();
+	start = rdtsc();
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 	/* APBT is the only always on clocksource, it has to work! */
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
-	t1 = native_read_tsc();
+	t1 = rdtsc();
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
-	t2 = native_read_tsc();
+	t2 = rdtsc();
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
...
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
 {
 	u64 tsc;
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;
 }
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	unsigned long pm = acpi_pm_read_early();
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc();
 	switch (lapic_cal_loops++) {
 	case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc();
 	if (disable_apic) {
 		disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
 	}
 	if (queued) {
 		if (cpu_has_tsc && cpu_khz) {
-			ntsc = native_read_tsc();
+			ntsc = rdtsc();
 			max_loops = (cpu_khz << 10) - (ntsc - tsc);
 		} else
 			max_loops--;
...
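lapic_next_deadline() is the TSC-deadline pattern in miniature: sample the counter, scale the clockevent delta to cycles, and program the absolute target into MSR_IA32_TSC_DEADLINE. A sketch of just the arithmetic (names are illustrative, not kernel API):

#include <stdint.h>

/* The clockevent layer hands over a delta in its own units; a divisor
 * converts it to TSC cycles, and the deadline MSR takes an absolute
 * TSC value. Unsigned wraparound is intentional. */
static uint64_t next_deadline(uint64_t tsc_now, unsigned long delta,
			      unsigned int tsc_divisor)
{
	return tsc_now + (uint64_t)delta * tsc_divisor;
}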
@@ -125,10 +125,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 		n = K6_BUG_LOOP;
 		f_vide = vide;
-		d = native_read_tsc();
+		d = rdtsc();
 		while (n--)
 			f_vide();
-		d2 = native_read_tsc();
+		d2 = rdtsc();
 		d = d2-d;
 		if (d > 20*K6_BUG_LOOP)
...
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
 {
 	memset(m, 0, sizeof(struct mce));
 	m->cpu = m->extcpu = smp_processor_id();
-	m->tsc = native_read_tsc();
+	m->tsc = rdtsc();
 	/* We hope get_seconds stays lockless */
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
-	cpu_tsc[smp_processor_id()] = native_read_tsc();
+	cpu_tsc[smp_processor_id()] = rdtsc();
 }
 static int mce_apei_read_done;
...
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
 	 */
 	if (!arch_get_random_long(&rand)) {
 		/* The constant is an arbitrary large prime */
-		rand = native_read_tsc();
+		rand = rdtsc();
 		rand *= 0xc345c6b72fd16123UL;
 	}
...
@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
 	/* Verify whether hpet counter works */
 	t1 = hpet_readl(HPET_COUNTER);
-	start = native_read_tsc();
+	start = rdtsc();
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 	if (t1 == hpet_readl(HPET_COUNTER)) {
...
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
 	u64 ret;
 	rdtsc_barrier();
-	ret = native_read_tsc();
+	ret = rdtsc();
 	return ret;
 }
...
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	data = cyc2ns_write_begin(cpu);
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	ns_now = cycles_2_ns(tsc_now);
 	/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
 	}
 	/* read the Time Stamp Counter: */
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	/* return the value in ns */
 	return cycles_2_ns(tsc_now);
...
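native_sched_clock() turns the sampled cycles into nanoseconds via the cyc2ns data that set_cyc2ns_scale() prepares. A sketch of that fixed-point conversion, assuming the conventional 10-bit fractional scale (constants and names are illustrative):

#include <stdint.h>

#define CYC2NS_SHIFT	10	/* assumed fractional precision */

/* ns = cycles * (1e6 / cpu_khz), done in fixed point:
 * scale = (1000000 << CYC2NS_SHIFT) / cpu_khz,
 * ns    = (cycles * scale) >> CYC2NS_SHIFT.
 * Assumes cpu_khz != 0. */
static uint64_t cycles_to_ns(uint64_t cycles, unsigned long cpu_khz)
{
	uint64_t scale = (1000000ULL << CYC2NS_SHIFT) / cpu_khz;

	return (cycles * scale) >> CYC2NS_SHIFT;
}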
@@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
-	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
@@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		local_irq_save(flags);
 		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
...
@@ -1080,7 +1080,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
 	u64 tsc;
-	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+	tsc = svm_scale_tsc(vcpu, rdtsc());
 	return target_tsc - tsc;
 }
@@ -3079,7 +3079,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	switch (msr_info->index) {
 	case MSR_IA32_TSC: {
 		msr_info->data = svm->vmcb->control.tsc_offset +
-			svm_scale_tsc(vcpu, native_read_tsc());
+			svm_scale_tsc(vcpu, rdtsc());
 		break;
 	}
...
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
 {
 	u64 host_tsc, tsc_offset;
-	host_tsc = native_read_tsc();
+	host_tsc = rdtsc();
 	tsc_offset = vmcs_read64(TSC_OFFSET);
 	return host_tsc + tsc_offset;
 }
@@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-	return target_tsc - native_read_tsc();
+	return target_tsc - rdtsc();
 }
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
...
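Both the SVM and VMX hunks rely on the same identity: the guest observes host_tsc + tsc_offset, so making the guest's counter read target_tsc right now means setting the offset to target_tsc - rdtsc(). A trivial sketch, with unsigned wraparound doing the right thing when target_tsc is behind the host:

#include <stdint.h>

/* guest view = host counter + offset (mod 2^64) */
static uint64_t guest_view(uint64_t host_tsc, uint64_t tsc_offset)
{
	return host_tsc + tsc_offset;
}

/* pick the offset that makes the guest read target_tsc at host_tsc */
static uint64_t compute_offset(uint64_t target_tsc, uint64_t host_tsc)
{
	return target_tsc - host_tsc;
}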
@@ -1455,7 +1455,7 @@ static cycle_t read_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)native_read_tsc();
+	ret = (cycle_t)rdtsc();
 	last = pvclock_gtod_data.clock.cycle_last;
@@ -1646,7 +1646,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		return 1;
 	}
 	if (!use_master_clock) {
-		host_tsc = native_read_tsc();
+		host_tsc = rdtsc();
 		kernel_ns = get_kernel_ns();
 	}
@@ -2810,7 +2810,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-				native_read_tsc() - vcpu->arch.last_host_tsc;
+				rdtsc() - vcpu->arch.last_host_tsc;
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
@@ -2838,7 +2838,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
-	vcpu->arch.last_host_tsc = native_read_tsc();
+	vcpu->arch.last_host_tsc = rdtsc();
 }
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -6623,7 +6623,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		hw_breakpoint_restore();
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-							   native_read_tsc());
+							   rdtsc());
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
@@ -7437,7 +7437,7 @@ int kvm_arch_hardware_enable(void)
 	if (ret != 0)
 		return ret;
-	local_tsc = native_read_tsc();
+	local_tsc = rdtsc();
 	stable = !check_tsc_unstable();
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
...
@@ -55,10 +55,10 @@ static void delay_tsc(unsigned long __loops)
 	preempt_disable();
 	cpu = smp_processor_id();
 	rdtsc_barrier();
-	bclock = native_read_tsc();
+	bclock = rdtsc();
 	for (;;) {
 		rdtsc_barrier();
-		now = native_read_tsc();
+		now = rdtsc();
 		if ((now - bclock) >= loops)
 			break;
@@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
 			loops -= (now - bclock);
 			cpu = smp_processor_id();
 			rdtsc_barrier();
-			bclock = native_read_tsc();
+			bclock = rdtsc();
 		}
 	}
 	preempt_enable();
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
 int read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		*timer_val = native_read_tsc();
+		*timer_val = rdtsc();
 		return 0;
 	}
 	return -1;
...
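delay_tsc() is the busy-wait counterpart of these reads: fence, sample, then spin until the requested cycle count has elapsed, re-fencing before each sample. A user-space sketch of the core loop, minus the kernel's preemption and CPU-migration handling (helper names are illustrative):

#include <stdint.h>

static inline uint64_t rdtsc_raw(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Spin for roughly `loops` TSC cycles; PAUSE is the rep_nop() analogue. */
static void delay_cycles(uint64_t loops)
{
	uint64_t start = rdtsc_raw(), now;

	do {
		asm volatile("pause");
		now = rdtsc_raw();
	} while (now - start < loops);
}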
@@ -765,7 +765,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	local_irq_restore(flags);
 	cpu->last_sample_time = cpu->sample.time;
...
@@ -149,9 +149,9 @@ static int old_gameport_measure_speed(struct gameport *gameport)
 	for(i = 0; i < 50; i++) {
 		local_irq_save(flags);
-		t1 = native_read_tsc();
+		t1 = rdtsc();
 		for (t = 0; t < 50; t++) gameport_read(gameport);
-		t2 = native_read_tsc();
+		t2 = rdtsc();
 		local_irq_restore(flags);
 		udelay(i * 10);
 		if (t2 - t1 < tx) tx = t2 - t1;
...
@@ -143,7 +143,7 @@ struct analog_port {
 #include <linux/i8253.h>
-#define GET_TIME(x)	do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0)
+#define GET_TIME(x)	do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
 #define DELTA(x,y)	(cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
 #define TIME_NAME	(cpu_has_tsc?"TSC":"PIT")
 static unsigned int get_time_pit(void)
@@ -160,7 +160,7 @@ static unsigned int get_time_pit(void)
 	return count;
 }
 #elif defined(__x86_64__)
-#define GET_TIME(x)	do { x = (unsigned int)native_read_tsc(); } while (0)
+#define GET_TIME(x)	do { x = (unsigned int)rdtsc(); } while (0)
 #define DELTA(x,y)	((y)-(x))
 #define TIME_NAME	"TSC"
 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
...
@@ -638,7 +638,7 @@ static int receive(struct net_device *dev, int cnt)
 #define GETTICK(x)						\
 ({								\
 	if (cpu_has_tsc)					\
-		x = (unsigned int)native_read_tsc();		\
+		x = (unsigned int)rdtsc();			\
 })
 #else /* __i386__ */
 #define GETTICK(x)
...
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 	/* check result for the last window */
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	/* calculate pkg cstate vs tsc ratio */
 	if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
 	u64 val64;
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	jiffies_now = jiffies;
 	/* calculate pkg cstate vs tsc ratio */
...
@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
 	printk(KERN_DEBUG "start--> \n");
 	then = read_pmtmr();
-	then_tsc = native_read_tsc();
+	then_tsc = rdtsc();
 	for (i=0;i<20;i++) {
 		mdelay(100);
 		now = read_pmtmr();
-		now_tsc = native_read_tsc();
+		now_tsc = rdtsc();
 		diff = (now - then) & 0xFFFFFF;
 		diff_tsc = now_tsc - then_tsc;
 		printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
...
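Since the ACPI PM timer ticks at a fixed 3.579545 MHz, the two deltas this test prints are enough to estimate the TSC frequency. A sketch of that derivation from one sample (illustrative, not part of the test):

#include <stdint.h>

#define PMTMR_HZ	3579545UL	/* fixed ACPI PM timer rate */

/* tsc_hz = diff_tsc / (diff_pmtmr / PMTMR_HZ); the multiply stays
 * within u64 for any realistic sample window. */
static uint64_t estimate_tsc_hz(uint64_t diff_tsc, uint32_t diff_pmtmr)
{
	return diff_tsc * PMTMR_HZ / diff_pmtmr;
}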