Commit 3913cc35 authored by Rik van Riel, committed by Ingo Molnar

x86/fpu: Remove struct fpu::counter

With the lazy FPU code gone, we no longer use the counter field
in struct fpu for anything. Get rid of it.
Signed-off-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: pbonzini@redhat.com
Link: http://lkml.kernel.org/r/1475627678-20788-6-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c592b573
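For background on what is being deleted: the @counter documentation removed below described a simple heuristic, counting consecutive context switches during which a task kept using the FPU, and switching from lazy to eager restore once that count passed a small threshold. The standalone sketch below only illustrates that idea; the struct, helper name, and threshold value are assumptions for demonstration, not the kernel's actual code.

#include <stdio.h>

/* Illustrative model of the retired heuristic, not actual kernel code. */
struct demo_fpu {
        unsigned char counter;        /* consecutive FPU-using switches; wraps after 256 */
        unsigned char fpstate_active; /* task has valid FPU state */
};

/*
 * Go eager (preload FPU registers at context-switch time) once the task
 * has used the FPU across more than a handful of consecutive switches;
 * otherwise stay lazy and restore on the first FPU trap.
 * The threshold of 5 is illustrative only.
 */
static int want_eager_restore(const struct demo_fpu *fpu)
{
        return fpu->fpstate_active && fpu->counter > 5;
}

int main(void)
{
        struct demo_fpu fpu = { .counter = 0, .fpstate_active = 1 };
        int sw;

        for (sw = 0; sw < 8; sw++) {
                fpu.counter++; /* task touched the FPU across this switch */
                printf("switch %d: counter=%d eager=%d\n",
                       sw, fpu.counter, want_eager_restore(&fpu));
        }
        return 0;
}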
@@ -581,16 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
                 /* Don't change CR0.TS if we just switch! */
                 if (fpu.preload) {
-                        new_fpu->counter++;
                         __fpregs_activate(new_fpu);
                         trace_x86_fpu_regs_activated(new_fpu);
                         prefetch(&new_fpu->state);
                 }
         } else {
-                old_fpu->counter = 0;
                 old_fpu->last_cpu = -1;
                 if (fpu.preload) {
-                        new_fpu->counter++;
                         if (fpu_want_lazy_restore(new_fpu, cpu))
                                 fpu.preload = 0;
                         else
...
@@ -321,17 +321,6 @@ struct fpu {
          */
         unsigned char fpregs_active;
 
-        /*
-         * @counter:
-         *
-         * This counter contains the number of consecutive context switches
-         * during which the FPU stays used. If this is over a threshold, the
-         * lazy FPU restore logic becomes eager, to save the trap overhead.
-         * This is an unsigned char so that after 256 iterations the counter
-         * wraps and the context switch behavior turns lazy again; this is to
-         * deal with bursty apps that only use the FPU for a short time:
-         */
-        unsigned char counter;
         /*
          * @state:
          *
...
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
                 __field(struct fpu *, fpu)
                 __field(bool, fpregs_active)
                 __field(bool, fpstate_active)
-                __field(int, counter)
                 __field(u64, xfeatures)
                 __field(u64, xcomp_bv)
         ),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
                 __entry->fpu = fpu;
                 __entry->fpregs_active = fpu->fpregs_active;
                 __entry->fpstate_active = fpu->fpstate_active;
-                __entry->counter = fpu->counter;
                 if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                         __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                         __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
                 }
         ),
-        TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+        TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
                 __entry->fpu,
                 __entry->fpregs_active,
                 __entry->fpstate_active,
-                __entry->counter,
                 __entry->xfeatures,
                 __entry->xcomp_bv
         )
...
@@ -222,7 +222,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-        dst_fpu->counter = 0;
         dst_fpu->fpregs_active = 0;
         dst_fpu->last_cpu = -1;
@@ -430,7 +429,6 @@ void fpu__restore(struct fpu *fpu)
         trace_x86_fpu_before_restore(fpu);
         fpregs_activate(fpu);
         copy_kernel_to_fpregs(&fpu->state);
-        fpu->counter++;
         trace_x86_fpu_after_restore(fpu);
         kernel_fpu_enable();
 }
@@ -448,7 +446,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
         preempt_disable();
-        fpu->counter = 0;
         if (fpu->fpregs_active) {
                 /* Ignore delayed exceptions from user space */
...