Commit de63ad4c authored by Longpeng(Mike), committed by Paolo Bonzini

KVM: X86: implement the logic for spinlock optimization

get_cpl() requires vcpu_load, so we must cache the result (whether the
vCPU was preempted while in kernel mode, i.e. CPL = 0) in kvm_vcpu_arch.
Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 199b5763
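
For reference (not part of this patch): the consumer of the new hint is the directed-yield loop in virt/kvm/kvm_main.c, whose yield_to_kernel_mode parameter was added by the parent commit 199b5763. The sketch below is abridged (the real loop also tracks last_boosted_vcpu, a retry budget and directed-yield eligibility), but it shows how the flag cached here feeds back into candidate selection:

void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, me->kvm) {
		if (vcpu == me)
			continue;
		/* only consider vCPUs that were preempted by the host */
		if (!READ_ONCE(vcpu->preempted))
			continue;
		/*
		 * When the spinning vCPU is known to be in kernel mode, skip
		 * candidates that were preempted in user mode; on x86 this
		 * reads the preempted_in_kernel flag cached in
		 * kvm_arch_vcpu_put() below, via kvm_arch_vcpu_in_kernel().
		 */
		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}

The Hyper-V and VMX call sites below can pass true unconditionally because those exits are only taken from CPL 0 (kvm_hv_hypercall() rejects hypercalls from CPL > 0, and see the comment added in handle_pause()); SVM has no such guarantee, so it reads the CPL explicitly.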
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -688,6 +688,9 @@ struct kvm_vcpu_arch {
 	/* GPA available (AMD only) */
 	bool gpa_available;
+
+	/* be preempted when it's in kernel-mode(cpl=0) */
+	bool preempted_in_kernel;
 };
 
 struct kvm_lpage_info {
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1274,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 
 	switch (code) {
 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu, false);
+		kvm_vcpu_on_spin(vcpu, true);
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3749,7 +3749,10 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-	kvm_vcpu_on_spin(&svm->vcpu, false);
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	bool in_kernel = (svm_get_cpl(vcpu) == 0);
+
+	kvm_vcpu_on_spin(vcpu, in_kernel);
 	return 1;
 }
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6781,7 +6781,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	kvm_vcpu_on_spin(vcpu, false);
+	/*
+	 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
+	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
+	 * never set PAUSE_EXITING and just set PLE if supported,
+	 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
+	 */
+	kvm_vcpu_on_spin(vcpu, true);
 	return kvm_skip_emulated_instruction(vcpu);
 }
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2873,6 +2873,10 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
+
+	if (vcpu->preempted)
+		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
+
 	/*
 	 * Disable page faults because we're in atomic context here.
 	 * kvm_write_guest_offset_cached() would call might_fault()
@@ -7985,6 +7989,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm_pmu_init(vcpu);
 
 	vcpu->arch.pending_external_vector = -1;
+	vcpu->arch.preempted_in_kernel = false;
 
 	kvm_hv_vcpu_init(vcpu);
@@ -8434,7 +8439,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return vcpu->arch.preempted_in_kernel;
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)