Commit 68fd66f1 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info

Currently, the APF mechanism relies on abusing #PF: the token is passed to
the guest through CR2. If we switch to using interrupts to deliver page-ready
notifications, we need a different way to pass the data. Extend the existing
'struct kvm_vcpu_pv_apf_data' with token information for page-ready
notifications.

While at it, rename 'reason' to 'flags'. This doesn't change the semantics,
as we only have reasons '1' and '2' and these can be treated as bit flags,
but KVM_PV_REASON_PAGE_READY is going away with interrupt-based delivery,
making the 'reason' name misleading.

The newly introduced apf_put_user_ready() temporarily puts both flags and
token information; this will be changed to put only the token once we switch
to interrupt-based notifications.
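
For reference, the single 64-bit value written by apf_put_user_ready() carries
the flags in its low 32 bits and the token in its high 32 bits. A minimal
standalone sketch of that packing (pack_apf_ready() and the sample token are
illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    #define KVM_PV_REASON_PAGE_READY 2

    /* Mirror the packing done by apf_put_user_ready(): token in the
     * high 32 bits, flags in the low 32 bits of one 64-bit write. */
    static uint64_t pack_apf_ready(uint32_t token)
    {
        return (uint64_t)token << 32 | KVM_PV_REASON_PAGE_READY;
    }

    int main(void)
    {
        uint64_t val = pack_apf_ready(0xdead);

        assert((uint32_t)val == KVM_PV_REASON_PAGE_READY); /* flags */
        assert((uint32_t)(val >> 32) == 0xdead);           /* token */
        return 0;
    }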
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200525144125.143875-3-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 84b09f33
@@ -770,7 +770,7 @@ struct kvm_vcpu_arch {
 		u64 msr_val;
 		u32 id;
 		bool send_user_only;
-		u32 host_apf_reason;
+		u32 host_apf_flags;
 		unsigned long nested_apf_token;
 		bool delivery_as_pf_vmexit;
 	} apf;
...
@@ -90,7 +90,7 @@ unsigned int kvm_arch_para_features(void);
 unsigned int kvm_arch_para_hints(void);
 void kvm_async_pf_task_wait_schedule(u32 token);
 void kvm_async_pf_task_wake(u32 token);
-u32 kvm_read_and_reset_pf_reason(void);
+u32 kvm_read_and_reset_apf_flags(void);
 void kvm_disable_steal_time(void);
 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
@@ -131,7 +131,7 @@ static inline unsigned int kvm_arch_para_hints(void)
 	return 0;
 }
 
-static inline u32 kvm_read_and_reset_pf_reason(void)
+static inline u32 kvm_read_and_reset_apf_flags(void)
 {
 	return 0;
 }
...
@@ -112,8 +112,9 @@ struct kvm_mmu_op_release_pt {
 #define KVM_PV_REASON_PAGE_NOT_PRESENT 1
 #define KVM_PV_REASON_PAGE_READY 2
 
 struct kvm_vcpu_pv_apf_data {
-	__u32 reason;
-	__u8 pad[60];
+	__u32 flags;
+	__u32 token; /* Used for page ready notification only */
+	__u8 pad[56];
 	__u32 enabled;
 };
...
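
The layout change leaves everything after the padding untouched: the new
4-byte 'token' field is carved out of 'pad' (60 -> 56 bytes), so the offset
of 'enabled' and the overall size of the structure stay the same. A standalone
check of those offsets, using the usual fixed-width stand-ins for __u32/__u8
(the asserts are illustrative, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    struct kvm_vcpu_pv_apf_data {
        uint32_t flags;    /* was 'reason' */
        uint32_t token;    /* new, page-ready notifications only */
        uint8_t  pad[56];  /* was pad[60] */
        uint32_t enabled;
    };

    /* 'token' slots into the old padding; nothing after it moves. */
    _Static_assert(offsetof(struct kvm_vcpu_pv_apf_data, token) == 4, "");
    _Static_assert(offsetof(struct kvm_vcpu_pv_apf_data, enabled) == 64, "");
    _Static_assert(sizeof(struct kvm_vcpu_pv_apf_data) == 68, "");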
@@ -218,23 +218,23 @@ void kvm_async_pf_task_wake(u32 token)
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
 
-u32 kvm_read_and_reset_pf_reason(void)
+u32 kvm_read_and_reset_apf_flags(void)
 {
-	u32 reason = 0;
+	u32 flags = 0;
 
 	if (__this_cpu_read(apf_reason.enabled)) {
-		reason = __this_cpu_read(apf_reason.reason);
-		__this_cpu_write(apf_reason.reason, 0);
+		flags = __this_cpu_read(apf_reason.flags);
+		__this_cpu_write(apf_reason.flags, 0);
 	}
 
-	return reason;
+	return flags;
 }
-EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
-NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
+NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);
 
 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
-	u32 reason = kvm_read_and_reset_pf_reason();
+	u32 reason = kvm_read_and_reset_apf_flags();
 
 	switch (reason) {
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
...
@@ -4164,7 +4164,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 #endif
 
 	vcpu->arch.l1tf_flush_l1d = true;
-	switch (vcpu->arch.apf.host_apf_reason) {
+	switch (vcpu->arch.apf.host_apf_flags) {
 	default:
 		trace_kvm_page_fault(fault_address, error_code);
@@ -4174,13 +4174,13 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 					insn_len);
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
-		vcpu->arch.apf.host_apf_reason = 0;
+		vcpu->arch.apf.host_apf_flags = 0;
 		local_irq_disable();
 		kvm_async_pf_task_wait_schedule(fault_address);
 		local_irq_enable();
 		break;
 	case KVM_PV_REASON_PAGE_READY:
-		vcpu->arch.apf.host_apf_reason = 0;
+		vcpu->arch.apf.host_apf_flags = 0;
 		local_irq_disable();
 		kvm_async_pf_task_wake(fault_address);
 		local_irq_enable();
...
@@ -921,7 +921,7 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
 			return NESTED_EXIT_HOST;
 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
-			 svm->vcpu.arch.apf.host_apf_reason)
+			 svm->vcpu.arch.apf.host_apf_flags)
 			/* Trap async PF even if not shadowing */
 			return NESTED_EXIT_HOST;
 		break;
...
@@ -3459,7 +3459,8 @@ static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	/* if exit due to PF check for async PF */
 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
-		svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+		svm->vcpu.arch.apf.host_apf_flags =
+			kvm_read_and_reset_apf_flags();
 
 	if (npt_enabled) {
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
...
@@ -5652,7 +5652,7 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 		if (is_nmi(intr_info))
 			return true;
 		else if (is_page_fault(intr_info))
-			return vcpu->arch.apf.host_apf_reason || !enable_ept;
+			return vcpu->arch.apf.host_apf_flags || !enable_ept;
 		else if (is_debug(intr_info) &&
 			 vcpu->guest_debug &
 			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
...
@@ -4765,7 +4765,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 	if (is_page_fault(intr_info)) {
 		cr2 = vmx_get_exit_qual(vcpu);
 		/* EPT won't cause page fault directly */
-		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_flags && enable_ept);
 		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
 	}
@@ -6360,7 +6360,7 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 	/* if exit due to PF check for async PF */
 	if (is_page_fault(intr_info)) {
-		vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+		vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
 	/* Handle machine checks before interrupts are enabled */
 	} else if (is_machine_check(intr_info)) {
 		kvm_machine_check();
...
@@ -2690,7 +2690,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 	}
 
 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
-					sizeof(u32)))
+					sizeof(u64)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -10420,8 +10420,17 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 	}
 }
 
-static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
 {
+	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
+				      sizeof(reason));
+}
+
+static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
+{
+	u64 val = (u64)token << 32 | KVM_PV_REASON_PAGE_READY;
+
 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
 				      sizeof(val));
 }
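
Note that apf_put_user_ready() now writes a u64, which is why the gfn-to-hva
cache set up in kvm_pv_enable_async_pf() above grows from sizeof(u32) to
sizeof(u64): the single write must cover both 'flags' (offset 0) and 'token'
(offset 4). A small sketch of what the guest observes on little-endian x86
(the buffer and values below are illustrative, not from the patch):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint8_t shared[68] = { 0 };  /* stand-in for kvm_vcpu_pv_apf_data */
        uint64_t val = (uint64_t)0xdead << 32 | 2; /* token << 32 | flags */
        uint32_t flags, token;

        memcpy(shared, &val, sizeof(val)); /* host: one 8-byte write */
        memcpy(&flags, shared + 0, 4);     /* guest: 'flags' at offset 0 */
        memcpy(&token, shared + 4, 4);     /* guest: 'token' at offset 4 */
        assert(flags == 2 && token == 0xdead);
        return 0;
    }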
@@ -10466,7 +10475,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
 	if (kvm_can_deliver_async_pf(vcpu) &&
-	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+	    !apf_put_user_notpresent(vcpu)) {
 		fault.vector = PF_VECTOR;
 		fault.error_code_valid = true;
 		fault.error_code = 0;
@@ -10499,7 +10508,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
 	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
-	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+	    !apf_put_user_ready(vcpu, work->arch.token)) {
 		fault.vector = PF_VECTOR;
 		fault.error_code_valid = true;
 		fault.error_code = 0;
...