Commit 69d413cf authored by David Woodhouse, committed by Paolo Bonzini

KVM: x86/xen: Use gfn_to_pfn_cache for vcpu_time_info

This switches the final pvclock user to the gfn_to_pfn_cache based
kvm_setup_guest_pvclock(), so the old kvm_setup_pvclock_page() can now be removed.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20220303154127.202856-7-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7caf9571
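For context, kvm_setup_guest_pvclock() (introduced earlier in this series) writes the pvclock area through the pfncache's kernel mapping instead of via kvm_write_guest_offset_cached(). Below is a simplified sketch of that write pattern; the function name is illustrative and the PVCLOCK_GUEST_STOPPED/flag handling is abbreviated, so this is not the verbatim upstream helper:

/*
 * Illustrative sketch of updating a pvclock area through a
 * gfn_to_pfn_cache: take the read lock, make sure the cached mapping
 * is still valid (refreshing it if not), then write the structure
 * directly via the kernel mapping using the odd/even version protocol.
 */
static void setup_guest_pvclock_sketch(struct kvm_vcpu *v,
				       struct gfn_to_pfn_cache *gpc,
				       unsigned int offset)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct pvclock_vcpu_time_info *guest_hv_clock;
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   offset + sizeof(*guest_hv_clock))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-map the page if it was invalidated or moved. */
		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 offset + sizeof(*guest_hv_clock)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	guest_hv_clock = (void *)(gpc->khva + offset);

	/* Version is odd while the contents are being updated... */
	guest_hv_clock->version = vcpu->hv_clock.version =
		(guest_hv_clock->version + 1) | 1;
	smp_wmb();

	memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
	smp_wmb();

	/* ...and even again once they are consistent. */
	guest_hv_clock->version = ++vcpu->hv_clock.version;
	read_unlock_irqrestore(&gpc->lock, flags);
}

Because the pfncache code already rejects a range that would cross a page boundary, the explicit "must fit within a single page" check in kvm_xen_vcpu_set_attr() is dropped in the hunk below.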
...@@ -606,9 +606,8 @@ struct kvm_vcpu_hv { ...@@ -606,9 +606,8 @@ struct kvm_vcpu_hv {
struct kvm_vcpu_xen { struct kvm_vcpu_xen {
u64 hypercall_rip; u64 hypercall_rip;
u32 current_runstate; u32 current_runstate;
bool vcpu_time_info_set;
struct gfn_to_pfn_cache vcpu_info_cache; struct gfn_to_pfn_cache vcpu_info_cache;
struct gfn_to_hva_cache vcpu_time_info_cache; struct gfn_to_pfn_cache vcpu_time_info_cache;
struct gfn_to_pfn_cache runstate_cache; struct gfn_to_pfn_cache runstate_cache;
u64 last_steal; u64 last_steal;
u64 runstate_entry_time; u64 runstate_entry_time;
......
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3011,65 +3011,6 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 }
 
-static void kvm_setup_pvclock_page(struct kvm_vcpu *v,
-				   struct gfn_to_hva_cache *cache,
-				   unsigned int offset)
-{
-	struct kvm_vcpu_arch *vcpu = &v->arch;
-	struct pvclock_vcpu_time_info guest_hv_clock;
-
-	if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache,
-		&guest_hv_clock, offset, sizeof(guest_hv_clock))))
-		return;
-
-	/* This VCPU is paused, but it's legal for a guest to read another
-	 * VCPU's kvmclock, so we really have to follow the specification where
-	 * it says that version is odd if data is being modified, and even after
-	 * it is consistent.
-	 *
-	 * Version field updates must be kept separate. This is because
-	 * kvm_write_guest_cached might use a "rep movs" instruction, and
-	 * writes within a string instruction are weakly ordered. So there
-	 * are three writes overall.
-	 *
-	 * As a small optimization, only write the version field in the first
-	 * and third write. The vcpu->pv_time cache is still valid, because the
-	 * version field is the first in the struct.
-	 */
-	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
-
-	if (guest_hv_clock.version & 1)
-		++guest_hv_clock.version; /* first time write, random junk */
-
-	vcpu->hv_clock.version = guest_hv_clock.version + 1;
-	kvm_write_guest_offset_cached(v->kvm, cache,
-				      &vcpu->hv_clock, offset,
-				      sizeof(vcpu->hv_clock.version));
-
-	smp_wmb();
-
-	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
-
-	if (vcpu->pvclock_set_guest_stopped_request) {
-		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
-		vcpu->pvclock_set_guest_stopped_request = false;
-	}
-
-	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
-
-	kvm_write_guest_offset_cached(v->kvm, cache,
-				      &vcpu->hv_clock, offset,
-				      sizeof(vcpu->hv_clock));
-
-	smp_wmb();
-
-	vcpu->hv_clock.version++;
-	kvm_write_guest_offset_cached(v->kvm, cache,
-				      &vcpu->hv_clock, offset,
-				      sizeof(vcpu->hv_clock.version));
-}
-
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
 	unsigned long flags, tgt_tsc_khz;
@@ -3161,8 +3102,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	if (vcpu->xen.vcpu_info_cache.active)
 		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
 					offsetof(struct compat_vcpu_info, time));
-	if (vcpu->xen.vcpu_time_info_set)
-		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
+	if (vcpu->xen.vcpu_time_info_cache.active)
+		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
 	if (!v->vcpu_idx)
 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 
 	return 0;
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -463,25 +463,18 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
 		if (data->u.gpa == GPA_INVALID) {
-			vcpu->arch.xen.vcpu_time_info_set = false;
+			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
+						     &vcpu->arch.xen.vcpu_time_info_cache);
 			r = 0;
 			break;
 		}
 
-		/* It must fit within a single page */
-		if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
-			r = -EINVAL;
-			break;
-		}
-
-		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
 					      &vcpu->arch.xen.vcpu_time_info_cache,
-					      data->u.gpa,
+					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
 					      sizeof(struct pvclock_vcpu_time_info));
-		if (!r) {
-			vcpu->arch.xen.vcpu_time_info_set = true;
+		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-		}
 		break;
 
 	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
@@ -622,7 +615,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		break;
 
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
-		if (vcpu->arch.xen.vcpu_time_info_set)
+		if (vcpu->arch.xen.vcpu_time_info_cache.active)
 			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
 		else
 			data->u.gpa = GPA_INVALID;
@@ -1066,4 +1059,6 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 				     &vcpu->arch.xen.runstate_cache);
 	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
 				     &vcpu->arch.xen.vcpu_info_cache);
+	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
+				     &vcpu->arch.xen.vcpu_time_info_cache);
 }