Commit cc753fbe authored by Hang Yuan, committed by Rodrigo Vivi

drm/i915/gvt: validate gfn before set shadow page entry

GVT may receive a partial write when the guest updates one PTE. Validate
the gfn so that an incomplete gfn is not translated; this avoids the
unnecessary error messages that translating an incomplete gfn would
otherwise produce. Also fix the bug that the whole PPGTT shadow page
update was aborted on any entry with an invalid gfn.

gfn validation relies on the hypervisor's help, so add an MPT module
function to provide it.
Signed-off-by: Hang Yuan <hang.yuan@intel.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 03fa9350
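The partial-write scenario the commit message describes can be sketched in a few lines of standalone C. Everything in this sketch is an illustrative assumption rather than GVT code: the slot layout, the simplified gfn field, and the helpers pte_get_gfn(), is_valid_gfn() and shadow_one_write() are made-up stand-ins for the real ops->get_pfn(), intel_gvt_hypervisor_is_valid_gfn()/kvm_is_visible_gfn() and the GGTT shadow path. It only shows why the entry seen after the first 32-bit half of a 64-bit PTE write can decode to a gfn that lies outside every guest memory slot, and why pointing the shadow entry at the scratch page, instead of reporting an error, is the reasonable reaction until the second half arrives.

```c
/* Illustrative sketch only (not GVT source): simulate a 64-bit GTT PTE
 * being updated by two 32-bit MMIO writes. The slot layout, gfn field
 * and is_valid_gfn() are made-up stand-ins for kvm_is_visible_gfn(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SHIFT	12
#define PTE_PRESENT	0x1ULL

/* assumed guest memory slots (gfn ranges), for illustration only */
static const struct { uint64_t start, end; } slots[] = {
	{ 0x000000, 0x0a0000 },		/* low memory		*/
	{ 0x200000, 0x201000 },		/* 16 MB region at 8 GB	*/
};

static uint64_t pte_get_gfn(uint64_t pte)
{
	return pte >> GTT_PAGE_SHIFT;	/* simplified: all high bits are the gfn */
}

/* stand-in for intel_gvt_hypervisor_is_valid_gfn()/kvm_is_visible_gfn() */
static bool is_valid_gfn(uint64_t gfn)
{
	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		if (gfn >= slots[i].start && gfn < slots[i].end)
			return true;
	return false;
}

static void shadow_one_write(uint64_t pte)
{
	uint64_t gfn = pte_get_gfn(pte);

	/* mirrors the patch's logic: invalid gfn -> scratch page, no error */
	if (!(pte & PTE_PRESENT) || !is_valid_gfn(gfn))
		printf("gfn %#9llx -> scratch page\n", (unsigned long long)gfn);
	else
		printf("gfn %#9llx -> translate and shadow\n", (unsigned long long)gfn);
}

int main(void)
{
	uint64_t old_pte = (0x200000ULL << GTT_PAGE_SHIFT) | PTE_PRESENT;
	uint64_t new_pte = (0x002345ULL << GTT_PAGE_SHIFT) | PTE_PRESENT;
	uint64_t pte = old_pte;

	/* first 32-bit write: the new low dword combined with the old high
	 * dword decodes to gfn 0x202345, outside both slots -> scratch page */
	pte = (pte & 0xffffffff00000000ULL) | (uint32_t)new_pte;
	shadow_one_write(pte);

	/* second 32-bit write completes the entry: gfn 0x2345 is valid */
	pte = (pte & 0x00000000ffffffffULL) | (new_pte & 0xffffffff00000000ULL);
	shadow_one_write(pte);

	return 0;
}
```

Built with a C99 compiler, the sketch prints a scratch-page decision for the half-written entry and a translate decision once the entry is complete, mirroring, under these assumptions, the behaviour the patch arranges for the GGTT write path below.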
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long i;
+	unsigned long gfn, i;
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
-			ret = gtt_entry_p2m(vgpu, &ge, &se);
-			if (ret)
-				goto fail;
+			gfn = ops->get_pfn(&ge);
+			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+				gtt_entry_p2m(vgpu, &ge, &se))
+				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
 			ppgtt_set_shadow_entry(spt, &se, i);
 		}
 		return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-	unsigned long gma;
+	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e, m;
 	int ret;
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			bytes);
 
 	if (ops->test_present(&e)) {
+		gfn = ops->get_pfn(&e);
+
+		/* one PTE update may be issued in multiple writes and the
+		 * first write may not construct a valid gfn
+		 */
+		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			goto out;
+		}
+
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
 			gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
+out:
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
 	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
...
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
 	int (*set_opregion)(void *vgpu);
 	int (*get_vfio_device)(void *vgpu);
 	void (*put_vfio_device)(void *vgpu);
+	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
...
@@ -1570,6 +1570,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 	return PFN_DOWN(__pa(addr));
 }
 
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+	struct kvmgt_guest_info *info;
+	struct kvm *kvm;
+
+	if (!handle_valid(handle))
+		return false;
+
+	info = (struct kvmgt_guest_info *)handle;
+	kvm = info->kvm;
+
+	return kvm_is_visible_gfn(kvm, gfn);
+}
+
 struct intel_gvt_mpt kvmgt_mpt = {
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
@@ -1585,6 +1600,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.set_opregion = kvmgt_set_opregion,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
+	.is_valid_gfn = kvmgt_is_valid_gfn,
 };
 
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
...
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
 	intel_gvt_host.mpt->put_vfio_device(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true on valid gfn, false on not.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	if (!intel_gvt_host.mpt->is_valid_gfn)
+		return true;
+
+	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
 #endif /* _GVT_MPT_H_ */