Commit 95925b95 authored by Tina Zhang, committed by Rodrigo Vivi

drm/i915/gvt: Use KVM r/w to access guest opregion

For KVMGT, the guest opregion, which is handled by VFIO, is actually a
piece of guest memory which won't be accessed by devices. So, its mfn
shouldn't be obtained through VFIO interface. This patch uses KVM r/w
interface to access the data in guest opregion.

Fixes the guest opregion access issue seen when the host boots with "intel_iommu=on".

v3:
- Remove mapped flag for KVM/VFIO usage, as it's useless for KVM.

v2:
- Set the gpa correctly when invoking KVM r/w operations. (Zhenyu)
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Cc: Yan Zhao <yan.y.zhao@intel.com>
Cc: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent d480b28a
...@@ -126,7 +126,6 @@ struct intel_vgpu_irq { ...@@ -126,7 +126,6 @@ struct intel_vgpu_irq {
struct intel_vgpu_opregion { struct intel_vgpu_opregion {
bool mapped; bool mapped;
void *va; void *va;
void *va_gopregion;
u32 gfn[INTEL_GVT_OPREGION_PAGES]; u32 gfn[INTEL_GVT_OPREGION_PAGES];
}; };
......
...@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa) ...@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{ {
int i, ret = 0; int i, ret = 0;
unsigned long pfn;
gvt_dbg_core("emulate opregion from kernel\n"); gvt_dbg_core("emulate opregion from kernel\n");
switch (intel_gvt_host.hypervisor_type) { switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_KVM: case INTEL_GVT_HYPERVISOR_KVM:
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT); for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT, vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
INTEL_GVT_OPREGION_SIZE,
MEMREMAP_WB);
if (!vgpu_opregion(vgpu)->va_gopregion) {
gvt_vgpu_err("failed to map guest opregion\n");
ret = -EFAULT;
}
vgpu_opregion(vgpu)->mapped = true;
break; break;
case INTEL_GVT_HYPERVISOR_XEN: case INTEL_GVT_HYPERVISOR_XEN:
/** /**
...@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) ...@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (vgpu_opregion(vgpu)->mapped) if (vgpu_opregion(vgpu)->mapped)
map_vgpu_opregion(vgpu, false); map_vgpu_opregion(vgpu, false);
} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
if (vgpu_opregion(vgpu)->mapped) { /* Guest opregion is released by VFIO */
memunmap(vgpu_opregion(vgpu)->va_gopregion);
vgpu_opregion(vgpu)->va_gopregion = NULL;
}
} }
free_pages((unsigned long)vgpu_opregion(vgpu)->va, free_pages((unsigned long)vgpu_opregion(vgpu)->va,
get_order(INTEL_GVT_OPREGION_SIZE)); get_order(INTEL_GVT_OPREGION_SIZE));
...@@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic) ...@@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic)
*/ */
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{ {
u32 *scic, *parm; u32 scic, parm;
u32 func, subfunc; u32 func, subfunc;
u64 scic_pa = 0, parm_pa = 0;
int ret;
switch (intel_gvt_host.hypervisor_type) { switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_XEN: case INTEL_GVT_HYPERVISOR_XEN:
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC; scic = *((u32 *)vgpu_opregion(vgpu)->va +
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; INTEL_GVT_OPREGION_SCIC);
parm = *((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_PARM);
break; break;
case INTEL_GVT_HYPERVISOR_KVM: case INTEL_GVT_HYPERVISOR_KVM:
scic = vgpu_opregion(vgpu)->va_gopregion + scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
INTEL_GVT_OPREGION_SCIC; INTEL_GVT_OPREGION_SCIC;
parm = vgpu_opregion(vgpu)->va_gopregion + parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
INTEL_GVT_OPREGION_PARM; INTEL_GVT_OPREGION_PARM;
ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
&scic, sizeof(scic));
if (ret) {
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
&parm, sizeof(parm));
if (ret) {
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
break; break;
default: default:
gvt_vgpu_err("not supported hypervisor\n"); gvt_vgpu_err("not supported hypervisor\n");
...@@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) ...@@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
return 0; return 0;
} }
func = GVT_OPREGION_FUNC(*scic); func = GVT_OPREGION_FUNC(scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic); subfunc = GVT_OPREGION_SUBFUNC(scic);
if (!querying_capabilities(*scic)) { if (!querying_capabilities(scic)) {
gvt_vgpu_err("requesting runtime service: func \"%s\"," gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n", " subfunc \"%s\"\n",
opregion_func_name(func), opregion_func_name(func),
...@@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) ...@@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
* emulate exit status of function call, '0' means * emulate exit status of function call, '0' means
* "failure, generic, unsupported or unknown cause" * "failure, generic, unsupported or unknown cause"
*/ */
*scic &= ~OPREGION_SCIC_EXIT_MASK; scic &= ~OPREGION_SCIC_EXIT_MASK;
return 0; goto out;
}
scic = 0;
parm = 0;
out:
switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_XEN:
*((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_SCIC) = scic;
*((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_PARM) = parm;
break;
case INTEL_GVT_HYPERVISOR_KVM:
ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
&scic, sizeof(scic));
if (ret) {
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
&parm, sizeof(parm));
if (ret) {
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
break;
default:
gvt_vgpu_err("not supported hypervisor\n");
return -EINVAL;
} }
*scic = 0;
*parm = 0;
return 0; return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment