Commit 36774806 authored by Christoph Hellwig, committed by Zhi Wang

drm/i915/gvt: remove enum hypervisor_type

The only supported hypervisor is KVM, so don't bother with dead code
enumerating hypervisors.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-3-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
parent a85749e1
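In outline: the MPT description loses its type tag, the name table keyed by it goes away, and every switch on intel_gvt_host.hypervisor_type collapses into its KVM-only branch. A minimal sketch of the interface change, condensed from the hunks below (the remaining callbacks are elided):

	/* before: each MPT module tagged itself with a hypervisor type */
	struct intel_gvt_mpt {
		enum hypervisor_type type;
		int (*host_init)(struct device *dev, void *gvt, const void *ops);
		/* ... */
	};

	/* after: only callbacks remain; KVM is the implicit host */
	struct intel_gvt_mpt {
		int (*host_init)(struct device *dev, void *gvt, const void *ops);
		/* ... */
	};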
@@ -41,11 +41,6 @@
 
 struct intel_gvt_host intel_gvt_host;
 
-static const char * const supported_hypervisors[] = {
-	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
-	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
-};
-
 static const struct intel_gvt_ops intel_gvt_ops = {
 	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
 	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -304,23 +299,13 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
 	if (!intel_gvt_host.initialized)
 		return -ENODEV;
 
-	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
-	    m->type != INTEL_GVT_HYPERVISOR_XEN)
-		return -EINVAL;
-
 	intel_gvt_host.mpt = m;
-	intel_gvt_host.hypervisor_type = m->type;
 	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
 
 	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
 					     &intel_gvt_ops);
-	if (ret < 0) {
-		gvt_err("Failed to init %s hypervisor module\n",
-			supported_hypervisors[intel_gvt_host.hypervisor_type]);
+	if (ret < 0)
 		return -ENODEV;
-	}
 
-	gvt_dbg_core("Running with hypervisor %s in host mode\n",
-		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
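For reference, the registration path as it reads after the hunk above; a condensed sketch, with the return type and the local declarations (which the hunk does not show) assumed to be int, void *gvt and int ret:

	int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
	{
		void *gvt;
		int ret;

		if (!intel_gvt_host.initialized)
			return -ENODEV;

		/* no type check: whatever registers is taken to be the KVM backend */
		intel_gvt_host.mpt = m;
		gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

		ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
						     &intel_gvt_ops);
		if (ret < 0)
			return -ENODEV;

		return 0;
	}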
@@ -60,7 +60,6 @@
 struct intel_gvt_host {
 	struct device *dev;
 	bool initialized;
-	int hypervisor_type;
 	const struct intel_gvt_mpt *mpt;
 };
@@ -37,17 +37,11 @@
 
 struct device;
 
-enum hypervisor_type {
-	INTEL_GVT_HYPERVISOR_XEN = 0,
-	INTEL_GVT_HYPERVISOR_KVM,
-};
-
 /*
  * Specific GVT-g MPT modules function collections. Currently GVT-g supports
  * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
  */
 struct intel_gvt_mpt {
-	enum hypervisor_type type;
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
 	void (*host_exit)(struct device *dev, void *gvt);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
@@ -2221,7 +2221,6 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 }
 
 static const struct intel_gvt_mpt kvmgt_mpt = {
-	.type = INTEL_GVT_HYPERVISOR_KVM,
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
 	.attach_vgpu = kvmgt_attach_vgpu,
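With the .type member gone, a hypervisor module registers by handing over nothing but its callback table. For illustration, the module init in kvmgt.c then amounts to roughly the following; the exact body is not part of this diff, so treat it as an assumption:

	static int __init kvmgt_init(void)
	{
		/* kvmgt_mpt carries only callbacks now; no type to declare */
		if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
			return -ENODEV;
		return 0;
	}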
@@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
 	return 0;
 }
 
-static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
-{
-	u64 mfn;
-	int i, ret;
-
-	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
-			+ i * PAGE_SIZE);
-		if (mfn == INTEL_GVT_INVALID_ADDR) {
-			gvt_vgpu_err("fail to get MFN from VA\n");
-			return -EINVAL;
-		}
-		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
-				vgpu_opregion(vgpu)->gfn[i],
-				mfn, 1, map);
-		if (ret) {
-			gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
-				ret);
-			return ret;
-		}
-	}
-
-	vgpu_opregion(vgpu)->mapped = map;
-
-	return 0;
-}
-
 /**
  * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
  *
@@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
 {
-	int i, ret = 0;
+	int i;
 
 	gvt_dbg_core("emulate opregion from kernel\n");
 
-	switch (intel_gvt_host.hypervisor_type) {
-	case INTEL_GVT_HYPERVISOR_KVM:
-		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
-			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-		break;
-	case INTEL_GVT_HYPERVISOR_XEN:
-		/**
-		 * Wins guest on Xengt will write this register twice: xen
-		 * hvmloader and windows graphic driver.
-		 */
-		if (vgpu_opregion(vgpu)->mapped)
-			map_vgpu_opregion(vgpu, false);
-
-		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
-			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-
-		ret = map_vgpu_opregion(vgpu, true);
-		break;
-	default:
-		ret = -EINVAL;
-		gvt_vgpu_err("not supported hypervisor\n");
-	}
-
-	return ret;
+	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+	return 0;
 }
 
 /**
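Condensed from the hunk above, the base write handler now does nothing hypervisor-specific; it only records which guest page frames back the opregion:

	int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
	{
		int i;

		gvt_dbg_core("emulate opregion from kernel\n");

		/* remember the guest pfns backing the opregion */
		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
		return 0;
	}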
@@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 	if (!vgpu_opregion(vgpu)->va)
 		return;
 
-	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
-		if (vgpu_opregion(vgpu)->mapped)
-			map_vgpu_opregion(vgpu, false);
-	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-		/* Guest opregion is released by VFIO */
-	}
+	/* Guest opregion is released by VFIO */
 
 	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
 		   get_order(INTEL_GVT_OPREGION_SIZE));
@@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 	u64 scic_pa = 0, parm_pa = 0;
 	int ret;
 
-	switch (intel_gvt_host.hypervisor_type) {
-	case INTEL_GVT_HYPERVISOR_XEN:
-		scic = *((u32 *)vgpu_opregion(vgpu)->va +
-					INTEL_GVT_OPREGION_SCIC);
-		parm = *((u32 *)vgpu_opregion(vgpu)->va +
-					INTEL_GVT_OPREGION_PARM);
-		break;
-	case INTEL_GVT_HYPERVISOR_KVM:
-		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
-					INTEL_GVT_OPREGION_SCIC;
-		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
-					INTEL_GVT_OPREGION_PARM;
-
-		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
-						    &scic, sizeof(scic));
-		if (ret) {
-			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
-				ret, scic_pa, sizeof(scic));
-			return ret;
-		}
-
-		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
-						    &parm, sizeof(parm));
-		if (ret) {
-			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
-				ret, scic_pa, sizeof(scic));
-			return ret;
-		}
-		break;
-	default:
-		gvt_vgpu_err("not supported hypervisor\n");
-		return -EINVAL;
-	}
+	scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+		INTEL_GVT_OPREGION_SCIC;
+	parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+		INTEL_GVT_OPREGION_PARM;
+
+	ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
+	if (ret) {
+		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+			ret, scic_pa, sizeof(scic));
+		return ret;
+	}
+
+	ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
+	if (ret) {
+		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+			ret, scic_pa, sizeof(scic));
+		return ret;
+	}
 
 	if (!(swsci & SWSCI_SCI_SELECT)) {
@@ -535,34 +465,20 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		parm = 0;
 
 out:
-	switch (intel_gvt_host.hypervisor_type) {
-	case INTEL_GVT_HYPERVISOR_XEN:
-		*((u32 *)vgpu_opregion(vgpu)->va +
-					INTEL_GVT_OPREGION_SCIC) = scic;
-		*((u32 *)vgpu_opregion(vgpu)->va +
-					INTEL_GVT_OPREGION_PARM) = parm;
-		break;
-	case INTEL_GVT_HYPERVISOR_KVM:
-		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
-					&scic, sizeof(scic));
-		if (ret) {
-			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
-				ret, scic_pa, sizeof(scic));
-			return ret;
-		}
-
-		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
-					&parm, sizeof(parm));
-		if (ret) {
-			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
-				ret, scic_pa, sizeof(scic));
-			return ret;
-		}
-		break;
-	default:
-		gvt_vgpu_err("not supported hypervisor\n");
-		return -EINVAL;
-	}
+	ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic,
+			sizeof(scic));
+	if (ret) {
+		gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+			ret, scic_pa, sizeof(scic));
+		return ret;
+	}
+
+	ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, &parm,
+			sizeof(parm));
+	if (ret) {
+		gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+			ret, scic_pa, sizeof(scic));
+		return ret;
+	}
 
 	return 0;
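With the Xen direct-mapping branches gone, both halves of the SWSCI emulation always go through the GPA-based accessors: the guest opregion is only ever reached via its guest page frames, never through a host virtual mapping. The surviving pattern, condensed from the two hunks above (the PARM read and both write-backs follow the same shape):

	/* SCIC lives at a fixed offset in the first opregion page */
	scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
		INTEL_GVT_OPREGION_SCIC;
	ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
	if (ret) {
		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
			ret, scic_pa, sizeof(scic));
		return ret;
	}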