Commit 62980cac authored by Christoph Hellwig, committed by Zhi Wang

drm/i915/gvt: merge struct kvmgt_vdev into struct intel_vgpu

Move towards having only a single structure for the per-VGPU state.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-11-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
parent 3cbac24c
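
For illustration only (not part of this patch): a minimal, standalone C sketch of the pattern the commit applies. Per-vGPU VFIO/KVMGT state that previously lived in a separately allocated struct kvmgt_vdev, reached through the opaque vgpu->vdev pointer, is embedded directly in struct intel_vgpu. The types and helpers below are hypothetical stand-ins, not the kernel code.

/*
 * Standalone sketch (hypothetical types, not the i915/gvt code) of the
 * pattern: merge a separately allocated backend-state struct into the
 * main per-object struct, dropping one allocation and one indirection.
 */
#include <stdio.h>

/* Before: backend state hangs off an opaque pointer. */
struct vdev_state { int msi_vector; };
struct vgpu_before { void *vdev; };

static int msi_vector_before(const struct vgpu_before *v)
{
	return ((const struct vdev_state *)v->vdev)->msi_vector;
}

/* After: the same field is embedded in the per-vGPU struct itself. */
struct vgpu_after { int msi_vector; };

static int msi_vector_after(const struct vgpu_after *v)
{
	return v->msi_vector;
}

int main(void)
{
	struct vdev_state state = { .msi_vector = 5 };
	struct vgpu_before old_style = { .vdev = &state };
	struct vgpu_after new_style = { .msi_vector = 5 };

	printf("before: %d, after: %d\n",
	       msi_vector_before(&old_style), msi_vector_after(&new_style));
	return 0;
}

In the actual patch, the fields shown in the first hunk below (mdev, msi_trigger, gfn_cache, and so on) move into struct intel_vgpu in the same way, and the intel_vgpu_vdev() accessor is dropped.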
@@ -207,21 +207,36 @@ struct intel_vgpu {
 	struct dentry *debugfs;
-	/* Hypervisor-specific device state. */
-	void *vdev;
 	struct list_head dmabuf_obj_list_head;
 	struct mutex dmabuf_lock;
 	struct idr object_idr;
 	struct intel_vgpu_vblank_timer vblank_timer;
 	u32 scan_nonprivbb;
-};
-static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
-{
-	return vgpu->vdev;
-}
+	struct mdev_device *mdev;
+	struct vfio_region *region;
+	int num_regions;
+	struct eventfd_ctx *intx_trigger;
+	struct eventfd_ctx *msi_trigger;
+	/*
+	 * Two caches are used to avoid mapping duplicated pages (eg.
+	 * scratch pages). This help to reduce dma setup overhead.
+	 */
+	struct rb_root gfn_cache;
+	struct rb_root dma_addr_cache;
+	unsigned long nr_cache_entries;
+	struct mutex cache_lock;
+	struct notifier_block iommu_notifier;
+	struct notifier_block group_notifier;
+	struct kvm *kvm;
+	struct work_struct release_work;
+	atomic_t released;
+	struct vfio_device *vfio_device;
+	struct vfio_group *vfio_group;
+};
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
@@ -44,7 +44,6 @@ struct device;
 struct intel_gvt_mpt {
 	int (*host_init)(struct device *dev, void *gvt);
 	void (*host_exit)(struct device *dev, void *gvt);
-	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
 	void (*detach_vgpu)(void *vgpu);
 	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
 	int (*enable_page_track)(unsigned long handle, u64 gfn);
This diff is collapsed.
@@ -71,22 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
 	intel_gvt_host.mpt->host_exit(dev, gvt);
 }
-/**
- * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
- * related stuffs inside hypervisor.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
-{
-	/* optional to provide */
-	if (!intel_gvt_host.mpt->attach_vgpu)
-		return 0;
-	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
-}
 /**
  * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
  * related stuffs inside hypervisor.
@@ -405,13 +405,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	populate_pvinfo_page(vgpu);
-	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
-	if (ret)
-		goto out_clean_vgpu_resource;
 	ret = intel_vgpu_init_gtt(vgpu);
 	if (ret)
-		goto out_detach_hypervisor_vgpu;
+		goto out_clean_vgpu_resource;
 	ret = intel_vgpu_init_opregion(vgpu);
 	if (ret)
@@ -454,8 +450,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	intel_vgpu_clean_opregion(vgpu);
 out_clean_gtt:
 	intel_vgpu_clean_gtt(vgpu);
-out_detach_hypervisor_vgpu:
-	intel_gvt_hypervisor_detach_vgpu(vgpu);
 out_clean_vgpu_resource:
 	intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio: