Commit 8398eee8 authored by Christoph Hellwig, committed by Zhi Wang

drm/i915/gvt: devirtualize ->dma_{,un}map_guest_page

Just call the functions directly.  Also remove a pointless wrapper.
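
In sketch form (illustrative stubs only, not the kernel's code): an indirect
call through the intel_gvt_mpt ops table becomes a direct call into the KVMGT
implementation, which the diff below shows is the only remaining backend.

	/* Stub types standing in for the kernel's, to show the shape of
	 * the change. */
	typedef unsigned long long dma_addr_t;
	struct intel_vgpu;

	/* Before: every map/unmap dispatched through function pointers. */
	struct intel_gvt_mpt {
		int (*dma_map_guest_page)(struct intel_vgpu *vgpu,
					  unsigned long gfn, unsigned long size,
					  dma_addr_t *dma_addr);
		void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu,
					     dma_addr_t dma_addr);
	};

	/* After: the former kvmgt_dma_{,un}map_guest_page are renamed,
	 * made non-static, and called directly; the two function
	 * pointers above go away. */
	int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu,
					 unsigned long gfn, unsigned long size,
					 dma_addr_t *dma_addr);
	void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
					    dma_addr_t dma_addr);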
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-22-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
parent 4c2baaaf
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -54,12 +54,6 @@ static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
 	return ret;
 }
 
-static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
-		dma_addr_t dma_addr)
-{
-	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
-}
-
 static int vgpu_gem_get_pages(
 		struct drm_i915_gem_object *obj)
 {
@@ -114,7 +108,7 @@ static int vgpu_gem_get_pages(
 		for_each_sg(st->sgl, sg, i, j) {
 			dma_addr = sg_dma_address(sg);
 			if (dma_addr)
-				vgpu_unpin_dma_address(vgpu, dma_addr);
+				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
 		}
 		sg_free_table(st);
 		kfree(st);
@@ -136,7 +130,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
 	int i;
 
 	for_each_sg(pages->sgl, sg, fb_info->size, i)
-		vgpu_unpin_dma_address(vgpu,
+		intel_gvt_dma_unmap_guest_page(vgpu,
 				       sg_dma_address(sg));
 }
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1013,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
 	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
 		return;
 
-	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
@@ -1212,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 		return PTR_ERR(sub_spt);
 
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
+						   PAGE_SIZE, &dma_addr);
 		if (ret) {
 			ppgtt_invalidate_spt(spt);
 			return ret;
@@ -1258,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_64k_splited(&entry);
 
 	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-				start_gfn + i, PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
+						   PAGE_SIZE, &dma_addr);
 		if (ret)
 			return ret;
@@ -1313,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	}
 
 	/* direct shadow */
-	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
-						      &dma_addr);
+	ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
 	if (ret)
 		return -ENXIO;
@@ -2245,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
 	pfn = pte_ops->get_pfn(entry);
 	if (pfn != vgpu->gvt->gtt.scratch_mfn)
-		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
-						pfn << PAGE_SHIFT);
+		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
@@ -2337,8 +2335,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			goto out;
 		}
 
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
-				PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
+						   &dma_addr);
 		if (ret) {
 			gvt_vgpu_err("fail to populate guest ggtt entry\n");
 			/* guest driver may read/write the entry when partial
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -767,6 +767,10 @@ void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
 
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		unsigned long size, dma_addr_t *dma_addr);
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+		dma_addr_t dma_addr);
+
 #include "trace.h"
 #include "mpt.h"
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -46,11 +46,6 @@ struct intel_gvt_mpt {
 	int (*host_init)(struct device *dev, void *gvt);
 	void (*host_exit)(struct device *dev, void *gvt);
 
-	int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn,
-			unsigned long size, dma_addr_t *dma_addr);
-	void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu,
-			dma_addr_t dma_addr);
-
 	int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
 };
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1874,7 +1874,7 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
 	vgpu->region = NULL;
 }
 
-static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size, dma_addr_t *dma_addr)
 {
 	struct gvt_dma *entry;
@@ -1950,7 +1950,7 @@ static void __gvt_dma_release(struct kref *ref)
 	__gvt_cache_remove_entry(entry->vgpu, entry);
 }
 
-static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 		dma_addr_t dma_addr)
 {
 	struct gvt_dma *entry;
@@ -1968,8 +1968,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 static const struct intel_gvt_mpt kvmgt_mpt = {
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
-	.dma_map_guest_page = kvmgt_dma_map_guest_page,
-	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
 	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
 };
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -71,35 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
 	intel_gvt_host.mpt->host_exit(dev, gvt);
 }
 
-/**
- * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
- * @vgpu: a vGPU
- * @gfn: guest pfn
- * @size: page size
- * @dma_addr: retrieve allocated dma addr
- *
- * Returns:
- * 0 on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_dma_map_guest_page(
-	struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
-	dma_addr_t *dma_addr)
-{
-	return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size,
-						      dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
- * @vgpu: a vGPU
- * @dma_addr: the mapped dma addr
- */
-static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
-	struct intel_vgpu *vgpu, dma_addr_t dma_addr)
-{
-	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr);
-}
-
 /**
  * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
  * @vgpu: a vGPU