Commit c4f61203 authored by Cai Huoqing, committed by Daniel Vetter

drm/i915: Use direction definition DMA_BIDIRECTIONAL instead of PCI_DMA_BIDIRECTIONAL

Replace the direction definition PCI_DMA_BIDIRECTIONAL
with DMA_BIDIRECTIONAL; this improves readability and
avoids possible inconsistency.
Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210925124613.144-1-caihuoqing@baidu.com
parent 239f3c2e
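
For context, the two names are interchangeable: the legacy PCI DMA direction constants were defined to match the values of enum dma_data_direction, so this patch is a pure rename with no functional change. A minimal sketch of the relevant definitions (paraphrased from include/linux/dma-direction.h and the legacy PCI DMA declarations; exact layout varies by kernel version):

/* include/linux/dma-direction.h: the generic DMA API directions */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,	/* device may both read and write the buffer */
	DMA_TO_DEVICE = 1,	/* data flows CPU -> device */
	DMA_FROM_DEVICE = 2,	/* data flows device -> CPU */
	DMA_NONE = 3,
};

/* The legacy PCI DMA constants mirror those numeric values, which is
 * why substituting DMA_BIDIRECTIONAL for PCI_DMA_BIDIRECTIONAL is safe. */
#define PCI_DMA_BIDIRECTIONAL	0
#define PCI_DMA_TODEVICE	1
#define PCI_DMA_FROMDEVICE	2
#define PCI_DMA_NONE		3
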
@@ -32,7 +32,7 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
 	mem->remap_addr = dma_map_resource(i915->drm.dev,
 					   mem->region.start,
 					   mem->fake_mappable.size,
-					   PCI_DMA_BIDIRECTIONAL,
+					   DMA_BIDIRECTIONAL,
 					   DMA_ATTR_FORCE_CONTIGUOUS);
 	if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
 		drm_mm_remove_node(&mem->fake_mappable);
@@ -62,7 +62,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
 	dma_unmap_resource(mem->i915->drm.dev,
 			   mem->remap_addr,
 			   mem->fake_mappable.size,
-			   PCI_DMA_BIDIRECTIONAL,
+			   DMA_BIDIRECTIONAL,
 			   DMA_ATTR_FORCE_CONTIGUOUS);
 }
...
@@ -745,7 +745,7 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
 
 	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
-		       PCI_DMA_BIDIRECTIONAL);
+		       DMA_BIDIRECTIONAL);
 
 	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
@@ -849,7 +849,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
 	 */
 	spt->shadow_page.type = type;
 	daddr = dma_map_page(kdev, spt->shadow_page.page,
-			     0, 4096, PCI_DMA_BIDIRECTIONAL);
+			     0, 4096, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(kdev, daddr)) {
 		gvt_vgpu_err("fail to map dma addr\n");
 		ret = -EINVAL;
@@ -865,7 +865,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
 	return spt;
 
 err_unmap_dma:
-	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 err_free_spt:
 	free_spt(spt);
 	return ERR_PTR(ret);
@@ -2409,8 +2409,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 
-	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
-			     4096, PCI_DMA_BIDIRECTIONAL);
+	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
 		gvt_vgpu_err("fail to dmamap scratch_pt\n");
 		__free_page(virt_to_page(scratch_pt));
@@ -2461,7 +2460,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
 			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
 					I915_GTT_PAGE_SHIFT);
-			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2741,7 +2740,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 	}
 
 	daddr = dma_map_page(dev, virt_to_page(page), 0,
-			4096, PCI_DMA_BIDIRECTIONAL);
+			4096, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
 		gvt_err("fail to dmamap scratch ggtt page\n");
 		__free_page(virt_to_page(page));
@@ -2755,7 +2754,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 	ret = setup_spt_oos(gvt);
 	if (ret) {
 		gvt_err("fail to initialize SPT oos\n");
-		dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
 		__free_page(gvt->gtt.scratch_page);
 		return ret;
 	}
@@ -2779,7 +2778,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
 					I915_GTT_PAGE_SHIFT);
 
-	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
 	__free_page(gvt->gtt.scratch_page);
...
@@ -328,7 +328,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		return ret;
 
 	/* Setup DMA mapping. */
-	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+	*dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, *dma_addr)) {
 		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
 			     page_to_pfn(page), ret);
@@ -344,7 +344,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 {
 	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
 
-	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL);
 	gvt_unpin_guest_page(vgpu, gfn, size);
 }
...
@@ -30,7 +30,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 	do {
 		if (dma_map_sg_attrs(obj->base.dev->dev,
 				     pages->sgl, pages->nents,
-				     PCI_DMA_BIDIRECTIONAL,
+				     DMA_BIDIRECTIONAL,
 				     DMA_ATTR_SKIP_CPU_SYNC |
 				     DMA_ATTR_NO_KERNEL_MAPPING |
 				     DMA_ATTR_NO_WARN))
@@ -64,7 +64,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	usleep_range(100, 250);
 
 	dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
-		     PCI_DMA_BIDIRECTIONAL);
+		     DMA_BIDIRECTIONAL);
 }
 
 /**
...