Commit a2843b3b authored by Chris Wilson's avatar Chris Wilson

drm/i915/gem: Limit lmem scatterlist elements to UINT_MAX

Adhere to the i915_sg_max_segment() limit on the lengths of individual
scatterlist elements, and in doing so split up very large chunks of lmem
into manageable pieces for the dma-mapping backend.
Reported-by: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Suggested-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201202173444.14903-1-chris@chris-wilson.co.uk
parent 840291a7
...@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, ...@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
int int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{ {
const u64 max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = obj->mm.region; struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks; struct list_head *blocks = &obj->mm.blocks;
resource_size_t size = obj->base.size; resource_size_t size = obj->base.size;
...@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) ...@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
if (!st) if (!st)
return -ENOMEM; return -ENOMEM;
if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) { if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
kfree(st); kfree(st);
return -ENOMEM; return -ENOMEM;
} }
...@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) ...@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
i915_buddy_block_size(&mem->mm, block)); i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block); offset = i915_buddy_block_offset(block);
GEM_BUG_ON(overflows_type(block_size, sg->length)); while (block_size) {
u64 len;
if (offset != prev_end || if (offset != prev_end || sg->length >= max_segment) {
add_overflows_t(typeof(sg->length), sg->length, block_size)) { if (st->nents) {
if (st->nents) { sg_page_sizes |= sg->length;
sg_page_sizes |= sg->length; sg = __sg_next(sg);
sg = __sg_next(sg); }
sg_dma_address(sg) = mem->region.start + offset;
sg_dma_len(sg) = 0;
sg->length = 0;
st->nents++;
} }
sg_dma_address(sg) = mem->region.start + offset; len = min(block_size, max_segment - sg->length);
sg_dma_len(sg) = block_size; sg->length += len;
sg_dma_len(sg) += len;
sg->length = block_size; offset += len;
block_size -= len;
st->nents++; prev_end = offset;
} else {
sg->length += block_size;
sg_dma_len(sg) += block_size;
} }
prev_end = offset + block_size;
} }
sg_page_sizes |= sg->length; sg_page_sizes |= sg->length;
......
...@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj) ...@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj); i915_gem_object_put(obj);
} }
/*
 * Check whether the object's backing pages form one unbroken DMA range:
 * every scatterlist segment must begin exactly where the previous one ended.
 */
static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg = obj->mm.pages->sgl;
	dma_addr_t next = -1;

	while (sg) {
		/* -1 sentinel: no expectation yet for the first segment */
		if (next != -1 && sg_dma_address(sg) != next)
			return false;

		next = sg_dma_address(sg) + sg_dma_len(sg);
		sg = sg_next(sg);
	}

	return true;
}
static int igt_mock_contiguous(void *arg) static int igt_mock_contiguous(void *arg)
{ {
struct intel_memory_region *mem = arg; struct intel_memory_region *mem = arg;
...@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg) ...@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
if (obj->mm.pages->nents != 1) { if (!is_contiguous(obj)) {
pr_err("%s min object spans multiple sg entries\n", __func__); pr_err("%s min object spans disjoint sg entries\n", __func__);
err = -EINVAL; err = -EINVAL;
goto err_close_objects; goto err_close_objects;
} }
...@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg) ...@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
if (obj->mm.pages->nents != 1) { if (!is_contiguous(obj)) {
pr_err("%s max object spans multiple sg entries\n", __func__); pr_err("%s max object spans disjoint sg entries\n", __func__);
err = -EINVAL; err = -EINVAL;
goto err_close_objects; goto err_close_objects;
} }
...@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg) ...@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg)
goto err_close_objects; goto err_close_objects;
} }
if (obj->mm.pages->nents != 1) { if (!is_contiguous(obj)) {
pr_err("%s object spans multiple sg entries\n", __func__); pr_err("%s object spans disjoint sg entries\n", __func__);
err = -EINVAL; err = -EINVAL;
goto err_close_objects; goto err_close_objects;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment