Commit 90797e6d authored by Imre Deak, committed by Daniel Vetter

drm/i915: create compact dma scatter lists for gem objects

So far we created a sparse dma scatter list for gem objects, where each
scatter list entry represented only a single page. In the future we'll
have to handle compact scatter lists too where each entry can consist of
multiple pages, for example for objects imported through PRIME.

The previous patches have already fixed up all other places where the
i915 driver _walked_ these lists. Here we have the corresponding fix to
_create_ compact lists. It's not a performance or memory footprint
improvement, but it helps to better exercise the new logic.

Reference: http://www.spinics.net/lists/dri-devel/msg33917.html
Signed-off-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 67d5a50c
...@@ -1625,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) ...@@ -1625,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
static void static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{ {
int page_count = obj->base.size / PAGE_SIZE; struct sg_page_iter sg_iter;
struct scatterlist *sg; int ret;
int ret, i;
BUG_ON(obj->madv == __I915_MADV_PURGED); BUG_ON(obj->madv == __I915_MADV_PURGED);
...@@ -1647,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -1647,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED) if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0; obj->dirty = 0;
for_each_sg(obj->pages->sgl, sg, page_count, i) { for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page(sg); struct page *page = sg_iter.page;
if (obj->dirty) if (obj->dirty)
set_page_dirty(page); set_page_dirty(page);
...@@ -1749,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -1749,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct address_space *mapping; struct address_space *mapping;
struct sg_table *st; struct sg_table *st;
struct scatterlist *sg; struct scatterlist *sg;
struct sg_page_iter sg_iter;
struct page *page; struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
gfp_t gfp; gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it /* Assert that the object is not currently in any GPU domain. As it
...@@ -1779,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -1779,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp = mapping_gfp_mask(mapping); gfp = mapping_gfp_mask(mapping);
gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
gfp &= ~(__GFP_IO | __GFP_WAIT); gfp &= ~(__GFP_IO | __GFP_WAIT);
for_each_sg(st->sgl, sg, page_count, i) { sg = st->sgl;
st->nents = 0;
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp); page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) { if (IS_ERR(page)) {
i915_gem_purge(dev_priv, page_count); i915_gem_purge(dev_priv, page_count);
...@@ -1802,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -1802,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp &= ~(__GFP_IO | __GFP_WAIT); gfp &= ~(__GFP_IO | __GFP_WAIT);
} }
sg_set_page(sg, page, PAGE_SIZE, 0); if (!i || page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
} else {
sg->length += PAGE_SIZE;
}
last_pfn = page_to_pfn(page);
} }
sg_mark_end(sg);
obj->pages = st; obj->pages = st;
if (i915_gem_object_needs_bit17_swizzle(obj)) if (i915_gem_object_needs_bit17_swizzle(obj))
...@@ -1813,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -1813,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
return 0; return 0;
err_pages: err_pages:
for_each_sg(st->sgl, sg, i, page_count) sg_mark_end(sg);
page_cache_release(sg_page(sg)); for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
page_cache_release(sg_iter.page);
sg_free_table(st); sg_free_table(st);
kfree(st); kfree(st);
return PTR_ERR(page); return PTR_ERR(page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment