Commit 67d5a50c authored by Imre Deak, committed by Daniel Vetter

drm/i915: handle walking compact dma scatter lists

So far the assumption was that each dma scatter list entry contains only
a single page. This might not hold in the future, when we'll introduce
compact scatter lists, so prepare for this everywhere in the i915 code
where we walk such a list.

We'll fix the place _creating_ these lists separately in the next patch
to help the reviewing/bisectability.

Reference: http://www.spinics.net/lists/dri-devel/msg33917.html
Signed-off-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 5bd4687e
...@@ -1528,17 +1528,12 @@ void i915_gem_lastclose(struct drm_device *dev); ...@@ -1528,17 +1528,12 @@ void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{ {
struct scatterlist *sg = obj->pages->sgl; struct sg_page_iter sg_iter;
int nents = obj->pages->nents;
while (nents > SG_MAX_SINGLE_ALLOC) { for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
if (n < SG_MAX_SINGLE_ALLOC - 1) return sg_iter.page;
break;
return NULL;
sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
n -= SG_MAX_SINGLE_ALLOC - 1;
nents -= SG_MAX_SINGLE_ALLOC - 1;
}
return sg_page(sg+n);
} }
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{ {
......
...@@ -411,8 +411,7 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -411,8 +411,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
int obj_do_bit17_swizzling, page_do_bit17_swizzling; int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int prefaulted = 0; int prefaulted = 0;
int needs_clflush = 0; int needs_clflush = 0;
struct scatterlist *sg; struct sg_page_iter sg_iter;
int i;
user_data = to_user_ptr(args->data_ptr); user_data = to_user_ptr(args->data_ptr);
remain = args->size; remain = args->size;
...@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
offset = args->offset; offset = args->offset;
for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
struct page *page; offset >> PAGE_SHIFT) {
struct page *page = sg_iter.page;
if (i < offset >> PAGE_SHIFT)
continue;
if (remain <= 0) if (remain <= 0)
break; break;
...@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE) if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset; page_length = PAGE_SIZE - shmem_page_offset;
page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling && page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0; (page_to_phys(page) & (1 << 17)) != 0;
...@@ -732,8 +728,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -732,8 +728,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0; int hit_slowpath = 0;
int needs_clflush_after = 0; int needs_clflush_after = 0;
int needs_clflush_before = 0; int needs_clflush_before = 0;
int i; struct sg_page_iter sg_iter;
struct scatterlist *sg;
user_data = to_user_ptr(args->data_ptr); user_data = to_user_ptr(args->data_ptr);
remain = args->size; remain = args->size;
...@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
offset = args->offset; offset = args->offset;
obj->dirty = 1; obj->dirty = 1;
for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
struct page *page; offset >> PAGE_SHIFT) {
struct page *page = sg_iter.page;
int partial_cacheline_write; int partial_cacheline_write;
if (i < offset >> PAGE_SHIFT)
continue;
if (remain <= 0) if (remain <= 0)
break; break;
...@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length) ((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1)); & (boot_cpu_data.x86_clflush_size - 1));
page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling && page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0; (page_to_phys(page) & (1 << 17)) != 0;
......
...@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme ...@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
src = obj->pages->sgl; src = obj->pages->sgl;
dst = st->sgl; dst = st->sgl;
for (i = 0; i < obj->pages->nents; i++) { for (i = 0; i < obj->pages->nents; i++) {
sg_set_page(dst, sg_page(src), PAGE_SIZE, 0); sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst); dst = sg_next(dst);
src = sg_next(src); src = sg_next(src);
} }
...@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) ...@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{ {
struct drm_i915_gem_object *obj = dma_buf->priv; struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct scatterlist *sg; struct sg_page_iter sg_iter;
struct page **pages; struct page **pages;
int ret, i; int ret, i;
...@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) ...@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = -ENOMEM; ret = -ENOMEM;
pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *)); pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL) if (pages == NULL)
goto error; goto error;
for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) i = 0;
pages[i] = sg_page(sg); for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
pages[i++] = sg_iter.page;
obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL); obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages); drm_free_large(pages);
if (!obj->dma_buf_vmapping) if (!obj->dma_buf_vmapping)
......
...@@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page) ...@@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
void void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct scatterlist *sg; struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT;
int i; int i;
if (obj->bit_17 == NULL) if (obj->bit_17 == NULL)
return; return;
for_each_sg(obj->pages->sgl, sg, page_count, i) { i = 0;
struct page *page = sg_page(sg); for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_iter.page;
char new_bit_17 = page_to_phys(page) >> 17; char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) { (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page); i915_gem_swizzle_page(page);
set_page_dirty(page); set_page_dirty(page);
} }
i++;
} }
} }
void void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct scatterlist *sg; struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT; int page_count = obj->base.size >> PAGE_SHIFT;
int i; int i;
...@@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
} }
} }
for_each_sg(obj->pages->sgl, sg, page_count, i) { i = 0;
struct page *page = sg_page(sg); for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(page) & (1 << 17)) if (page_to_phys(sg_iter.page) & (1 << 17))
__set_bit(i, obj->bit_17); __set_bit(i, obj->bit_17);
else else
__clear_bit(i, obj->bit_17); __clear_bit(i, obj->bit_17);
i++;
} }
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment