Commit b91b09ee authored by Matthew Auld, committed by Chris Wilson

drm/i915: push set_pages down to the callers

Each backend is now responsible for calling __i915_gem_object_set_pages
upon successfully gathering its backing storage. This eliminates the
inconsistency between the async and sync paths, which stands out even
more when we start throwing around an sg_mask in a later patch.
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-6-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-5-chris@chris-wilson.co.uk
parent 2a9654b2
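
For orientation before reading the diff, the new contract in sketch form: each backend's ->get_pages() now returns 0 or a negative errno instead of an sg_table, and on success it publishes the table itself via __i915_gem_object_set_pages(). The backend name below is hypothetical; the helpers are the ones used throughout the patch. This is an illustration of the pattern, not code from the commit.

/* Illustrative sketch only -- not part of the patch. */
static int example_get_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *st;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        if (sg_alloc_table(st, obj->base.size / PAGE_SIZE, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        /* ... populate the scatterlist from the backing store ... */

        /* On success, the backend publishes the table itself. */
        __i915_gem_object_set_pages(obj, st);
        return 0;
}

The common caller, ____i915_gem_object_get_pages(), then merely propagates the errno and asserts via GEM_BUG_ON that obj->mm.pages is valid whenever 0 is returned.
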
@@ -162,8 +162,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
         return 0;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
         struct address_space *mapping = obj->base.filp->f_mapping;
         drm_dma_handle_t *phys;
@@ -171,9 +170,10 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
         struct scatterlist *sg;
         char *vaddr;
         int i;
+        int err;
 
         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
 
         /* Always aligning to the object size, allows a single allocation
          * to handle all possible callers, and given typical object sizes,
@@ -183,7 +183,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                              roundup_pow_of_two(obj->base.size),
                              roundup_pow_of_two(obj->base.size));
         if (!phys)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
         vaddr = phys->vaddr;
         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -192,7 +192,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
                 page = shmem_read_mapping_page(mapping, i);
                 if (IS_ERR(page)) {
-                        st = ERR_CAST(page);
+                        err = PTR_ERR(page);
                         goto err_phys;
                 }
 
@@ -209,13 +209,13 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
         st = kmalloc(sizeof(*st), GFP_KERNEL);
         if (!st) {
-                st = ERR_PTR(-ENOMEM);
+                err = -ENOMEM;
                 goto err_phys;
         }
 
         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                 kfree(st);
-                st = ERR_PTR(-ENOMEM);
+                err = -ENOMEM;
                 goto err_phys;
         }
 
@@ -227,11 +227,15 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
         sg_dma_len(sg) = obj->base.size;
 
         obj->phys_handle = phys;
-        return st;
+
+        __i915_gem_object_set_pages(obj, st);
+
+        return 0;
 
 err_phys:
         drm_pci_free(obj->base.dev, phys);
-        return st;
+
+        return err;
 }
 
 static void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -2292,8 +2296,7 @@ static bool i915_sg_trim(struct sg_table *orig_st)
         return true;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
         const unsigned long page_count = obj->base.size / PAGE_SIZE;
@@ -2317,12 +2320,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
         st = kmalloc(sizeof(*st), GFP_KERNEL);
         if (st == NULL)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
 rebuild_st:
         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                 kfree(st);
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
         }
 
         /* Get the list of pages out of our struct file. They'll be pinned
@@ -2430,7 +2433,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         if (i915_gem_object_needs_bit17_swizzle(obj))
                 i915_gem_object_do_bit_17_swizzle(obj, st);
 
-        return st;
+        __i915_gem_object_set_pages(obj, st);
+
+        return 0;
 
 err_sg:
         sg_mark_end(sg);
@@ -2451,7 +2456,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         if (ret == -ENOSPC)
                 ret = -ENOMEM;
-        return ERR_PTR(ret);
+        return ret;
 }
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
@@ -2474,19 +2479,17 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-        struct sg_table *pages;
+        int err;
 
         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
                 return -EFAULT;
         }
 
-        pages = obj->ops->get_pages(obj);
-        if (unlikely(IS_ERR(pages)))
-                return PTR_ERR(pages);
+        err = obj->ops->get_pages(obj);
+        GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
 
-        __i915_gem_object_set_pages(obj, pages);
-        return 0;
+        return err;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
...
@@ -256,11 +256,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
         return drm_gem_dmabuf_export(dev, &exp_info);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-        return dma_buf_map_attachment(obj->base.import_attach,
-                                      DMA_BIDIRECTIONAL);
+        struct sg_table *pages;
+
+        pages = dma_buf_map_attachment(obj->base.import_attach,
+                                       DMA_BIDIRECTIONAL);
+        if (IS_ERR(pages))
+                return PTR_ERR(pages);
+
+        __i915_gem_object_set_pages(obj, pages);
+
+        return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
...
@@ -44,8 +44,7 @@ static void internal_free_pages(struct sg_table *st)
         kfree(st);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
         struct sg_table *st;
@@ -78,12 +77,12 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 create_st:
         st = kmalloc(sizeof(*st), GFP_KERNEL);
         if (!st)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
         npages = obj->base.size / PAGE_SIZE;
         if (sg_alloc_table(st, npages, GFP_KERNEL)) {
                 kfree(st);
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
         }
 
         sg = st->sgl;
@@ -132,13 +131,17 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
          * object are only valid whilst active and pinned.
          */
         obj->mm.madv = I915_MADV_DONTNEED;
-        return st;
+
+        __i915_gem_object_set_pages(obj, st);
+
+        return 0;
 
 err:
         sg_set_page(sg, NULL, 0, 0);
         sg_mark_end(sg);
         internal_free_pages(st);
-        return ERR_PTR(-ENOMEM);
+
+        return -ENOMEM;
 }
 
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
...
@@ -69,7 +69,7 @@ struct drm_i915_gem_object_ops {
          * being released or under memory pressure (where we attempt to
          * reap pages for the shrinker).
          */
-        struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+        int (*get_pages)(struct drm_i915_gem_object *);
         void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
         int (*pwrite)(struct drm_i915_gem_object *,
...
@@ -539,12 +539,18 @@ i915_pages_create_for_stolen(struct drm_device *dev,
         return st;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-        return i915_pages_create_for_stolen(obj->base.dev,
-                                            obj->stolen->start,
-                                            obj->stolen->size);
+        struct sg_table *pages =
+                i915_pages_create_for_stolen(obj->base.dev,
+                                             obj->stolen->start,
+                                             obj->stolen->size);
+        if (IS_ERR(pages))
+                return PTR_ERR(pages);
+
+        __i915_gem_object_set_pages(obj, pages);
+
+        return 0;
 }
 
 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
...
@@ -434,6 +434,8 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
                 return ERR_PTR(ret);
         }
 
+        __i915_gem_object_set_pages(obj, st);
+
         return st;
 }
 
@@ -521,7 +523,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                 pages = __i915_gem_userptr_alloc_pages(obj, pvec,
                                                        npages);
                 if (!IS_ERR(pages)) {
-                        __i915_gem_object_set_pages(obj, pages);
                         pinned = 0;
                         pages = NULL;
                 }
@@ -582,8 +583,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
         return ERR_PTR(-EAGAIN);
 }
 
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
         const int num_pages = obj->base.size >> PAGE_SHIFT;
         struct mm_struct *mm = obj->userptr.mm->mm;
@@ -612,9 +612,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         if (obj->userptr.work) {
                 /* active flag should still be held for the pending work */
                 if (IS_ERR(obj->userptr.work))
-                        return ERR_CAST(obj->userptr.work);
+                        return PTR_ERR(obj->userptr.work);
                 else
-                        return ERR_PTR(-EAGAIN);
+                        return -EAGAIN;
         }
 
         pvec = NULL;
@@ -650,7 +650,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         release_pages(pvec, pinned, 0);
         kvfree(pvec);
 
-        return pages;
+        return PTR_ERR_OR_ZERO(pages);
 }
 
 static void
...
@@ -37,8 +37,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
         kfree(pages);
 }
 
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
         const unsigned long nreal = obj->scratch / PAGE_SIZE;
@@ -49,11 +48,11 @@ huge_get_pages(struct drm_i915_gem_object *obj)
 
         pages = kmalloc(sizeof(*pages), GFP);
         if (!pages)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
         if (sg_alloc_table(pages, npages, GFP)) {
                 kfree(pages);
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
         }
 
         sg = pages->sgl;
@@ -81,11 +80,14 @@ huge_get_pages(struct drm_i915_gem_object *obj)
         if (i915_gem_gtt_prepare_pages(obj, pages))
                 goto err;
 
-        return pages;
+        __i915_gem_object_set_pages(obj, pages);
+
+        return 0;
 
 err:
         huge_free_pages(obj, pages);
-        return ERR_PTR(-ENOMEM);
+
+        return -ENOMEM;
 #undef GFP
 }
...
@@ -39,8 +39,7 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
         kfree(pages);
 }
 
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
 #define PFN_BIAS 0x1000
@@ -50,12 +49,12 @@ fake_get_pages(struct drm_i915_gem_object *obj)
 
         pages = kmalloc(sizeof(*pages), GFP);
         if (!pages)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
         rem = round_up(obj->base.size, BIT(31)) >> 31;
         if (sg_alloc_table(pages, rem, GFP)) {
                 kfree(pages);
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
         }
 
         rem = obj->base.size;
@@ -72,7 +71,10 @@ fake_get_pages(struct drm_i915_gem_object *obj)
         GEM_BUG_ON(rem);
 
         obj->mm.madv = I915_MADV_DONTNEED;
-        return pages;
+
+        __i915_gem_object_set_pages(obj, pages);
+
+        return 0;
 #undef GFP
 }
...