Commit dd6034c6 authored by Dave Gordon, committed by Chris Wilson

drm/i915: refactor i915_gem_object_pin_map()

The recently-added i915_gem_object_pin_map() can be further optimised
for "small" objects. To facilitate this, and simplify the error paths
before adding the new code, this patch pulls out the "mapping" part of
the operation (involving local allocations which must be undone before
return) into its own subfunction.

The next patch will then insert the new optimisation into the middle of
the now-separated subfunction.

This reorganisation will probably not affect the generated code, as the
compiler will most likely inline it anyway, but it makes the logical
structure a bit clearer and easier to modify.
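
For context, after this split the body of i915_gem_object_pin_map() reduces to roughly the following (a condensed sketch assembled from the hunks below; the lockdep assertion and the get_pages error handling are pre-existing code shown only as surrounding context, not something this patch changes):

void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (!obj->mapping) {
		/* All local allocations now live in i915_gem_object_map(),
		 * so the only error path left in the caller is the
		 * unpin + ERR_PTR(-ENOMEM) below.
		 */
		obj->mapping = i915_gem_object_map(obj);
		if (!obj->mapping) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	return obj->mapping;
}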

v2:
    Restructure loop-over-pages & error check [Chris Wilson]

v3:
    Add page count to debug messages [Chris Wilson]
    Convert WARN_ON() to GEM_BUG_ON()

v4:
    Drop the DEBUG messages [Tvrtko Ursulin]

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1463741647-15666-2-git-send-email-chris@chris-wilson.co.uk
parent dc00b6a0
@@ -2398,6 +2398,38 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+	struct sg_table *sgt = obj->pages;
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	unsigned long i = 0;
+	void *addr;
+
+	/* A single page can always be kmapped */
+	if (n_pages == 1)
+		return kmap(sg_page(sgt->sgl));
+
+	pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+	if (!pages)
+		return NULL;
+
+	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	/* Check that we have the expected number of pages */
+	GEM_BUG_ON(i != n_pages);
+
+	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+	drm_free_large(pages);
+
+	return addr;
+}
+
 /* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 {
 	int ret;
@@ -2410,29 +2442,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 	i915_gem_object_pin_pages(obj);
 
-	if (obj->mapping == NULL) {
-		struct page **pages;
-
-		pages = NULL;
-		if (obj->base.size == PAGE_SIZE)
-			obj->mapping = kmap(sg_page(obj->pages->sgl));
-		else
-			pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
-					       sizeof(*pages),
-					       GFP_TEMPORARY);
-		if (pages != NULL) {
-			struct sg_page_iter sg_iter;
-			int n;
-
-			n = 0;
-			for_each_sg_page(obj->pages->sgl, &sg_iter,
-					 obj->pages->nents, 0)
-				pages[n++] = sg_page_iter_page(&sg_iter);
-
-			obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
-			drm_free_large(pages);
-		}
-
-		if (obj->mapping == NULL) {
+	if (!obj->mapping) {
+		obj->mapping = i915_gem_object_map(obj);
+		if (!obj->mapping) {
 			i915_gem_object_unpin_pages(obj);
 			return ERR_PTR(-ENOMEM);
 		}
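
For reference (not introduced by this patch), a caller of this interface follows a pin-map / use / unpin pattern. The helper below is purely illustrative; i915_gem_object_unpin_map() is the existing counterpart that drops the pin taken here:

/* Illustrative only: map an object, touch it through the CPU mapping,
 * then release the pin. Assumes the caller already holds struct_mutex,
 * as i915_gem_object_pin_map() asserts.
 */
static int example_clear_object(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);

	i915_gem_object_unpin_map(obj);
	return 0;
}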