Commit 6056e500 authored by Chris Wilson

drm/i915/gem: Support discontiguous lmem object maps

Create a vmap for discontiguous lmem objects to support
i915_gem_object_pin_map().

v2: Offset io address by region.start for fake-lmem

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200102204215.1519103-1-chris@chris-wilson.co.uk
parent 1d0e2c93
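In short, i915_gem_object_map() now stitches the object's backing store together page by page into a single kernel vmap, instead of relying on one contiguous io_mapping_map_wc() window, so discontiguous lmem objects can be pinned and mapped. A minimal, self-contained sketch of that technique follows; the helper name span_to_vmap() and its parameters are illustrative only (not code from this patch), and error handling is trimmed:

/*
 * Illustrative sketch, not part of this patch: build a write-combined
 * kernel mapping over discontiguous device pages by reserving a vm area
 * and installing one PTE per page, mirroring the alloc_vm_area() +
 * iomap_pte() scheme in the hunks below.
 */
#include <linux/io-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *span_to_vmap(struct io_mapping *iomap,
			  resource_size_t region_start,
			  const dma_addr_t *daddr,
			  unsigned long n_pages)
{
	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL_IO);
	resource_size_t base = iomap->base - region_start;
	struct vm_struct *area;
	pte_t **ptes;
	unsigned long i;

	ptes = kvmalloc_array(n_pages, sizeof(*ptes), GFP_KERNEL);
	if (!ptes)
		return NULL;

	/* Reserve kernel virtual space; ptes[i] is the PTE slot backing
	 * the i'th page of the new area. */
	area = alloc_vm_area(n_pages * PAGE_SIZE, ptes);
	if (!area) {
		kvfree(ptes);
		return NULL;
	}

	/* Point each PTE at its (possibly discontiguous) device page. */
	for (i = 0; i < n_pages; i++)
		*ptes[i] = pte_mkspecial(pfn_pte((base + daddr[i]) >> PAGE_SHIFT,
						 prot));

	kvfree(ptes);
	return area->addr;	/* torn down later with vunmap() */
}

The same idea covers shmem-backed objects by filling the PTEs with mk_pte(page, prot) from the sg_table instead of an io_mapping offset, which is exactly what the i915_gem_object_map() hunk below does.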
@@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
 	.release = i915_gem_object_release_memory_region,
 };
 
-/* XXX: Time to vfunc your life up? */
-void __iomem *
-i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
-				 unsigned long n)
-{
-	resource_size_t offset;
-
-	offset = i915_gem_object_get_dma_address(obj, n);
-	offset -= obj->mm.region->region.start;
-
-	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
-					unsigned long n)
-{
-	resource_size_t offset;
-
-	offset = i915_gem_object_get_dma_address(obj, n);
-	offset -= obj->mm.region->region.start;
-
-	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
-			    unsigned long n,
-			    unsigned long size)
-{
-	resource_size_t offset;
-
-	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
-
-	offset = i915_gem_object_get_dma_address(obj, n);
-	offset -= obj->mm.region->region.start;
-
-	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
-}
-
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
 	return obj->ops == &i915_gem_lmem_obj_ops;
@@ -14,14 +14,6 @@ struct intel_memory_region;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
-void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
-					  unsigned long n, unsigned long size);
-void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
-					       unsigned long n);
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
-					unsigned long n);
-
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
@@ -158,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 
 static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 {
-	if (i915_gem_object_is_lmem(obj))
-		io_mapping_unmap((void __force __iomem *)ptr);
-	else if (is_vmalloc_addr(ptr))
+	if (is_vmalloc_addr(ptr))
 		vunmap(ptr);
 	else
 		kunmap(kmap_to_page(ptr));
@@ -236,46 +234,44 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return err;
 }
 
+static inline pte_t iomap_pte(resource_size_t base,
+			      dma_addr_t offset,
+			      pgprot_t prot)
+{
+	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
+}
+
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
-	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt = obj->mm.pages;
-	struct sgt_iter sgt_iter;
-	struct page *page;
-	struct page *stack_pages[32];
-	struct page **pages = stack_pages;
-	unsigned long i = 0;
+	pte_t *stack[32], **mem;
+	struct vm_struct *area;
 	pgprot_t pgprot;
-	void *addr;
 
-	if (i915_gem_object_is_lmem(obj)) {
-		void __iomem *io;
-
-		if (type != I915_MAP_WC)
-			return NULL;
-
-		io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
-		return (void __force *)io;
-	}
+	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+		return NULL;
 
 	/* A single page can always be kmapped */
-	if (n_pages == 1 && type == I915_MAP_WB)
+	if (n_pte == 1 && type == I915_MAP_WB)
 		return kmap(sg_page(sgt->sgl));
 
-	if (n_pages > ARRAY_SIZE(stack_pages)) {
+	mem = stack;
+	if (n_pte > ARRAY_SIZE(stack)) {
 		/* Too big for stack -- allocate temporary array instead */
-		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
-		if (!pages)
+		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+		if (!mem)
 			return NULL;
 	}
 
-	for_each_sgt_page(page, sgt_iter, sgt)
-		pages[i++] = page;
-
-	/* Check that we have the expected number of pages */
-	GEM_BUG_ON(i != n_pages);
+	area = alloc_vm_area(obj->base.size, mem);
+	if (!area) {
+		if (mem != stack)
+			kvfree(mem);
+		return NULL;
+	}
 
 	switch (type) {
 	default:
@@ -288,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
 		break;
 	}
 
-	addr = vmap(pages, n_pages, 0, pgprot);
-	if (pages != stack_pages)
-		kvfree(pages);
+	if (i915_gem_object_has_struct_page(obj)) {
+		struct sgt_iter iter;
+		struct page *page;
+		pte_t **ptes = mem;
+
+		for_each_sgt_page(page, iter, sgt)
+			**ptes++ = mk_pte(page, pgprot);
+	} else {
+		resource_size_t iomap;
+		struct sgt_iter iter;
+		pte_t **ptes = mem;
+		dma_addr_t addr;
+
+		iomap = obj->mm.region->iomap.base;
+		iomap -= obj->mm.region->region.start;
+
+		for_each_sgt_daddr(addr, iter, sgt)
+			**ptes++ = iomap_pte(iomap, addr, pgprot);
+	}
+
+	if (mem != stack)
+		kvfree(mem);
 
-	return addr;
+	return area->addr;
 }
 
 /* get, pin, and map the pages of the object into kernel space */
@@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
-static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
-	unsigned long n;
+	unsigned long n = obj->base.size >> PAGE_SHIFT;
+	u32 *ptr;
 	int err;
 
-	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	i915_gem_object_unlock(obj);
-	if (err)
-		return err;
-
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
-	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-		u32 __iomem *base;
-		u32 read_val;
-
-		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
-
-		read_val = ioread32(base + dword);
-		io_mapping_unmap_atomic(base);
-		if (read_val != val) {
-			pr_err("n=%lu base[%u]=%u, val=%u\n",
-			       n, dword, read_val, val);
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
+
+	ptr += dword;
+	while (n--) {
+		if (*ptr != val) {
+			pr_err("base[%u]=%08x, val=%08x\n",
+			       dword, *ptr, val);
 			err = -EINVAL;
 			break;
 		}
+
+		ptr += PAGE_SIZE / sizeof(*ptr);
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_unpin_map(obj);
 	return err;
 }
 
@@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
 	if (i915_gem_object_has_struct_page(obj))
 		return __cpu_check_shmem(obj, dword, val);
-	else if (i915_gem_object_is_lmem(obj))
-		return __cpu_check_lmem(obj, dword, val);
-
-	return -ENODEV;
+	else
+		return __cpu_check_vmap(obj, dword, val);
 }
 
 static int __igt_write_huge(struct intel_context *ce,
@@ -270,36 +270,31 @@ static int igt_gpu_write_dw(struct intel_context *ce,
 
 static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
-	unsigned long n;
+	unsigned long n = obj->base.size >> PAGE_SHIFT;
+	u32 *ptr;
 	int err;
 
-	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	i915_gem_object_unlock(obj);
-	if (err)
-		return err;
-
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
-	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-		u32 __iomem *base;
-		u32 read_val;
-
-		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
-
-		read_val = ioread32(base + dword);
-		io_mapping_unmap_atomic(base);
-		if (read_val != val) {
-			pr_err("n=%lu base[%u]=%u, val=%u\n",
-			       n, dword, read_val, val);
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
+
+	ptr += dword;
+	while (n--) {
+		if (*ptr != val) {
+			pr_err("base[%u]=%08x, val=%08x\n",
+			       dword, *ptr, val);
 			err = -EINVAL;
 			break;
 		}
+
+		ptr += PAGE_SIZE / sizeof(*ptr);
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_unpin_map(obj);
 	return err;
 }
 