Commit d22632c8 authored by Matthew Auld

drm/i915: support forcing the page size with lmem

For some specialised objects we might need something larger than the
region's min_page_size, due to some hw restriction. Slightly more hairy
is needing something smaller, with the guarantee that such objects will
never be inserted into any GTT, which is the case for the paging
structures.

This also fixes how we set up the BO page_alignment if we later migrate
the object somewhere else. For example, if the placements are {SMEM,
LMEM} then we might get this wrong. Pushing the min_page_size behaviour
into the manager should fix this.

v2(Thomas): push the default page size behaviour into buddy_man, and let
the user override it with the page-alignment, which looks cleaner

v3: rebase on ttm sys changes
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210625103824.558481-1-matthew.auld@intel.com
parent e11b7b6e
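
[Editor's note] For context, a minimal caller sketch of the interface this patch adds. The function name and signature come from the diff below; the wrapper, its name, and the SZ_4K choice are hypothetical and only illustrate the paging-structures use case described above:

```c
/*
 * Hypothetical kernel-internal caller (not part of this patch): force 4K
 * backing pages for an object holding paging structures, which is
 * guaranteed never to be inserted into any GTT. Forcing something smaller
 * than the region min_page_size is only safe under that guarantee, as the
 * kerneldoc in the diff spells out.
 */
static struct drm_i915_gem_object *
create_pt_backing_sketch(struct drm_i915_private *i915, resource_size_t sz)
{
	return __i915_gem_object_create_lmem_with_ps(i915, sz, SZ_4K, 0);
}
```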
@@ -90,7 +90,7 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
 	 */
 	flags = I915_BO_ALLOC_USER;
 
-	ret = mr->ops->init_object(mr, obj, size, flags);
+	ret = mr->ops->init_object(mr, obj, size, 0, flags);
 	if (ret)
 		return ret;
...
@@ -72,11 +72,42 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 		mr->type == INTEL_MEMORY_STOLEN_LOCAL);
 }
 
+/**
+ * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
+ * minimum page size for the backing pages.
+ * @i915: The i915 instance.
+ * @size: The size in bytes for the object. Note that we need to round the size
+ * up depending on the @page_size. The final object size can be fished out from
+ * the drm GEM object.
+ * @page_size: The requested minimum page size in bytes for this object. This is
+ * useful if we need something bigger than the regions min_page_size due to some
+ * hw restriction, or in some very specialised cases where it needs to be
+ * smaller, where the internal fragmentation cost is too great when rounding up
+ * the object size.
+ * @flags: The optional BO allocation flags.
+ *
+ * Note that this interface assumes you know what you are doing when forcing the
+ * @page_size. If this is smaller than the regions min_page_size then it can
+ * never be inserted into any GTT, otherwise it might lead to undefined
+ * behaviour.
+ *
+ * Return: The object pointer, which might be an ERR_PTR in the case of failure.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+				      resource_size_t size,
+				      resource_size_t page_size,
+				      unsigned int flags)
+{
+	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+					     size, page_size, flags);
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
 			    unsigned int flags)
 {
 	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
-					     size, flags);
+					     size, 0, flags);
 }
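
[Editor's note] The kerneldoc above says @size is rounded up depending on @page_size. A quick illustrative sketch (the helper and its name are assumptions; the API calls are from the diff):

```c
/* Requesting a 4K object with a forced 64K page size yields a 64K
 * object, since the size is rounded up to the forced page size; the
 * final size is read back from the embedded GEM object. */
static void check_rounding_sketch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;

	obj = __i915_gem_object_create_lmem_with_ps(i915, SZ_4K, SZ_64K, 0);
	if (!IS_ERR(obj)) {
		GEM_BUG_ON(obj->base.size != SZ_64K); /* rounded up */
		i915_gem_object_put(obj);
	}
}
```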
@@ -23,6 +23,11 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+				      resource_size_t size,
+				      resource_size_t page_size,
+				      unsigned int flags);
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
...
@@ -32,9 +32,11 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
 			      resource_size_t size,
+			      resource_size_t page_size,
 			      unsigned int flags)
 {
 	struct drm_i915_gem_object *obj;
+	resource_size_t default_page_size;
 	int err;
 
 	/*
@@ -48,7 +50,14 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	if (!mem)
 		return ERR_PTR(-ENODEV);
 
-	size = round_up(size, mem->min_page_size);
+	default_page_size = mem->min_page_size;
+	if (page_size)
+		default_page_size = page_size;
+
+	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
+	GEM_BUG_ON(default_page_size < PAGE_SIZE);
+
+	size = round_up(size, default_page_size);
 
 	GEM_BUG_ON(!size);
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
@@ -60,7 +69,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	err = mem->ops->init_object(mem, obj, size, flags);
+	err = mem->ops->init_object(mem, obj, size, page_size, flags);
 	if (err)
 		goto err_object_free;
...
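
[Editor's note] The selection logic above reduces to a small rule; this standalone restatement (a hypothetical helper, not in the patch) makes the override and its invariants explicit:

```c
/* A caller-supplied page_size overrides the region default; either way
 * the effective value must be a power of two and at least PAGE_SIZE,
 * mirroring the GEM_BUG_ONs in i915_gem_object_create_region(). */
static resource_size_t
effective_page_size(const struct intel_memory_region *mem,
		    resource_size_t page_size)
{
	resource_size_t ps = page_size ? page_size : mem->min_page_size;

	GEM_BUG_ON(!is_power_of_2_u64(ps));
	GEM_BUG_ON(ps < PAGE_SIZE);
	return ps;
}
```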
@@ -19,6 +19,7 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
 			      resource_size_t size,
+			      resource_size_t page_size,
 			      unsigned int flags);
 
 #endif
@@ -490,6 +490,7 @@ static int __create_shmem(struct drm_i915_private *i915,
 static int shmem_object_init(struct intel_memory_region *mem,
 			     struct drm_i915_gem_object *obj,
 			     resource_size_t size,
+			     resource_size_t page_size,
 			     unsigned int flags)
 {
 	static struct lock_class_key lock_class;
@@ -548,7 +549,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
 			     resource_size_t size)
 {
 	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
-					     size, 0);
+					     size, 0, 0);
 }
 
 /* Allocate a new GEM object and fill it with the supplied data */
...
@@ -670,6 +670,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
 					struct drm_i915_gem_object *obj,
 					resource_size_t size,
+					resource_size_t page_size,
 					unsigned int flags)
 {
 	struct drm_i915_private *i915 = mem->i915;
@@ -708,7 +709,7 @@ struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_i915_private *i915,
 			      resource_size_t size)
 {
-	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0);
+	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
 }
 
 static int init_stolen_smem(struct intel_memory_region *mem)
...
@@ -893,6 +893,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 			       struct drm_i915_gem_object *obj,
 			       resource_size_t size,
+			       resource_size_t page_size,
 			       unsigned int flags)
 {
 	static struct lock_class_key lock_class;
@@ -915,6 +916,9 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 
+	/* Forcing the page size is kernel internal only */
+	GEM_BUG_ON(page_size && obj->mm.n_placements);
+
 	/*
 	 * If this function fails, it will call the destructor, but
 	 * our caller still owns the object. So no freeing in the
@@ -924,7 +928,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	 */
 	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
 				   bo_type, &i915_sys_placement,
-				   mem->min_page_size >> PAGE_SHIFT,
+				   page_size >> PAGE_SHIFT,
 				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
 	if (ret)
 		return i915_ttm_err_to_gem(ret);
...
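
[Editor's note] One unit pitfall worth flagging at the ttm_bo_init_reserved() call above: TTM expects page_alignment in CPU pages, while page_size here is in bytes, hence the shift. A worked conversion, assuming 4K CPU pages (PAGE_SHIFT == 12):

```c
/* Byte-denominated page_size -> TTM page_alignment (in pages):
 *   SZ_64K >> PAGE_SHIFT == 16  (forced 64K minimum page size)
 *   0 >> PAGE_SHIFT      == 0   (no forcing: the buddy manager then
 *                                falls back to its default_page_size)
 */
```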
@@ -44,5 +44,6 @@ i915_ttm_to_gem(struct ttm_buffer_object *bo)
 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 			       struct drm_i915_gem_object *obj,
 			       resource_size_t size,
+			       resource_size_t page_size,
 			       unsigned int flags);
 #endif
@@ -496,7 +496,8 @@ static int igt_mock_memory_region_huge_pages(void *arg)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(flags); ++i) {
-		obj = i915_gem_object_create_region(mem, page_size,
+		obj = i915_gem_object_create_region(mem,
+						    page_size, page_size,
 						    flags[i]);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
...
@@ -48,7 +48,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
 	GEM_BUG_ON(!src_mr);
 
 	/* Switch object backing-store on create */
-	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0);
+	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
...
@@ -958,7 +958,7 @@ static int igt_mmap(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
+		obj = i915_gem_object_create_region(mr, sizes[i], 0, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
@@ -1084,7 +1084,7 @@ static int igt_mmap_access(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
@@ -1229,7 +1229,7 @@ static int igt_mmap_gpu(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
@@ -1384,7 +1384,7 @@ static int igt_mmap_revoke(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
...
@@ -18,6 +18,7 @@ struct i915_ttm_buddy_manager {
 	struct i915_buddy_mm mm;
 	struct list_head reserved;
 	struct mutex lock;
+	u64 default_page_size;
 };
 
 static struct i915_ttm_buddy_manager *
@@ -53,7 +54,10 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	GEM_BUG_ON(!bman_res->base.num_pages);
 	size = bman_res->base.num_pages << PAGE_SHIFT;
 
-	min_page_size = bo->page_alignment << PAGE_SHIFT;
+	min_page_size = bman->default_page_size;
+	if (bo->page_alignment)
+		min_page_size = bo->page_alignment << PAGE_SHIFT;
+
 	GEM_BUG_ON(min_page_size < mm->chunk_size);
 	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
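
[Editor's note] The min_order computation above converts the effective minimum page size into a buddy block order. A worked example, assuming a 4K chunk_size:

```c
/* min_page_size = SZ_64K, chunk_size = SZ_4K:
 *   min_order = ilog2(SZ_64K) - ilog2(SZ_4K) = 16 - 12 = 4
 * so every allocation is carved from order-4 blocks (2^4 chunks = 64K),
 * giving 64K-aligned, 64K-granular backing pages. */
```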
@@ -134,6 +138,9 @@ static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
  * @type: Memory type we want to manage
  * @use_tt: Set use_tt for the manager
  * @size: The size in bytes to manage
+ * @default_page_size: The default minimum page size in bytes for allocations,
+ * this must be at least as large as @chunk_size, and can be overridden by
+ * setting the BO page_alignment, to be larger or smaller as needed.
  * @chunk_size: The minimum page size in bytes for our allocations i.e
  * order-zero
  *
@@ -154,7 +161,8 @@ static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
  */
 int i915_ttm_buddy_man_init(struct ttm_device *bdev,
 			    unsigned int type, bool use_tt,
-			    u64 size, u64 chunk_size)
+			    u64 size, u64 default_page_size,
+			    u64 chunk_size)
 {
 	struct ttm_resource_manager *man;
 	struct i915_ttm_buddy_manager *bman;
@@ -170,6 +178,8 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
 	mutex_init(&bman->lock);
 	INIT_LIST_HEAD(&bman->reserved);
 
+	GEM_BUG_ON(default_page_size < chunk_size);
+	bman->default_page_size = default_page_size;
+
 	man = &bman->manager;
 	man->use_tt = use_tt;
...
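
[Editor's note] A call sketch for the new signature with concrete values (illustrative only; bdev and mem_type come from the caller's context, and the real caller is intel_region_ttm_init() below, which passes the region's min_page_size):

```c
/* Manage a 1G region with a 64K default minimum page size and 4K
 * order-zero chunks. The GEM_BUG_ON above enforces
 * default_page_size >= chunk_size. */
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
			      SZ_1G, SZ_64K, SZ_4K);
```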
@@ -46,7 +46,7 @@ to_ttm_buddy_resource(struct ttm_resource *res)
 int i915_ttm_buddy_man_init(struct ttm_device *bdev,
 			    unsigned type, bool use_tt,
-			    u64 size, u64 chunk_size);
+			    u64 size, u64 default_page_size, u64 chunk_size);
 int i915_ttm_buddy_man_fini(struct ttm_device *bdev,
 			    unsigned int type);
...
@@ -55,6 +55,7 @@ struct intel_memory_region_ops {
 	int (*init_object)(struct intel_memory_region *mem,
 			   struct drm_i915_gem_object *obj,
 			   resource_size_t size,
+			   resource_size_t page_size,
 			   unsigned int flags);
 };
...
@@ -86,7 +86,8 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
 	int ret;
 
 	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
-				      resource_size(&mem->region), PAGE_SIZE);
+				      resource_size(&mem->region),
+				      mem->min_page_size, PAGE_SIZE);
 	if (ret)
 		return ret;
 
@@ -167,7 +168,6 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
 	int ret;
 
 	mock_bo.base.size = size;
-	mock_bo.page_alignment = mem->min_page_size >> PAGE_SHIFT;
 	place.flags = flags;
 
 	ret = man->func->alloc(man, &mock_bo, &place, &res);
...
@@ -68,7 +68,7 @@ static int igt_mock_fill(void *arg)
 		resource_size_t size = page_num * page_size;
 		struct drm_i915_gem_object *obj;
 
-		obj = i915_gem_object_create_region(mem, size, 0);
+		obj = i915_gem_object_create_region(mem, size, 0, 0);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -110,7 +110,7 @@ igt_object_create(struct intel_memory_region *mem,
 	struct drm_i915_gem_object *obj;
 	int err;
 
-	obj = i915_gem_object_create_region(mem, size, flags);
+	obj = i915_gem_object_create_region(mem, size, 0, flags);
 	if (IS_ERR(obj))
 		return obj;
 
@@ -647,6 +647,62 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_lmem_create_with_ps(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	int err = 0;
+	u32 ps;
+
+	for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
+		struct drm_i915_gem_object *obj;
+		dma_addr_t daddr;
+
+		obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			if (err == -ENXIO || err == -E2BIG) {
+				pr_info("%s not enough lmem for ps(%u) err=%d\n",
+					__func__, ps, err);
+				err = 0;
+			}
+			break;
+		}
+
+		if (obj->base.size != ps) {
+			pr_err("%s size(%zu) != ps(%u)\n",
+			       __func__, obj->base.size, ps);
+			err = -EINVAL;
+			goto out_put;
+		}
+
+		i915_gem_object_lock(obj, NULL);
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto out_put;
+
+		daddr = i915_gem_object_get_dma_address(obj, 0);
+		if (!IS_ALIGNED(daddr, ps)) {
+			pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
+			       __func__, &daddr, ps);
+			err = -EINVAL;
+			goto out_unpin;
+		}
+
+out_unpin:
+		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj);
+out_put:
+		i915_gem_object_unlock(obj);
+		i915_gem_object_put(obj);
+
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 static int igt_lmem_create_cleared_cpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -932,7 +988,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
 	struct drm_i915_gem_object *obj;
 	void *addr;
 
-	obj = i915_gem_object_create_region(mr, size, 0);
+	obj = i915_gem_object_create_region(mr, size, 0, 0);
 	if (IS_ERR(obj)) {
 		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
 			return ERR_PTR(-ENODEV);
@@ -1149,6 +1205,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
+		SUBTEST(igt_lmem_create_with_ps),
 		SUBTEST(igt_lmem_create_cleared_cpu),
 		SUBTEST(igt_lmem_write_cpu),
 		SUBTEST(igt_lmem_write_gpu),
...
@@ -63,6 +63,7 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
 static int mock_object_init(struct intel_memory_region *mem,
 			    struct drm_i915_gem_object *obj,
 			    resource_size_t size,
+			    resource_size_t page_size,
 			    unsigned int flags)
 {
 	static struct lock_class_key lock_class;
...