Commit c471748d authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Move HAS_STRUCT_PAGE to obj->flags

We want to stop swapping the ops structure when attaching phys pages, so
remove HAS_STRUCT_PAGE from ops->flags and track it per object in
obj->flags instead.

This closes a potential race where the wrong obj->ops could be
dereferenced without the ww mutex held.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[danvet: apply with wiggle]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-8-maarten.lankhorst@linux.intel.com
parent aaee716e
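
In short: a per-backend capability that can change at runtime for a single object moves out of the shared, immutable ops table and into the object's own flags word. A minimal standalone sketch of the before/after shape (simplified stand-ins, not the actual driver structures):

	#include <stdbool.h>

	struct ops {
		unsigned int flags;	/* shared by every object of a type; */
	};				/* after the patch: no STRUCT_PAGE bit */

	struct bo {
		const struct ops *ops;	/* may be swapped (e.g. attach_phys) */
		unsigned long flags;	/* per object; STRUCT_PAGE lives here */
	};

	/* before: obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE
	 * after:  obj->flags      & I915_BO_ALLOC_STRUCT_PAGE */
	static inline bool has_struct_page(const struct bo *bo)
	{
		return bo->flags & (1ul << 2);	/* I915_BO_ALLOC_STRUCT_PAGE */
	}
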
@@ -258,7 +258,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	}

 	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
-	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0);
 	obj->base.import_attach = attach;
 	obj->base.resv = dma_buf->resv;
@@ -138,8 +138,7 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
 	.name = "i915_gem_object_internal",
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 	.get_pages = i915_gem_object_get_pages_internal,
 	.put_pages = i915_gem_object_put_pages_internal,
 };
@@ -178,7 +177,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 		return ERR_PTR(-ENOMEM);

 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
+	i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class,
+			     I915_BO_ALLOC_STRUCT_PAGE);

 	/*
 	 * Mark the object as volatile, such that the pages are marked as
@@ -40,13 +40,13 @@ int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
 	struct drm_i915_private *i915 = mem->i915;

 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);

 	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

-	i915_gem_object_init_memory_region(obj, mem, flags);
+	i915_gem_object_init_memory_region(obj, mem);

 	return 0;
 }
@@ -251,7 +251,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 		goto out;

 	iomap = -1;
-	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+	if (!i915_gem_object_has_struct_page(obj)) {
 		iomap = obj->mm.region->iomap.base;
 		iomap -= obj->mm.region->region.start;
 	}
@@ -653,9 +653,8 @@ __assign_mmap_offset(struct drm_file *file,
 	}

 	if (mmap_type != I915_MMAP_TYPE_GTT &&
-	    !i915_gem_object_type_has(obj,
-				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-				      I915_GEM_OBJECT_HAS_IOMEM)) {
+	    !i915_gem_object_has_struct_page(obj) &&
+	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
 		err = -ENODEV;
 		goto out;
 	}
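
The rewritten condition is just the De Morgan expansion of the old combined test: i915_gem_object_type_has(obj, A | B) was true when the type had either flag, so its negation is "has neither", with the struct-page half now sourced from the object rather than the ops table. A truth-preserving sketch (standalone, simplified names):

	#include <stdbool.h>

	/* old: !type_has(obj, PAGE | IOMEM)  ==  new: !has_page && !has_iomem */
	static bool cannot_cpu_mmap(bool has_struct_page, bool has_iomem)
	{
		return !has_struct_page && !has_iomem;
	}

The same rewrite recurs in i915_gem_object_pin_map() and in the mman selftests further down.
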
@@ -60,7 +60,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)

 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops,
-			  struct lock_class_key *key)
+			  struct lock_class_key *key, unsigned flags)
 {
 	__mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
@@ -78,6 +78,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	init_rcu_head(&obj->rcu);

 	obj->ops = ops;
+	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+	obj->flags = flags;

 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
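
The GEM_BUG_ON() rejects any bit outside the allocation-flags mask, so runtime-only bits such as I915_BO_READONLY cannot be smuggled in at init time. A standalone sketch of the same guard pattern (userspace assert stands in for GEM_BUG_ON; names simplified):

	#include <assert.h>

	#define BO_ALLOC_CONTIGUOUS  (1ul << 0)
	#define BO_ALLOC_VOLATILE    (1ul << 1)
	#define BO_ALLOC_STRUCT_PAGE (1ul << 2)
	#define BO_ALLOC_FLAGS \
		(BO_ALLOC_CONTIGUOUS | BO_ALLOC_VOLATILE | BO_ALLOC_STRUCT_PAGE)

	struct bo { unsigned long flags; };

	static void bo_init(struct bo *bo, unsigned long flags)
	{
		assert(!(flags & ~BO_ALLOC_FLAGS)); /* only alloc-time bits allowed */
		bo->flags = flags;	/* runtime bits get OR'ed in later */
	}
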
@@ -23,7 +23,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);

 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops,
-			  struct lock_class_key *key);
+			  struct lock_class_key *key,
+			  unsigned alloc_flags);
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem(struct drm_i915_private *i915,
 			     resource_size_t size);
@@ -215,7 +216,7 @@ i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
-	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
+	return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
 }

 static inline bool
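
This helper is where the race fix lands: the old body went through obj->ops, a pointer that i915_gem_object_attach_phys() swaps at runtime, while the new body reads the object's own flags word, which is updated at the same points the ops pointer is swapped. A sketch of the hazard with the old code (hypothetical interleaving, simplified):

	/* Thread A: attach_phys (ww lock held)   Thread B: reader (no ww lock)
	 *
	 * obj->ops = &i915_gem_phys_ops;
	 *                                        ops = obj->ops;
	 *                                        ops->flags & HAS_STRUCT_PAGE;
	 *                                        // answers for the phys backend
	 *                                        // while the object is still
	 *                                        // mid-transition
	 */

With the bit in obj->flags, the phys hunks below clear and restore it alongside the ops swap, keeping the two in step.
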
@@ -30,7 +30,6 @@ struct i915_lut_handle {

 struct drm_i915_gem_object_ops {
 	unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
 #define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
 #define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
 #define I915_GEM_OBJECT_IS_PROXY	BIT(3)
@@ -171,9 +170,12 @@ struct drm_i915_gem_object {
 	unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
-#define I915_BO_READONLY         BIT(2)
-#define I915_TILING_QUIRK_BIT    3 /* unknown swizzling; do not release! */
+#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
+			     I915_BO_ALLOC_VOLATILE | \
+			     I915_BO_ALLOC_STRUCT_PAGE)
+#define I915_BO_READONLY         BIT(3)
+#define I915_TILING_QUIRK_BIT    4 /* unknown swizzling; do not release! */

 	/*
 	 * Is the object to be mapped as read-only to the GPU
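
For reference, the resulting obj->flags layout (taken directly from the hunk above): the new allocation bit slots in at bit 2, pushing the non-allocation bits up by one. Every user references the symbolic names, so the renumbering is transparent.

	/* obj->flags after this patch:
	 *   bit 0: I915_BO_ALLOC_CONTIGUOUS
	 *   bit 1: I915_BO_ALLOC_VOLATILE
	 *   bit 2: I915_BO_ALLOC_STRUCT_PAGE  (new; was ops-level BIT(0))
	 *   bit 3: I915_BO_READONLY           (was bit 2)
	 *   bit 4: I915_TILING_QUIRK_BIT      (was bit 3)
	 */
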
@@ -333,13 +333,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
 {
 	enum i915_map_type has_type;
-	unsigned int flags;
 	bool pinned;
 	void *ptr;
 	int err;

-	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
-	if (!i915_gem_object_type_has(obj, flags))
+	if (!i915_gem_object_has_struct_page(obj) &&
+	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
 		return ERR_PTR(-ENXIO);

 	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
@@ -240,6 +240,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	pages = __i915_gem_object_unset_pages(obj);

 	obj->ops = &i915_gem_phys_ops;
+	obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;

 	err = ____i915_gem_object_get_pages(obj);
 	if (err)
@@ -258,6 +259,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)

 err_xfer:
 	obj->ops = &i915_gem_shmem_ops;
+	obj->flags |= I915_BO_ALLOC_STRUCT_PAGE;
 	if (!IS_ERR_OR_NULL(pages)) {
 		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
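
With the flag now per object, every place that swaps obj->ops must keep obj->flags in step, in both directions. A condensed, standalone sketch of the pairing in the two hunks above (simplified types and error handling; hypothetical helper names):

	#define ALLOC_STRUCT_PAGE (1ul << 2)

	struct ops { const char *name; };
	struct bo { const struct ops *ops; unsigned long flags; };

	static const struct ops phys_ops  = { "phys"  };
	static const struct ops shmem_ops = { "shmem" };

	/* hypothetical stand-in for the backend page acquisition */
	static int get_pages(struct bo *bo) { (void)bo; return 0; }

	static int attach_phys(struct bo *bo)
	{
		/* swap the backend and clear the flag together ... */
		bo->ops = &phys_ops;
		bo->flags &= ~ALLOC_STRUCT_PAGE;

		if (get_pages(bo)) {
			/* ... and restore both together on the unwind path */
			bo->ops = &shmem_ops;
			bo->flags |= ALLOC_STRUCT_PAGE;
			return -1;
		}
		return 0;
	}
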
@@ -106,13 +106,11 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 }

 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem,
-					unsigned long flags)
+					struct intel_memory_region *mem)
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
 	obj->mm.region = intel_memory_region_get(mem);

-	obj->flags |= flags;
 	if (obj->base.size <= mem->min_page_size)
 		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
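
Dropping the flags parameter means caller-supplied allocation flags are applied exactly once, in i915_gem_object_init(), before region setup runs; init_memory_region() now only ORs in the one bit it owns (I915_BO_ALLOC_CONTIGUOUS for objects no larger than the region's minimum page size). The resulting creation-path ordering, as seen in the lmem and mock-region hunks:

	/* creation path after this patch (order matters):
	 *
	 *   drm_gem_private_object_init(...);
	 *   i915_gem_object_init(obj, ops, &lock_class, flags); // obj->flags = flags
	 *   ...
	 *   i915_gem_object_init_memory_region(obj, mem);       // may OR in CONTIGUOUS
	 */
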
@@ -17,8 +17,7 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages);

 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem,
-					unsigned long flags);
+					struct intel_memory_region *mem);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);

 struct drm_i915_gem_object *
@@ -430,8 +430,7 @@ static void shmem_release(struct drm_i915_gem_object *obj)

 const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
 	.name = "i915_gem_object_shmem",
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

 	.get_pages = shmem_get_pages,
 	.put_pages = shmem_put_pages,
@@ -491,7 +490,8 @@ static int shmem_object_init(struct intel_memory_region *mem,
 	mapping_set_gfp_mask(mapping, mask);
 	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

-	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
+	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
+			     I915_BO_ALLOC_STRUCT_PAGE);

 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -515,7 +515,7 @@ static int shmem_object_init(struct intel_memory_region *mem,

 	i915_gem_object_set_cache_coherency(obj, cache_level);

-	i915_gem_object_init_memory_region(obj, mem, 0);
+	i915_gem_object_init_memory_region(obj, mem);

 	return 0;
 }
@@ -630,7 +630,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 	int err;

 	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
-	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
+	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, 0);

 	obj->stolen = stolen;
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
@@ -641,7 +641,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 	if (err)
 		return err;

-	i915_gem_object_init_memory_region(obj, mem, 0);
+	i915_gem_object_init_memory_region(obj, mem);

 	return 0;
 }
@@ -702,8 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)

 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 	.name = "i915_gem_object_userptr",
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-		 I915_GEM_OBJECT_IS_SHRINKABLE |
+	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
 		 I915_GEM_OBJECT_NO_MMAP |
 		 I915_GEM_OBJECT_ASYNC_CANCEL,
 	.get_pages = i915_gem_userptr_get_pages,
@@ -810,7 +809,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -ENOMEM;

 	drm_gem_private_object_init(dev, &obj->base, args->user_size);
-	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
+	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
+			     I915_BO_ALLOC_STRUCT_PAGE);
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
@@ -89,7 +89,6 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,

 static const struct drm_i915_gem_object_ops huge_ops = {
 	.name = "huge-gem",
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = huge_get_pages,
 	.put_pages = huge_put_pages,
 };
@@ -115,7 +114,8 @@ huge_gem_object(struct drm_i915_private *i915,
 		return ERR_PTR(-ENOMEM);

 	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
-	i915_gem_object_init(obj, &huge_ops, &lock_class);
+	i915_gem_object_init(obj, &huge_ops, &lock_class,
+			     I915_BO_ALLOC_STRUCT_PAGE);

 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
@@ -140,8 +140,7 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,

 static const struct drm_i915_gem_object_ops huge_page_ops = {
 	.name = "huge-gem",
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 	.get_pages = get_huge_pages,
 	.put_pages = put_huge_pages,
 };
@@ -168,7 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
 		return ERR_PTR(-ENOMEM);

 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &huge_page_ops, &lock_class);
+	i915_gem_object_init(obj, &huge_page_ops, &lock_class,
+			     I915_BO_ALLOC_STRUCT_PAGE);

 	i915_gem_object_set_volatile(obj);
@@ -319,9 +319,9 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);

 	if (single)
-		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
+		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
 	else
-		i915_gem_object_init(obj, &fake_ops, &lock_class);
+		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

 	i915_gem_object_set_volatile(obj);
@@ -835,9 +835,8 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 		return false;

 	if (type != I915_MMAP_TYPE_GTT &&
-	    !i915_gem_object_type_has(obj,
-				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-				      I915_GEM_OBJECT_HAS_IOMEM))
+	    !i915_gem_object_has_struct_page(obj) &&
+	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
 		return false;

 	return true;
@@ -977,10 +976,8 @@ static const char *repr_mmap_type(enum i915_mmap_type type)

 static bool can_access(const struct drm_i915_gem_object *obj)
 {
-	unsigned int flags =
-		I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
-
-	return i915_gem_object_type_has(obj, flags);
+	return i915_gem_object_has_struct_page(obj) ||
+	       i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
 }

 static int __igt_mmap_access(struct drm_i915_private *i915,
@@ -25,12 +25,24 @@ static int mock_phys_object(void *arg)
 		goto out;
 	}

+	if (!i915_gem_object_has_struct_page(obj)) {
+		err = -EINVAL;
+		pr_err("shmem has no struct page\n");
+		goto out_obj;
+	}
+
 	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
 	if (err) {
 		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
 		goto out_obj;
 	}

+	if (i915_gem_object_has_struct_page(obj)) {
+		err = -EINVAL;
+		pr_err("shmem has a struct page\n");
+		goto out_obj;
+	}
+
 	if (obj->ops != &i915_gem_phys_ops) {
 		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
 		err = -EINVAL;
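
The selftest now pins down the flag transition around the backend swap: a freshly created shmem object must report a struct page, and after i915_gem_object_attach_phys() it must not. Together with the existing obj->ops check just below, this asserts the invariant the patch establishes, that obj->flags and obj->ops change together.
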
@@ -218,7 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,

 	drm_gem_private_object_init(dev, &obj->base,
 		roundup(info->size, PAGE_SIZE));
-	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
 	i915_gem_object_set_readonly(obj);

 	obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -121,7 +121,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 		goto err;

 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &fake_ops, &lock_class);
+	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

 	i915_gem_object_set_volatile(obj);
@@ -27,13 +27,13 @@ static int mock_object_init(struct intel_memory_region *mem,
 		return -E2BIG;

 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);

 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

-	i915_gem_object_init_memory_region(obj, mem, flags);
+	i915_gem_object_init_memory_region(obj, mem);

 	return 0;
 }