Commit c7eae626 authored by Thomas Hellstrom

drm/vmwgfx: Make the object handles idr-generated

Instead of generating user-space object handles based on a, possibly
processed, hash of the object's kernel address, use idr to generate
and look up those handles. This might somewhat improve security, since
we lose all connection to the object's kernel address. Also, idr is
designed for exactly this purpose.

As a todo item: since user-space handles are now generated in sequence,
we can probably use a much simpler hash function to hash them.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent b1d05b4f
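For readers unfamiliar with the idr API, the allocation cycle the patch switches to looks roughly like the sketch below. This is a standalone illustration with hypothetical names (my_obj, my_obj_register, my_idr, my_lock), not the driver's code; the real change lands in ttm_base_object_init() further down.

/* Minimal sketch of idr-based handle allocation (hypothetical names). */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_obj {
	u32 handle;
};

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_obj_register(struct my_obj *obj)
{
	int ret;

	idr_preload(GFP_KERNEL);	/* preallocate nodes while sleeping is allowed */
	spin_lock(&my_lock);
	/* start = 0, end = 0: any non-negative id; ids are handed out in sequence */
	ret = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	obj->handle = ret;	/* the handle carries no kernel-address bits */
	return 0;
}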
@@ -261,7 +261,7 @@ int ttm_vt_lock(struct ttm_lock *lock,
 int ttm_vt_unlock(struct ttm_lock *lock)
 {
 	return ttm_ref_object_base_unref(lock->vt_holder,
-					 lock->base.hash.key, TTM_REF_USAGE);
+					 lock->base.handle, TTM_REF_USAGE);
 }
 
 void ttm_suspend_unlock(struct ttm_lock *lock)
@@ -94,6 +94,7 @@ struct ttm_object_device {
 	struct dma_buf_ops ops;
 	void (*dmabuf_release)(struct dma_buf *dma_buf);
 	size_t dma_buf_size;
+	struct idr idr;
 };
 
 /**
@@ -171,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	base->ref_obj_release = ref_obj_release;
 	base->object_type = object_type;
 	kref_init(&base->refcount);
+	idr_preload(GFP_KERNEL);
 	spin_lock(&tdev->object_lock);
-	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
-					    &base->hash,
-					    (unsigned long)base, 31, 0, 0);
+	ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
 	spin_unlock(&tdev->object_lock);
-	if (unlikely(ret != 0))
-		goto out_err0;
+	idr_preload_end();
+	if (ret < 0)
+		return ret;
+
+	base->handle = ret;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;
@@ -188,9 +190,8 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	return 0;
 out_err1:
 	spin_lock(&tdev->object_lock);
-	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	idr_remove(&tdev->idr, base->handle);
 	spin_unlock(&tdev->object_lock);
-out_err0:
 	return ret;
 }
@@ -201,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
 	struct ttm_object_device *tdev = base->tfile->tdev;
 
 	spin_lock(&tdev->object_lock);
-	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	idr_remove(&tdev->idr, base->handle);
 	spin_unlock(&tdev->object_lock);
 
 	/*
@@ -248,19 +249,13 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 struct ttm_base_object *
 ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 {
-	struct ttm_base_object *base = NULL;
-	struct drm_hash_item *hash;
-	struct drm_open_hash *ht = &tdev->object_hash;
-	int ret;
+	struct ttm_base_object *base;
 
 	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(ht, key, &hash);
+	base = idr_find(&tdev->idr, key);
 
-	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		if (!kref_get_unless_zero(&base->refcount))
-			base = NULL;
-	}
+	if (base && !kref_get_unless_zero(&base->refcount))
+		base = NULL;
 	rcu_read_unlock();
 
 	return base;
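The lookup side pairs idr_find() with kref_get_unless_zero() inside an RCU read-side section. A minimal sketch of that pattern, assuming the objects are freed only after an RCU grace period (the ttm base objects use kfree_rcu() via their rhead member); all names here are hypothetical:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct my_obj {
	struct kref refcount;
};

static struct my_obj *my_obj_lookup(struct idr *idr, u32 key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(idr, key);	/* lockless lookup, no spinlock needed */
	/* The object may be mid-teardown; take a reference only if it is live. */
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}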
@@ -284,7 +279,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 	struct ttm_ref_object *ref;
 
 	rcu_read_lock();
-	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
 		goto out_false;
 
 	/*
@@ -334,7 +329,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 	while (ret == -EINVAL) {
 		rcu_read_lock();
-		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -358,7 +353,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 			return -ENOMEM;
 		}
 
-		ref->hash.key = base->hash.key;
+		ref->hash.key = base->handle;
 		ref->obj = base;
 		ref->tfile = tfile;
 		ref->ref_type = ref_type;
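Note that only the key changes in the two hunks above: per-file reference objects still live in the drm_open_hash table, but they are now keyed by the idr-generated handle rather than by the kernel-address-derived hash key.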
@@ -510,6 +505,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
 	if (ret != 0)
 		goto out_no_object_hash;
 
+	idr_init(&tdev->idr);
 	tdev->ops = *ops;
 	tdev->dmabuf_release = tdev->ops.release;
 	tdev->ops.release = ttm_prime_dmabuf_release;
@@ -528,6 +524,8 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 	*p_tdev = NULL;
 
+	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+	idr_destroy(&tdev->idr);
 	drm_ht_remove(&tdev->object_hash);
 	kfree(tdev);
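The idr added to the object device has a simple lifetime, mirrored by the two hunks above. A sketch under the same hypothetical naming as before:

#include <linux/bug.h>
#include <linux/idr.h>

struct my_dev {
	struct idr idr;
};

static void my_dev_init(struct my_dev *dev)
{
	idr_init(&dev->idr);	/* starts empty; nodes are allocated on demand */
}

static void my_dev_release(struct my_dev *dev)
{
	/* Every handle should have been idr_remove()d by its owner by now. */
	WARN_ON_ONCE(!idr_is_empty(&dev->idr));
	idr_destroy(&dev->idr);	/* frees the idr's internal nodes */
}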
@@ -630,7 +628,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 	prime = (struct ttm_prime_object *) dma_buf->priv;
 	base = &prime->base;
-	*handle = base->hash.key;
+	*handle = base->handle;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
 	dma_buf_put(dma_buf);
@@ -124,14 +124,14 @@ struct ttm_object_device;
 struct ttm_base_object {
 	struct rcu_head rhead;
-	struct drm_hash_item hash;
-	enum ttm_object_type object_type;
-	bool shareable;
 	struct ttm_object_file *tfile;
 	struct kref refcount;
 	void (*refcount_release) (struct ttm_base_object **base);
 	void (*ref_obj_release) (struct ttm_base_object *base,
 				 enum ttm_ref_type ref_type);
+	u32 handle;
+	enum ttm_object_type object_type;
+	u32 shareable;
 };
@@ -350,4 +350,11 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 #define ttm_prime_object_kfree(__obj, __prime)	\
 	kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an average of 128 bytes
+ * per idr entry.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
 #endif
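The new constant exists because the idr's internal nodes are allocated separately from the object they index, yet both are charged against the same TTM memory-global accounting. Each accounted object size in the hunks below therefore grows by the estimated 128 bytes; for example, if ttm_round_pot() of a user object came to 512 bytes, the accounted size would become 512 + 128 = 640 bytes (the 512 figure is purely illustrative).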
@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
 		struct_size = backend_size +
 			ttm_round_pot(sizeof(struct vmw_buffer_object));
 		user_struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+			TTM_OBJ_EXTRA_SIZE;
 	}
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 		*p_base = &user_bo->prime.base;
 		kref_get(&(*p_base)->refcount);
 	}
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 
 out_no_base_object:
 	return ret;
@@ -940,7 +941,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 				  TTM_REF_USAGE, NULL, false);
 }
@@ -755,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of contexts anyway.
-	 */
 	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
-		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
@@ -807,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 		goto out_err;
 	}
 
-	arg->cid = ctx->base.hash.key;
+	arg->cid = ctx->base.handle;
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:
@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	INIT_LIST_HEAD(&fman->cleanup_list);
 	INIT_WORK(&fman->work, &vmw_fence_work_func);
 	fman->fifo_down = true;
-	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+		TTM_OBJ_EXTRA_SIZE;
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
 	fman->event_fence_action_size =
 		ttm_round_pot(sizeof(struct vmw_event_fence_action));
@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
 	}
 
 	*p_fence = &ufence->fence;
-	*p_handle = ufence->base.hash.key;
+	*p_handle = ufence->base.handle;
 
 	return 0;
 out_err:
@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 				  "object.\n");
 			goto out_no_ref_obj;
 		}
-		handle = base->hash.key;
+		handle = base->handle;
 	}
 	ttm_base_object_unref(&base);
 }
@@ -30,6 +30,11 @@
 #include "vmwgfx_drv.h"
 
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the base object itself. We estimate an average of 128 bytes
+ * per ida.
+ */
+#define VMW_IDA_ACC_SIZE 128
 
 enum vmw_cmdbuf_res_state {
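The two estimates are deliberately separate: VMW_IDA_ACC_SIZE accounts for the storage behind a resource's device-visible id, while TTM_OBJ_EXTRA_SIZE accounts for the idr that backs its user-space handle. Objects that need both, such as the contexts, shaders, and surfaces below, add both constants, e.g. ttm_round_pot(sizeof(*ctx)) + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE.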
@@ -740,13 +740,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	};
 	int ret;
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of shaders anyway.
-	 */
 	if (unlikely(vmw_user_shader_size == 0))
 		vmw_user_shader_size =
-			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+			ttm_round_pot(sizeof(struct vmw_user_shader)) +
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_user_shader_size,
@@ -792,7 +789,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	}
 
 	if (handle)
-		*handle = ushader->base.hash.key;
+		*handle = ushader->base.handle;
 out_err:
 	vmw_resource_unreference(&res);
 out:
@@ -814,13 +811,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	};
 	int ret;
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of shaders anyway.
-	 */
 	if (unlikely(vmw_shader_size == 0))
 		vmw_shader_size =
-			ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+			ttm_round_pot(sizeof(struct vmw_shader)) +
+			VMW_IDA_ACC_SIZE;
 
 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_shader_size,
@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
 	alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
 	  func->size;
-	account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
+	account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+	  TTM_OBJ_EXTRA_SIZE;
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (ret)
@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
 		goto out_err;
 	}
 
-	func->set_arg_handle(data, usimple->base.hash.key);
+	func->set_arg_handle(data, usimple->base.handle);
 out_err:
 	vmw_resource_unreference(&res);
 out_ret:
@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-			128;
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
 	num_sizes = 0;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	    num_sizes == 0)
 		return -EINVAL;
 
-	size = vmw_user_surface_size + 128 +
+	size = vmw_user_surface_size +
 	       ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
 	       ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	rep->sid = user_srf->prime.base.hash.key;
+	rep->sid = user_srf->prime.base.handle;
 	vmw_resource_unreference(&res);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
-		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
+		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
 		ret = -EFAULT;
 	}
@@ -1609,9 +1609,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-			128;
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
-	size = vmw_user_surface_size + 128;
+	size = vmw_user_surface_size;
 
 	/* Define a surface based on the parameters. */
 	ret = vmw_surface_gb_priv_define(dev,
@@ -1683,7 +1683,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 		goto out_unlock;
 	}
 
-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle = user_srf->prime.base.handle;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
@@ -1745,7 +1745,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not add a reference to a GB surface "
 			  "backup buffer.\n");
-		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
+		(void) ttm_ref_object_base_unref(tfile, base->handle,
 						 TTM_REF_USAGE);
 		goto out_bad_resource;
 	}
@@ -1759,7 +1759,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	rep->creq.base.array_size = srf->array_size;
 	rep->creq.base.buffer_handle = backup_handle;
 	rep->creq.base.base_size = srf->base_size;
-	rep->crep.handle = user_srf->prime.base.hash.key;
+	rep->crep.handle = user_srf->prime.base.handle;
 	rep->crep.backup_size = srf->res.backup_size;
 	rep->crep.buffer_handle = backup_handle;
 	rep->crep.buffer_map_handle =