Commit c32fc9c8 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'vmwgfx-next-2014-03-28' of git://people.freedesktop.org/~thomash/linux into drm-next

vmwgfx render-node support and drm + ttm changes it depends upon.

Pull request of 2014-03-28

* tag 'vmwgfx-next-2014-03-28' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Bump driver minor and date
  drm/vmwgfx: Enable render nodes
  drm/vmwgfx: Tighten the security around buffer maps
  drm/ttm: Add a ttm_ref_object_exists function
  drm/vmwgfx: Tighten security around surface sharing v2
  drm/vmwgfx: Allow prime fds in the surface reference ioctls
  drm/vmwgfx: Drop authentication requirement on UNREF ioctls
  drm/vmwgfx: Reinstate and tighten security around legacy master model
  drm/vmwgfx: Use a per-device semaphore for reservation protection
  drm: Add a function to get the ioctl flags
  drm: Protect the master management with a drm_device::master_mutex v3
  drm: Remove the minor master list
  drm: Improve on minor type helpers v3
  drm: Make control nodes master-less v3
  drm: Break out ioctl permission check to a separate function v2
  drm: Have the crtc code only reference master from legacy nodes v2
parents 60f2b4af 03c5b8f0
...@@ -1492,9 +1492,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, ...@@ -1492,9 +1492,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
mutex_unlock(&file_priv->fbs_lock); mutex_unlock(&file_priv->fbs_lock);
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
mode_group = &file_priv->master->minor->mode_group; if (!drm_is_primary_client(file_priv)) {
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
mode_group = NULL;
list_for_each(lh, &dev->mode_config.crtc_list) list_for_each(lh, &dev->mode_config.crtc_list)
crtc_count++; crtc_count++;
...@@ -1505,6 +1505,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, ...@@ -1505,6 +1505,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
encoder_count++; encoder_count++;
} else { } else {
mode_group = &file_priv->master->minor->mode_group;
crtc_count = mode_group->num_crtcs; crtc_count = mode_group->num_crtcs;
connector_count = mode_group->num_connectors; connector_count = mode_group->num_connectors;
encoder_count = mode_group->num_encoders; encoder_count = mode_group->num_encoders;
...@@ -1519,7 +1520,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, ...@@ -1519,7 +1520,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_crtcs >= crtc_count) { if (card_res->count_crtcs >= crtc_count) {
copied = 0; copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { if (!mode_group) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) { head) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
...@@ -1546,7 +1547,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, ...@@ -1546,7 +1547,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_encoders >= encoder_count) { if (card_res->count_encoders >= encoder_count) {
copied = 0; copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { if (!mode_group) {
list_for_each_entry(encoder, list_for_each_entry(encoder,
&dev->mode_config.encoder_list, &dev->mode_config.encoder_list,
head) { head) {
...@@ -1577,7 +1578,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, ...@@ -1577,7 +1578,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_connectors >= connector_count) { if (card_res->count_connectors >= connector_count) {
copied = 0; copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { if (!mode_group) {
list_for_each_entry(connector, list_for_each_entry(connector,
&dev->mode_config.connector_list, &dev->mode_config.connector_list,
head) { head) {
...@@ -2846,7 +2847,8 @@ int drm_mode_getfb(struct drm_device *dev, ...@@ -2846,7 +2847,8 @@ int drm_mode_getfb(struct drm_device *dev,
r->bpp = fb->bits_per_pixel; r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0]; r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) { if (fb->funcs->create_handle) {
if (file_priv->is_master || capable(CAP_SYS_ADMIN)) { if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv, ret = fb->funcs->create_handle(fb, file_priv,
&r->handle); &r->handle);
} else { } else {
......
...@@ -285,6 +285,45 @@ static int drm_version(struct drm_device *dev, void *data, ...@@ -285,6 +285,45 @@ static int drm_version(struct drm_device *dev, void *data,
return err; return err;
} }
/**
 * drm_ioctl_permit - Check ioctl permissions against caller
 *
 * @flags: ioctl permission flags.
 * @file_priv: Pointer to struct drm_file identifying the caller.
 *
 * Checks whether the caller is allowed to run an ioctl with the
 * indicated permissions. If so, returns zero. Otherwise returns an
 * error code suitable for ioctl return.
 */
static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
	/* Pure predicates on the minor type; safe to evaluate up front. */
	bool is_render = drm_is_render_client(file_priv);
	bool is_control = drm_is_control_client(file_priv);

	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
		return -EACCES;

	/* AUTH is only for authenticated or render client */
	if (unlikely((flags & DRM_AUTH) && !is_render &&
		     !file_priv->authenticated))
		return -EACCES;

	/* MASTER is only for master or control clients */
	if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
		     !is_control))
		return -EACCES;

	/* Control clients must be explicitly allowed */
	if (unlikely(is_control && !(flags & DRM_CONTROL_ALLOW)))
		return -EACCES;

	/* Render clients must be explicitly allowed */
	if (unlikely(is_render && !(flags & DRM_RENDER_ALLOW)))
		return -EACCES;

	return 0;
}
/** /**
* Called whenever a process performs an ioctl on /dev/drm. * Called whenever a process performs an ioctl on /dev/drm.
* *
...@@ -350,52 +389,51 @@ long drm_ioctl(struct file *filp, ...@@ -350,52 +389,51 @@ long drm_ioctl(struct file *filp,
/* Do not trust userspace, use our own definition */ /* Do not trust userspace, use our own definition */
func = ioctl->func; func = ioctl->func;
if (!func) { if (unlikely(!func)) {
DRM_DEBUG("no function\n"); DRM_DEBUG("no function\n");
retcode = -EINVAL; retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || goto err_i1;
((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) || }
((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
(!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
(!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
if (asize > usize)
memset(kdata + usize, 0, asize - usize);
}
if (cmd & IOC_IN) { retcode = drm_ioctl_permit(ioctl->flags, file_priv);
if (copy_from_user(kdata, (void __user *)arg, if (unlikely(retcode))
usize) != 0) { goto err_i1;
retcode = -EFAULT;
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1; goto err_i1;
} }
} else
memset(kdata, 0, usize);
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
mutex_lock(&drm_global_mutex);
retcode = func(dev, kdata, file_priv);
mutex_unlock(&drm_global_mutex);
} }
if (asize > usize)
memset(kdata + usize, 0, asize - usize);
}
if (cmd & IOC_OUT) { if (cmd & IOC_IN) {
if (copy_to_user((void __user *)arg, kdata, if (copy_from_user(kdata, (void __user *)arg,
usize) != 0) usize) != 0) {
retcode = -EFAULT; retcode = -EFAULT;
goto err_i1;
} }
} else
memset(kdata, 0, usize);
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
mutex_lock(&drm_global_mutex);
retcode = func(dev, kdata, file_priv);
mutex_unlock(&drm_global_mutex);
}
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
usize) != 0)
retcode = -EFAULT;
} }
err_i1: err_i1:
...@@ -412,3 +450,21 @@ long drm_ioctl(struct file *filp, ...@@ -412,3 +450,21 @@ long drm_ioctl(struct file *filp,
return retcode; return retcode;
} }
EXPORT_SYMBOL(drm_ioctl); EXPORT_SYMBOL(drm_ioctl);
/**
 * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
 *
 * @nr: Ioctl number.
 * @flags: Where to return the ioctl permission flags
 */
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
	/*
	 * Driver-private numbers and numbers past the core table are
	 * not core ioctls; report failure for those.
	 */
	if (nr >= DRM_COMMAND_BASE &&
	    (nr < DRM_COMMAND_END || nr >= DRM_CORE_IOCTL_COUNT))
		return false;

	*flags = drm_ioctls[nr].flags;
	return true;
}
EXPORT_SYMBOL(drm_ioctl_flags);
...@@ -231,12 +231,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp, ...@@ -231,12 +231,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
/* if there is no current master make this fd it, but do not create /* if there is no current master make this fd it, but do not create
* any master object for render clients */ * any master object for render clients */
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->master_mutex);
if (!priv->minor->master && !drm_is_render_client(priv)) { if (drm_is_primary_client(priv) && !priv->minor->master) {
/* create a new master */ /* create a new master */
priv->minor->master = drm_master_create(priv->minor); priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) { if (!priv->minor->master) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM; ret = -ENOMEM;
goto out_close; goto out_close;
} }
...@@ -244,37 +243,31 @@ static int drm_open_helper(struct inode *inode, struct file *filp, ...@@ -244,37 +243,31 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->is_master = 1; priv->is_master = 1;
/* take another reference for the copy in the local file priv */ /* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master); priv->master = drm_master_get(priv->minor->master);
priv->authenticated = 1; priv->authenticated = 1;
mutex_unlock(&dev->struct_mutex);
if (dev->driver->master_create) { if (dev->driver->master_create) {
ret = dev->driver->master_create(dev, priv->master); ret = dev->driver->master_create(dev, priv->master);
if (ret) { if (ret) {
mutex_lock(&dev->struct_mutex);
/* drop both references if this fails */ /* drop both references if this fails */
drm_master_put(&priv->minor->master); drm_master_put(&priv->minor->master);
drm_master_put(&priv->master); drm_master_put(&priv->master);
mutex_unlock(&dev->struct_mutex);
goto out_close; goto out_close;
} }
} }
mutex_lock(&dev->struct_mutex);
if (dev->driver->master_set) { if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, priv, true); ret = dev->driver->master_set(dev, priv, true);
if (ret) { if (ret) {
/* drop both references if this fails */ /* drop both references if this fails */
drm_master_put(&priv->minor->master); drm_master_put(&priv->minor->master);
drm_master_put(&priv->master); drm_master_put(&priv->master);
mutex_unlock(&dev->struct_mutex);
goto out_close; goto out_close;
} }
} }
} else if (!drm_is_render_client(priv)) { } else if (drm_is_primary_client(priv)) {
/* get a reference to the master */ /* get a reference to the master */
priv->master = drm_master_get(priv->minor->master); priv->master = drm_master_get(priv->minor->master);
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->master_mutex);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist); list_add(&priv->lhead, &dev->filelist);
...@@ -302,6 +295,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, ...@@ -302,6 +295,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return 0; return 0;
out_close: out_close:
mutex_unlock(&dev->master_mutex);
if (dev->driver->postclose) if (dev->driver->postclose)
dev->driver->postclose(dev, priv); dev->driver->postclose(dev, priv);
out_prime_destroy: out_prime_destroy:
...@@ -489,11 +483,13 @@ int drm_release(struct inode *inode, struct file *filp) ...@@ -489,11 +483,13 @@ int drm_release(struct inode *inode, struct file *filp)
} }
mutex_unlock(&dev->ctxlist_mutex); mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->master_mutex);
if (file_priv->is_master) { if (file_priv->is_master) {
struct drm_master *master = file_priv->master; struct drm_master *master = file_priv->master;
struct drm_file *temp; struct drm_file *temp;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(temp, &dev->filelist, lhead) { list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) && if ((temp->master == file_priv->master) &&
(temp != file_priv)) (temp != file_priv))
...@@ -512,6 +508,7 @@ int drm_release(struct inode *inode, struct file *filp) ...@@ -512,6 +508,7 @@ int drm_release(struct inode *inode, struct file *filp)
master->lock.file_priv = NULL; master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue); wake_up_interruptible_all(&master->lock.lock_queue);
} }
mutex_unlock(&dev->struct_mutex);
if (file_priv->minor->master == file_priv->master) { if (file_priv->minor->master == file_priv->master) {
/* drop the reference held by the minor */ /* drop the reference held by the minor */
...@@ -521,10 +518,13 @@ int drm_release(struct inode *inode, struct file *filp) ...@@ -521,10 +518,13 @@ int drm_release(struct inode *inode, struct file *filp)
} }
} }
/* drop the reference held my the file priv */ /* drop the master reference held by the file priv */
if (file_priv->master) if (file_priv->master)
drm_master_put(&file_priv->master); drm_master_put(&file_priv->master);
file_priv->is_master = 0; file_priv->is_master = 0;
mutex_unlock(&dev->master_mutex);
mutex_lock(&dev->struct_mutex);
list_del(&file_priv->lhead); list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
......
...@@ -127,8 +127,6 @@ struct drm_master *drm_master_create(struct drm_minor *minor) ...@@ -127,8 +127,6 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
INIT_LIST_HEAD(&master->magicfree); INIT_LIST_HEAD(&master->magicfree);
master->minor = minor; master->minor = minor;
list_add_tail(&master->head, &minor->master_list);
return master; return master;
} }
...@@ -146,8 +144,7 @@ static void drm_master_destroy(struct kref *kref) ...@@ -146,8 +144,7 @@ static void drm_master_destroy(struct kref *kref)
struct drm_device *dev = master->minor->dev; struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp; struct drm_map_list *r_list, *list_temp;
list_del(&master->head); mutex_lock(&dev->struct_mutex);
if (dev->driver->master_destroy) if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master); dev->driver->master_destroy(dev, master);
...@@ -175,6 +172,7 @@ static void drm_master_destroy(struct kref *kref) ...@@ -175,6 +172,7 @@ static void drm_master_destroy(struct kref *kref)
drm_ht_remove(&master->magiclist); drm_ht_remove(&master->magiclist);
mutex_unlock(&dev->struct_mutex);
kfree(master); kfree(master);
} }
...@@ -190,19 +188,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, ...@@ -190,19 +188,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{ {
int ret = 0; int ret = 0;
mutex_lock(&dev->master_mutex);
if (file_priv->is_master) if (file_priv->is_master)
return 0; goto out_unlock;
if (file_priv->minor->master && file_priv->minor->master != file_priv->master) if (file_priv->minor->master) {
return -EINVAL; ret = -EINVAL;
goto out_unlock;
if (!file_priv->master) }
return -EINVAL;
if (file_priv->minor->master) if (!file_priv->master) {
return -EINVAL; ret = -EINVAL;
goto out_unlock;
}
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master); file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1; file_priv->is_master = 1;
if (dev->driver->master_set) { if (dev->driver->master_set) {
...@@ -212,27 +211,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, ...@@ -212,27 +211,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
drm_master_put(&file_priv->minor->master); drm_master_put(&file_priv->minor->master);
} }
} }
mutex_unlock(&dev->struct_mutex);
out_unlock:
mutex_unlock(&dev->master_mutex);
return ret; return ret;
} }
int drm_dropmaster_ioctl(struct drm_device *dev, void *data, int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
int ret = -EINVAL;
mutex_lock(&dev->master_mutex);
if (!file_priv->is_master) if (!file_priv->is_master)
return -EINVAL; goto out_unlock;
if (!file_priv->minor->master) if (!file_priv->minor->master)
return -EINVAL; goto out_unlock;
mutex_lock(&dev->struct_mutex); ret = 0;
if (dev->driver->master_drop) if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false); dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master); drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0; file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
return 0; out_unlock:
mutex_unlock(&dev->master_mutex);
return ret;
} }
/* /*
...@@ -273,7 +278,6 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type) ...@@ -273,7 +278,6 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
minor->type = type; minor->type = type;
minor->dev = dev; minor->dev = dev;
INIT_LIST_HEAD(&minor->master_list);
*drm_minor_get_slot(dev, type) = minor; *drm_minor_get_slot(dev, type) = minor;
return 0; return 0;
...@@ -564,6 +568,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, ...@@ -564,6 +568,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
spin_lock_init(&dev->event_lock); spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex); mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex); mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
dev->anon_inode = drm_fs_inode_new(); dev->anon_inode = drm_fs_inode_new();
if (IS_ERR(dev->anon_inode)) { if (IS_ERR(dev->anon_inode)) {
...@@ -617,6 +622,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, ...@@ -617,6 +622,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
drm_minor_free(dev, DRM_MINOR_CONTROL); drm_minor_free(dev, DRM_MINOR_CONTROL);
drm_fs_inode_free(dev->anon_inode); drm_fs_inode_free(dev->anon_inode);
err_free: err_free:
mutex_destroy(&dev->master_mutex);
kfree(dev); kfree(dev);
return NULL; return NULL;
} }
...@@ -638,6 +644,8 @@ static void drm_dev_release(struct kref *ref) ...@@ -638,6 +644,8 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_CONTROL); drm_minor_free(dev, DRM_MINOR_CONTROL);
kfree(dev->devname); kfree(dev->devname);
mutex_destroy(&dev->master_mutex);
kfree(dev); kfree(dev);
} }
......
...@@ -270,6 +270,52 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) ...@@ -270,6 +270,52 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
} }
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
/**
 * ttm_ref_object_exists - Check whether a caller has a valid ref object
 * (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 * Lockless: runs entirely under rcu_read_lock(), so the result is only a
 * point-in-time answer; the ref may be released immediately afterwards.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base)
{
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
struct drm_hash_item *hash;
struct ttm_ref_object *ref;
/* RCU keeps the ref object's memory valid during the lockless lookup. */
rcu_read_lock();
if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
goto out_false;
/*
 * Verify that the ref object is really pointing to our base object.
 * Our base object could actually be dead, and the ref object pointing
 * to another base object with the same handle.
 */
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
if (unlikely(base != ref->obj))
goto out_false;
/*
 * Verify that the ref->obj pointer was actually valid!
 * The read barrier orders the ->obj read above against the refcount
 * read below; a zero refcount means the ref was already being freed.
 */
rmb();
if (unlikely(atomic_read(&ref->kref.refcount) == 0))
goto out_false;
rcu_read_unlock();
return true;
out_false:
rcu_read_unlock();
return false;
}
EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile, int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base, struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed) enum ttm_ref_type ref_type, bool *existed)
......
...@@ -462,7 +462,6 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, ...@@ -462,7 +462,6 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *tmp; struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret; int ret;
...@@ -474,7 +473,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, ...@@ -474,7 +473,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_context_size == 0)) if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -521,7 +520,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, ...@@ -521,7 +520,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
out_err: out_err:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
out_unlock: out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
......
...@@ -52,11 +52,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, ...@@ -52,11 +52,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
struct ttm_placement *placement, struct ttm_placement *placement,
bool interruptible) bool interruptible)
{ {
struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base; struct ttm_buffer_object *bo = &buf->base;
int ret; int ret;
ret = ttm_write_lock(&vmaster->lock, interruptible); ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -71,7 +70,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, ...@@ -71,7 +70,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
err: err:
ttm_write_unlock(&vmaster->lock); ttm_write_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -95,12 +94,11 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, ...@@ -95,12 +94,11 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_dma_buffer *buf,
bool pin, bool interruptible) bool pin, bool interruptible)
{ {
struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base; struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement *placement; struct ttm_placement *placement;
int ret; int ret;
ret = ttm_write_lock(&vmaster->lock, interruptible); ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -143,7 +141,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, ...@@ -143,7 +141,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
err_unreserve: err_unreserve:
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
err: err:
ttm_write_unlock(&vmaster->lock); ttm_write_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -198,7 +196,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, ...@@ -198,7 +196,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf, struct vmw_dma_buffer *buf,
bool pin, bool interruptible) bool pin, bool interruptible)
{ {
struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base; struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement; struct ttm_placement placement;
int ret = 0; int ret = 0;
...@@ -209,7 +206,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, ...@@ -209,7 +206,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
placement = vmw_vram_placement; placement = vmw_vram_placement;
placement.lpfn = bo->num_pages; placement.lpfn = bo->num_pages;
ret = ttm_write_lock(&vmaster->lock, interruptible); ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -232,7 +229,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, ...@@ -232,7 +229,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
err_unlock: err_unlock:
ttm_write_unlock(&vmaster->lock); ttm_write_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
......
...@@ -142,11 +142,11 @@ ...@@ -142,11 +142,11 @@
static const struct drm_ioctl_desc vmw_ioctls[] = { static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl, vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
...@@ -159,29 +159,28 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { ...@@ -159,29 +159,28 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl, vmw_fence_obj_signaled_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
vmw_fence_event_ioctl, DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
/* these allow direct access to the framebuffers mark as master only */ /* these allow direct access to the framebuffers mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
...@@ -194,19 +193,19 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { ...@@ -194,19 +193,19 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER | DRM_UNLOCKED), DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_SHADER, VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl, vmw_shader_define_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER, VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl, vmw_shader_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl, vmw_gb_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl, vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU, VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl, vmw_user_dmabuf_synccpu_ioctl,
DRM_AUTH | DRM_UNLOCKED), DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
}; };
static struct pci_device_id vmw_pci_id_list[] = { static struct pci_device_id vmw_pci_id_list[] = {
...@@ -606,6 +605,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -606,6 +605,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex); mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock); rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
for (i = vmw_res_context; i < vmw_res_max; ++i) { for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]); idr_init(&dev_priv->res_idr[i]);
...@@ -981,12 +981,70 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) ...@@ -981,12 +981,70 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
return ret; return ret;
} }
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, static struct vmw_master *vmw_master_check(struct drm_device *dev,
unsigned long arg) struct drm_file *file_priv,
unsigned int flags)
{
int ret;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *vmaster;
if (file_priv->minor->type != DRM_MINOR_LEGACY ||
!(flags & DRM_AUTH))
return NULL;
ret = mutex_lock_interruptible(&dev->master_mutex);
if (unlikely(ret != 0))
return ERR_PTR(-ERESTARTSYS);
if (file_priv->is_master) {
mutex_unlock(&dev->master_mutex);
return NULL;
}
/*
* Check if we were previously master, but now dropped.
*/
if (vmw_fp->locked_master) {
mutex_unlock(&dev->master_mutex);
DRM_ERROR("Dropped master trying to access ioctl that "
"requires authentication.\n");
return ERR_PTR(-EACCES);
}
mutex_unlock(&dev->master_mutex);
/*
* Taking the drm_global_mutex after the TTM lock might deadlock
*/
if (!(flags & DRM_UNLOCKED)) {
DRM_ERROR("Refusing locked ioctl access.\n");
return ERR_PTR(-EDEADLK);
}
/*
* Take the TTM lock. Possibly sleep waiting for the authenticating
* master to become master again, or for a SIGTERM if the
* authenticating master exits.
*/
vmaster = vmw_master(file_priv->master);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
vmaster = ERR_PTR(ret);
return vmaster;
}
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg,
long (*ioctl_func)(struct file *, unsigned int,
unsigned long))
{ {
struct drm_file *file_priv = filp->private_data; struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev; struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd); unsigned int nr = DRM_IOCTL_NR(cmd);
struct vmw_master *vmaster;
unsigned int flags;
long ret;
/* /*
* Do extra checking on driver private ioctls. * Do extra checking on driver private ioctls.
...@@ -995,18 +1053,44 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -995,18 +1053,44 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
const struct drm_ioctl_desc *ioctl = const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE]; &vmw_ioctls[nr - DRM_COMMAND_BASE];
if (unlikely(ioctl->cmd_drv != cmd)) { if (unlikely(ioctl->cmd_drv != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n", DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE); nr - DRM_COMMAND_BASE);
return -EINVAL; return -EINVAL;
} }
flags = ioctl->flags;
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
vmaster = vmw_master_check(dev, file_priv, flags);
if (unlikely(IS_ERR(vmaster))) {
DRM_INFO("IOCTL ERROR %d\n", nr);
return PTR_ERR(vmaster);
} }
return drm_ioctl(filp, cmd, arg); ret = ioctl_func(filp, cmd, arg);
if (vmaster)
ttm_read_unlock(&vmaster->lock);
return ret;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
} }
#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
static void vmw_lastclose(struct drm_device *dev) static void vmw_lastclose(struct drm_device *dev)
{ {
struct drm_crtc *crtc; struct drm_crtc *crtc;
...@@ -1175,12 +1259,11 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, ...@@ -1175,12 +1259,11 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
{ {
struct vmw_private *dev_priv = struct vmw_private *dev_priv =
container_of(nb, struct vmw_private, pm_nb); container_of(nb, struct vmw_private, pm_nb);
struct vmw_master *vmaster = dev_priv->active_master;
switch (val) { switch (val) {
case PM_HIBERNATION_PREPARE: case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE: case PM_SUSPEND_PREPARE:
ttm_suspend_lock(&vmaster->lock); ttm_suspend_lock(&dev_priv->reservation_sem);
/** /**
* This empties VRAM and unbinds all GMR bindings. * This empties VRAM and unbinds all GMR bindings.
...@@ -1194,7 +1277,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, ...@@ -1194,7 +1277,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
case PM_POST_HIBERNATION: case PM_POST_HIBERNATION:
case PM_POST_SUSPEND: case PM_POST_SUSPEND:
case PM_POST_RESTORE: case PM_POST_RESTORE:
ttm_suspend_unlock(&vmaster->lock); ttm_suspend_unlock(&dev_priv->reservation_sem);
break; break;
case PM_RESTORE_PREPARE: case PM_RESTORE_PREPARE:
...@@ -1315,14 +1398,14 @@ static const struct file_operations vmwgfx_driver_fops = { ...@@ -1315,14 +1398,14 @@ static const struct file_operations vmwgfx_driver_fops = {
.poll = vmw_fops_poll, .poll = vmw_fops_poll,
.read = vmw_fops_read, .read = vmw_fops_read,
#if defined(CONFIG_COMPAT) #if defined(CONFIG_COMPAT)
.compat_ioctl = drm_compat_ioctl, .compat_ioctl = vmw_compat_ioctl,
#endif #endif
.llseek = noop_llseek, .llseek = noop_llseek,
}; };
static struct drm_driver driver = { static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET | DRIVER_PRIME, DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
.load = vmw_driver_load, .load = vmw_driver_load,
.unload = vmw_driver_unload, .unload = vmw_driver_unload,
.lastclose = vmw_lastclose, .lastclose = vmw_lastclose,
......
...@@ -40,9 +40,9 @@ ...@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h" #include "vmwgfx_fence.h"
#define VMWGFX_DRIVER_DATE "20140228" #define VMWGFX_DRIVER_DATE "20140325"
#define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5 #define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
...@@ -486,6 +486,11 @@ struct vmw_private { ...@@ -486,6 +486,11 @@ struct vmw_private {
struct mutex release_mutex; struct mutex release_mutex;
uint32_t num_3d_resources; uint32_t num_3d_resources;
/*
* Replace this with an rwsem as soon as we have down_xx_interruptible()
*/
struct ttm_lock reservation_sem;
/* /*
* Query processing. These members * Query processing. These members
* are protected by the cmdbuf mutex. * are protected by the cmdbuf mutex.
......
...@@ -2712,7 +2712,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, ...@@ -2712,7 +2712,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret; int ret;
/* /*
...@@ -2729,7 +2728,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, ...@@ -2729,7 +2728,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -2745,6 +2744,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, ...@@ -2745,6 +2744,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
vmw_kms_cursor_post_execbuf(dev_priv); vmw_kms_cursor_post_execbuf(dev_priv);
out_unlock: out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -377,14 +377,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, ...@@ -377,14 +377,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* interuptable? */ (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
if (unlikely(ret != 0))
return ret;
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo) if (!vmw_bo) {
ret = -ENOMEM;
goto err_unlock; goto err_unlock;
}
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&ne_placement, &ne_placement,
......
...@@ -226,7 +226,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, ...@@ -226,7 +226,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_present_arg *arg = struct drm_vmw_present_arg *arg =
(struct drm_vmw_present_arg *)data; (struct drm_vmw_present_arg *)data;
struct vmw_surface *surface; struct vmw_surface *surface;
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr; struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL; struct drm_vmw_rect *clips = NULL;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
...@@ -271,7 +270,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, ...@@ -271,7 +270,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
} }
vfb = vmw_framebuffer_to_vfb(fb); vfb = vmw_framebuffer_to_vfb(fb);
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_ttm_lock; goto out_no_ttm_lock;
...@@ -291,7 +290,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, ...@@ -291,7 +290,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
vmw_surface_unreference(&surface); vmw_surface_unreference(&surface);
out_no_surface: out_no_surface:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock: out_no_ttm_lock:
drm_framebuffer_unreference(fb); drm_framebuffer_unreference(fb);
out_no_fb: out_no_fb:
...@@ -311,7 +310,6 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, ...@@ -311,7 +310,6 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_fence_rep __user *user_fence_rep = struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *) (struct drm_vmw_fence_rep __user *)
(unsigned long)arg->fence_rep; (unsigned long)arg->fence_rep;
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr; struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL; struct drm_vmw_rect *clips = NULL;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
...@@ -361,7 +359,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, ...@@ -361,7 +359,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_ttm_lock; goto out_no_ttm_lock;
} }
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_ttm_lock; goto out_no_ttm_lock;
...@@ -369,7 +367,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, ...@@ -369,7 +367,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
vfb, user_fence_rep, vfb, user_fence_rep,
clips, num_clips); clips, num_clips);
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock: out_no_ttm_lock:
drm_framebuffer_unreference(fb); drm_framebuffer_unreference(fb);
out_no_fb: out_no_fb:
......
...@@ -596,7 +596,6 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, ...@@ -596,7 +596,6 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
unsigned num_clips) unsigned num_clips)
{ {
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs = struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer); vmw_framebuffer_to_vfbs(framebuffer);
struct drm_clip_rect norect; struct drm_clip_rect norect;
...@@ -611,7 +610,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, ...@@ -611,7 +610,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
drm_modeset_lock_all(dev_priv->dev); drm_modeset_lock_all(dev_priv->dev);
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev); drm_modeset_unlock_all(dev_priv->dev);
return ret; return ret;
...@@ -632,7 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, ...@@ -632,7 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
flags, color, flags, color,
clips, num_clips, inc, NULL); clips, num_clips, inc, NULL);
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev); drm_modeset_unlock_all(dev_priv->dev);
...@@ -954,7 +953,6 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, ...@@ -954,7 +953,6 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
unsigned num_clips) unsigned num_clips)
{ {
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd = struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer); vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect; struct drm_clip_rect norect;
...@@ -962,7 +960,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, ...@@ -962,7 +960,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
drm_modeset_lock_all(dev_priv->dev); drm_modeset_lock_all(dev_priv->dev);
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev); drm_modeset_unlock_all(dev_priv->dev);
return ret; return ret;
...@@ -989,7 +987,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, ...@@ -989,7 +987,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
clips, num_clips, increment, NULL); clips, num_clips, increment, NULL);
} }
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev); drm_modeset_unlock_all(dev_priv->dev);
...@@ -2022,7 +2020,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ...@@ -2022,7 +2020,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_update_layout_arg *arg = struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data; (struct drm_vmw_update_layout_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
void __user *user_rects; void __user *user_rects;
struct drm_vmw_rect *rects; struct drm_vmw_rect *rects;
unsigned rects_size; unsigned rects_size;
...@@ -2030,7 +2027,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ...@@ -2030,7 +2027,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
int i; int i;
struct drm_mode_config *mode_config = &dev->mode_config; struct drm_mode_config *mode_config = &dev->mode_config;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -2072,6 +2069,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ...@@ -2072,6 +2069,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
out_free: out_free:
kfree(rects); kfree(rects);
out_unlock: out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -538,8 +538,13 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, ...@@ -538,8 +538,13 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM; return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo); vmw_user_bo = vmw_user_dma_buffer(bo);
return (vmw_user_bo->prime.base.tfile == tfile ||
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; /* Check that the caller has opened the object. */
if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
return 0;
DRM_ERROR("Could not grant buffer access.\n");
return -EPERM;
} }
/** /**
...@@ -676,10 +681,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, ...@@ -676,10 +681,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_dmabuf_rep *rep = &arg->rep; struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_dma_buffer *dma_buf; struct vmw_dma_buffer *dma_buf;
uint32_t handle; uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret; int ret;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -696,7 +700,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, ...@@ -696,7 +700,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
vmw_dmabuf_unreference(&dma_buf); vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf: out_no_dmabuf:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -873,7 +877,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, ...@@ -873,7 +877,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *tmp; struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret; int ret;
/* /*
...@@ -884,7 +887,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, ...@@ -884,7 +887,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_stream_size == 0)) if (unlikely(vmw_user_stream_size == 0))
vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128; vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -932,7 +935,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, ...@@ -932,7 +935,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
out_err: out_err:
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
out_unlock: out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -985,14 +988,13 @@ int vmw_dumb_create(struct drm_file *file_priv, ...@@ -985,14 +988,13 @@ int vmw_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args) struct drm_mode_create_dumb *args)
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *dma_buf; struct vmw_dma_buffer *dma_buf;
int ret; int ret;
args->pitch = args->width * ((args->bpp + 7) / 8); args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height; args->size = args->pitch * args->height;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1004,7 +1006,7 @@ int vmw_dumb_create(struct drm_file *file_priv, ...@@ -1004,7 +1006,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
vmw_dmabuf_unreference(&dma_buf); vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf: out_no_dmabuf:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
......
...@@ -449,7 +449,6 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, ...@@ -449,7 +449,6 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_shader_create_arg *arg = struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data; (struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *buffer = NULL; struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type; SVGA3dShaderType shader_type;
int ret; int ret;
...@@ -487,14 +486,14 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, ...@@ -487,14 +486,14 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
goto out_bad_arg; goto out_bad_arg;
} }
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_bad_arg; goto out_bad_arg;
ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
shader_type, tfile, &arg->shader_handle); shader_type, tfile, &arg->shader_handle);
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg: out_bad_arg:
vmw_dmabuf_unreference(&buffer); vmw_dmabuf_unreference(&buffer);
return ret; return ret;
......
...@@ -36,11 +36,13 @@ ...@@ -36,11 +36,13 @@
* @base: The TTM base object handling user-space visibility. * @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata. * @srf: The surface metadata.
* @size: TTM accounting size for the surface. * @size: TTM accounting size for the surface.
* @master: master of the creating client. Used for security check.
*/ */
struct vmw_user_surface { struct vmw_user_surface {
struct ttm_prime_object prime; struct ttm_prime_object prime;
struct vmw_surface srf; struct vmw_surface srf;
uint32_t size; uint32_t size;
struct drm_master *master;
}; };
/** /**
...@@ -624,6 +626,8 @@ static void vmw_user_surface_free(struct vmw_resource *res) ...@@ -624,6 +626,8 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_private *dev_priv = srf->res.dev_priv; struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size; uint32_t size = user_srf->size;
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets); kfree(srf->offsets);
kfree(srf->sizes); kfree(srf->sizes);
kfree(srf->snooper.image); kfree(srf->snooper.image);
...@@ -697,7 +701,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -697,7 +701,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct vmw_surface_offset *cur_offset; struct vmw_surface_offset *cur_offset;
uint32_t num_sizes; uint32_t num_sizes;
uint32_t size; uint32_t size;
struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc; const struct svga3d_surface_desc *desc;
if (unlikely(vmw_user_surface_size == 0)) if (unlikely(vmw_user_surface_size == 0))
...@@ -723,7 +726,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -723,7 +726,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -820,6 +823,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -820,6 +823,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
user_srf->prime.base.shareable = false; user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL; user_srf->prime.base.tfile = NULL;
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
/** /**
* From this point, the generic resource management functions * From this point, the generic resource management functions
...@@ -862,7 +867,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -862,7 +867,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
rep->sid = user_srf->prime.base.hash.key; rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return 0; return 0;
out_no_copy: out_no_copy:
kfree(srf->offsets); kfree(srf->offsets);
...@@ -873,7 +878,81 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -873,7 +878,81 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
out_no_user_srf: out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size); ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock: out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
struct drm_file *file_priv,
uint32_t u_handle,
enum drm_vmw_handle_type handle_type,
struct ttm_base_object **base_p)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_surface *user_srf;
uint32_t handle;
struct ttm_base_object *base;
int ret;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
if (unlikely(drm_is_render_client(file_priv))) {
DRM_ERROR("Render client refused legacy "
"surface reference.\n");
return -EACCES;
}
handle = u_handle;
}
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
DRM_ERROR("Referenced object is not a surface.\n");
goto out_bad_resource;
}
if (handle_type != DRM_VMW_HANDLE_PRIME) {
user_srf = container_of(base, struct vmw_user_surface,
prime.base);
/*
* Make sure the surface creator has the same
* authenticating master.
*/
if (drm_is_primary_client(file_priv) &&
user_srf->master != file_priv->master) {
DRM_ERROR("Trying to reference surface outside of"
" master domain.\n");
ret = -EACCES;
goto out_bad_resource;
}
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
}
}
*base_p = base;
return 0;
out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME)
(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
return ret; return ret;
} }
...@@ -898,27 +977,16 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -898,27 +977,16 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct vmw_user_surface *user_srf; struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes; struct drm_vmw_size __user *user_sizes;
struct ttm_base_object *base; struct ttm_base_object *base;
int ret = -EINVAL; int ret;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
goto out_bad_resource; req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(base, struct vmw_user_surface, prime.base); user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf; srf = &user_srf->srf;
ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;
}
rep->flags = srf->flags; rep->flags = srf->flags;
rep->format = srf->format; rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
...@@ -931,10 +999,10 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -931,10 +999,10 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n", DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes); user_sizes, srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
ret = -EFAULT; ret = -EFAULT;
} }
out_bad_resource:
out_no_reference:
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
return ret; return ret;
...@@ -1173,7 +1241,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1173,7 +1241,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret; int ret;
uint32_t size; uint32_t size;
struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc; const struct svga3d_surface_desc *desc;
uint32_t backup_handle; uint32_t backup_handle;
...@@ -1189,7 +1256,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1189,7 +1256,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1228,6 +1295,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1228,6 +1295,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
user_srf->prime.base.shareable = false; user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL; user_srf->prime.base.tfile = NULL;
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
/** /**
* From this point, the generic resource management functions * From this point, the generic resource management functions
...@@ -1283,12 +1352,12 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1283,12 +1352,12 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return 0; return 0;
out_no_user_srf: out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size); ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock: out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&dev_priv->reservation_sem);
return ret; return ret;
} }
...@@ -1315,14 +1384,10 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -1315,14 +1384,10 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
uint32_t backup_handle; uint32_t backup_handle;
int ret = -EINVAL; int ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
if (unlikely(base == NULL)) { req->handle_type, &base);
DRM_ERROR("Could not find surface to reference.\n"); if (unlikely(ret != 0))
return -EINVAL; return ret;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
user_srf = container_of(base, struct vmw_user_surface, prime.base); user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf; srf = &user_srf->srf;
...@@ -1331,13 +1396,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -1331,13 +1396,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
goto out_bad_resource; goto out_bad_resource;
} }
ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface.\n");
goto out_bad_resource;
}
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
&backup_handle); &backup_handle);
...@@ -1346,8 +1404,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -1346,8 +1404,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface " DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n"); "backup buffer.\n");
(void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, (void) ttm_ref_object_base_unref(tfile, base->hash.key,
req->sid,
TTM_REF_USAGE); TTM_REF_USAGE);
goto out_bad_resource; goto out_bad_resource;
} }
......
...@@ -405,7 +405,8 @@ struct drm_prime_file_private { ...@@ -405,7 +405,8 @@ struct drm_prime_file_private {
struct drm_file { struct drm_file {
unsigned always_authenticated :1; unsigned always_authenticated :1;
unsigned authenticated :1; unsigned authenticated :1;
unsigned is_master :1; /* this file private is a master for a minor */ /* Whether we're master for a minor. Protected by master_mutex */
unsigned is_master :1;
/* true when the client has asked us to expose stereo 3D mode flags */ /* true when the client has asked us to expose stereo 3D mode flags */
unsigned stereo_allowed :1; unsigned stereo_allowed :1;
...@@ -684,29 +685,29 @@ struct drm_gem_object { ...@@ -684,29 +685,29 @@ struct drm_gem_object {
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
/* per-master structure */ /**
* struct drm_master - drm master structure
*
* @refcount: Refcount for this master object.
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
* @unique_size: Amount allocated. Protected by drm_global_mutex.
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
* @magicfree: List of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*/
struct drm_master { struct drm_master {
struct kref refcount;
struct kref refcount; /* refcount for this master */ struct drm_minor *minor;
char *unique;
struct list_head head; /**< each minor contains a list of masters */ int unique_len;
struct drm_minor *minor; /**< link back to minor we are a master for */ int unique_size;
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
int unique_size; /**< amount allocated */
int blocked; /**< Blocked due to VC switch? */
/** \name Authentication */
/*@{ */
struct drm_open_hash magiclist; struct drm_open_hash magiclist;
struct list_head magicfree; struct list_head magicfree;
/*@} */ struct drm_lock_data lock;
void *driver_priv;
struct drm_lock_data lock; /**< Information on hardware lock */
void *driver_priv; /**< Private structure for driver to use */
}; };
/* Size of ringbuffer for vblank timestamps. Just double-buffer /* Size of ringbuffer for vblank timestamps. Just double-buffer
...@@ -1021,8 +1022,8 @@ struct drm_minor { ...@@ -1021,8 +1022,8 @@ struct drm_minor {
struct list_head debugfs_list; struct list_head debugfs_list;
struct mutex debugfs_lock; /* Protects debugfs_list. */ struct mutex debugfs_lock; /* Protects debugfs_list. */
struct drm_master *master; /* currently active master for this node */ /* currently active master for this node. Protected by master_mutex */
struct list_head master_list; struct drm_master *master;
struct drm_mode_group mode_group; struct drm_mode_group mode_group;
}; };
...@@ -1072,6 +1073,7 @@ struct drm_device { ...@@ -1072,6 +1073,7 @@ struct drm_device {
/*@{ */ /*@{ */
spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
struct mutex struct_mutex; /**< For others */ struct mutex struct_mutex; /**< For others */
struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
/*@} */ /*@} */
/** \name Usage Counters */ /** \name Usage Counters */
...@@ -1202,11 +1204,21 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev) ...@@ -1202,11 +1204,21 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev)
return mutex_is_locked(&dev->mode_config.mutex); return mutex_is_locked(&dev->mode_config.mutex);
} }
static inline bool drm_is_render_client(struct drm_file *file_priv) static inline bool drm_is_render_client(const struct drm_file *file_priv)
{ {
return file_priv->minor->type == DRM_MINOR_RENDER; return file_priv->minor->type == DRM_MINOR_RENDER;
} }
static inline bool drm_is_control_client(const struct drm_file *file_priv)
{
return file_priv->minor->type == DRM_MINOR_CONTROL;
}
static inline bool drm_is_primary_client(const struct drm_file *file_priv)
{
return file_priv->minor->type == DRM_MINOR_LEGACY;
}
/******************************************************************/ /******************************************************************/
/** \name Internal function definitions */ /** \name Internal function definitions */
/*@{*/ /*@{*/
...@@ -1217,6 +1229,7 @@ extern long drm_ioctl(struct file *filp, ...@@ -1217,6 +1229,7 @@ extern long drm_ioctl(struct file *filp,
extern long drm_compat_ioctl(struct file *filp, extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int drm_lastclose(struct drm_device *dev); extern int drm_lastclose(struct drm_device *dev);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* Device support (drm_fops.h) */ /* Device support (drm_fops.h) */
extern struct mutex drm_global_mutex; extern struct mutex drm_global_mutex;
......
...@@ -244,6 +244,10 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); ...@@ -244,6 +244,10 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
extern int ttm_ref_object_add(struct ttm_object_file *tfile, extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base, struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed); enum ttm_ref_type ref_type, bool *existed);
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base);
/** /**
* ttm_ref_object_base_unref * ttm_ref_object_base_unref
* *
......
...@@ -89,6 +89,15 @@ ...@@ -89,6 +89,15 @@
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 #define DRM_VMW_PARAM_MAX_MOB_SIZE 10
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
*
*/
enum drm_vmw_handle_type {
DRM_VMW_HANDLE_LEGACY = 0,
DRM_VMW_HANDLE_PRIME = 1
};
/** /**
* struct drm_vmw_getparam_arg * struct drm_vmw_getparam_arg
* *
...@@ -177,6 +186,7 @@ struct drm_vmw_surface_create_req { ...@@ -177,6 +186,7 @@ struct drm_vmw_surface_create_req {
* struct drm_wmv_surface_arg * struct drm_wmv_surface_arg
* *
* @sid: Surface id of created surface or surface to destroy or reference. * @sid: Surface id of created surface or surface to destroy or reference.
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
* *
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl. * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
...@@ -185,7 +195,7 @@ struct drm_vmw_surface_create_req { ...@@ -185,7 +195,7 @@ struct drm_vmw_surface_create_req {
struct drm_vmw_surface_arg { struct drm_vmw_surface_arg {
int32_t sid; int32_t sid;
uint32_t pad64; enum drm_vmw_handle_type handle_type;
}; };
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment