Commit 8afa13a0 authored by Zack Rusin

drm/vmwgfx: Implement DRIVER_GEM

This is the initial change adding DRIVER_GEM support to vmwgfx. vmwgfx
was written before GEM and has always used TTM. Over the years the
TTM buffers started inheriting from GEM objects, but vmwgfx never
implemented GEM, which made things quite awkward: we were directly
setting variables in GEM objects just to keep DRM from crashing.

This change brings vmwgfx in line with other DRM drivers and allows us
to use the many DRM helpers that depend on GEM support in the driver.

For historical reasons vmwgfx splits the idea of a buffer and a surface,
which makes things a little tricky since either one can be used in most
of our ioctls that take user-space handles. For now our BOs are
GEM objects and our surfaces are opaque objects which are backed by
GEM objects. In the future I'd like to combine those into a single
BO, but we don't want to break any of our existing ioctls, so it will
take time to do it in a non-destructive way.
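
As a rough sketch of what the handle-namespace split enables (this
mirrors the idr_init_base() comment in the ttm_object.c hunk below;
the helper vmw_handle_is_gem() is hypothetical and not part of this
patch), an ioctl that accepts either kind of handle could classify it
by range alone:

	/* Hypothetical helper, for illustration only: GEM buffer
	 * handles occupy 1..VMWGFX_NUM_MOB, while TTM base-object
	 * (surface) handles are allocated from VMWGFX_NUM_MOB + 1 up.
	 */
	static inline bool vmw_handle_is_gem(uint32_t handle)
	{
		return handle >= 1 && handle <= VMWGFX_NUM_MOB;
	}
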
Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211206172620.3139754-5-zack@kde.org
parent 8ad0c3fd

drivers/gpu/drm/vmwgfx/Kconfig

@@ -4,6 +4,7 @@ config DRM_VMWGFX
 	depends on DRM && PCI && MMU
 	depends on X86 || ARM64
 	select DRM_TTM
+	select DRM_TTM_HELPER
 	select MAPPING_DIRTY_HELPERS
 	# Only needed for the transitional use of drm_crtc_init - can be removed
 	# again once vmwgfx sets up the primary plane itself.

drivers/gpu/drm/vmwgfx/Makefile

@@ -9,7 +9,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
 	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
 	    vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
-	    vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o
+	    vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
+	    vmwgfx_gem.o

 vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
 vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o

drivers/gpu/drm/vmwgfx/ttm_object.c

@@ -50,6 +50,7 @@
 #include <linux/atomic.h>
 #include <linux/module.h>
 #include "ttm_object.h"
+#include "vmwgfx_drv.h"

 MODULE_IMPORT_NS(DMA_BUF);
@@ -73,7 +74,7 @@ struct ttm_object_file {
 	struct ttm_object_device *tdev;
 	spinlock_t lock;
 	struct list_head ref_list;
-	struct vmwgfx_open_hash ref_hash[TTM_REF_NUM];
+	struct vmwgfx_open_hash ref_hash;
 	struct kref refcount;
 };
@@ -124,7 +125,6 @@ struct ttm_ref_object {
 	struct vmwgfx_hash_item hash;
 	struct list_head head;
 	struct kref kref;
-	enum ttm_ref_type ref_type;
 	struct ttm_base_object *obj;
 	struct ttm_object_file *tfile;
 };
@@ -160,9 +160,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 			 struct ttm_base_object *base,
 			 bool shareable,
 			 enum ttm_object_type object_type,
-			 void (*refcount_release) (struct ttm_base_object **),
-			 void (*ref_obj_release) (struct ttm_base_object *,
-						  enum ttm_ref_type ref_type))
+			 void (*refcount_release) (struct ttm_base_object **))
 {
 	struct ttm_object_device *tdev = tfile->tdev;
 	int ret;
@@ -170,7 +168,6 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	base->shareable = shareable;
 	base->tfile = ttm_object_file_ref(tfile);
 	base->refcount_release = refcount_release;
-	base->ref_obj_release = ref_obj_release;
 	base->object_type = object_type;
 	kref_init(&base->refcount);
 	idr_preload(GFP_KERNEL);
@@ -182,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 		return ret;

 	base->handle = ret;
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+	ret = ttm_ref_object_add(tfile, base, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;
@@ -246,7 +243,7 @@ struct ttm_base_object *
 ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
 {
 	struct vmwgfx_hash_item *hash;
-	struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
 	int ret;

 	rcu_read_lock();
@@ -266,7 +263,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 {
 	struct ttm_base_object *base = NULL;
 	struct vmwgfx_hash_item *hash;
-	struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
 	int ret;

 	rcu_read_lock();
@@ -297,57 +294,12 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 	return base;
 }

-/**
- * ttm_ref_object_exists - Check whether a caller has a valid ref object
- * (has opened) a base object.
- *
- * @tfile: Pointer to a struct ttm_object_file identifying the caller.
- * @base: Pointer to a struct base object.
- *
- * Checks whether the caller identified by @tfile has put a valid USAGE
- * reference object on the base object identified by @base.
- */
-bool ttm_ref_object_exists(struct ttm_object_file *tfile,
-			   struct ttm_base_object *base)
-{
-	struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
-	struct vmwgfx_hash_item *hash;
-	struct ttm_ref_object *ref;
-
-	rcu_read_lock();
-	if (unlikely(vmwgfx_ht_find_item_rcu(ht, base->handle, &hash) != 0))
-		goto out_false;
-
-	/*
-	 * Verify that the ref object is really pointing to our base object.
-	 * Our base object could actually be dead, and the ref object pointing
-	 * to another base object with the same handle.
-	 */
-	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-	if (unlikely(base != ref->obj))
-		goto out_false;
-
-	/*
-	 * Verify that the ref->obj pointer was actually valid!
-	 */
-	rmb();
-	if (unlikely(kref_read(&ref->kref) == 0))
-		goto out_false;
-
-	rcu_read_unlock();
-	return true;
-
-out_false:
-	rcu_read_unlock();
-	return false;
-}
-
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
-		       enum ttm_ref_type ref_type, bool *existed,
+		       bool *existed,
 		       bool require_existed)
 {
-	struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
 	struct ttm_ref_object *ref;
 	struct vmwgfx_hash_item *hash;
 	int ret = -EINVAL;
@@ -382,7 +334,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		ref->hash.key = base->handle;
 		ref->obj = base;
 		ref->tfile = tfile;
-		ref->ref_type = ref_type;
 		kref_init(&ref->kref);

 		spin_lock(&tfile->lock);
@@ -411,27 +362,23 @@ ttm_ref_object_release(struct kref *kref)
 {
 	struct ttm_ref_object *ref =
 	    container_of(kref, struct ttm_ref_object, kref);
-	struct ttm_base_object *base = ref->obj;
 	struct ttm_object_file *tfile = ref->tfile;
 	struct vmwgfx_open_hash *ht;

-	ht = &tfile->ref_hash[ref->ref_type];
+	ht = &tfile->ref_hash;
 	(void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
 	list_del(&ref->head);
 	spin_unlock(&tfile->lock);

-	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
-		base->ref_obj_release(base, ref->ref_type);
-
 	ttm_base_object_unref(&ref->obj);
 	kfree_rcu(ref, rcu_head);
 	spin_lock(&tfile->lock);
 }

 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
-			      unsigned long key, enum ttm_ref_type ref_type)
+			      unsigned long key)
 {
-	struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
 	struct ttm_ref_object *ref;
 	struct vmwgfx_hash_item *hash;
 	int ret;
@@ -452,7 +399,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 {
 	struct ttm_ref_object *ref;
 	struct list_head *list;
-	unsigned int i;
 	struct ttm_object_file *tfile = *p_tfile;

 	*p_tfile = NULL;
@@ -470,8 +416,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	}

 	spin_unlock(&tfile->lock);
-	for (i = 0; i < TTM_REF_NUM; ++i)
-		vmwgfx_ht_remove(&tfile->ref_hash[i]);
+	vmwgfx_ht_remove(&tfile->ref_hash);

 	ttm_object_file_unref(&tfile);
 }
@@ -480,8 +425,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 					     unsigned int hash_order)
 {
 	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
-	unsigned int i;
-	unsigned int j = 0;
 	int ret;

 	if (unlikely(tfile == NULL))
@@ -492,18 +435,13 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 	kref_init(&tfile->refcount);
 	INIT_LIST_HEAD(&tfile->ref_list);

-	for (i = 0; i < TTM_REF_NUM; ++i) {
-		ret = vmwgfx_ht_create(&tfile->ref_hash[i], hash_order);
-		if (ret) {
-			j = i;
-			goto out_err;
-		}
-	}
+	ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
+	if (ret)
+		goto out_err;

 	return tfile;
 out_err:
-	for (i = 0; i < j; ++i)
-		vmwgfx_ht_remove(&tfile->ref_hash[i]);
+	vmwgfx_ht_remove(&tfile->ref_hash);

 	kfree(tfile);
@@ -526,7 +464,15 @@ ttm_object_device_init(unsigned int hash_order,
 	if (ret != 0)
 		goto out_no_object_hash;

-	idr_init_base(&tdev->idr, 1);
+	/*
+	 * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
+	 * a separate namespace for GEM handles (which are
+	 * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
+	 * can take either handle as an argument so we want to
+	 * easily be able to tell whether the handle refers to a
+	 * GEM buffer or a surface.
+	 */
+	idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
 	tdev->ops = *ops;
 	tdev->dmabuf_release = tdev->ops.release;
 	tdev->ops.release = ttm_prime_dmabuf_release;
@@ -647,7 +593,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 	prime = (struct ttm_prime_object *) dma_buf->priv;
 	base = &prime->base;
 	*handle = base->handle;
-	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+	ret = ttm_ref_object_add(tfile, base, NULL, false);

 	dma_buf_put(dma_buf);
@@ -741,7 +687,6 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
  * @shareable: See ttm_base_object_init
  * @type: See ttm_base_object_init
  * @refcount_release: See ttm_base_object_init
- * @ref_obj_release: See ttm_base_object_init
  *
  * Initializes an object which is compatible with the drm_prime model
  * for data sharing between processes and devices.
@@ -749,9 +694,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
 			  struct ttm_prime_object *prime, bool shareable,
 			  enum ttm_object_type type,
-			  void (*refcount_release) (struct ttm_base_object **),
-			  void (*ref_obj_release) (struct ttm_base_object *,
-						   enum ttm_ref_type ref_type))
+			  void (*refcount_release) (struct ttm_base_object **))
 {
 	mutex_init(&prime->mutex);
 	prime->size = PAGE_ALIGN(size);
@@ -760,6 +703,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
 	prime->refcount_release = refcount_release;
 	return ttm_base_object_init(tfile, &prime->base, shareable,
 				    ttm_prime_type,
-				    ttm_prime_refcount_release,
-				    ref_obj_release);
+				    ttm_prime_refcount_release);
 }

drivers/gpu/drm/vmwgfx/ttm_object.h

@@ -44,28 +44,6 @@
 #include "vmwgfx_hashtab.h"

-/**
- * enum ttm_ref_type
- *
- * Describes what type of reference a ref object holds.
- *
- * TTM_REF_USAGE is a simple refcount on a base object.
- *
- * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
- * buffer object.
- *
- * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
- * buffer object.
- *
- */
-
-enum ttm_ref_type {
-	TTM_REF_USAGE,
-	TTM_REF_SYNCCPU_READ,
-	TTM_REF_SYNCCPU_WRITE,
-	TTM_REF_NUM
-};
-
 /**
  * enum ttm_object_type
  *
@@ -76,7 +54,6 @@ enum ttm_ref_type {
 enum ttm_object_type {
 	ttm_fence_type,
-	ttm_buffer_type,
 	ttm_lock_type,
 	ttm_prime_type,
 	ttm_driver_type0 = 256,
@@ -127,8 +104,6 @@ struct ttm_base_object {
 	struct ttm_object_file *tfile;
 	struct kref refcount;
 	void (*refcount_release) (struct ttm_base_object **base);
-	void (*ref_obj_release) (struct ttm_base_object *base,
-				 enum ttm_ref_type ref_type);
 	u32 handle;
 	enum ttm_object_type object_type;
 	u32 shareable;
@@ -177,11 +152,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
 				bool shareable,
 				enum ttm_object_type type,
 				void (*refcount_release) (struct ttm_base_object
-							  **),
-				void (*ref_obj_release) (struct ttm_base_object
-							 *,
-							 enum ttm_ref_type
-							 ref_type));
+							  **));

 /**
  * ttm_base_object_lookup
@@ -245,12 +216,9 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
 			      struct ttm_base_object *base,
-			      enum ttm_ref_type ref_type, bool *existed,
+			      bool *existed,
 			      bool require_existed);

-extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
-				  struct ttm_base_object *base);
-
 /**
  * ttm_ref_object_base_unref
  *
@@ -263,8 +231,7 @@ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
  * will be unreferenced.
  */
 extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
-				     unsigned long key,
-				     enum ttm_ref_type ref_type);
+				     unsigned long key);

 /**
  * ttm_object_file_init - initialize a struct ttm_object file
@@ -328,10 +295,7 @@ extern int ttm_prime_object_init(struct ttm_object_file *tfile,
 				 bool shareable,
 				 enum ttm_object_type type,
 				 void (*refcount_release)
-				 (struct ttm_base_object **),
-				 void (*ref_obj_release)
-				 (struct ttm_base_object *,
-				  enum ttm_ref_type ref_type));
+				 (struct ttm_base_object **));

 static inline enum ttm_object_type
 ttm_base_object_type(struct ttm_base_object *base)

drivers/gpu/drm/vmwgfx/vmwgfx_bo.c

@@ -32,18 +32,6 @@
 #include "ttm_object.h"

-/**
- * struct vmw_user_buffer_object - User-space-visible buffer object
- *
- * @prime: The prime object providing user visibility.
- * @vbo: The struct vmw_buffer_object
- */
-struct vmw_user_buffer_object {
-	struct ttm_prime_object prime;
-	struct vmw_buffer_object vbo;
-};
-
 /**
  * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
  * vmw_buffer_object.
@@ -59,23 +47,6 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
 }

-/**
- * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_user_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
- * object.
- */
-static struct vmw_user_buffer_object *
-vmw_user_buffer_object(struct ttm_buffer_object *bo)
-{
-	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
-	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
-}
-
 /**
  * vmw_bo_pin_in_placement - Validate a buffer to placement.
  *
@@ -403,27 +374,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 	WARN_ON(vmw_bo->dirty);
 	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
 	vmw_bo_unmap(vmw_bo);
-	dma_resv_fini(&bo->base._resv);
+	drm_gem_object_release(&bo->base);
 	kfree(vmw_bo);
 }

-/**
- * vmw_user_bo_destroy - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
-	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
-
-	WARN_ON(vbo->dirty);
-	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
-	vmw_bo_unmap(vbo);
-	ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
 /**
  * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
  *
@@ -443,15 +397,16 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 		.no_wait_gpu = false
 	};
 	struct ttm_buffer_object *bo;
+	struct drm_device *vdev = &dev_priv->drm;
 	int ret;

 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (unlikely(!bo))
 		return -ENOMEM;

-	bo->base.size = size;
-	dma_resv_init(&bo->base._resv);
-	drm_vma_node_reset(&bo->base.vma_node);
+	size = ALIGN(size, PAGE_SIZE);
+
+	drm_gem_private_object_init(vdev, &bo->base, size);

 	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
 				   ttm_bo_type_kernel, placement, 0,
@@ -470,6 +425,33 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 	return ret;
 }

+int vmw_bo_create(struct vmw_private *vmw,
+		  size_t size, struct ttm_placement *placement,
+		  bool interruptible, bool pin,
+		  void (*bo_free)(struct ttm_buffer_object *bo),
+		  struct vmw_buffer_object **p_bo)
+{
+	int ret;
+
+	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
+	if (unlikely(!*p_bo)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_bo_init(vmw, *p_bo, size,
+			  placement, interruptible, pin,
+			  bo_free);
+	if (unlikely(ret != 0))
+		goto out_error;
+
+	return ret;
+out_error:
+	kfree(*p_bo);
+	*p_bo = NULL;
+	return ret;
+}
+
 /**
  * vmw_bo_init - Initialize a vmw buffer object
  *
@@ -495,22 +477,21 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 		.no_wait_gpu = false
 	};
 	struct ttm_device *bdev = &dev_priv->bdev;
+	struct drm_device *vdev = &dev_priv->drm;
 	int ret;
-	bool user = (bo_free == &vmw_user_bo_destroy);

-	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+	WARN_ON_ONCE(!bo_free);

 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
 	vmw_bo->base.priority = 3;
 	vmw_bo->res_tree = RB_ROOT;

-	vmw_bo->base.base.size = size;
-	dma_resv_init(&vmw_bo->base.base._resv);
-	drm_vma_node_reset(&vmw_bo->base.base.vma_node);
+	size = ALIGN(size, PAGE_SIZE);
+	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

 	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
-				   ttm_bo_type_device, placement,
+				   ttm_bo_type_device,
+				   placement,
 				   0, &ctx, NULL, NULL, bo_free);
 	if (unlikely(ret)) {
 		return ret;
@@ -519,159 +500,16 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 	if (pin)
 		ttm_bo_pin(&vmw_bo->base);
 	ttm_bo_unreserve(&vmw_bo->base);
-	return 0;
-}
-
-/**
- * vmw_user_bo_release - TTM reference base object release callback for
- * vmw user buffer objects
- *
- * @p_base: The TTM base object pointer about to be unreferenced.
- *
- * Clears the TTM base object pointer and drops the reference the
- * base object has on the underlying struct vmw_buffer_object.
- */
-static void vmw_user_bo_release(struct ttm_base_object **p_base)
-{
-	struct vmw_user_buffer_object *vmw_user_bo;
-	struct ttm_base_object *base = *p_base;
-
-	*p_base = NULL;
-
-	if (unlikely(base == NULL))
-		return;
-
-	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
-				   prime.base);
-	ttm_bo_put(&vmw_user_bo->vbo.base);
-}
-
-/**
- * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
- * for vmw user buffer objects
- *
- * @base: Pointer to the TTM base object
- * @ref_type: Reference type of the reference reaching zero.
- *
- * Called when user-space drops its last synccpu reference on the buffer
- * object, either explicitly or as part of a cleanup file close.
- */
-static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
-					enum ttm_ref_type ref_type)
-{
-	struct vmw_user_buffer_object *user_bo;
-
-	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
-
-	switch (ref_type) {
-	case TTM_REF_SYNCCPU_WRITE:
-		atomic_dec(&user_bo->vbo.cpu_writers);
-		break;
-	default:
-		WARN_ONCE(true, "Undefined buffer object reference release.\n");
-	}
-}
-
-/**
- * vmw_user_bo_alloc - Allocate a user buffer object
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the buffer object.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
- * should be assigned.
- * @p_base: The TTM base object pointer about to be allocated.
- * Return: Zero on success, negative error code on error.
- */
-int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-		      struct ttm_object_file *tfile,
-		      uint32_t size,
-		      bool shareable,
-		      uint32_t *handle,
-		      struct vmw_buffer_object **p_vbo,
-		      struct ttm_base_object **p_base)
-{
-	struct vmw_user_buffer_object *user_bo;
-	int ret;
-
-	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-	if (unlikely(!user_bo)) {
-		DRM_ERROR("Failed to allocate a buffer.\n");
-		return -ENOMEM;
-	}
-
-	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
-			  (dev_priv->has_mob) ?
-			  &vmw_sys_placement :
-			  &vmw_vram_sys_placement, true, false,
-			  &vmw_user_bo_destroy);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ttm_bo_get(&user_bo->vbo.base);
-	ret = ttm_prime_object_init(tfile,
-				    size,
-				    &user_bo->prime,
-				    shareable,
-				    ttm_buffer_type,
-				    &vmw_user_bo_release,
-				    &vmw_user_bo_ref_obj_release);
-	if (unlikely(ret != 0)) {
-		ttm_bo_put(&user_bo->vbo.base);
-		goto out_no_base_object;
-	}
-
-	*p_vbo = &user_bo->vbo;
-	if (p_base) {
-		*p_base = &user_bo->prime.base;
-		kref_get(&(*p_base)->refcount);
-	}
-	*handle = user_bo->prime.base.handle;
-
-out_no_base_object:
-	return ret;
-}
-
-/**
- * vmw_user_bo_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-			      struct ttm_object_file *tfile)
-{
-	struct vmw_user_buffer_object *vmw_user_bo;
-
-	if (unlikely(bo->destroy != vmw_user_bo_destroy))
-		return -EPERM;
-
-	vmw_user_bo = vmw_user_buffer_object(bo);
-
-	/* Check that the caller has opened the object. */
-	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 	return 0;
-
-	DRM_ERROR("Could not grant buffer access.\n");
-	return -EPERM;
 }

 /**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
  * access, idling previous GPU operations on the buffer and optionally
  * blocking it for further command submissions.
  *
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
+ * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
  * @flags: Flags indicating how the grab should be performed.
  * Return: Zero on success, Negative error code on error. In particular,
  * -EBUSY will be returned if a dontblock operation is requested and the
@@ -680,13 +518,11 @@ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
  *
  * A blocking grab will be automatically released when @tfile is closed.
  */
-static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
-				    struct ttm_object_file *tfile,
+static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
 				    uint32_t flags)
 {
 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
-	struct ttm_buffer_object *bo = &user_bo->vbo.base;
-	bool existed;
+	struct ttm_buffer_object *bo = &vmw_bo->base;
 	int ret;

 	if (flags & drm_vmw_synccpu_allow_cs) {
@@ -708,17 +544,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,

 	ret = ttm_bo_wait(bo, true, nonblock);
 	if (likely(ret == 0))
-		atomic_inc(&user_bo->vbo.cpu_writers);
+		atomic_inc(&vmw_bo->cpu_writers);

 	ttm_bo_unreserve(bo);
 	if (unlikely(ret != 0))
 		return ret;

-	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-				 TTM_REF_SYNCCPU_WRITE, &existed, false);
-	if (ret != 0 || existed)
-		atomic_dec(&user_bo->vbo.cpu_writers);
-
 	return ret;
 }

@@ -726,19 +557,23 @@ static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
  * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
  * and unblock command submission on the buffer if blocked.
  *
+ * @filp: Identifying the caller.
  * @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
  * @flags: Flags indicating the type of release.
  */
-static int vmw_user_bo_synccpu_release(uint32_t handle,
-				       struct ttm_object_file *tfile,
+static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+				       uint32_t handle,
 				       uint32_t flags)
 {
-	if (!(flags & drm_vmw_synccpu_allow_cs))
-		return ttm_ref_object_base_unref(tfile, handle,
-						 TTM_REF_SYNCCPU_WRITE);
+	struct vmw_buffer_object *vmw_bo;
+	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

-	return 0;
+	if (!(flags & drm_vmw_synccpu_allow_cs)) {
+		atomic_dec(&vmw_bo->cpu_writers);
+	}
+	ttm_bo_put(&vmw_bo->base);
+
+	return ret;
 }

@@ -760,9 +595,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_synccpu_arg *arg =
 		(struct drm_vmw_synccpu_arg *) data;
 	struct vmw_buffer_object *vbo;
-	struct vmw_user_buffer_object *user_bo;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct ttm_base_object *buffer_base;
 	int ret;

 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -775,16 +607,12 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,

 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
-					 &buffer_base);
+		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
 		if (unlikely(ret != 0))
 			return ret;

-		user_bo = container_of(vbo, struct vmw_user_buffer_object,
-				       vbo);
-		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
 		vmw_bo_unreference(&vbo);
-		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -793,7 +621,8 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 		}
 		break;
 	case drm_vmw_synccpu_release:
-		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+		ret = vmw_user_bo_synccpu_release(file_priv,
+						  arg->handle,
 						  arg->flags);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
@@ -809,50 +638,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

-/**
- * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
- * allocation functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- * Return: Zero on success, negative error code on error.
- *
- * This function checks the ioctl arguments for validity and allocates a
- * struct vmw_user_buffer_object bo.
- */
-int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	union drm_vmw_alloc_dmabuf_arg *arg =
-	    (union drm_vmw_alloc_dmabuf_arg *)data;
-	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
-	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-	struct vmw_buffer_object *vbo;
-	uint32_t handle;
-	int ret;
-
-	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				req->size, false, &handle, &vbo,
-				NULL);
-	if (unlikely(ret != 0))
-		goto out_no_bo;
-
-	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
-	rep->cur_gmr_id = handle;
-	rep->cur_gmr_offset = 0;
-
-	vmw_bo_unreference(&vbo);
-
-out_no_bo:
-	return ret;
-}
-
 /**
  * vmw_bo_unref_ioctl - Generic handle close ioctl.
  *
@@ -870,65 +655,48 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_unref_dmabuf_arg *arg =
 	    (struct drm_vmw_unref_dmabuf_arg *)data;

-	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 arg->handle,
-					 TTM_REF_USAGE);
+	drm_gem_handle_delete(file_priv, arg->handle);
+	return 0;
 }

 /**
  * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
  *
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The file the handle is registered with.
  * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
  * struct vmw_buffer_object should be placed.
- * @p_base: Pointer to where a pointer to the TTM base object should be
- * placed, or NULL if no such pointer is required.
  * Return: Zero on success, Negative error code on error.
  *
- * Both the output base object pointer and the vmw buffer object pointer
- * will be refcounted.
+ * The vmw buffer object pointer will be refcounted.
  */
-int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-		       uint32_t handle, struct vmw_buffer_object **out,
-		       struct ttm_base_object **p_base)
+int vmw_user_bo_lookup(struct drm_file *filp,
+		       uint32_t handle,
+		       struct vmw_buffer_object **out)
 {
-	struct vmw_user_buffer_object *vmw_user_bo;
-	struct ttm_base_object *base;
+	struct drm_gem_object *gobj;

-	base = ttm_base_object_lookup(tfile, handle);
-	if (unlikely(base == NULL)) {
+	gobj = drm_gem_object_lookup(filp, handle);
+	if (!gobj) {
 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 			  (unsigned long)handle);
 		return -ESRCH;
 	}

-	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
-		ttm_base_object_unref(&base);
-		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
-			  (unsigned long)handle);
-		return -EINVAL;
-	}
-
-	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
-				   prime.base);
-	ttm_bo_get(&vmw_user_bo->vbo.base);
-	if (p_base)
-		*p_base = base;
-	else
-		ttm_base_object_unref(&base);
-	*out = &vmw_user_bo->vbo;
+	*out = gem_to_vmw_bo(gobj);
+	ttm_bo_get(&(*out)->base);
+	drm_gem_object_put(gobj);

 	return 0;
 }

 /**
  * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The TTM object file the handle is registered with.
  * @handle: The user buffer object handle.
  *
- * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * This function looks up a struct vmw_bo and returns a pointer to the
  * struct vmw_buffer_object it derives from without refcounting the pointer.
  * The returned pointer is only valid until vmw_user_bo_noref_release() is
  * called, and the object pointed to by the returned pointer may be doomed.
@@ -941,52 +709,23 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
  * error pointer on failure.
  */
 struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
 {
-	struct vmw_user_buffer_object *vmw_user_bo;
-	struct ttm_base_object *base;
+	struct vmw_buffer_object *vmw_bo;
+	struct ttm_buffer_object *bo;
+	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);

-	base = ttm_base_object_noref_lookup(tfile, handle);
-	if (!base) {
+	if (!gobj) {
 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 			  (unsigned long)handle);
 		return ERR_PTR(-ESRCH);
 	}
+	vmw_bo = gem_to_vmw_bo(gobj);
+	bo = ttm_bo_get_unless_zero(&vmw_bo->base);
+	vmw_bo = vmw_buffer_object(bo);
+	drm_gem_object_put(gobj);

-	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
-		ttm_base_object_noref_release();
-		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
-			  (unsigned long)handle);
-		return ERR_PTR(-EINVAL);
-	}
-
-	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
-				   prime.base);
-	return &vmw_user_bo->vbo;
-}
-
-/**
- * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
- *
- * @tfile: The TTM object file to register the handle with.
- * @vbo: The embedded vmw buffer object.
- * @handle: Pointer to where the new handle should be placed.
- * Return: Zero on success, Negative error code on error.
- */
-int vmw_user_bo_reference(struct ttm_object_file *tfile,
-			  struct vmw_buffer_object *vbo,
-			  uint32_t *handle)
-{
-	struct vmw_user_buffer_object *user_bo;
-
-	if (vbo->base.destroy != vmw_user_bo_destroy)
-		return -EINVAL;
-
-	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
-
-	*handle = user_bo->prime.base.handle;
-	return ttm_ref_object_add(tfile, &user_bo->prime.base,
-				  TTM_REF_USAGE, NULL, false);
+	return vmw_bo;
 }

@@ -1040,68 +779,15 @@ int vmw_dumb_create(struct drm_file *file_priv,
 	int ret;

 	args->pitch = args->width * ((args->bpp + 7) / 8);
-	args->size = args->pitch * args->height;
+	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

-	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				args->size, false, &args->handle,
-				&vbo, NULL);
-	if (unlikely(ret != 0))
-		goto out_no_bo;
-
-	vmw_bo_unreference(&vbo);
-out_no_bo:
+	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+						args->size, &args->handle,
+						&vbo);
 	return ret;
 }

-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
-			struct drm_device *dev, uint32_t handle,
-			uint64_t *offset)
-{
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_buffer_object *out_buf;
-	int ret;
-
-	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
-	if (ret != 0)
-		return -EINVAL;
-
-	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
-	vmw_bo_unreference(&out_buf);
-	return 0;
-}
-
-/**
- * vmw_dumb_destroy - Destroy a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle)
-{
-	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 handle, TTM_REF_USAGE);
-}
-
 /**
  * vmw_bo_swap_notify - swapout notify callback.
  *
@@ -1110,8 +796,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
 	/* Is @bo embedded in a struct vmw_buffer_object? */
-	if (bo->destroy != vmw_bo_bo_free &&
-	    bo->destroy != vmw_user_bo_destroy)
+	if (vmw_bo_is_vmw_bo(bo))
 		return;

 	/* Kill any cached kernel maps before swapout */
@@ -1135,8 +820,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	struct vmw_buffer_object *vbo;

 	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
-	if (bo->destroy != vmw_bo_bo_free &&
-	    bo->destroy != vmw_user_bo_destroy)
+	if (vmw_bo_is_vmw_bo(bo))
 		return;

 	vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -1157,3 +841,22 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
 		vmw_resource_unbind_list(vbo);
 }
+
+/**
+ * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &vmw_bo_bo_free ||
+	    bo->destroy == &vmw_gem_destroy)
+		return true;
+
+	return false;
+}

drivers/gpu/drm/vmwgfx/vmwgfx_context.c

@@ -715,7 +715,7 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

-	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+	return ttm_ref_object_base_unref(tfile, arg->cid);
 }

 static int vmw_context_define(struct drm_device *dev, void *data,
@@ -754,7 +754,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 	tmp = vmw_resource_reference(&ctx->res);
 	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
-				   &vmw_user_context_base_release, NULL);
+				   &vmw_user_context_base_release);

 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&tmp);

drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c

@@ -407,12 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	 * for the new COTable. Initially pin the buffer object to make sure
 	 * we can use tryreserve without failure.
 	 */
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
-			  true, true, vmw_bo_bo_free);
+	ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
+			    true, true, vmw_bo_bo_free, &buf);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		return ret;

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -34,6 +34,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_sysfs.h>
+#include <drm/drm_gem_ttm_helper.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_range_manager.h>
 #include <drm/ttm/ttm_placement.h>
@@ -162,7 +163,7 @@
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
 			  DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
+	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 			  DRM_RENDER_ALLOW),
@@ -396,13 +397,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	 * immediately succeed. This is because we're the only
 	 * user of the bo currently.
 	 */
-	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
-	if (!vbo)
-		return -ENOMEM;
-
-	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+	ret = vmw_bo_create(dev_priv, PAGE_SIZE,
 			  &vmw_sys_placement, false, true,
-			  &vmw_bo_bo_free);
+			  &vmw_bo_bo_free, &vbo);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1578,7 +1575,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 static const struct drm_driver driver = {
 	.driver_features =
-	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
+	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
 	.master_set = vmw_master_set,
@@ -1587,8 +1584,7 @@ static const struct drm_driver driver = {
 	.postclose = vmw_postclose,

 	.dumb_create = vmw_dumb_create,
-	.dumb_map_offset = vmw_dumb_map_offset,
-	.dumb_destroy = vmw_dumb_destroy,
+	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,

 	.prime_fd_to_handle = vmw_prime_fd_to_handle,
 	.prime_handle_to_fd = vmw_prime_handle_to_fd,
@@ -1642,6 +1638,8 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_unload;

+	vmw_debugfs_gem_init(vmw);
+
 	return 0;
 out_unload:
 	vmw_driver_unload(&vmw->drm);

drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -361,6 +361,19 @@ struct vmw_piter {
 	dma_addr_t (*dma_address)(struct vmw_piter *);
 };

+struct vmw_ttm_tt {
+	struct ttm_tt dma_ttm;
+	struct vmw_private *dev_priv;
+	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	bool mapped;
+	bool bound;
+};
+
 /*
  * enum vmw_display_unit_type - Describes the display unit
  */
@@ -411,6 +424,7 @@ struct vmw_sw_context{
 	bool res_ht_initialized;
 	bool kernel;
 	struct vmw_fpriv *fp;
+	struct drm_file *filp;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct vmw_buffer_object *cur_query_bo;
@@ -643,6 +657,11 @@ struct vmw_private {
 #endif
 };

+static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
+{
+	return container_of((gobj), struct vmw_buffer_object, base.base);
+}
+
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
 {
 	return container_of(res, struct vmw_surface, res);
@@ -765,7 +784,7 @@ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 			       bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-				  struct ttm_object_file *tfile,
+				  struct drm_file *filp,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
 				  struct vmw_buffer_object **out_buf);
@@ -831,6 +850,7 @@ static inline void vmw_user_resource_noref_release(void)
 /**
  * Buffer object helper functions - vmwgfx_bo.c
  */
+extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
 extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
 				   struct vmw_buffer_object *bo,
 				   struct ttm_placement *placement,
@@ -855,32 +875,23 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
 				unsigned long size,
 				struct ttm_placement *placement,
 				struct ttm_buffer_object **p_bo);
+extern int vmw_bo_create(struct vmw_private *dev_priv,
+			 size_t size, struct ttm_placement *placement,
+			 bool interruptible, bool pin,
+			 void (*bo_free)(struct ttm_buffer_object *bo),
+			 struct vmw_buffer_object **p_bo);
 extern int vmw_bo_init(struct vmw_private *dev_priv,
 		       struct vmw_buffer_object *vmw_bo,
 		       size_t size, struct ttm_placement *placement,
 		       bool interruptible, bool pin,
 		       void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-				     struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t size,
-			     bool shareable,
-			     uint32_t *handle,
-			     struct vmw_buffer_object **p_dma_buf,
-			     struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
-				 struct vmw_buffer_object *dma_buf,
-				 uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
 extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-			      uint32_t id, struct vmw_buffer_object **out,
-			      struct ttm_base_object **base);
+extern int vmw_user_bo_lookup(struct drm_file *filp,
+			      uint32_t handle,
+			      struct vmw_buffer_object **out);
 extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
@@ -889,16 +900,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 			       struct ttm_resource *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
-
-/**
- * vmw_user_bo_noref_release - release a buffer object pointer looked up
- * without reference
- */
-static inline void vmw_user_bo_noref_release(void)
-{
-	ttm_base_object_noref_release();
-}
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);

 /**
  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
@@ -949,6 +951,19 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
 	vmw_bo_prio_adjust(vbo);
 }

+/**
+ * GEM related functionality - vmwgfx_gem.c
+ */
+extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+					     struct drm_file *filp,
+					     uint32_t size,
+					     uint32_t *handle,
+					     struct vmw_buffer_object **p_vbo);
+extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *filp);
+extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
+extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
+
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
  */
@@ -1212,13 +1227,6 @@ void vmw_kms_lost_device(struct drm_device *dev);
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args);
-int vmw_dumb_map_offset(struct drm_file *file_priv,
-			struct drm_device *dev, uint32_t handle,
-			uint64_t *offset);
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle);
 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
 extern void vmw_resource_unpin(struct vmw_resource *res);
 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
...@@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
int ret; int ret;
vmw_validation_preload_bo(sw_context->ctx); vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
if (IS_ERR(vmw_bo)) { if (IS_ERR_OR_NULL(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use MOB buffer.\n"); VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo); return PTR_ERR(vmw_bo);
} }
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
vmw_user_bo_noref_release(); ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
int ret; int ret;
vmw_validation_preload_bo(sw_context->ctx); vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
if (IS_ERR(vmw_bo)) { if (IS_ERR_OR_NULL(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use GMR region.\n"); VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo); return PTR_ERR(vmw_bo);
} }
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
vmw_user_bo_noref_release(); ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -3869,8 +3867,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, ...@@ -3869,8 +3867,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1; fence_rep.fd = -1;
} }
ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
TTM_REF_USAGE);
VMW_DEBUG_USER("Fence copy error. Syncing.\n"); VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false, (void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT); VMW_FENCE_WAIT_TIMEOUT);
...@@ -4099,6 +4096,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4099,6 +4096,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true; sw_context->kernel = true;
} }
sw_context->filp = file_priv;
sw_context->fp = vmw_fpriv(file_priv); sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list); INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo; sw_context->cur_query_bo = dev_priv->pinned_bo;
......
...@@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, ...@@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
struct vmw_buffer_object *vmw_bo; struct vmw_buffer_object *vmw_bo;
int ret; int ret;
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); ret = vmw_bo_create(vmw_priv, size,
if (!vmw_bo) {
ret = -ENOMEM;
goto err_unlock;
}
ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement, &vmw_sys_placement,
false, false, false, false,
&vmw_bo_bo_free); &vmw_bo_bo_free, &vmw_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */ return ret;
*out = vmw_bo; *out = vmw_bo;
err_unlock:
return ret; return ret;
} }
......
...@@ -596,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv, ...@@ -596,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
* vmw_user_fence_base_release. * vmw_user_fence_base_release.
*/ */
tmp = vmw_fence_obj_reference(&ufence->fence); tmp = vmw_fence_obj_reference(&ufence->fence);
ret = ttm_base_object_init(tfile, &ufence->base, false, ret = ttm_base_object_init(tfile, &ufence->base, false,
VMW_RES_FENCE, VMW_RES_FENCE,
&vmw_user_fence_base_release, NULL); &vmw_user_fence_base_release);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
...@@ -801,8 +802,7 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, ...@@ -801,8 +802,7 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
*/ */
if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
return ttm_ref_object_base_unref(tfile, arg->handle, return ttm_ref_object_base_unref(tfile, arg->handle);
TTM_REF_USAGE);
return ret; return ret;
} }
...@@ -844,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, ...@@ -844,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_arg *) data; (struct drm_vmw_fence_arg *) data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->handle, arg->handle);
TTM_REF_USAGE);
} }
/** /**
...@@ -1091,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, ...@@ -1091,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
if (user_fence_rep != NULL) { if (user_fence_rep != NULL) {
ret = ttm_ref_object_add(vmw_fp->tfile, base, ret = ttm_ref_object_add(vmw_fp->tfile, base,
TTM_REF_USAGE, NULL, false); NULL, false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence " DRM_ERROR("Failed to reference a fence "
"object.\n"); "object.\n");
...@@ -1134,7 +1133,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, ...@@ -1134,7 +1133,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
out_no_create: out_no_create:
if (user_fence_rep != NULL) if (user_fence_rep != NULL)
ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj: out_no_ref_obj:
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
return ret; return ret;
......
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2021 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "vmwgfx_drv.h"
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
* @bo: Pointer to the TTM buffer object.
* Return: Pointer to the struct vmw_buffer_object embedding the
* TTM buffer object.
*/
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
return container_of(bo, struct vmw_buffer_object, base);
}
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
	if (bo)
		ttm_bo_put(bo);
}
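/*
 * vmwgfx needs no per-file bookkeeping when a GEM handle is opened or
 * closed, so the two hooks below are intentional no-ops.
 */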
static int vmw_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
return 0;
}
static void vmw_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
}
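/*
 * Pinning follows the usual TTM pattern: reserve the BO, let
 * vmw_bo_pin_reserved() adjust the pin count, then unreserve.
 */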
static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (unlikely(ret != 0))
goto err;
vmw_bo_pin_reserved(vbo, do_pin);
ttm_bo_unreserve(bo);
err:
return ret;
}
static int vmw_gem_object_pin(struct drm_gem_object *obj)
{
return vmw_gem_pin_private(obj, true);
}
static void vmw_gem_object_unpin(struct drm_gem_object *obj)
{
vmw_gem_pin_private(obj, false);
}
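/*
 * For PRIME export: if vmw_ttm_map_dma() has already built an sg table
 * for this BO (cached in vsgt.sgt), hand that out; otherwise build one
 * from the TTM page array.
 */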
static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
struct vmw_ttm_tt *vmw_tt =
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
if (vmw_tt->vsgt.sgt)
return vmw_tt->vsgt.sgt;
return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}
static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
.print_info = drm_gem_ttm_print_info,
.pin = vmw_gem_object_pin,
.unpin = vmw_gem_object_unpin,
.get_sg_table = vmw_gem_object_get_sg_table,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
.mmap = drm_gem_ttm_mmap,
};
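/*
 * The table above is what lets generic DRM code drive vmwgfx BOs: the
 * drm_gem_ttm_* entries come from the shared GEM/TTM helper library,
 * while free, pin/unpin and get_sg_table bridge into the existing
 * vmwgfx TTM paths.
 */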
/**
* vmw_gem_destroy - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
void vmw_gem_destroy(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_unmap(vbo);
drm_gem_object_release(&vbo->base.base);
kfree(vbo);
}
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
struct vmw_buffer_object **p_vbo)
{
int ret;
ret = vmw_bo_create(dev_priv, size,
(dev_priv->has_mob) ?
&vmw_sys_placement :
&vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo);
	if (ret != 0)
		goto out_no_bo;
	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&(*p_vbo)->base.base);
out_no_bo:
return ret;
}
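/*
 * A minimal usage sketch (hypothetical caller; dev_priv, file_priv and
 * size are assumed to be in scope):
 *
 *	struct vmw_buffer_object *vbo;
 *	uint32_t handle;
 *	int ret;
 *
 *	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
 *						size, &handle, &vbo);
 *	if (ret)
 *		return ret;
 *
 * On success the returned handle owns the only user reference, so a
 * caller that wants to keep vbo around must take its own reference
 * first (as the surface code does with vmw_bo_reference()).
 */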
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_alloc_dmabuf_arg *arg =
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_buffer_object *vbo;
uint32_t handle;
int ret;
ret = vmw_gem_object_create_with_handle(dev_priv, filp,
req->size, &handle, &vbo);
if (ret)
goto out_no_bo;
rep->handle = handle;
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
out_no_bo:
return ret;
}
#if defined(CONFIG_DEBUG_FS)
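/*
 * Debugfs dump of every GEM handle held by every client. While the
 * TTM-to-GEM transition is under way a BO carries two refcounts, the
 * GEM one (base.base.refcount) and the TTM one (base.kref); both are
 * printed so a leak in either layer stands out.
 */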
static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
{
const char *placement;
const char *type;
switch (bo->base.resource->mem_type) {
case TTM_PL_SYSTEM:
placement = " CPU";
break;
case VMW_PL_GMR:
placement = " GMR";
break;
case VMW_PL_MOB:
placement = " MOB";
break;
case VMW_PL_SYSTEM:
placement = "VCPU";
break;
case TTM_PL_VRAM:
placement = "VRAM";
break;
default:
placement = "None";
break;
}
switch (bo->base.type) {
case ttm_bo_type_device:
type = "device";
break;
case ttm_bo_type_kernel:
type = "kernel";
break;
case ttm_bo_type_sg:
type = "sg ";
break;
default:
type = "none ";
break;
}
	seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->base.base.size, placement, type);
seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
bo->base.priority,
bo->base.pin_count,
kref_read(&bo->base.base.refcount),
kref_read(&bo->base.kref));
seq_puts(m, "\n");
}
static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
struct vmw_private *vdev = (struct vmw_private *)m->private;
struct drm_device *dev = &vdev->drm;
struct drm_file *file;
int r;
r = mutex_lock_interruptible(&dev->filelist_mutex);
if (r)
return r;
list_for_each_entry(file, &dev->filelist, lhead) {
struct task_struct *task;
struct drm_gem_object *gobj;
int id;
/*
* Although we have a valid reference on file->pid, that does
* not guarantee that the task_struct who called get_pid() is
* still alive (e.g. get_pid(current) => fork() => exit()).
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, id) {
struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
vmw_bo_print_info(id, bo, m);
}
spin_unlock(&file->table_lock);
}
mutex_unlock(&dev->filelist_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info);
#endif
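/*
 * The file registers under the DRM primary minor's debugfs directory,
 * so on a typical single-device setup it should show up as something
 * like /sys/kernel/debug/dri/0/vmwgfx_gem_info (the minor number is
 * system-dependent).
 */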
void vmw_debugfs_gem_init(struct vmw_private *vdev)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = vdev->drm.primary;
struct dentry *root = minor->debugfs_root;
debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev,
&vmw_debugfs_gem_info_fops);
#endif
}
...@@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) ...@@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer); drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface); vmw_surface_unreference(&vfbs->surface);
if (vfbs->base.user_obj)
ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs); kfree(vfbs);
} }
...@@ -996,8 +994,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) ...@@ -996,8 +994,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer); drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer); vmw_bo_unreference(&vfbd->buffer);
if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd); kfree(vfbd);
} }
...@@ -1251,6 +1247,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, ...@@ -1251,6 +1247,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1; goto out_err1;
} }
vfbd->base.base.obj[0] = &bo->base.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true; vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo); vfbd->buffer = vmw_bo_reference(bo);
...@@ -1368,34 +1365,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1368,34 +1365,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd) const struct drm_mode_fb_cmd2 *mode_cmd)
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL; struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL; struct vmw_surface *surface = NULL;
struct vmw_buffer_object *bo = NULL; struct vmw_buffer_object *bo = NULL;
struct ttm_base_object *user_obj;
int ret; int ret;
/*
* Take a reference on the user object of the resource
* backing the kms fb. This ensures that user-space handle
* lookups on that resource will always work as long as
* it's registered with a kms framebuffer. This is important,
* since vmw_execbuf_process identifies resources in the
* command stream using user-space handles.
*/
user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
}
/**
* End conditioned code.
*/
/* returns either a bo or surface */ /* returns either a bo or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile, ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0], mode_cmd->handles[0],
&surface, &bo); &surface, &bo);
if (ret) if (ret)
...@@ -1428,10 +1404,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1428,10 +1404,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret) { if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
ttm_base_object_unref(&user_obj);
return ERR_PTR(ret); return ERR_PTR(ret);
} else }
vfb->user_obj = user_obj;
return &vfb->base; return &vfb->base;
} }
......
...@@ -219,7 +219,6 @@ struct vmw_framebuffer { ...@@ -219,7 +219,6 @@ struct vmw_framebuffer {
int (*pin)(struct vmw_framebuffer *fb); int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb); int (*unpin)(struct vmw_framebuffer *fb);
bool bo; bool bo;
struct ttm_base_object *user_obj;
uint32_t user_handle; uint32_t user_handle;
}; };
......
...@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ...@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL); ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
......
...@@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev, ...@@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd) int *prime_fd)
{ {
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
} }
...@@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, ...@@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
* The pointers pointed at by out_surf and out_buf need to be NULL. * The pointers pointed at by out_surf and out_buf need to be NULL.
*/ */
int vmw_user_lookup_handle(struct vmw_private *dev_priv, int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile, struct drm_file *filp,
uint32_t handle, uint32_t handle,
struct vmw_surface **out_surf, struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf) struct vmw_buffer_object **out_buf)
{ {
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res; struct vmw_resource *res;
int ret; int ret;
...@@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, ...@@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
} }
*out_surf = NULL; *out_surf = NULL;
ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL); ret = vmw_user_bo_lookup(filp, handle, out_buf);
return ret; return ret;
} }
...@@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, ...@@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0; return 0;
} }
backup = kzalloc(sizeof(*backup), GFP_KERNEL); ret = vmw_bo_create(res->dev_priv, res->backup_size,
if (unlikely(!backup))
return -ENOMEM;
ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement, res->func->backup_placement,
interruptible, false, interruptible, false,
&vmw_bo_bo_free); &vmw_bo_bo_free, &backup);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_bo; goto out_no_bo;
......
...@@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
vps->bo_size = 0; vps->bo_size = 0;
} }
vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
if (!vps->bo)
return -ENOMEM;
vmw_svga_enable(dev_priv); vmw_svga_enable(dev_priv);
/* After we have alloced the backing store we might not be able to /* After we have alloced the backing store we might not be able to
* resume the overlays; that is preferable to failing the alloc. * resume the overlays; that is preferable to failing the alloc.
*/ */
vmw_overlay_pause_all(dev_priv); vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_init(dev_priv, vps->bo, size, ret = vmw_bo_create(dev_priv, size,
&vmw_vram_placement, &vmw_vram_placement,
false, true, &vmw_bo_bo_free); false, true, &vmw_bo_bo_free, &vps->bo);
vmw_overlay_resume_all(dev_priv); vmw_overlay_resume_all(dev_priv);
if (ret) { if (ret) {
vps->bo = NULL; /* vmw_bo_init frees on error */ vps->bo = NULL; /* vmw_bo_init frees on error */
......
...@@ -676,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, ...@@ -676,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_ref_object_base_unref(tfile, arg->handle, return ttm_ref_object_base_unref(tfile, arg->handle);
TTM_REF_USAGE);
} }
static int vmw_user_shader_alloc(struct vmw_private *dev_priv, static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
...@@ -718,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, ...@@ -718,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
tmp = vmw_resource_reference(res); tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false, ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER, VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL); &vmw_user_shader_base_release);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp); vmw_resource_unreference(&tmp);
...@@ -777,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ...@@ -777,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
int ret; int ret;
if (buffer_handle != SVGA3D_INVALID_ID) { if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(tfile, buffer_handle, ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer);
&buffer, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n"); VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret; return ret;
...@@ -894,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -894,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (!vmw_shader_id_ok(user_key, shader_type)) if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL; return -EINVAL;
/* Allocate and pin a DMA buffer */ ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
buf = kzalloc(sizeof(*buf), GFP_KERNEL); true, true, vmw_bo_bo_free, &buf);
if (unlikely(!buf))
return -ENOMEM;
ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
true, true, vmw_bo_bo_free);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
......
...@@ -172,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, ...@@ -172,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(res); tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &usimple->base, false, ret = ttm_base_object_init(tfile, &usimple->base, false,
func->ttm_res_type, func->ttm_res_type,
&vmw_simple_resource_base_release, NULL); &vmw_simple_resource_base_release);
if (ret) { if (ret) {
vmw_resource_unreference(&tmp); vmw_resource_unreference(&tmp);
......
...@@ -46,13 +46,11 @@ ...@@ -46,13 +46,11 @@
* @base: The TTM base object handling user-space visibility. * @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata. * @srf: The surface metadata.
* @master: Master of the creating client. Used for security check. * @master: Master of the creating client. Used for security check.
* @backup_base: The TTM base object of the backup buffer.
*/ */
struct vmw_user_surface { struct vmw_user_surface {
struct ttm_prime_object prime; struct ttm_prime_object prime;
struct vmw_surface srf; struct vmw_surface srf;
struct drm_master *master; struct drm_master *master;
struct ttm_base_object *backup_base;
}; };
/** /**
...@@ -686,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) ...@@ -686,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res; struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL; *p_base = NULL;
if (user_srf->backup_base)
ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
} }
...@@ -705,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, ...@@ -705,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); return ttm_ref_object_base_unref(tfile, arg->sid);
} }
/** /**
...@@ -851,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -851,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) { if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle; uint32_t backup_handle;
ret = vmw_user_bo_alloc(dev_priv, tfile, ret = vmw_gem_object_create_with_handle(dev_priv,
file_priv,
res->backup_size, res->backup_size,
true,
&backup_handle, &backup_handle,
&res->backup, &res->backup);
&user_srf->backup_base);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
goto out_unlock; goto out_unlock;
} }
vmw_bo_reference(res->backup);
} }
tmp = vmw_resource_reference(&srf->res); tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE, req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL); &vmw_user_surface_base_release);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp); vmw_resource_unreference(&tmp);
...@@ -921,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, ...@@ -921,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
VMW_DEBUG_USER("Referenced object is not a surface.\n"); VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource; goto out_bad_resource;
} }
if (handle_type != DRM_VMW_HANDLE_PRIME) { if (handle_type != DRM_VMW_HANDLE_PRIME) {
bool require_exist = false; bool require_exist = false;
...@@ -946,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, ...@@ -946,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv))) if (unlikely(drm_is_render_client(file_priv)))
require_exist = true; require_exist = true;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
require_exist);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n"); DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource; goto out_bad_resource;
...@@ -961,7 +955,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, ...@@ -961,7 +955,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
out_no_lookup: out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME) if (handle_type == DRM_VMW_HANDLE_PRIME)
(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); (void) ttm_ref_object_base_unref(tfile, handle);
return ret; return ret;
} }
...@@ -1011,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, ...@@ -1011,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes, VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
srf->metadata.num_sizes); srf->metadata.num_sizes);
ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE); ttm_ref_object_base_unref(tfile, base->handle);
ret = -EFAULT; ret = -EFAULT;
} }
...@@ -1498,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1498,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res = &user_srf->srf.res; res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) { if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle, ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
&res->backup, &res->backup);
&user_srf->backup_base);
if (ret == 0) { if (ret == 0) {
if (res->backup->base.base.size < res->backup_size) { if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n"); VMW_DEBUG_USER("Surface backup buffer too small.\n");
...@@ -1513,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1513,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
} }
} else if (req->base.drm_surface_flags & } else if (req->base.drm_surface_flags &
(drm_vmw_surface_flag_create_buffer | (drm_vmw_surface_flag_create_buffer |
drm_vmw_surface_flag_coherent)) drm_vmw_surface_flag_coherent)) {
ret = vmw_user_bo_alloc(dev_priv, tfile, ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
res->backup_size, res->backup_size,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle, &backup_handle,
&res->backup, &res->backup);
&user_srf->backup_base); if (ret == 0)
vmw_bo_reference(res->backup);
}
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
...@@ -1552,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1552,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
req->base.drm_surface_flags & req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable, drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE, VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL); &vmw_user_surface_base_release);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp); vmw_resource_unreference(&tmp);
...@@ -1572,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1572,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->buffer_size = 0; rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID; rep->buffer_handle = SVGA3D_INVALID_ID;
} }
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
out_unlock: out_unlock:
...@@ -1595,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, ...@@ -1595,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf; struct vmw_surface *srf;
struct vmw_user_surface *user_srf; struct vmw_user_surface *user_srf;
struct vmw_surface_metadata *metadata; struct vmw_surface_metadata *metadata;
struct ttm_base_object *base; struct ttm_base_object *base;
uint32_t backup_handle; u32 backup_handle;
int ret; int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
...@@ -1617,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, ...@@ -1617,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
metadata = &srf->metadata; metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle); ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
if (ret != 0) {
if (unlikely(ret != 0)) { drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
DRM_ERROR("Could not add a reference to a GB surface " req->sid);
"backup buffer.\n");
(void) ttm_ref_object_base_unref(tfile, base->handle,
TTM_REF_USAGE);
goto out_bad_resource; goto out_bad_resource;
} }
......
...@@ -167,18 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = { ...@@ -167,18 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = {
.busy_placement = &sys_placement_flags .busy_placement = &sys_placement_flags
}; };
struct vmw_ttm_tt {
struct ttm_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
struct vmw_mob *mob;
int mem_type;
struct sg_table sgt;
struct vmw_sg_table vsgt;
bool mapped;
bool bound;
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/** /**
...@@ -311,11 +299,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) ...@@ -311,11 +299,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
vsgt->pages = vmw_tt->dma_ttm.pages; vsgt->pages = vmw_tt->dma_ttm.pages;
vsgt->num_pages = vmw_tt->dma_ttm.num_pages; vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address; vsgt->addrs = vmw_tt->dma_ttm.dma_address;
vsgt->sgt = &vmw_tt->sgt; vsgt->sgt = NULL;
switch (dev_priv->map_mode) { switch (dev_priv->map_mode) {
case vmw_dma_map_bind: case vmw_dma_map_bind:
case vmw_dma_map_populate: case vmw_dma_map_populate:
vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment( ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT, (unsigned long)vsgt->num_pages << PAGE_SHIFT,
......
...@@ -27,30 +27,44 @@ ...@@ -27,30 +27,44 @@
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev, static int vmw_bo_vm_lookup(struct ttm_device *bdev,
struct drm_file *filp,
unsigned long offset, unsigned long offset,
unsigned long pages) unsigned long pages,
struct ttm_buffer_object **p_bo)
{ {
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
struct drm_device *drm = &dev_priv->drm; struct drm_device *drm = &dev_priv->drm;
struct drm_vma_offset_node *node; struct drm_vma_offset_node *node;
struct ttm_buffer_object *bo = NULL; int ret;
*p_bo = NULL;
drm_vma_offset_lock_lookup(bdev->vma_manager); drm_vma_offset_lock_lookup(bdev->vma_manager);
node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages); node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) { if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object, *p_bo = container_of(node, struct ttm_buffer_object,
base.vma_node); base.vma_node);
bo = ttm_bo_get_unless_zero(bo); *p_bo = ttm_bo_get_unless_zero(*p_bo);
} }
drm_vma_offset_unlock_lookup(bdev->vma_manager); drm_vma_offset_unlock_lookup(bdev->vma_manager);
if (!bo) if (!*p_bo) {
drm_err(drm, "Could not find buffer object to map\n"); drm_err(drm, "Could not find buffer object to map\n");
return -EINVAL;
}
if (!drm_vma_node_is_allowed(node, filp)) {
ret = -EACCES;
goto out_no_access;
}
return bo; return 0;
out_no_access:
ttm_bo_put(*p_bo);
return ret;
} }
int vmw_mmap(struct file *filp, struct vm_area_struct *vma) int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
...@@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
}; };
struct drm_file *file_priv = filp->private_data; struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_device *bdev = &dev_priv->bdev; struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
int ret; int ret;
...@@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START)) if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
return -EINVAL; return -EINVAL;
bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
if (unlikely(!bo))
return -EINVAL;
ret = vmw_user_bo_verify_access(bo, tfile);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_unref; return ret;
ret = ttm_bo_mmap_obj(vma, bo); ret = ttm_bo_mmap_obj(vma, bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
......
...@@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, ...@@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->stream_id, TTM_REF_USAGE); arg->stream_id);
} }
/** /**
......