Commit 94bc2249 authored by Danilo Krummrich

drm/gpuvm: add an abstraction for a VM / BO combination

Add an abstraction layer between the drm_gpuva mappings of a particular
drm_gem_object and this GEM object itself. The abstraction represents a
combination of a drm_gem_object and drm_gpuvm. The drm_gem_object holds
a list of drm_gpuvm_bo structures (the structure representing this
abstraction), while each drm_gpuvm_bo contains a list of mappings of this
GEM object.

This has multiple advantages:

1) We can use the drm_gpuvm_bo structure to attach it to various lists
   of the drm_gpuvm. This is useful for tracking external and evicted
   objects per VM, which is introduced in subsequent patches.

2) Finding mappings of a certain drm_gem_object mapped in a certain
   drm_gpuvm becomes much cheaper.

3) Drivers can derive and extend the structure to easily represent
   driver specific states of a BO for a certain GPUVM.

The idea of this abstraction was taken from amdgpu, hence the credit for
this idea goes to the developers of amdgpu.

Cc: Christian König <christian.koenig@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-11-dakr@redhat.com
parent 8af72338
@@ -62,6 +62,8 @@ struct bind_job_op {
enum vm_bind_op op;
u32 flags;
struct drm_gpuvm_bo *vm_bo;
struct {
u64 addr;
u64 range;
@@ -1101,22 +1103,28 @@ bind_validate_region(struct nouveau_job *job)
}
static void
bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
bind_link_gpuvas(struct bind_job_op *bop)
{
struct nouveau_uvma_prealloc *new = &bop->new;
struct drm_gpuvm_bo *vm_bo = bop->vm_bo;
struct drm_gpuva_ops *ops = bop->ops;
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
drm_gpuva_link(&new->map->va);
drm_gpuva_link(&new->map->va, vm_bo);
break;
case DRM_GPUVA_OP_REMAP:
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva *va = op->remap.unmap->va;
if (op->remap.prev)
drm_gpuva_link(&new->prev->va);
drm_gpuva_link(&new->prev->va, va->vm_bo);
if (op->remap.next)
drm_gpuva_link(&new->next->va);
drm_gpuva_unlink(op->remap.unmap->va);
drm_gpuva_link(&new->next->va, va->vm_bo);
drm_gpuva_unlink(va);
break;
}
case DRM_GPUVA_OP_UNMAP:
drm_gpuva_unlink(op->unmap.va);
break;
@@ -1138,10 +1146,17 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_MAP) {
op->gem.obj = drm_gem_object_lookup(job->file_priv,
op->gem.handle);
if (!op->gem.obj)
struct drm_gem_object *obj = op->gem.obj =
drm_gem_object_lookup(job->file_priv,
op->gem.handle);
if (!obj)
return -ENOENT;
dma_resv_lock(obj->resv, NULL);
op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
dma_resv_unlock(obj->resv);
if (IS_ERR(op->vm_bo))
return PTR_ERR(op->vm_bo);
}
ret = bind_validate_op(job, op);
@@ -1352,7 +1367,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
case OP_UNMAP_SPARSE:
case OP_MAP:
case OP_UNMAP:
bind_link_gpuvas(op->ops, &op->new);
bind_link_gpuvas(op);
break;
default:
break;
@@ -1499,6 +1514,12 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
if (!IS_ERR_OR_NULL(op->ops))
drm_gpuva_ops_free(&uvmm->base, op->ops);
if (!IS_ERR_OR_NULL(op->vm_bo)) {
dma_resv_lock(obj->resv, NULL);
drm_gpuvm_bo_put(op->vm_bo);
dma_resv_unlock(obj->resv);
}
if (obj)
drm_gem_object_put(obj);
}
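
The two nouveau hunks above pair up: at job submission the drm_gpuvm_bo is
obtained under the GEM's dma-resv lock, and in the free path the reference is
dropped under the same lock, since dropping the last reference removes the
vm_bo from the GEM's gpuva list. A condensed, hypothetical sketch of that
pattern (my_obtain_vm_bo/my_release_vm_bo and the omitted job bookkeeping are
illustrative, not part of the patch):

#include <linux/dma-resv.h>
#include <linux/err.h>
#include <drm/drm_gpuvm.h>

static int my_obtain_vm_bo(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
			   struct drm_gpuvm_bo **p_vm_bo)
{
	struct drm_gpuvm_bo *vm_bo;

	/* The GEM's gpuva lock (here its dma-resv) must be held while the
	 * vm_bo is added to the GEM's gpuva list. */
	dma_resv_lock(obj->resv, NULL);
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj); /* existing instance or new */
	dma_resv_unlock(obj->resv);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	*p_vm_bo = vm_bo;
	return 0;
}

static void my_release_vm_bo(struct drm_gem_object *obj,
			     struct drm_gpuvm_bo *vm_bo)
{
	/* Dropping what may be the last reference removes the vm_bo from the
	 * GEM's gpuva list, hence the lock must be held here as well. */
	dma_resv_lock(obj->resv, NULL);
	drm_gpuvm_bo_put(vm_bo);
	dma_resv_unlock(obj->resv);
}
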
@@ -1752,15 +1773,18 @@ void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{
struct drm_gem_object *obj = &nvbo->bo.base;
struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
drm_gem_for_each_gpuva(va, obj) {
struct nouveau_uvma *uvma = uvma_from_va(va);
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
drm_gpuvm_bo_for_each_va(va, vm_bo) {
struct nouveau_uvma *uvma = uvma_from_va(va);
nouveau_uvma_map(uvma, mem);
drm_gpuva_invalidate(va, false);
nouveau_uvma_map(uvma, mem);
drm_gpuva_invalidate(va, false);
}
}
}
@@ -1768,15 +1792,18 @@ void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{
struct drm_gem_object *obj = &nvbo->bo.base;
struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
drm_gem_for_each_gpuva(va, obj) {
struct nouveau_uvma *uvma = uvma_from_va(va);
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
drm_gpuvm_bo_for_each_va(va, vm_bo) {
struct nouveau_uvma *uvma = uvma_from_va(va);
nouveau_uvma_unmap(uvma);
drm_gpuva_invalidate(va, true);
nouveau_uvma_unmap(uvma);
drm_gpuva_invalidate(va, true);
}
}
}
@@ -580,7 +580,7 @@ int drm_gem_evict(struct drm_gem_object *obj);
* drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
* @obj: the &drm_gem_object
*
* This initializes the &drm_gem_object's &drm_gpuva list.
* This initializes the &drm_gem_object's &drm_gpuvm_bo list.
*
* Calling this function is only necessary for drivers intending to support the
* &drm_driver_feature DRIVER_GEM_GPUVA.
@@ -593,28 +593,28 @@ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
}
/**
* drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas
* @entry__: &drm_gpuva structure to assign to in each iteration step
* @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
* drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
* @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
* @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
*
* This iterator walks over all &drm_gpuva structures associated with the
* &drm_gpuva_manager.
* This iterator walks over all &drm_gpuvm_bo structures associated with the
* &drm_gem_object.
*/
#define drm_gem_for_each_gpuva(entry__, obj__) \
list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
/**
* drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of
* gpuvas
* @entry__: &drm_gpuva structure to assign to in each iteration step
* @next__: &next &drm_gpuva to store the next step
* @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
* drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
* &drm_gpuvm_bo
* @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
* @next__: &next &drm_gpuvm_bo to store the next step
* @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
*
* This iterator walks over all &drm_gpuva structures associated with the
* This iterator walks over all &drm_gpuvm_bo structures associated with the
* &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
* it is safe against removal of elements.
*/
#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
#endif /* __DRM_GEM_H__ */
@@ -25,6 +25,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>
@@ -33,6 +34,7 @@
#include <drm/drm_gem.h>
struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;
/**
@@ -73,6 +75,12 @@ struct drm_gpuva {
*/
struct drm_gpuvm *vm;
/**
* @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
* &drm_gem_object
*/
struct drm_gpuvm_bo *vm_bo;
/**
* @flags: the &drm_gpuva_flags for this mapping
*/
@@ -108,7 +116,7 @@ struct drm_gpuva {
struct drm_gem_object *obj;
/**
* @entry: the &list_head to attach this object to a &drm_gem_object
* @entry: the &list_head to attach this object to a &drm_gpuvm_bo
*/
struct list_head entry;
} gem;
@@ -141,7 +149,7 @@ struct drm_gpuva {
int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);
void drm_gpuva_link(struct drm_gpuva *va);
void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);
struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
@@ -188,10 +196,16 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
* enum drm_gpuvm_flags - flags for struct drm_gpuvm
*/
enum drm_gpuvm_flags {
/**
* @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
* GPUVM's &dma_resv lock
*/
DRM_GPUVM_RESV_PROTECTED = BIT(0),
/**
* @DRM_GPUVM_USERBITS: user defined bits
*/
DRM_GPUVM_USERBITS = BIT(0),
DRM_GPUVM_USERBITS = BIT(1),
};
/**
@@ -302,6 +316,19 @@ bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);
/**
* drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
* set
* @gpuvm: the &drm_gpuvm
*
* Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
*/
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}
/**
* drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
* @gpuvm__: the &drm_gpuvm
@@ -320,6 +347,12 @@ drm_gpuvm_resv_object_alloc(struct drm_device *drm);
*/
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
#define drm_gpuvm_resv_held(gpuvm__) \
dma_resv_held(drm_gpuvm_resv(gpuvm__))
#define drm_gpuvm_resv_assert_held(gpuvm__) \
dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
#define drm_gpuvm_resv_held(gpuvm__) \
dma_resv_held(drm_gpuvm_resv(gpuvm__))
@@ -404,6 +437,125 @@ __drm_gpuva_next(struct drm_gpuva *va)
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
/**
* struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
* &drm_gem_object combination
*
* This structure is an abstraction representing a &drm_gpuvm and
* &drm_gem_object combination. It serves as an indirection to accelerate
* iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
* &drm_gem_object.
*
* Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
* accelerate validation.
*
* Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
* a GEM object is first mapped in a GPU-VM and release the instance once the
* last mapping of the GEM object in this GPU-VM is unmapped.
*/
struct drm_gpuvm_bo {
/**
* @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
* counted pointer.
*/
struct drm_gpuvm *vm;
/**
* @obj: The &drm_gem_object being mapped in @vm. This is a reference
* counted pointer.
*/
struct drm_gem_object *obj;
/**
* @kref: The reference count for this &drm_gpuvm_bo.
*/
struct kref kref;
/**
* @list: Structure containing all &list_heads.
*/
struct {
/**
* @gpuva: The list of linked &drm_gpuvas.
*
* It is safe to access entries from this list as long as the
* GEM's gpuva lock is held. See also struct drm_gem_object.
*/
struct list_head gpuva;
/**
* @entry: Structure containing all &list_heads serving as
* entry.
*/
struct {
/**
* @gem: List entry to attach to the &drm_gem_objects
* gpuva list.
*/
struct list_head gem;
} entry;
} list;
};
struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
/**
* drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
* @vm_bo: the &drm_gpuvm_bo to acquire the reference of
*
* This function acquires an additional reference to @vm_bo. It is illegal to
* call this without already holding a reference. No locks required.
*/
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
kref_get(&vm_bo->kref);
return vm_bo;
}
void drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
/**
* drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
* @va__: &drm_gpuva structure to assign to in each iteration step
* @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
*
* This iterator walks over all &drm_gpuva structures associated with the
* &drm_gpuvm_bo.
*
* The caller must hold the GEM's gpuva lock.
*/
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)
/**
* drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
* &drm_gpuva
* @va__: &drm_gpuva structure to assign to in each iteration step
* @next__: &next &drm_gpuva to store the next step
* @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
*
* This iterator walks over all &drm_gpuva structures associated with the
* &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
* it is safe against removal of elements.
*
* The caller must hold the GEM's gpuva lock.
*/
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
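
Tying this back to point (2) of the commit message: with drm_gpuvm_bo_find()
and the iterator above, looking up all mappings of a GEM object within one VM
no longer requires walking every mapping attached to the GEM object. A hedged
sketch, with the helper name my_dump_mappings made up for illustration and the
caller assumed to hold obj's dma-resv lock:

#include <linux/printk.h>
#include <drm/drm_gpuvm.h>

/* Hypothetical debug helper: print all mappings of @obj within @gpuvm. */
static void my_dump_mappings(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
{
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;

	/* drm_gpuvm_bo_find() returns an additional reference on success. */
	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
	if (!vm_bo)
		return;	/* obj is not mapped in this VM */

	drm_gpuvm_bo_for_each_va(va, vm_bo)
		pr_info("mapping: addr=0x%llx range=0x%llx\n",
			va->va.addr, va->va.range);

	drm_gpuvm_bo_put(vm_bo);
}
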
/**
* enum drm_gpuva_op_type - GPU VA operation type
*
@@ -673,8 +825,7 @@ drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
struct drm_gpuva_ops *
drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops);
@@ -726,6 +877,30 @@ struct drm_gpuvm_ops {
*/
void (*op_free)(struct drm_gpuva_op *op);
/**
* @vm_bo_alloc: called when the &drm_gpuvm allocates
* a struct drm_gpuvm_bo
*
* Some drivers may want to embed struct drm_gpuvm_bo into driver
* specific structures. By implementing this callback drivers can
* allocate memory accordingly.
*
* This callback is optional.
*/
struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
/**
* @vm_bo_free: called when the &drm_gpuvm frees a
* struct drm_gpuvm_bo
*
* Some drivers may want to embed struct drm_gpuvm_bo into driver
* specific structures. By implementing this callback drivers can
* free the previously allocated memory accordingly.
*
* This callback is optional.
*/
void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
/**
* @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
* mapping once all previous steps were completed