Commit e1eb899b authored by Christian König, committed by Alex Deucher

drm/amdgpu: add IOCTL interface for per VM BOs v3

Add the IOCTL interface so that applications can allocate per VM BOs.

Still WIP since not all corner cases are tested yet, but this reduces average
CS overhead for 10K BOs from 21ms down to 48us.

v2: add some extra checks, remove the WIP tag
v3: rename new flag to AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 73fb16e7
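
As a usage note (not part of this commit): userspace opts in by setting the new flag in the domain_flags field of the GEM create ioctl. A minimal sketch against the uapi added below, assuming an already-open render node fd; the helper name, domain choice, and error handling are illustrative, not from this patch:

/*
 * Illustrative userspace sketch: allocate a per-VM BO by setting the
 * new flag in drm_amdgpu_gem_create_in. Header path may vary by
 * distribution (kernel uapi installs it as <drm/amdgpu_drm.h>).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/amdgpu_drm.h>

static int alloc_per_vm_bo(int drm_fd, __u64 size, __u32 *handle)
{
        union drm_amdgpu_gem_create args;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = size;
        args.in.alignment = 4096;
        args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
        /*
         * The BO then shares the reservation object of this VM's page
         * directory, so it stays valid in the VM without appearing on
         * each submission's BO list; in exchange it cannot be exported
         * via PRIME or opened from another VM (both paths return
         * -EPERM in the hunks below).
         */
        args.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

        if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
                return -1;

        *handle = args.out.handle;
        return 0;
}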
@@ -457,6 +457,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                              int alignment, u32 initial_domain,
                              u64 flags, bool kernel,
+                             struct reservation_object *resv,
                              struct drm_gem_object **obj);
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
...
@@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
                                        AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                        AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                                        AMDGPU_GEM_CREATE_VRAM_CLEARED,
-                                       true, &gobj);
+                                       true, NULL, &gobj);
         if (ret) {
                 pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
                 return -ENOMEM;
...
@@ -46,9 +46,10 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                              int alignment, u32 initial_domain,
                              u64 flags, bool kernel,
+                             struct reservation_object *resv,
                              struct drm_gem_object **obj)
 {
-        struct amdgpu_bo *robj;
+        struct amdgpu_bo *bo;
         int r;
 
         *obj = NULL;
@@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 retry:
         r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-                             flags, NULL, NULL, 0, &robj);
+                             flags, NULL, resv, 0, &bo);
         if (r) {
                 if (r != -ERESTARTSYS) {
                         if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -71,7 +72,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                 }
                 return r;
         }
-        *obj = &robj->gem_base;
+        *obj = &bo->gem_base;
 
         return 0;
 }
@@ -119,6 +120,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
         if (mm && mm != current->mm)
                 return -EPERM;
 
+        if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+            abo->tbo.resv != vm->root.base.bo->tbo.resv)
+                return -EPERM;
+
         r = amdgpu_bo_reserve(abo, false);
         if (r)
                 return r;
@@ -142,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
         struct amdgpu_vm *vm = &fpriv->vm;
         struct amdgpu_bo_list_entry vm_pd;
-        struct list_head list;
+        struct list_head list, duplicates;
         struct ttm_validate_buffer tv;
         struct ww_acquire_ctx ticket;
         struct amdgpu_bo_va *bo_va;
         int r;
 
         INIT_LIST_HEAD(&list);
+        INIT_LIST_HEAD(&duplicates);
 
         tv.bo = &bo->tbo;
         tv.shared = true;
@@ -156,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
         amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-        r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
         if (r) {
                 dev_err(adev->dev, "leaking bo va because "
                         "we fail to reserve bo (%d)\n", r);
@@ -191,9 +197,12 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
 {
         struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_fpriv *fpriv = filp->driver_priv;
+        struct amdgpu_vm *vm = &fpriv->vm;
         union drm_amdgpu_gem_create *args = data;
         uint64_t flags = args->in.domain_flags;
         uint64_t size = args->in.bo_size;
+        struct reservation_object *resv = NULL;
         struct drm_gem_object *gobj;
         uint32_t handle;
         int r;
@@ -202,7 +211,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
         if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                       AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                       AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-                      AMDGPU_GEM_CREATE_VRAM_CLEARED))
+                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
+                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
                 return -EINVAL;
 
         /* reject invalid gem domains */
@@ -229,9 +239,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
         }
         size = roundup(size, PAGE_SIZE);
 
+        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+                r = amdgpu_bo_reserve(vm->root.base.bo, false);
+                if (r)
+                        return r;
+
+                resv = vm->root.base.bo->tbo.resv;
+        }
+
         r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                      (u32)(0xffffffff & args->in.domains),
-                                     flags, false, &gobj);
+                                     flags, false, resv, &gobj);
+        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+                if (!r) {
+                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+                        abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+                }
+                amdgpu_bo_unreserve(vm->root.base.bo);
+        }
         if (r)
                 return r;
@@ -273,9 +299,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
         }
 
         /* create a gem object to contain this object in */
-        r = amdgpu_gem_object_create(adev, args->size, 0,
-                                     AMDGPU_GEM_DOMAIN_CPU, 0,
-                                     0, &gobj);
+        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+                                     0, 0, NULL, &gobj);
         if (r)
                 return r;
@@ -527,7 +552,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         struct amdgpu_bo_list_entry vm_pd;
         struct ttm_validate_buffer tv;
         struct ww_acquire_ctx ticket;
-        struct list_head list;
+        struct list_head list, duplicates;
         uint64_t va_flags;
         int r = 0;
@@ -563,6 +588,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         }
 
         INIT_LIST_HEAD(&list);
+        INIT_LIST_HEAD(&duplicates);
         if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
             !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                 gobj = drm_gem_object_lookup(filp, args->handle);
@@ -579,7 +605,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
         if (r)
                 goto error_unref;
@@ -645,6 +671,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
 {
+        struct amdgpu_device *adev = dev->dev_private;
         struct drm_amdgpu_gem_op *args = data;
         struct drm_gem_object *gobj;
         struct amdgpu_bo *robj;
@@ -692,6 +719,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                 if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                         robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
+                if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+                        amdgpu_vm_bo_invalidate(adev, robj, true);
+
                 amdgpu_bo_unreserve(robj);
                 break;
         default:
@@ -721,8 +751,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
         r = amdgpu_gem_object_create(adev, args->size, 0,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                                     ttm_bo_type_device,
-                                     &gobj);
+                                     false, NULL, &gobj);
         if (r)
                 return -ENOMEM;
...
@@ -136,7 +136,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 {
         struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 
-        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                 return ERR_PTR(-EPERM);
 
         return drm_gem_prime_export(dev, gobj, flags);
...
@@ -87,6 +87,8 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_SHADOW                (1 << 4)
 /* Flag that allocating the BO should use linear VRAM */
 #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS       (1 << 5)
+/* Flag that BO is always valid in this VM */
+#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID       (1 << 6)
 
 struct drm_amdgpu_gem_create_in {
         /** the requested memory size */
...