Commit eab3de23 authored by Christian König, committed by Alex Deucher

drm/amdgpu: explicitly give BO type to amdgpu_bo_create

Drop the "kernel" and "sg" parameters and pass the BO type explicitly
to amdgpu_bo_create instead of figuring it out from the other
parameters.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e3364dfc
...@@ -441,7 +441,7 @@ struct amdgpu_sa_bo { ...@@ -441,7 +441,7 @@ struct amdgpu_sa_bo {
void amdgpu_gem_force_release(struct amdgpu_device *adev); void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain, int alignment, u32 initial_domain,
u64 flags, bool kernel, u64 flags, enum ttm_bo_type type,
struct reservation_object *resv, struct reservation_object *resv,
struct drm_gem_object **obj); struct drm_gem_object **obj);
......
...@@ -221,8 +221,9 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, ...@@ -221,8 +221,9 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
uint64_t gpu_addr_tmp = 0; uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL; void *cpu_ptr_tmp = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &bo); AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
NULL, &bo);
if (r) { if (r) {
dev_err(adev->dev, dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r); "failed to allocate BO for amdkfd (%d)\n", r);
......
...@@ -997,8 +997,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ...@@ -997,8 +997,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain)); va, size, domain_string(alloc_domain));
ret = amdgpu_bo_create(adev, size, byte_align, false, ret = amdgpu_bo_create(adev, size, byte_align,
alloc_domain, alloc_flags, NULL, NULL, &bo); alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
if (ret) { if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n", pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret); domain_string(alloc_domain), ret);
......
...@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, ...@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time; int time;
n = AMDGPU_BENCHMARK_ITERATIONS; n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
NULL, &sobj); ttm_bo_type_kernel, NULL, &sobj);
if (r) { if (r) {
goto out_cleanup; goto out_cleanup;
} }
...@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, ...@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) { if (r) {
goto out_cleanup; goto out_cleanup;
} }
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
NULL, &dobj); ttm_bo_type_kernel, NULL, &dobj);
if (r) { if (r) {
goto out_cleanup; goto out_cleanup;
} }
......
...@@ -113,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) ...@@ -113,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
int r; int r;
if (adev->gart.robj == NULL) { if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size, r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj); ttm_bo_type_kernel, NULL,
&adev->gart.robj);
if (r) { if (r) {
return r; return r;
} }
......
...@@ -45,7 +45,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) ...@@ -45,7 +45,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain, int alignment, u32 initial_domain,
u64 flags, bool kernel, u64 flags, enum ttm_bo_type type,
struct reservation_object *resv, struct reservation_object *resv,
struct drm_gem_object **obj) struct drm_gem_object **obj)
{ {
...@@ -59,8 +59,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, ...@@ -59,8 +59,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
} }
retry: retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, r = amdgpu_bo_create(adev, size, alignment, initial_domain,
flags, NULL, resv, &bo); flags, type, resv, &bo);
if (r) { if (r) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
......
...@@ -191,10 +191,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev, ...@@ -191,10 +191,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
int r; int r;
if (!*bo_ptr) { if (!*bo_ptr) {
r = amdgpu_bo_create(adev, size, align, true, domain, r = amdgpu_bo_create(adev, size, align, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr); ttm_bo_type_kernel, NULL, bo_ptr);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r); r);
...@@ -335,21 +335,19 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, ...@@ -335,21 +335,19 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
return false; return false;
} }
static int amdgpu_bo_do_create(struct amdgpu_device *adev, static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
unsigned long size, int byte_align, int byte_align, u32 domain,
bool kernel, u32 domain, u64 flags, u64 flags, enum ttm_bo_type type,
struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
.interruptible = !kernel, .interruptible = (type != ttm_bo_type_kernel),
.no_wait_gpu = false, .no_wait_gpu = false,
.resv = resv, .resv = resv,
.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
}; };
struct amdgpu_bo *bo; struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align; unsigned long page_align;
size_t acc_size; size_t acc_size;
int r; int r;
...@@ -360,13 +358,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -360,13 +358,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (!amdgpu_bo_validate_size(adev, size, domain)) if (!amdgpu_bo_validate_size(adev, size, domain))
return -ENOMEM; return -ENOMEM;
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
type = ttm_bo_type_sg;
} else {
type = ttm_bo_type_device;
}
*bo_ptr = NULL; *bo_ptr = NULL;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
...@@ -385,7 +376,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -385,7 +376,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA); AMDGPU_GEM_DOMAIN_OA);
bo->allowed_domains = bo->preferred_domains; bo->allowed_domains = bo->preferred_domains;
if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) if (type != ttm_bo_type_kernel &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = flags; bo->flags = flags;
...@@ -423,7 +415,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -423,7 +415,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size, &bo->placement, page_align, &ctx, acc_size,
sg, resv, &amdgpu_ttm_bo_destroy); NULL, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
...@@ -435,7 +427,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, ...@@ -435,7 +427,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
else else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (kernel) if (type == ttm_bo_type_kernel)
bo->tbo.priority = 1; bo->tbo.priority = 1;
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
...@@ -479,12 +471,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, ...@@ -479,12 +471,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
if (bo->shadow) if (bo->shadow)
return 0; return 0;
r = amdgpu_bo_do_create(adev, size, byte_align, true, r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW, AMDGPU_GEM_CREATE_SHADOW,
NULL, bo->tbo.resv, ttm_bo_type_kernel,
&bo->shadow); bo->tbo.resv, &bo->shadow);
if (!r) { if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo); bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock); mutex_lock(&adev->shadow_list_lock);
...@@ -495,18 +486,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, ...@@ -495,18 +486,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r; return r;
} }
int amdgpu_bo_create(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
unsigned long size, int byte_align, int byte_align, u32 domain,
bool kernel, u32 domain, u64 flags, u64 flags, enum ttm_bo_type type,
struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r; int r;
r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, r = amdgpu_bo_do_create(adev, size, byte_align, domain,
parent_flags, sg, resv, bo_ptr); parent_flags, type, resv, bo_ptr);
if (r) if (r)
return r; return r;
......
...@@ -203,12 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) ...@@ -203,12 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
} }
int amdgpu_bo_create(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
unsigned long size, int byte_align, int byte_align, u32 domain,
bool kernel, u32 domain, u64 flags, u64 flags, enum ttm_bo_type type,
struct sg_table *sg, struct reservation_object *resv,
struct reservation_object *resv, struct amdgpu_bo **bo_ptr);
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev, int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align, unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr, u32 domain, struct amdgpu_bo **bo_ptr,
......
...@@ -105,11 +105,14 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -105,11 +105,14 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
int ret; int ret;
ww_mutex_lock(&resv->lock, NULL); ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_CPU, 0, sg, resv, &bo); AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
resv, &bo);
if (ret) if (ret)
goto error; goto error;
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
......
...@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_cleanup; goto out_cleanup;
} }
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
AMDGPU_GEM_DOMAIN_VRAM, 0, ttm_bo_type_kernel, NULL, &vram_obj);
NULL, NULL, &vram_obj);
if (r) { if (r) {
DRM_ERROR("Failed to create VRAM object\n"); DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup; goto out_cleanup;
...@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void **vram_start, **vram_end; void **vram_start, **vram_end;
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, AMDGPU_GEM_DOMAIN_GTT, 0,
NULL, gtt_obj + i); ttm_bo_type_kernel, NULL, gtt_obj + i);
if (r) { if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i); DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean; goto out_lclean;
......
...@@ -1342,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) ...@@ -1342,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
if (adev->fw_vram_usage.size > 0 && if (adev->fw_vram_usage.size > 0 &&
adev->fw_vram_usage.size <= vram_size) { adev->fw_vram_usage.size <= vram_size) {
r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
&adev->fw_vram_usage.reserved_bo); ttm_bo_type_kernel, NULL,
&adev->fw_vram_usage.reserved_bo);
if (r) if (r)
goto error_create; goto error_create;
......
...@@ -413,9 +413,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, ...@@ -413,9 +413,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (!entry->base.bo) { if (!entry->base.bo) {
r = amdgpu_bo_create(adev, r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level), amdgpu_vm_bo_size(adev, level),
AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, flags, AMDGPU_GEM_DOMAIN_VRAM, flags,
NULL, resv, &pt); ttm_bo_type_kernel, resv, &pt);
if (r) if (r)
return r; return r;
...@@ -2409,8 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -2409,8 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
AMDGPU_GEM_CREATE_SHADOW); AMDGPU_GEM_CREATE_SHADOW);
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM, r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
flags, NULL, NULL, &vm->root.base.bo); ttm_bo_type_kernel, NULL, &vm->root.base.bo);
if (r) if (r)
goto error_free_sched_entity; goto error_free_sched_entity;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment