Commit 4af9f07c authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: use kernel submit helper in vm

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 953e8fd4
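
This change removes the VM-specific run_job callbacks and the open-coded amd_sched_push_job()/amd_sched_wait_emit() sequences from the VM code, and instead funnels those submissions through amdgpu_sched_ib_submit_kernel_helper(), fencing the buffer with the fence the helper returns. As a rough illustration of the call pattern the callers are converted to (a sketch only; the helper's prototype is inferred from the call sites in the diff below rather than taken from the amdgpu headers):

/* Sketch of the converted submission path; example_clear_submit() is a
 * hypothetical caller, not a function from this patch. */
static int example_clear_submit(struct amdgpu_device *adev,
				struct amdgpu_ring *ring,
				struct amdgpu_ib *ib,
				struct amdgpu_bo *bo)
{
	struct fence *fence = NULL;
	int r;

	/* The helper builds the kernel-context parser job, pushes it to the
	 * GPU scheduler (or falls back to amdgpu_ib_schedule() when the
	 * scheduler is disabled) and returns the fence of the last IB. */
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &fence);
	if (!r)
		/* Attach the submission fence to the BO being updated. */
		amdgpu_bo_fence(bo, fence, true);
	return r;
}
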
@@ -1235,19 +1235,6 @@ struct amdgpu_cs_chunk {
 	void __user		*user_ptr;
 };
 
-union amdgpu_sched_job_param {
-	struct {
-		struct amdgpu_vm *vm;
-		uint64_t start;
-		uint64_t last;
-		struct fence **fence;
-	} vm_mapping;
-	struct {
-		struct amdgpu_bo *bo;
-	} vm;
-};
-
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
@@ -1272,7 +1259,6 @@ struct amdgpu_cs_parser {
 	struct mutex		job_lock;
 	struct work_struct	job_work;
 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
-	union amdgpu_sched_job_param job_param;
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 	int (*free_job)(struct amdgpu_cs_parser *sched_job);
 };
...
@@ -121,7 +121,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 	uint64_t v_seq;
 	struct amdgpu_cs_parser *sched_job =
 		amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-					ibs, 1);
+					ibs, num_ibs);
 	if(!sched_job) {
 		return -ENOMEM;
 	}
@@ -139,7 +139,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 		if (r)
 			WARN(true, "emit timeout\n");
 	} else
-		r = amdgpu_ib_schedule(adev, 1, ibs, owner);
+		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 	if (r)
 		return r;
 	*f = &ibs[num_ibs - 1].fence->base;
...
@@ -316,14 +316,6 @@ static int amdgpu_vm_free_job(
 	return 0;
 }
 
-static int amdgpu_vm_run_job(
-	struct amdgpu_cs_parser *sched_job)
-{
-	amdgpu_bo_fence(sched_job->job_param.vm.bo,
-			&sched_job->ibs[sched_job->num_ibs -1].fence->base, true);
-	return 0;
-}
-
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -334,7 +326,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_cs_parser *sched_job = NULL;
+	struct fence *fence = NULL;
 	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
@@ -368,38 +360,16 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
 	amdgpu_vm_pad_ib(adev, ib);
 	WARN_ON(ib->length_dw > 64);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &fence);
+	if (!r)
+		amdgpu_bo_fence(bo, fence, true);
 	if (amdgpu_enable_scheduler) {
-		int r;
-		uint64_t v_seq;
-		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-						    &adev->kernel_ctx, ib, 1);
-		if(!sched_job)
-			goto error_free;
-		sched_job->job_param.vm.bo = bo;
-		sched_job->run_job = amdgpu_vm_run_job;
-		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
-				   &adev->kernel_ctx.rings[ring->idx].entity,
-				   sched_job);
-		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
-					v_seq,
-					false,
-					-1);
-		if (r)
-			DRM_ERROR("emit timeout\n");
 		amdgpu_bo_unreserve(bo);
 		return 0;
-	} else {
-		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-		if (r)
-			goto error_free;
-		amdgpu_bo_fence(bo, &ib->fence->base, true);
 	}
 error_free:
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
@@ -456,7 +426,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_ib *ib;
-	struct amdgpu_cs_parser *sched_job = NULL;
+	struct fence *fence = NULL;
 	int r;
@@ -518,37 +488,13 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		amdgpu_vm_pad_ib(adev, ib);
 		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
 		WARN_ON(ib->length_dw > ndw);
-
-		if (amdgpu_enable_scheduler) {
-			int r;
-			uint64_t v_seq;
-			sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-							    &adev->kernel_ctx,
-							    ib, 1);
-			if(!sched_job)
-				goto error_free;
-			sched_job->job_param.vm.bo = pd;
-			sched_job->run_job = amdgpu_vm_run_job;
-			sched_job->free_job = amdgpu_vm_free_job;
-			v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-			ib->sequence = v_seq;
-			amd_sched_push_job(ring->scheduler,
-					   &adev->kernel_ctx.rings[ring->idx].entity,
-					   sched_job);
-			r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
-						v_seq,
-						false,
-						-1);
-			if (r)
-				DRM_ERROR("emit timeout\n");
-		} else {
-			r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-			if (r) {
-				amdgpu_ib_free(adev, ib);
-				return r;
-			}
-			amdgpu_bo_fence(pd, &ib->fence->base, true);
-		}
+		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+							 &amdgpu_vm_free_job,
+							 AMDGPU_FENCE_OWNER_VM,
+							 &fence);
+		if (r)
+			goto error_free;
+
+		amdgpu_bo_fence(pd, fence, true);
 	}
 
 	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
@@ -559,11 +505,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	return 0;
 
 error_free:
-	if (sched_job)
-		kfree(sched_job);
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
-	return -ENOMEM;
+	return r;
 }
 
 /**
@@ -748,20 +692,6 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
-static int amdgpu_vm_bo_update_mapping_run_job(
-	struct amdgpu_cs_parser *sched_job)
-{
-	struct fence **fence = sched_job->job_param.vm_mapping.fence;
-	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
-			    sched_job->job_param.vm_mapping.start,
-			    sched_job->job_param.vm_mapping.last + 1,
-			    &sched_job->ibs[sched_job->num_ibs -1].fence->base);
-	if (fence) {
-		fence_put(*fence);
-		*fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
-	}
-	return 0;
-}
-
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
@@ -787,7 +717,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
 	struct amdgpu_ib *ib;
-	struct amdgpu_cs_parser *sched_job = NULL;
+	struct fence *f = NULL;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -869,56 +799,29 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	amdgpu_vm_pad_ib(adev, ib);
 	WARN_ON(ib->length_dw > ndw);
-
-	if (amdgpu_enable_scheduler) {
-		int r;
-		uint64_t v_seq;
-		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-						    &adev->kernel_ctx, ib, 1);
-		if(!sched_job)
-			goto error_free;
-		sched_job->job_param.vm_mapping.vm = vm;
-		sched_job->job_param.vm_mapping.start = mapping->it.start;
-		sched_job->job_param.vm_mapping.last = mapping->it.last;
-		sched_job->job_param.vm_mapping.fence = fence;
-		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
-		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
-				   &adev->kernel_ctx.rings[ring->idx].entity,
-				   sched_job);
-		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
-					v_seq,
-					false,
-					-1);
-		if (r)
-			DRM_ERROR("emit timeout\n");
-	} else {
-		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, ib);
-			return r;
-		}
-		amdgpu_vm_fence_pts(vm, mapping->it.start,
-				    mapping->it.last + 1, &ib->fence->base);
-		if (fence) {
-			fence_put(*fence);
-			*fence = fence_get(&ib->fence->base);
-		}
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &f);
+	if (r)
+		goto error_free;
+
+	amdgpu_vm_fence_pts(vm, mapping->it.start,
+			    mapping->it.last + 1, f);
+	if (fence) {
+		fence_put(*fence);
+		*fence = fence_get(f);
+	}
+	if (!amdgpu_enable_scheduler) {
 		amdgpu_ib_free(adev, ib);
 		kfree(ib);
 	}
 	return 0;
 
 error_free:
-	if (sched_job)
-		kfree(sched_job);
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
-	return -ENOMEM;
+	return r;
 }
...