Commit 27488686 authored by Graham Sider, committed by Alex Deucher

drm/amdgpu: Enable GFX11 SDMA context empty interrupt

Enable SDMA queue empty context switching. SDMA context switching due to
quantum programming is no longer done here (as of SDMA v6), so rename
sdma_v6_0_ctx_switch_enable to sdma_v6_0_ctxempty_int_enable to reflect
this.

Also program SDMAx_QUEUEx_SCHEDULE_CNTL for context switch due to
quantum in KFD. Set to amdgpu_sdma_phase_quantum (defaults to 32 i.e.
3200us).
Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Reviewed-by: Stanley Yang <Stanley.Yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 00fa4035
...@@ -403,15 +403,26 @@ static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev) ...@@ -403,15 +403,26 @@ static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
} }
/** /**
* sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
* *
* @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch. * @enable: enable/disable context switching due to queue empty conditions
* *
* Halt or unhalt the async dma engines context switch. * Enable or disable the async dma engines queue empty context switch.
*/ */
static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
{ {
u32 f32_cntl;
int i;
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl);
}
}
} }
/** /**
...@@ -579,10 +590,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) ...@@ -579,10 +590,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
ring->sched.ready = true; ring->sched.ready = true;
if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ if (amdgpu_sriov_vf(adev))
sdma_v6_0_ctx_switch_enable(adev, true);
sdma_v6_0_enable(adev, true); sdma_v6_0_enable(adev, true);
}
r = amdgpu_ring_test_helper(ring); r = amdgpu_ring_test_helper(ring);
if (r) { if (r) {
...@@ -778,7 +787,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev) ...@@ -778,7 +787,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
int r = 0; int r = 0;
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false); sdma_v6_0_enable(adev, false);
/* set RB registers */ /* set RB registers */
...@@ -799,7 +807,7 @@ static int sdma_v6_0_start(struct amdgpu_device *adev) ...@@ -799,7 +807,7 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
/* unhalt the MEs */ /* unhalt the MEs */
sdma_v6_0_enable(adev, true); sdma_v6_0_enable(adev, true);
/* enable sdma ring preemption */ /* enable sdma ring preemption */
sdma_v6_0_ctx_switch_enable(adev, true); sdma_v6_0_ctxempty_int_enable(adev, true);
/* start the gfx rings and rlc compute queues */ /* start the gfx rings and rlc compute queues */
r = sdma_v6_0_gfx_resume(adev); r = sdma_v6_0_gfx_resume(adev);
...@@ -1340,7 +1348,7 @@ static int sdma_v6_0_hw_fini(void *handle) ...@@ -1340,7 +1348,7 @@ static int sdma_v6_0_hw_fini(void *handle)
return 0; return 0;
} }
sdma_v6_0_ctx_switch_enable(adev, false); sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false); sdma_v6_0_enable(adev, false);
return 0; return 0;
......
...@@ -357,6 +357,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, ...@@ -357,6 +357,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdmax_rlcx_doorbell_offset = m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;
m->sdma_engine_id = q->sdma_engine_id; m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id; m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT; m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment