Commit 24a6eb92 authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix and cleanup gmc_v9_0_flush_gpu_tlb

The KIQ code path was ignoring the second flush. Also avoid long lines and
re-calculating the register offsets over and over again.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f1235727
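
In short, the change pre-computes the per-engine semaphore/request/ack register offsets once and reuses them, and makes the KIQ path emit the second invalidation request when one was prepared. A minimal sketch of that pattern, assuming the surrounding amdgpu definitions from gmc_v9_0.c (adev, hub, eng, vmid, inv_req/inv_req2); see the hunks below for the actual change:

	/* Sketch only: compute the engine register offsets once instead of
	 * re-deriving "hub->vm_inv_eng0_* + hub->eng_distance * eng" at
	 * every read/write site. */
	u32 sem, req, ack;

	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* KIQ path: issue the flush through the KIQ ring and, when a second
	 * request was prepared (Vega20 + XGMI), issue that one as well. */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid);
	if (inv_req2)
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack,
						   inv_req2, 1 << vmid);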
@@ -816,13 +816,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 				   uint32_t vmhub, uint32_t flush_type)
 {
 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
+	u32 j, inv_req, inv_req2, tmp, sem, req, ack;
 	const unsigned int eng = 17;
-	u32 j, inv_req, inv_req2, tmp;
 	struct amdgpu_vmhub *hub;
 
 	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
 
 	hub = &adev->vmhub[vmhub];
+	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
+	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
+	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
+
 	if (adev->gmc.xgmi.num_physical_nodes &&
 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0)) {
 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
@@ -854,6 +858,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 						   1 << vmid);
+		if (inv_req2)
+			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack,
+							   inv_req2, 1 << vmid);
+
 		up_read(&adev->reset_domain->sem);
 		return;
 	}
@@ -872,9 +880,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		for (j = 0; j < adev->usec_timeout; j++) {
 			/* a read return value of 1 means semaphore acquire */
 			if (vmhub >= AMDGPU_MMHUB0(0))
-				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem);
 			else
-				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+				tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem);
 			if (tmp & 0x1)
 				break;
 			udelay(1);
@@ -886,9 +894,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
 	do {
 		if (vmhub >= AMDGPU_MMHUB0(0))
-			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+			WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req);
 		else
-			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+			WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req);
 
 		/*
 		 * Issue a dummy read to wait for the ACK register to
@@ -897,14 +905,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		 */
 		if ((vmhub == AMDGPU_GFXHUB(0)) &&
 		    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
-			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
-				      hub->eng_distance * eng);
+			RREG32_SOC15_IP_NO_KIQ(GC, req);
 
 		for (j = 0; j < adev->usec_timeout; j++) {
 			if (vmhub >= AMDGPU_MMHUB0(0))
-				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack);
 			else
-				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+				tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack);
 			if (tmp & (1 << vmid))
 				break;
 			udelay(1);
@@ -921,9 +928,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		 * write with 0 means semaphore release
 		 */
 		if (vmhub >= AMDGPU_MMHUB0(0))
-			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+			WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0);
 		else
-			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+			WREG32_SOC15_IP_NO_KIQ(GC, sem, 0);
 	}
 
 	spin_unlock(&adev->gmc.invalidate_lock);