Commit 041a5743 authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix and cleanup gmc_v11_0_flush_gpu_tlb_pasid

The same PASID can be used by more than one VMID; reset each of them.

Use the common KIQ handling.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 72cc9920
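
The cleanup splits the work in two: the per-ASIC callback in the first hunk below only walks the VMIDs and flushes the ones whose PASID mapping matches, while the decision of whether the flush may go through the KIQ is left to the common GMC code, driven by the flush_pasid_uses_kiq flag set in hw_init (second hunk). The following standalone C program is a rough sketch of that split, not amdgpu code; apart from flush_pasid_uses_kiq, the names (fake_gmc, flush_via_kiq, flush_pasid_direct, flush_gpu_tlb_pasid) are illustrative assumptions only.

/* Standalone model of the "common KIQ handling" split; a sketch, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VMID 16

struct fake_gmc {
        bool flush_pasid_uses_kiq;      /* decided once at hw_init time */
        uint16_t vmid_pasid[MAX_VMID];  /* PASID currently bound to each VMID, 0 = unused */
};

/* Stand-in for the KIQ packet path: one request covers every VMID using the PASID. */
static void flush_via_kiq(uint16_t pasid)
{
        printf("KIQ: invalidate TLBs for pasid %u\n", (unsigned int)pasid);
}

/*
 * Stand-in for the per-ASIC callback after this patch: walk all VMIDs and
 * flush each one whose mapping matches, since the same PASID can be bound
 * to more than one VMID.
 */
static void flush_pasid_direct(const struct fake_gmc *gmc, uint16_t pasid)
{
        for (int vmid = 1; vmid < MAX_VMID; vmid++) {
                if (gmc->vmid_pasid[vmid] != pasid)
                        continue;       /* skip VMIDs owned by other PASIDs */
                printf("MMIO: flush TLB for vmid %d\n", vmid);
        }
}

/* Model of the common entry point: a flag, not the callback, selects the KIQ. */
static void flush_gpu_tlb_pasid(const struct fake_gmc *gmc, uint16_t pasid)
{
        if (gmc->flush_pasid_uses_kiq)
                flush_via_kiq(pasid);
        else
                flush_pasid_direct(gmc, pasid);
}

int main(void)
{
        struct fake_gmc gmc = { .flush_pasid_uses_kiq = false };

        gmc.vmid_pasid[3] = 42;          /* one PASID bound to two VMIDs */
        gmc.vmid_pasid[7] = 42;

        flush_gpu_tlb_pasid(&gmc, 42);   /* direct path: flushes VMID 3 and VMID 7 */

        gmc.flush_pasid_uses_kiq = true; /* e.g. not running under emulation */
        flush_gpu_tlb_pasid(&gmc, 42);   /* KIQ path */

        return 0;
}
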
@@ -303,54 +303,27 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                          uint16_t pasid, uint32_t flush_type,
                                          bool all_hub, uint32_t inst)
 {
+       uint16_t queried;
        int vmid, i;
-       signed long r;
-       uint32_t seq;
-       uint16_t queried_pasid;
-       bool ret;
-       struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
-
-       if (amdgpu_emu_mode == 0 && ring->sched.ready) {
-               spin_lock(&adev->gfx.kiq[0].ring_lock);
-               /* 2 dwords flush + 8 dwords fence */
-               amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
-               kiq->pmf->kiq_invalidate_tlbs(ring,
-                                       pasid, flush_type, all_hub);
-               r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
-               if (r) {
-                       amdgpu_ring_undo(ring);
-                       spin_unlock(&adev->gfx.kiq[0].ring_lock);
-                       return -ETIME;
-               }
-
-               amdgpu_ring_commit(ring);
-               spin_unlock(&adev->gfx.kiq[0].ring_lock);
-               r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
-               if (r < 1) {
-                       dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
-                       return -ETIME;
-               }
-
-               return 0;
-       }

        for (vmid = 1; vmid < 16; vmid++) {
+               bool valid;
+
+               valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
+                                                             &queried);
+               if (!valid || queried != pasid)
+                       continue;

-               ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
-                               &queried_pasid);
-               if (ret && queried_pasid == pasid) {
-                       if (all_hub) {
-                               for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
-                                       gmc_v11_0_flush_gpu_tlb(adev, vmid,
-                                                       i, flush_type);
-                       } else {
-                               gmc_v11_0_flush_gpu_tlb(adev, vmid,
-                                               AMDGPU_GFXHUB(0), flush_type);
-                       }
+               if (all_hub) {
+                       for_each_set_bit(i, adev->vmhubs_mask,
+                                        AMDGPU_MAX_VMHUBS)
+                               gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
+                                                       flush_type);
+               } else {
+                       gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
+                                               flush_type);
                }
        }

        return 0;
 }
@@ -920,8 +893,10 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)

 static int gmc_v11_0_hw_init(void *handle)
 {
-       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

        /* The sequence of these two function calls matters.*/
        gmc_v11_0_init_golden_registers(adev);
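
With the hw_init hunk above, the KIQ-versus-direct decision is made once per device: flush_pasid_uses_kiq is set to !amdgpu_emu_mode, so under emulation the common path falls back to the direct per-VMID flush that the reworked callback still provides, while on real hardware the flush is submitted through the KIQ as before, replacing the amdgpu_emu_mode and ring-readiness checks the callback used to do itself.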