Commit 5518625d authored by Christian König, committed by Alex Deucher

drm/amdgpu: implement gmc_v8_0_emit_flush_gpu_tlb

Unify tlb flushing for gmc v8.

v2: handle UVD v6 as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d9a701cc
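
The amdgpu_gmc_emit_flush_gpu_tlb() call used in the hunks below dispatches to the GMC block's new emit_flush_gpu_tlb callback, which in turn emits its register writes through the ring's emit_wreg hook. A rough sketch of that plumbing, for orientation only (the macro bodies shown here are an assumption based on the driver's usual dispatch pattern; they are not part of this commit):

    /* sketch: generic helpers forwarding to the per-IP callbacks */
    #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, pasid, addr) \
            ((r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (pasid), (addr)))

    #define amdgpu_ring_emit_wreg(r, reg, v) \
            ((r)->funcs->emit_wreg((r), (reg), (v)))

This is what lets each engine's emit_vm_flush collapse to a single call: the register-level flush logic moves into gmc_v8_0_emit_flush_gpu_tlb(), while the packet formatting stays with each ring's emit_wreg implementation.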
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6333,28 +6333,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-                                WRITE_DATA_DST_SEL(0)) |
-                                WR_CONFIRM);
-       if (vmid < 8) {
-               amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* bits 0-15 are the VM contexts0-15 */
-       /* invalidate the cache */
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(0)));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

        /* wait for the invalidate to complete */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));

@@ -6886,7 +6865,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .emit_frame_size = /* maximum 215dw if count 16 IBs in */
                5 +  /* COND_EXEC */
                7 +  /* PIPELINE_SYNC */
-               19 + /* VM_FLUSH */
+               VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
                8 +  /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                4 + /* double SWITCH_BUFFER,

@@ -6933,7 +6912,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
                7 + /* gfx_v8_0_ring_emit_hdp_flush */
                5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-               17 + /* gfx_v8_0_ring_emit_vm_flush */
+               VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
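
On the GFX and compute rings, each register write behind amdgpu_ring_emit_wreg() is a WRITE_DATA packet, five dwords per write, which is where the "VI_FLUSH_GPU_TLB_NUM_WREG * 5" term in the frame-size accounting above comes from. A hedged sketch of such an emitter, mirroring the packet sequence removed above (the real gfx_v8_0 emit_wreg hook comes from a companion change and may differ in the engine-select handling):

    static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
                                        uint32_t val)
    {
            /* one register write = 5 dwords on the CP rings */
            amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
            amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(0) | WR_CONFIRM);
            amdgpu_ring_write(ring, reg);
            amdgpu_ring_write(ring, 0);
            amdgpu_ring_write(ring, val);
    }

The dword budget is unchanged: the GFX ring's VM_FLUSH goes from 19 to 2 * 5 + 9 = 19, and the compute ring's from 17 to 2 * 5 + 7 = 17.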
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -611,6 +611,24 @@ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }

+static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+                                           unsigned vmid, unsigned pasid,
+                                           uint64_t pd_addr)
+{
+       uint32_t reg;
+
+       if (vmid < 8)
+               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+       else
+               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+       amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+       /* bits 0-15 are the VM contexts0-15 */
+       amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+       return pd_addr;
+}
+
 /**
  * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *

@@ -1640,6 +1658,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+       .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
        .set_pte_pde = gmc_v8_0_set_pte_pde,
        .set_prt = gmc_v8_0_set_prt,
        .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -862,20 +862,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, unsigned pasid,
                                         uint64_t pd_addr)
 {
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       if (vmid < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* flush TLB */
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |

@@ -1215,7 +1202,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
                6 + /* sdma_v2_4_ring_emit_hdp_flush */
                3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
                6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-               12 + /* sdma_v2_4_ring_emit_vm_flush */
+               VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
                10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
        .emit_ib = sdma_v2_4_ring_emit_ib,
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1128,20 +1128,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, unsigned pasid,
                                         uint64_t pd_addr)
 {
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       if (vmid < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* flush TLB */
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
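
Both SDMA engines collapse to the same one-line call; their register writes go out as SRBM_WRITE packets, three dwords each, matching the "VI_FLUSH_GPU_TLB_NUM_WREG * 3" term in the sdma_v2_4 frame-size update above (12 = 2 * 3 + 6, so the budget is unchanged). A sketch of the corresponding emit_wreg hook, reconstructed from the packet sequence removed above (the actual hook is added by a companion change):

    static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
                                         uint32_t val)
    {
            /* one register write = 3 dwords on the SDMA rings */
            amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                              SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
            amdgpu_ring_write(ring, reg);
            amdgpu_ring_write(ring, val);
    }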
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1087,26 +1087,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, unsigned pasid,
                                        uint64_t pd_addr)
 {
-       uint32_t reg;
-
-       if (vmid < 8)
-               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
-       else
-               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
-
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
-       amdgpu_ring_write(ring, reg << 2);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-       amdgpu_ring_write(ring, pd_addr >> 12);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
-       amdgpu_ring_write(ring, 0x8);
-
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-       amdgpu_ring_write(ring, 1 << vmid);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
-       amdgpu_ring_write(ring, 0x8);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
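
The "v2: handle UVD v6 as well" note covers this hunk: the UVD ring programs registers indirectly through the UVD_GPCOM_VCPU mailbox registers, so one register write costs six dwords. A sketch of the emit_wreg hook the helper relies on here, reconstructed from the sequence removed above and presumably supplied by a companion change:

    static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
                                        uint32_t val)
    {
            /* one register write = 6 dwords on the UVD ring */
            amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
            amdgpu_ring_write(ring, reg << 2);
            amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
            amdgpu_ring_write(ring, val);
            amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
            amdgpu_ring_write(ring, 0x8);
    }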
drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,6 +24,8 @@
 #ifndef __VI_H__
 #define __VI_H__

+#define VI_FLUSH_GPU_TLB_NUM_WREG      2
+
 void vi_srbm_select(struct amdgpu_device *adev,
                    u32 me, u32 pipe, u32 queue, u32 vmid);
 int vi_set_ip_blocks(struct amdgpu_device *adev);