Commit 9a17c9b7 authored by Graham Sider, committed by Alex Deucher

drm/amdkfd: replace kgd_dev in static gfx v9 funcs

Convert the static functions in amdgpu_amdkfd_gfx_v9.c to take a struct amdgpu_device pointer directly instead of a struct kgd_dev handle.
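
For instance, a helper such as lock_srbm() now receives the device pointer directly rather than looking it up from the kgd_dev handle on every call (excerpt of the change; see the full diff below):

Before:

	static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
				uint32_t queue, uint32_t vmid)
	{
		struct amdgpu_device *adev = get_amdgpu_device(kgd);

		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, mec, pipe, queue, vmid);
	}

After:

	static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
				uint32_t queue, uint32_t vmid)
	{
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, mec, pipe, queue, vmid);
	}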
Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1cca6087
@@ -51,32 +51,26 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 	return (struct amdgpu_device *)kgd;
 }

-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	mutex_lock(&adev->srbm_mutex);
 	soc15_grbm_select(adev, mec, pipe, queue, vmid);
 }

-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	soc15_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }

-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

-	lock_srbm(kgd, mec, pipe, queue_id, 0);
+	lock_srbm(adev, mec, pipe, queue_id, 0);
 }

 static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -88,9 +82,9 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
 	return 1ull << bit;
 }

-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
 {
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }

 void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
@@ -101,13 +95,13 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);

-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);

 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
 	/* APE1 no longer exists on GFX9 */

-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }

 int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
@@ -180,13 +174,13 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

-	lock_srbm(kgd, mec, pipe, 0, 0);
+	lock_srbm(adev, mec, pipe, 0, 0);

 	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
 		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
 		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

-	unlock_srbm(kgd);
+	unlock_srbm(adev);

 	return 0;
 }
@@ -245,7 +239,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	m = get_mqd(mqd);

-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);

 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
 	mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -308,7 +302,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

-	release_queue(kgd);
+	release_queue(adev);

 	return 0;
 }
@@ -325,7 +319,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
 	m = get_mqd(mqd);

-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);

 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -361,7 +355,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
 out_unlock:
 	spin_unlock(&adev->gfx.kiq.ring_lock);
-	release_queue(kgd);
+	release_queue(adev);

 	return r;
 }
@@ -384,13 +378,13 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
 	if (*dump == NULL)
 		return -ENOMEM;

-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);

 	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
 	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
 		DUMP_REG(reg);

-	release_queue(kgd);
+	release_queue(adev);

 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -508,7 +502,7 @@ bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
 	bool retval = false;
 	uint32_t low, high;

-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
 	if (act) {
 		low = lower_32_bits(queue_address >> 8);
@@ -518,7 +512,7 @@ bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
 		    high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
 			retval = true;
 	}
-	release_queue(kgd);
+	release_queue(adev);
 	return retval;
 }
@@ -555,7 +549,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	if (amdgpu_in_reset(adev))
 		return -EIO;

-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);

 	if (m->cp_hqd_vmid == 0)
 		WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -584,13 +578,13 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 			break;
 		if (time_after(jiffies, end_jiffies)) {
 			pr_err("cp queue preemption time out.\n");
-			release_queue(kgd);
+			release_queue(adev);
 			return -ETIME;
 		}
 		usleep_range(500, 1000);
 	}

-	release_queue(kgd);
+	release_queue(adev);
 	return 0;
 }
@@ -887,7 +881,7 @@ void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);

-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);

 	/*
 	 * Program TBA registers
@@ -905,7 +899,7 @@ void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
 			upper_32_bits(tma_addr >> 8));

-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }

 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
...
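
The exported kgd_gfx_v9_* entry points still take the kgd_dev handle; they convert it to an amdgpu_device once and pass the result to the static helpers. A minimal sketch of that call pattern, based on the program_sh_mem_settings hunk above (the parameter list is abridged to what the diff shows):

	void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_bases /* remaining args omitted */)
	{
		/* one conversion at the public kfd2kgd boundary ... */
		struct amdgpu_device *adev = get_amdgpu_device(kgd);

		/* ... then the static helpers receive adev directly */
		lock_srbm(adev, 0, 0, 0, vmid);
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
		unlock_srbm(adev);
	}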