Commit 1cca6087 authored by Graham Sider, committed by Alex Deucher

drm/amdkfd: replace kgd_dev in static gfx v8 funcs

Static functions in amdgpu_amdkfd_gfx_v8.c now use struct amdgpu_device directly instead of struct kgd_dev.
Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9365fbf3
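For reference, a minimal, self-contained sketch of the calling pattern this change establishes (stand-in types and simplified bodies, not the real kernel code): the exported kgd_* hooks still receive the opaque struct kgd_dev * and resolve it to struct amdgpu_device * exactly once, while the static helpers now take adev directly instead of calling get_amdgpu_device() themselves.

/* Sketch only: hypothetical stand-ins for the kernel types and register I/O. */
#include <stdint.h>

struct kgd_dev;					/* opaque handle handed to KFD */
struct amdgpu_device { uint32_t srbm_gfx_cntl; };

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

/* After the change: static helpers take adev directly. */
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
		      uint32_t queue, uint32_t vmid)
{
	/* stands in for mutex_lock(&adev->srbm_mutex) + WREG32(mmSRBM_GFX_CNTL, value) */
	adev->srbm_gfx_cntl = mec | pipe | queue | vmid;
}

static void unlock_srbm(struct amdgpu_device *adev)
{
	/* stands in for WREG32(mmSRBM_GFX_CNTL, 0) + mutex_unlock(&adev->srbm_mutex) */
	adev->srbm_gfx_cntl = 0;
}

/* Exported hook: still takes kgd, resolves it to adev once, passes adev down. */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(adev, 0, 0, 0, vmid);
	/* SH_MEM_* register programming happens here in the real function */
	unlock_srbm(adev);
}

int main(void)
{
	struct amdgpu_device dev = { 0 };

	kgd_program_sh_mem_settings((struct kgd_dev *)&dev, /* vmid */ 8);
	return 0;
}

This mirrors the diff below: only the entry points that KFD calls keep the kgd_dev handle; everything beneath them operates on amdgpu_device.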
@@ -44,38 +44,33 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 	return (struct amdgpu_device *)kgd;
 }
 
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
 
 	mutex_lock(&adev->srbm_mutex);
 	WREG32(mmSRBM_GFX_CNTL, value);
 }
 
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	WREG32(mmSRBM_GFX_CNTL, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
 
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, queue_id, 0);
+	lock_srbm(adev, mec, pipe, queue_id, 0);
 }
 
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
 {
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
 static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
@@ -86,14 +81,14 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
 	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
 	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
 	WREG32(mmSH_MEM_BASES, sh_mem_bases);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
 static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
@@ -132,12 +127,12 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, 0, 0);
+	lock_srbm(adev, mec, pipe, 0, 0);
 
 	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
 			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 
 	return 0;
 }
@@ -178,7 +173,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 
 	m = get_mqd(mqd);
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	/* HIQ is set during driver init period with vmid set to 0*/
 	if (m->cp_hqd_vmid == 0) {
@@ -226,16 +221,16 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	 * release srbm_mutex to avoid circular dependency between
 	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
 	 */
-	release_queue(kgd);
+	release_queue(adev);
 	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 	if (valid_wptr)
 		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
 
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
 	WREG32(mmCP_HQD_ACTIVE, data);
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	return 0;
 }
@@ -258,7 +253,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
 	if (*dump == NULL)
 		return -ENOMEM;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
 	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
@@ -268,7 +263,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
 	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
 		DUMP_REG(reg);
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -375,7 +370,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
 	bool retval = false;
 	uint32_t low, high;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 	act = RREG32(mmCP_HQD_ACTIVE);
 	if (act) {
 		low = lower_32_bits(queue_address >> 8);
@@ -385,7 +380,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
 		    high == RREG32(mmCP_HQD_PQ_BASE_HI))
 			retval = true;
 	}
-	release_queue(kgd);
+	release_queue(adev);
 	return retval;
 }
@@ -422,7 +417,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	if (amdgpu_in_reset(adev))
 		return -EIO;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	if (m->cp_hqd_vmid == 0)
 		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -502,13 +497,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 			break;
 		if (time_after(jiffies, end_jiffies)) {
 			pr_err("cp queue preemption time out.\n");
-			release_queue(kgd);
+			release_queue(adev);
 			return -ETIME;
 		}
 		usleep_range(500, 1000);
 	}
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	return 0;
 }
@@ -612,9 +607,9 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,