Commit 630e959f authored by Alex Deucher

drm/amdgpu/gmc9: convert to IP version checking

Use IP versions rather than asic_type to differentiate
IP version specific features.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 64df665f
...@@ -579,7 +579,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, ...@@ -579,7 +579,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* the new fast GRBM interface. * the new fast GRBM interface.
*/ */
if ((entry->vmid_src == AMDGPU_GFXHUB_0) && if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
(adev->asic_type < CHIP_ALDEBARAN)) (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32(hub->vm_l2_pro_fault_status); RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status);
...@@ -597,26 +597,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, ...@@ -597,26 +597,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
gfxhub_client_ids[cid], gfxhub_client_ids[cid],
cid); cid);
} else { } else {
switch (adev->asic_type) { switch (adev->ip_versions[MMHUB_HWIP][0]) {
case CHIP_VEGA10: case IP_VERSION(9, 0, 0):
mmhub_cid = mmhub_client_ids_vega10[cid][rw]; mmhub_cid = mmhub_client_ids_vega10[cid][rw];
break; break;
case CHIP_VEGA12: case IP_VERSION(9, 3, 0):
mmhub_cid = mmhub_client_ids_vega12[cid][rw]; mmhub_cid = mmhub_client_ids_vega12[cid][rw];
break; break;
case CHIP_VEGA20: case IP_VERSION(9, 4, 0):
mmhub_cid = mmhub_client_ids_vega20[cid][rw]; mmhub_cid = mmhub_client_ids_vega20[cid][rw];
break; break;
case CHIP_ARCTURUS: case IP_VERSION(9, 4, 1):
mmhub_cid = mmhub_client_ids_arcturus[cid][rw]; mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
break; break;
case CHIP_RAVEN: case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 0):
mmhub_cid = mmhub_client_ids_raven[cid][rw]; mmhub_cid = mmhub_client_ids_raven[cid][rw];
break; break;
case CHIP_RENOIR: case IP_VERSION(1, 5, 0):
case IP_VERSION(2, 4, 0):
mmhub_cid = mmhub_client_ids_renoir[cid][rw]; mmhub_cid = mmhub_client_ids_renoir[cid][rw];
break; break;
case CHIP_ALDEBARAN: case IP_VERSION(9, 4, 2):
mmhub_cid = mmhub_client_ids_aldebaran[cid][rw]; mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
break; break;
default: default:
...@@ -694,7 +696,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, ...@@ -694,7 +696,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub) uint32_t vmhub)
{ {
if (adev->asic_type == CHIP_ALDEBARAN) if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
return false; return false;
return ((vmhub == AMDGPU_MMHUB_0 || return ((vmhub == AMDGPU_MMHUB_0 ||
...@@ -745,7 +747,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ...@@ -745,7 +747,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
hub = &adev->vmhub[vmhub]; hub = &adev->vmhub[vmhub];
if (adev->gmc.xgmi.num_physical_nodes && if (adev->gmc.xgmi.num_physical_nodes &&
adev->asic_type == CHIP_VEGA20) { adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a /* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes * heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent * both. Due to a race condition with concurrent
...@@ -808,7 +810,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ...@@ -808,7 +810,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* GRBM interface. * GRBM interface.
*/ */
if ((vmhub == AMDGPU_GFXHUB_0) && if ((vmhub == AMDGPU_GFXHUB_0) &&
(adev->asic_type < CHIP_ALDEBARAN)) (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32_NO_KIQ(hub->vm_inv_eng0_req + RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng); hub->eng_distance * eng);
...@@ -874,7 +876,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, ...@@ -874,7 +876,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
* still need a second TLB flush after this. * still need a second TLB flush after this.
*/ */
bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
adev->asic_type == CHIP_VEGA20); adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
/* 2 dwords flush + 8 dwords fence */ /* 2 dwords flush + 8 dwords fence */
unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8; unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
...@@ -1088,13 +1090,13 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, ...@@ -1088,13 +1090,13 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID; *flags &= ~AMDGPU_PTE_VALID;
} }
if ((adev->asic_type == CHIP_ARCTURUS || if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
adev->asic_type == CHIP_ALDEBARAN) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
!(*flags & AMDGPU_PTE_SYSTEM) && !(*flags & AMDGPU_PTE_SYSTEM) &&
mapping->bo_va->is_xgmi) mapping->bo_va->is_xgmi)
*flags |= AMDGPU_PTE_SNOOPED; *flags |= AMDGPU_PTE_SNOOPED;
if (adev->asic_type == CHIP_ALDEBARAN) if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
*flags |= mapping->flags & AMDGPU_PTE_SNOOPED; *flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
} }
...@@ -1108,9 +1110,10 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -1108,9 +1110,10 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
} else { } else {
u32 viewport; u32 viewport;
switch (adev->asic_type) { switch (adev->ip_versions[DCE_HWIP][0]) {
case CHIP_RAVEN: case IP_VERSION(1, 0, 0):
case CHIP_RENOIR: case IP_VERSION(1, 0, 1):
case IP_VERSION(2, 1, 0):
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport, size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
...@@ -1118,9 +1121,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -1118,9 +1121,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4); 4);
break; break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
default: default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
...@@ -1151,11 +1151,11 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) ...@@ -1151,11 +1151,11 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->ip_versions[UMC_HWIP][0]) {
case CHIP_VEGA10: case IP_VERSION(6, 0, 0):
adev->umc.funcs = &umc_v6_0_funcs; adev->umc.funcs = &umc_v6_0_funcs;
break; break;
case CHIP_VEGA20: case IP_VERSION(6, 1, 1):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
...@@ -1163,7 +1163,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) ...@@ -1163,7 +1163,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs; adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break; break;
case CHIP_ARCTURUS: case IP_VERSION(6, 1, 2):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
...@@ -1171,7 +1171,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) ...@@ -1171,7 +1171,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs; adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break; break;
case CHIP_ALDEBARAN: case IP_VERSION(6, 7, 0):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM; adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
...@@ -1190,11 +1190,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) ...@@ -1190,11 +1190,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->ip_versions[MMHUB_HWIP][0]) {
case CHIP_ARCTURUS: case IP_VERSION(9, 4, 1):
adev->mmhub.funcs = &mmhub_v9_4_funcs; adev->mmhub.funcs = &mmhub_v9_4_funcs;
break; break;
case CHIP_ALDEBARAN: case IP_VERSION(9, 4, 2):
adev->mmhub.funcs = &mmhub_v1_7_funcs; adev->mmhub.funcs = &mmhub_v1_7_funcs;
break; break;
default: default:
...@@ -1205,14 +1205,14 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) ...@@ -1205,14 +1205,14 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->ip_versions[MMHUB_HWIP][0]) {
case CHIP_VEGA20: case IP_VERSION(9, 4, 0):
adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs; adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
break; break;
case CHIP_ARCTURUS: case IP_VERSION(9, 4, 1):
adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs; adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
break; break;
case CHIP_ALDEBARAN: case IP_VERSION(9, 4, 2):
adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs; adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
break; break;
default: default:
...@@ -1233,8 +1233,9 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) ...@@ -1233,8 +1233,9 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { /* is UMC the right IP to check for MCA? Maybe DF? */
case CHIP_ALDEBARAN: switch (adev->ip_versions[UMC_HWIP][0]) {
case IP_VERSION(6, 7, 0):
if (!adev->gmc.xgmi.connected_to_cpu) if (!adev->gmc.xgmi.connected_to_cpu)
adev->mca.funcs = &mca_v3_0_funcs; adev->mca.funcs = &mca_v3_0_funcs;
break; break;
...@@ -1247,11 +1248,12 @@ static int gmc_v9_0_early_init(void *handle) ...@@ -1247,11 +1248,12 @@ static int gmc_v9_0_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
if (adev->asic_type == CHIP_VEGA20 || if (adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS) adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true; adev->gmc.xgmi.supported = true;
if (adev->asic_type == CHIP_ALDEBARAN) { if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
adev->gmc.xgmi.supported = true; adev->gmc.xgmi.supported = true;
adev->gmc.xgmi.connected_to_cpu = adev->gmc.xgmi.connected_to_cpu =
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
...@@ -1289,7 +1291,8 @@ static int gmc_v9_0_late_init(void *handle) ...@@ -1289,7 +1291,8 @@ static int gmc_v9_0_late_init(void *handle)
* Workaround performance drop issue with VBIOS enables partial * Workaround performance drop issue with VBIOS enables partial
* writes, while disables HBM ECC for vega10. * writes, while disables HBM ECC for vega10.
*/ */
if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) { if (!amdgpu_sriov_vf(adev) &&
(adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
if (adev->df.funcs->enable_ecc_force_par_wr_rmw) if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
...@@ -1393,17 +1396,18 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) ...@@ -1393,17 +1396,18 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
/* set the gart size */ /* set the gart size */
if (amdgpu_gart_size == -1) { if (amdgpu_gart_size == -1) {
switch (adev->asic_type) { switch (adev->ip_versions[GC_HWIP][0]) {
case CHIP_VEGA10: /* all engines support GPUVM */ case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
case CHIP_VEGA12: /* all engines support GPUVM */ case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
case CHIP_VEGA20: case IP_VERSION(9, 4, 0):
case CHIP_ARCTURUS: case IP_VERSION(9, 4, 1):
case CHIP_ALDEBARAN: case IP_VERSION(9, 4, 2):
default: default:
adev->gmc.gart_size = 512ULL << 20; adev->gmc.gart_size = 512ULL << 20;
break; break;
case CHIP_RAVEN: /* DCE SG support */ case IP_VERSION(9, 1, 0): /* DCE SG support */
case CHIP_RENOIR: case IP_VERSION(9, 2, 2): /* DCE SG support */
case IP_VERSION(9, 3, 0):
adev->gmc.gart_size = 1024ULL << 20; adev->gmc.gart_size = 1024ULL << 20;
break; break;
} }
...@@ -1464,7 +1468,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) ...@@ -1464,7 +1468,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
*/ */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev) static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{ {
if (adev->asic_type == CHIP_RAVEN) if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
(adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
} }
...@@ -1507,8 +1512,9 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -1507,8 +1512,9 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gmc.vram_type = vram_type; adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor; adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) { switch (adev->ip_versions[GC_HWIP][0]) {
case CHIP_RAVEN: case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
adev->num_vmhubs = 2; adev->num_vmhubs = 2;
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
...@@ -1520,11 +1526,11 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -1520,11 +1526,11 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.num_level > 1; adev->vm_manager.num_level > 1;
} }
break; break;
case CHIP_VEGA10: case IP_VERSION(9, 0, 1):
case CHIP_VEGA12: case IP_VERSION(9, 2, 1):
case CHIP_VEGA20: case IP_VERSION(9, 4, 0):
case CHIP_RENOIR: case IP_VERSION(9, 3, 0):
case CHIP_ALDEBARAN: case IP_VERSION(9, 4, 2):
adev->num_vmhubs = 2; adev->num_vmhubs = 2;
...@@ -1539,7 +1545,7 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -1539,7 +1545,7 @@ static int gmc_v9_0_sw_init(void *handle)
else else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break; break;
case CHIP_ARCTURUS: case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3; adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */ /* Keep the vm size same with Vega20 */
...@@ -1555,7 +1561,7 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -1555,7 +1561,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r) if (r)
return r; return r;
if (adev->asic_type == CHIP_ARCTURUS) { if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault); &adev->gmc.vm_fault);
if (r) if (r)
...@@ -1622,8 +1628,8 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -1622,8 +1628,8 @@ static int gmc_v9_0_sw_init(void *handle)
* for video processing. * for video processing.
*/ */
adev->vm_manager.first_kfd_vmid = adev->vm_manager.first_kfd_vmid =
(adev->asic_type == CHIP_ARCTURUS || (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8; adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;
amdgpu_vm_manager_init(adev); amdgpu_vm_manager_init(adev);
...@@ -1649,12 +1655,12 @@ static int gmc_v9_0_sw_fini(void *handle) ...@@ -1649,12 +1655,12 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->ip_versions[MMHUB_HWIP][0]) {
case CHIP_VEGA10: case IP_VERSION(9, 0, 0):
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
break; break;
fallthrough; fallthrough;
case CHIP_VEGA20: case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0, golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0)); ARRAY_SIZE(golden_settings_mmhub_1_0_0));
...@@ -1662,9 +1668,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -1662,9 +1668,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_athub_1_0_0, golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0)); ARRAY_SIZE(golden_settings_athub_1_0_0));
break; break;
case CHIP_VEGA12: case IP_VERSION(9, 1, 0):
break; case IP_VERSION(9, 2, 0):
case CHIP_RAVEN:
/* TODO for renoir */ /* TODO for renoir */
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0, golden_settings_athub_1_0_0,
...@@ -1684,7 +1689,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -1684,7 +1689,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/ */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev) void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{ {
if (adev->asic_type == CHIP_RAVEN) { if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
(adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
WARN_ON(adev->gmc.sdpif_register != WARN_ON(adev->gmc.sdpif_register !=
RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0)); RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment