Commit 0cfc1d68 authored by Srinivasan Shanmugam's avatar Srinivasan Shanmugam Committed by Alex Deucher

drm/amdgpu: Fix errors & warnings in gmc_v6_0.c, gmc_v7_0.c

Fix below checkpatch errors & warnings:

ERROR: trailing statements should be on next line
+       default: BUG();
ERROR: trailing statements should be on next line

WARNING: braces {} are not necessary for single statement blocks
WARNING: braces {} are not necessary for any arm of this statement
WARNING: Block comments use * on subsequent lines
WARNING: Missing a blank line after declarations
WARNING: Prefer 'unsigned int' to bare use of 'unsigned'

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8612a435
...@@ -120,7 +120,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) ...@@ -120,7 +120,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
case CHIP_HAINAN: case CHIP_HAINAN:
chip_name = "hainan"; chip_name = "hainan";
break; break;
default: BUG(); default:
BUG();
} }
/* this memory configuration requires special firmware */ /* this memory configuration requires special firmware */
...@@ -178,9 +179,8 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) ...@@ -178,9 +179,8 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
} }
/* load the MC ucode */ /* load the MC ucode */
for (i = 0; i < ucode_size; i++) { for (i = 0; i < ucode_size; i++)
WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
}
/* put the engine back into the active state */ /* put the engine back into the active state */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
...@@ -208,6 +208,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, ...@@ -208,6 +208,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc) struct amdgpu_gmc *mc)
{ {
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24; base <<= 24;
amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_vram_location(adev, mc, base);
...@@ -228,9 +229,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) ...@@ -228,9 +229,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
} }
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
if (gmc_v6_0_wait_for_idle((void *)adev)) { if (gmc_v6_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n"); dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
if (adev->mode_info.num_crtc) { if (adev->mode_info.num_crtc) {
u32 tmp; u32 tmp;
...@@ -256,9 +256,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) ...@@ -256,9 +256,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v6_0_wait_for_idle((void *)adev)) { if (gmc_v6_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n"); dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
} }
static int gmc_v6_0_mc_init(struct amdgpu_device *adev) static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
...@@ -269,13 +268,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) ...@@ -269,13 +268,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
int r; int r;
tmp = RREG32(mmMC_ARB_RAMCFG); tmp = RREG32(mmMC_ARB_RAMCFG);
if (tmp & (1 << 11)) { if (tmp & (1 << 11))
chansize = 16; chansize = 16;
} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) { else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
chansize = 64; chansize = 64;
} else { else
chansize = 32; chansize = 32;
}
tmp = RREG32(mmMC_SHARED_CHMAP); tmp = RREG32(mmMC_SHARED_CHMAP);
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) { switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0: case 0:
...@@ -352,7 +351,7 @@ static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ...@@ -352,7 +351,7 @@ static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
} }
static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr) unsigned int vmid, uint64_t pd_addr)
{ {
uint32_t reg; uint32_t reg;
...@@ -405,11 +404,11 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, ...@@ -405,11 +404,11 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
} }
/** /**
+ * gmc_v8_0_set_prt - set PRT VM fault * gmc_v8_0_set_prt() - set PRT VM fault
+ * *
+ * @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
+ * @enable: enable/disable VM fault handling for PRT * @enable: enable/disable VM fault handling for PRT
+*/ */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{ {
u32 tmp; u32 tmp;
...@@ -547,7 +546,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) ...@@ -547,7 +546,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0); gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20), (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr); (unsigned long long)table_addr);
return 0; return 0;
} }
...@@ -787,15 +786,16 @@ static int gmc_v6_0_late_init(void *handle) ...@@ -787,15 +786,16 @@ static int gmc_v6_0_late_init(void *handle)
return 0; return 0;
} }
static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{ {
u32 d1vga_control = RREG32(mmD1VGA_CONTROL); u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned size; unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION; size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else { } else {
u32 viewport = RREG32(mmVIEWPORT_SIZE); u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4); 4);
...@@ -814,6 +814,7 @@ static int gmc_v6_0_sw_init(void *handle) ...@@ -814,6 +814,7 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else { } else {
u32 tmp = RREG32(mmMC_SEQ_MISC0); u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK; tmp &= MC_SEQ_MISC0__MT__MASK;
adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp); adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
} }
...@@ -964,7 +965,7 @@ static bool gmc_v6_0_is_idle(void *handle) ...@@ -964,7 +965,7 @@ static bool gmc_v6_0_is_idle(void *handle)
static int gmc_v6_0_wait_for_idle(void *handle) static int gmc_v6_0_wait_for_idle(void *handle)
{ {
unsigned i; unsigned int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) { for (i = 0; i < adev->usec_timeout; i++) {
...@@ -995,10 +996,8 @@ static int gmc_v6_0_soft_reset(void *handle) ...@@ -995,10 +996,8 @@ static int gmc_v6_0_soft_reset(void *handle)
if (srbm_soft_reset) { if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev); gmc_v6_0_mc_stop(adev);
if (gmc_v6_0_wait_for_idle(adev)) { if (gmc_v6_0_wait_for_idle(adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
tmp = RREG32(mmSRBM_SOFT_RESET); tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset; tmp |= srbm_soft_reset;
...@@ -1023,7 +1022,7 @@ static int gmc_v6_0_soft_reset(void *handle) ...@@ -1023,7 +1022,7 @@ static int gmc_v6_0_soft_reset(void *handle)
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev, static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, struct amdgpu_irq_src *src,
unsigned type, unsigned int type,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
u32 tmp; u32 tmp;
...@@ -1141,8 +1140,7 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) ...@@ -1141,8 +1140,7 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs; adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
} }
const struct amdgpu_ip_block_version gmc_v6_0_ip_block = const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
{
.type = AMD_IP_BLOCK_TYPE_GMC, .type = AMD_IP_BLOCK_TYPE_GMC,
.major = 6, .major = 6,
.minor = 0, .minor = 0,
......
...@@ -58,16 +58,14 @@ MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); ...@@ -58,16 +58,14 @@ MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
static const u32 golden_settings_iceland_a11[] = static const u32 golden_settings_iceland_a11[] = {
{
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
}; };
static const u32 iceland_mgcg_cgcg_init[] = static const u32 iceland_mgcg_cgcg_init[] = {
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
}; };
...@@ -151,7 +149,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) ...@@ -151,7 +149,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_KABINI: case CHIP_KABINI:
case CHIP_MULLINS: case CHIP_MULLINS:
return 0; return 0;
default: BUG(); default:
BUG();
} }
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
...@@ -237,6 +236,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, ...@@ -237,6 +236,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc) struct amdgpu_gmc *mc)
{ {
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24; base <<= 24;
amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_vram_location(adev, mc, base);
...@@ -266,9 +266,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) ...@@ -266,9 +266,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
} }
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
if (gmc_v7_0_wait_for_idle((void *)adev)) { if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n"); dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
if (adev->mode_info.num_crtc) { if (adev->mode_info.num_crtc) {
/* Lockout access through VGA aperture*/ /* Lockout access through VGA aperture*/
tmp = RREG32(mmVGA_HDP_CONTROL); tmp = RREG32(mmVGA_HDP_CONTROL);
...@@ -290,9 +290,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) ...@@ -290,9 +290,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v7_0_wait_for_idle((void *)adev)) { if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n"); dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
...@@ -324,11 +323,11 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) ...@@ -324,11 +323,11 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
/* Get VRAM informations */ /* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG); tmp = RREG32(mmMC_ARB_RAMCFG);
if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
chansize = 64; chansize = 64;
} else { else
chansize = 32; chansize = 32;
}
tmp = RREG32(mmMC_SHARED_CHMAP); tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0: case 0:
...@@ -472,7 +471,7 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ...@@ -472,7 +471,7 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
} }
static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr) unsigned int vmid, uint64_t pd_addr)
{ {
uint32_t reg; uint32_t reg;
...@@ -488,8 +487,8 @@ static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, ...@@ -488,8 +487,8 @@ static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr; return pd_addr;
} }
static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
unsigned pasid) unsigned int pasid)
{ {
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
} }
...@@ -700,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) ...@@ -700,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0); gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20), (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr); (unsigned long long)table_addr);
return 0; return 0;
} }
...@@ -761,7 +760,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) ...@@ -761,7 +760,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
* Print human readable fault information (CIK). * Print human readable fault information (CIK).
*/ */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
u32 addr, u32 mc_client, unsigned pasid) u32 addr, u32 mc_client, unsigned int pasid)
{ {
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
...@@ -957,15 +956,16 @@ static int gmc_v7_0_late_init(void *handle) ...@@ -957,15 +956,16 @@ static int gmc_v7_0_late_init(void *handle)
return 0; return 0;
} }
static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{ {
u32 d1vga_control = RREG32(mmD1VGA_CONTROL); u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned size; unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION; size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else { } else {
u32 viewport = RREG32(mmVIEWPORT_SIZE); u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4); 4);
...@@ -985,6 +985,7 @@ static int gmc_v7_0_sw_init(void *handle) ...@@ -985,6 +985,7 @@ static int gmc_v7_0_sw_init(void *handle)
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else { } else {
u32 tmp = RREG32(mmMC_SEQ_MISC0); u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK; tmp &= MC_SEQ_MISC0__MT__MASK;
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
} }
...@@ -1153,7 +1154,7 @@ static bool gmc_v7_0_is_idle(void *handle) ...@@ -1153,7 +1154,7 @@ static bool gmc_v7_0_is_idle(void *handle)
static int gmc_v7_0_wait_for_idle(void *handle) static int gmc_v7_0_wait_for_idle(void *handle)
{ {
unsigned i; unsigned int i;
u32 tmp; u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
...@@ -1191,10 +1192,8 @@ static int gmc_v7_0_soft_reset(void *handle) ...@@ -1191,10 +1192,8 @@ static int gmc_v7_0_soft_reset(void *handle)
if (srbm_soft_reset) { if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev); gmc_v7_0_mc_stop(adev);
if (gmc_v7_0_wait_for_idle((void *)adev)) { if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
tmp = RREG32(mmSRBM_SOFT_RESET); tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset; tmp |= srbm_soft_reset;
...@@ -1220,7 +1219,7 @@ static int gmc_v7_0_soft_reset(void *handle) ...@@ -1220,7 +1219,7 @@ static int gmc_v7_0_soft_reset(void *handle)
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, struct amdgpu_irq_src *src,
unsigned type, unsigned int type,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
u32 tmp; u32 tmp;
...@@ -1384,8 +1383,7 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) ...@@ -1384,8 +1383,7 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs; adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
} }
const struct amdgpu_ip_block_version gmc_v7_0_ip_block = const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
{
.type = AMD_IP_BLOCK_TYPE_GMC, .type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7, .major = 7,
.minor = 0, .minor = 0,
...@@ -1393,8 +1391,7 @@ const struct amdgpu_ip_block_version gmc_v7_0_ip_block = ...@@ -1393,8 +1391,7 @@ const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
.funcs = &gmc_v7_0_ip_funcs, .funcs = &gmc_v7_0_ip_funcs,
}; };
const struct amdgpu_ip_block_version gmc_v7_4_ip_block = const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
{
.type = AMD_IP_BLOCK_TYPE_GMC, .type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7, .major = 7,
.minor = 4, .minor = 4,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment