Commit 519b2331 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.3-2023-03-09' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.3-2023-03-09:

amdgpu:
- Misc display fixes
- UMC 8.10 fixes
- Driver unload fixes
- NBIO 7.3.0 fix
- Error checking fixes for soc15, nv, soc21 read register interface
- Fix video cap query for VCN 4.0.4

amdkfd:
- Fix return check in doorbell handling
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230310031314.1296929-1-alexander.deucher@amd.com
parents 3a43e30b 6ce2ea07
@@ -543,6 +543,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
 	struct harvest_table *harvest_info;
 	u16 offset;
 	int i;
+	uint32_t umc_harvest_config = 0;

 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
@@ -570,12 +571,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 			break;
 		case UMC_HWID:
+			umc_harvest_config |=
+				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
 			(*umc_harvest_count)++;
 			break;
 		default:
 			break;
 		}
 	}
+
+	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+				~umc_harvest_config;
 }
@@ -1156,8 +1162,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 						AMDGPU_MAX_SDMA_INSTANCES);
 			}

-			if (le16_to_cpu(ip->hw_id) == UMC_HWID)
+			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
 				adev->gmc.num_umc++;
+				adev->umc.node_inst_num++;
+			}

 			for (k = 0; k < num_base_address; k++) {
 				/*
...
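Taken together, the discovery-table hunks count every UMC node (harvested or not) in node_inst_num, record harvested nodes in umc_harvest_config, and derive active_mask as the all-ones mask minus the harvested bits. A minimal userspace sketch of that bit-mask idiom, with invented values:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t node_inst_num = 4;        /* total UMC nodes found in discovery */
    uint32_t umc_harvest_config = 0;

    /* pretend the harvest table listed node instance 2 as harvested */
    umc_harvest_config |= 1u << 2;

    /* all-ones for node_inst_num bits, with harvested bits cleared */
    uint32_t active_mask = ((1u << node_inst_num) - 1) & ~umc_harvest_config;

    printf("active_mask = 0x%x\n", (unsigned)active_mask);  /* 0xb: nodes 0, 1, 3 */
    return 0;
}
```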
@@ -1315,7 +1315,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
-	    adev->in_suspend || adev->shutdown)
+	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
 		return;

 	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
...
@@ -602,27 +602,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	       struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 {
 	int ret;
-	int index, idx;
+	int index;
 	int timeout = 20000;
 	bool ras_intr = false;
 	bool skip_unsupport = false;
-	bool dev_entered;

 	if (psp->adev->no_hw_access)
 		return 0;

-	dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
-	/*
-	 * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring
-	 * a lock in drm_dev_enter during driver unload because we must call
-	 * drm_dev_unplug as the beginning of unload driver sequence. It is very
-	 * crucial that userspace can't access device instances anymore.
-	 */
-	if (!dev_entered)
-		WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
-			psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA &&
-			psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD);
-
 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -686,8 +673,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	}

 exit:
-	if (dev_entered)
-		drm_dev_exit(idx);
 	return ret;
 }
...
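For context, the drm_dev_enter()/drm_dev_exit() pairing removed above is the standard DRM unplug guard: the body runs only while the device is still plugged. The sketch below models the pattern with stubs (the fake_* helpers are stand-ins for the real DRM calls, which cannot run outside the kernel); the fix drops the guard from the PSP path and relies on the drm_dev_is_unplugged() check added to amdgpu_bo_release_notify() instead.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for drm_dev_enter()/drm_dev_exit(); here the device is
 * always "plugged", so the guard always admits the critical section. */
static bool fake_drm_dev_enter(int *idx) { *idx = 0; return true; }
static void fake_drm_dev_exit(int idx)  { (void)idx; }

static int submit_cmd(void)
{
    int idx;

    if (!fake_drm_dev_enter(&idx))
        return -19;                       /* -ENODEV: device was unplugged */

    printf("touching hardware safely\n"); /* unplug is held off until exit */

    fake_drm_dev_exit(idx);
    return 0;
}

int main(void) { return submit_cmd(); }
```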
@@ -42,7 +42,7 @@
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))

 #define LOOP_UMC_NODE_INST(node_inst) \
-	for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)
+	for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)

 #define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
 	LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
@@ -69,7 +69,7 @@ struct amdgpu_umc {
 	/* number of umc instance with memory map register access */
 	uint32_t umc_inst_num;

-	/* number of umc node instance with memory map register access */
+	/* Total number of umc node instances, including harvested ones */
 	uint32_t node_inst_num;

 	/* UMC register per channel offset */
@@ -82,6 +82,9 @@ struct amdgpu_umc {
 	const struct amdgpu_umc_funcs *funcs;
 	struct amdgpu_umc_ras *ras;
+
+	/* active mask for umc node instances */
+	unsigned long active_mask;
 };

 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
...
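With LOOP_UMC_NODE_INST rebuilt on for_each_set_bit(), the loop body now runs only for node indices whose bit is set in active_mask, so every existing LOOP_UMC_* user skips harvested nodes without changes. A plain-C approximation of that iteration (no kernel headers; the skip test mirrors what for_each_set_bit does):

```c
#include <stdio.h>

int main(void)
{
    unsigned long active_mask = 0xb;  /* nodes 0, 1, 3 active; node 2 harvested */
    unsigned int node_inst_num = 4;
    unsigned int node_inst;

    for (node_inst = 0; node_inst < node_inst_num; node_inst++) {
        if (!(active_mask >> node_inst & 1))
            continue;                 /* harvested node: skipped silently */
        printf("visiting UMC node %u\n", node_inst);  /* prints 0, 1, 3 */
    }
    return 0;
}
```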
@@ -567,7 +567,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
 	case IP_VERSION(8, 10, 0):
 		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
 		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
-		adev->umc.node_inst_num = adev->gmc.num_umc;
 		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
 		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
 		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
...
@@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 		if (def != data)
 			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
 		break;
-	case IP_VERSION(7, 5, 1):
-		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
-		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
-		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
-		fallthrough;
 	default:
 		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
 		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
@@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 		break;
 	}

+	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	case IP_VERSION(7, 3, 0):
+	case IP_VERSION(7, 5, 1):
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+		break;
+	}
+
 	if (amdgpu_sriov_vf(adev))
 		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
 			regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
...
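The restructuring above replaces a fallthrough case inside the per-version switch with a second switch listing every NBIO version that needs the soft-reset strap cleared, so 7.3.0 picks up the fix alongside 7.5.1. A schematic of the shape (version names and the printf stand in for the register writes):

```c
#include <stdio.h>

enum nbio_ver { V7_3_0, V7_5_1, V_OTHER };

static void init_registers(enum nbio_ver v)
{
    /* ... per-version setup elided ... */

    /* shared work, listed once for every version that needs it */
    switch (v) {
    case V7_3_0:
    case V7_5_1:
        printf("clear STRAP_NO_SOFT_RESET_DEV2_F0\n");
        break;
    default:
        break;
    }
}

int main(void)
{
    init_registers(V7_3_0);  /* now also gets the strap fix */
    return 0;
}
```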
@@ -444,9 +444,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
 		en = &nv_allowed_read_registers[i];
-		if (adev->reg_offset[en->hwip][en->inst] &&
-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
-				+ en->reg_offset))
+		if (!adev->reg_offset[en->hwip][en->inst])
+			continue;
+		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+					+ en->reg_offset))
 			continue;

 		*value = nv_get_register_value(adev,
...
@@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
 		en = &soc15_allowed_read_registers[i];
-		if (adev->reg_offset[en->hwip][en->inst] &&
-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
-				   + en->reg_offset))
+		if (!adev->reg_offset[en->hwip][en->inst])
+			continue;
+		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+					+ en->reg_offset))
 			continue;
...
@@ -111,6 +111,7 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
 	switch (adev->ip_versions[UVD_HWIP][0]) {
 	case IP_VERSION(4, 0, 0):
 	case IP_VERSION(4, 0, 2):
+	case IP_VERSION(4, 0, 4):
 		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
 			if (encode)
 				*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
@@ -291,9 +292,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
 		en = &soc21_allowed_read_registers[i];
-		if (adev->reg_offset[en->hwip][en->inst] &&
-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
-				+ en->reg_offset))
+		if (!adev->reg_offset[en->hwip][en->inst])
+			continue;
+		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+					+ en->reg_offset))
 			continue;

 		*value = soc21_get_register_value(adev,
...
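The nv, soc15, and soc21 hunks fix the same logic error: when an entry's reg_offset table was NULL, the old compound condition evaluated false, so the code fell through as if the register had matched and read a bogus offset. The new form skips such entries outright. A self-contained demonstration with made-up data:

```c
#include <stdio.h>

/* Sketch of the allowed-read-register check; the struct and values are
 * invented stand-ins for the kernel's soc15_allowed_read_registers entries. */
struct entry { const unsigned int *offsets; unsigned int reg_offset; };

static int match_old(const struct entry *en, unsigned int reg)
{
    /* NULL table => condition false => treated as a match (the bug) */
    if (en->offsets && reg != en->offsets[0] + en->reg_offset)
        return 0;
    return 1;
}

static int match_new(const struct entry *en, unsigned int reg)
{
    if (!en->offsets)
        return 0;               /* no register map: never a match */
    if (reg != en->offsets[0] + en->reg_offset)
        return 0;
    return 1;
}

int main(void)
{
    struct entry en = { .offsets = NULL, .reg_offset = 0x10 };

    printf("old: %d (false match)\n", match_old(&en, 0x1234));  /* 1 */
    printf("new: %d\n", match_new(&en, 0x1234));                /* 0 */
    return 0;
}
```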
@@ -31,9 +31,9 @@
 /* number of umc instance with memory map register access */
 #define UMC_V8_10_UMC_INSTANCE_NUM		2

-/* Total channel instances for all umc nodes */
+/* Total channel instances for all available umc nodes */
 #define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
-	(UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num)
+	(UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)

 /* UMC register per channel offset */
 #define UMC_V8_10_PER_CHANNEL_OFFSET	0x400
...
@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
 	if (!pdd->doorbell_index) {
 		int r = kfd_alloc_process_doorbells(pdd->dev,
 						    &pdd->doorbell_index);
-		if (r)
+		if (r < 0)
 			return 0;
 	}
...
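kfd_alloc_process_doorbells() returns a non-negative doorbell index on success and a negative errno on failure, so the old `if (r)` misread any nonzero index as an error and bailed out. A small sketch of the index-or-errno convention (the allocator below is invented):

```c
#include <stdio.h>

/* Invented allocator following the kernel's index-or-negative-errno style. */
static int alloc_doorbell_index(void)
{
    return 3;       /* success: index 3 (nonzero!) */
}

int main(void)
{
    int r = alloc_doorbell_index();

    if (r)          /* old check: index 3 misread as failure */
        printf("old check: spurious failure on r=%d\n", r);

    if (r < 0)      /* new check: only a negative errno is an error */
        printf("new check: real failure\n");
    else
        printf("new check: got doorbell index %d\n", r);

    return 0;
}
```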
@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
 };

+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+	uint32_t max = 0;
+	int i;
+
+	for (i = 0; i < num_clocks; ++i) {
+		if (clocks[i] > max)
+			max = clocks[i];
+	}
+
+	return max;
+}
+
 static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
 		unsigned int voltage)
 {
@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
 	bw_params->clk_table.num_entries = j + 1;

-	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+	for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
 		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
 		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
 		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
 		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
 	}
+	bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+	bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+	bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+	bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);

 	bw_params->vram_type = bios_info->memory_type;
 	bw_params->num_channels = bios_info->ma_channel_number;
...
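The split loop fills entries 0..num_entries-2 with a per-voltage DCFCLK lookup and pins the final, highest entry to the maximum of the DcfClocks array via the new find_max_clk_value() helper. A standalone sketch of that helper with an invented clock table:

```c
#include <stdio.h>
#include <stdint.h>

/* Same scan as the helper added above: find the largest value in a
 * clock array. The table contents here are made up. */
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num)
{
    uint32_t max = 0, i;

    for (i = 0; i < num; ++i)
        if (clocks[i] > max)
            max = clocks[i];
    return max;
}

int main(void)
{
    const uint32_t dcf_clocks[4] = { 400, 600, 0, 800 }; /* MHz, invented */

    /* the fixed code assigns this to the top clock-table entry's dcfclk */
    printf("top entry dcfclk = %u MHz\n",
           (unsigned)find_max_clk_value(dcf_clocks, 4));  /* 800 */
    return 0;
}
```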
@@ -46,6 +46,7 @@
 #include "asic_reg/mp/mp_13_0_0_sh_mask.h"
 #include "smu_cmn.h"
 #include "amdgpu_ras.h"
+#include "umc_v8_10.h"

 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -90,6 +91,12 @@
 #define DEBUGSMC_MSG_Mode1Reset	2

+/*
+ * SMU_v13_0_10 supports ECCTABLE since version 80.34.0;
+ * use this to check whether ECCTABLE is supported.
+ */
+#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
+
 static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,		PPSMC_MSG_TestMessage,		1),
 	MSG_MAP(GetSmuVersion,		PPSMC_MSG_GetSmuVersion,	1),
@@ -229,6 +236,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP(ACTIVITY_MONITOR_COEFF),
 	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
 	TAB_MAP(I2C_COMMANDS),
+	TAB_MAP(ECCINFO),
 };

 static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -462,6 +470,8 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
 		       AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
 			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

 	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
@@ -477,8 +487,14 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
 	if (!smu_table->watermarks_table)
 		goto err2_out;

+	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+	if (!smu_table->ecc_table)
+		goto err3_out;
+
 	return 0;

+err3_out:
+	kfree(smu_table->watermarks_table);
 err2_out:
 	kfree(smu_table->gpu_metrics_table);
 err1_out:
@@ -2036,6 +2052,64 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
 	return ret;
 }

+static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t if_version = 0xff, smu_version = 0xff;
+	int ret = 0;
+
+	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+	if (ret)
+		return -EOPNOTSUPP;
+
+	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
+	    (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+		return ret;
+	else
+		return -EOPNOTSUPP;
+}
+
+static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
+					void *table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct amdgpu_device *adev = smu->adev;
+	EccInfoTable_t *ecc_table = NULL;
+	struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+	int i, ret = 0;
+	struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+	ret = smu_v13_0_0_check_ecc_table_support(smu);
+	if (ret)
+		return ret;
+
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_ECCINFO,
+				   0,
+				   smu_table->ecc_table,
+				   false);
+	if (ret) {
+		dev_info(adev->dev, "Failed to export SMU ecc table!\n");
+		return ret;
+	}
+
+	ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+	for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) {
+		ecc_info_per_channel = &(eccinfo->ecc[i]);
+		ecc_info_per_channel->ce_count_lo_chip =
+				ecc_table->EccInfo[i].ce_count_lo_chip;
+		ecc_info_per_channel->ce_count_hi_chip =
+				ecc_table->EccInfo[i].ce_count_hi_chip;
+		ecc_info_per_channel->mca_umc_status =
+				ecc_table->EccInfo[i].mca_umc_status;
+		ecc_info_per_channel->mca_umc_addr =
+				ecc_table->EccInfo[i].mca_umc_addr;
+	}
+
+	return ret;
+}
+
 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
 	.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
 	.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2111,6 +2185,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
 	.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
 	.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
 	.gpo_control = smu_v13_0_gpo_control,
+	.get_ecc_info = smu_v13_0_0_get_ecc_info,
 };

 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
...
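The support gate encodes SMU firmware 80.34.0 as 0x00502200 (0x50 = 80, 0x22 = 34, 0x00 = 0) and compares the running firmware version against it, returning -EOPNOTSUPP below that. A minimal sketch of the comparison with invented version numbers:

```c
#include <stdio.h>
#include <stdint.h>

#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200  /* 80.34.0 */

/* returns 0 when the firmware can export the ECC table, else -EOPNOTSUPP */
static int check_ecc_table_support(uint32_t smu_version)
{
    if (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION)
        return 0;
    return -95;  /* -EOPNOTSUPP */
}

int main(void)
{
    printf("fw 0x00502300 -> %d\n", check_ecc_table_support(0x00502300)); /* 0 */
    printf("fw 0x00502100 -> %d\n", check_ecc_table_support(0x00502100)); /* -95 */
    return 0;
}
```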
@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
 	/* Sink EOTF is Bit map while infoframe is absolute values */
 	if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
-	    connector->hdr_sink_metadata.hdmi_type1.eotf)) {
-		DRM_DEBUG_KMS("EOTF Not Supported\n");
-		return -EINVAL;
-	}
+	    connector->hdr_sink_metadata.hdmi_type1.eotf))
+		DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);

 	err = hdmi_drm_infoframe_init(frame);
 	if (err < 0)
...
@@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
 	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
 	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
 	drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+	drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);

 	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 		if (state->writeback_job && state->writeback_job->fb)
...