Commit bd3755c1 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.11-2024-08-28' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.11-2024-08-28:

amdgpu:
- SWSMU gaming stability fix
- SMU 13.0.7 fix
- SWSMU documentation alignment fix
- SMU 14.0.x fixes
- GC 12.x fix
- Display fix
- IP discovery fix
- SMU 13.0.6 fix
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240828184908.125387-1-alexander.deucher@amd.com
parents 5be63fc1 849f0d58
@@ -1500,6 +1500,7 @@ union gc_info {
struct gc_info_v1_0 v1;
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
+struct gc_info_v1_3 v1_3;
struct gc_info_v2_0 v2;
struct gc_info_v2_1 v2_1;
};
@@ -1558,6 +1559,16 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
}
+if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
+adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
+adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
+adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
+adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
+adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
+adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
+adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
+adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
+}
break;
case 2:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
......
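Note (editor's illustration, not part of the patch): the new v1_3 reads above are gated on header.version_minor >= 3, so older discovery tables are still parsed with only the fields they actually carry. A minimal standalone C sketch of that layered-version pattern; the struct and field names below are invented for the example:

#include <stdint.h>
#include <stdio.h>

struct tbl_header { uint16_t version_major, version_minor; };

struct tbl_v1_3 {
	struct tbl_header header;
	uint32_t base_field;       /* present since v1.0 */
	uint32_t cache_line_size;  /* only valid from v1.3 on */
};

static void parse(const struct tbl_v1_3 *t)
{
	unsigned int line = 64; /* conservative default for pre-1.3 tables */

	printf("base_field: %u\n", (unsigned int)t->base_field);

	/* only trust fields that exist in this table revision */
	if (t->header.version_minor >= 3)
		line = t->cache_line_size;

	printf("cache line size: %u\n", line);
}

int main(void)
{
	struct tbl_v1_3 t = { { 1, 3 }, 42, 128 };

	parse(&t);
	return 0;
}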
@@ -240,6 +240,12 @@ struct amdgpu_gfx_config {
uint32_t gc_tcp_size_per_cu;
uint32_t gc_num_cu_per_sqc;
uint32_t gc_tcc_size;
+uint32_t gc_tcp_cache_line_size;
+uint32_t gc_instruction_cache_size_per_sqc;
+uint32_t gc_instruction_cache_line_size;
+uint32_t gc_scalar_data_cache_size_per_sqc;
+uint32_t gc_scalar_data_cache_line_size;
+uint32_t gc_tcc_cache_line_size;
};
struct amdgpu_cu_info {
......
@@ -3005,7 +3005,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
-tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
......
@@ -187,6 +187,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
......
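Note (editor's illustration, not part of the patch): both hunks above enable UNORD_DISPATCH in CP_HQD_PQ_CONTROL, one via REG_SET_FIELD() in gfx_v12_0_compute_mqd_init() and one via the __UNORD_DISPATCH_MASK in the update_mqd() path. A generic sketch of the read-modify-write field update such macros perform; the bit position and names below are chosen for the example, not taken from the amdgpu register headers:

#include <stdint.h>
#include <stdio.h>

/* Example field layout: a 1-bit UNORD_DISPATCH-style field at bit 28. */
#define FIELD_SHIFT 28u
#define FIELD_MASK  (0x1u << FIELD_SHIFT)

/* Read-modify-write of one register field, as REG_SET_FIELD-style macros
 * do: clear the field, then OR in the new value at the field's shift. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t pq_control = 0;

	pq_control = set_field(pq_control, FIELD_MASK, FIELD_SHIFT, 1);
	printf("pq_control = 0x%08x\n", (unsigned int)pq_control);
	return 0;
}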
@@ -28,6 +28,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include "amdgpu.h"
@@ -935,10 +936,14 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
afb = to_amdgpu_framebuffer(new_state->fb);
-obj = new_state->fb->obj[0];
+obj = drm_gem_fb_get_obj(new_state->fb, 0);
+if (!obj) {
+DRM_ERROR("Failed to get obj from framebuffer\n");
+return -EINVAL;
+}
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
......
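Note (editor's illustration, not part of the patch): drm_gem_fb_get_obj() can return NULL when the requested plane has no GEM object attached, so the added check turns a potential NULL-pointer dereference of fb->obj[0] into a clean -EINVAL. A small standalone sketch of that guarded-lookup pattern, with a toy framebuffer type standing in for struct drm_framebuffer:

#include <stdio.h>
#include <stddef.h>

#define MAX_PLANES 4

/* Toy framebuffer: per-plane buffer objects, any of which may be missing. */
struct toy_fb {
	void *obj[MAX_PLANES];
};

/* Guarded lookup in the spirit of drm_gem_fb_get_obj(): hand back NULL
 * for an out-of-range or unset slot, so the caller can bail out. */
static void *toy_fb_get_obj(const struct toy_fb *fb, unsigned int plane)
{
	if (plane >= MAX_PLANES)
		return NULL;
	return fb->obj[plane];
}

int main(void)
{
	struct toy_fb fb = { { NULL } };

	if (!toy_fb_get_obj(&fb, 0))
		printf("no backing object: return -EINVAL instead of dereferencing NULL\n");
	return 0;
}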
@@ -258,6 +258,48 @@ struct gc_info_v1_2 {
uint32_t gc_gl2c_per_gpu;
};
+struct gc_info_v1_3 {
+struct gpu_info_header header;
+uint32_t gc_num_se;
+uint32_t gc_num_wgp0_per_sa;
+uint32_t gc_num_wgp1_per_sa;
+uint32_t gc_num_rb_per_se;
+uint32_t gc_num_gl2c;
+uint32_t gc_num_gprs;
+uint32_t gc_num_max_gs_thds;
+uint32_t gc_gs_table_depth;
+uint32_t gc_gsprim_buff_depth;
+uint32_t gc_parameter_cache_depth;
+uint32_t gc_double_offchip_lds_buffer;
+uint32_t gc_wave_size;
+uint32_t gc_max_waves_per_simd;
+uint32_t gc_max_scratch_slots_per_cu;
+uint32_t gc_lds_size;
+uint32_t gc_num_sc_per_se;
+uint32_t gc_num_sa_per_se;
+uint32_t gc_num_packer_per_sc;
+uint32_t gc_num_gl2a;
+uint32_t gc_num_tcp_per_sa;
+uint32_t gc_num_sdp_interface;
+uint32_t gc_num_tcps;
+uint32_t gc_num_tcp_per_wpg;
+uint32_t gc_tcp_l1_size;
+uint32_t gc_num_sqc_per_wgp;
+uint32_t gc_l1_instruction_cache_size_per_sqc;
+uint32_t gc_l1_data_cache_size_per_sqc;
+uint32_t gc_gl1c_per_sa;
+uint32_t gc_gl1c_size_per_instance;
+uint32_t gc_gl2c_per_gpu;
+uint32_t gc_tcp_size_per_cu;
+uint32_t gc_tcp_cache_line_size;
+uint32_t gc_instruction_cache_size_per_sqc;
+uint32_t gc_instruction_cache_line_size;
+uint32_t gc_scalar_data_cache_size_per_sqc;
+uint32_t gc_scalar_data_cache_line_size;
+uint32_t gc_tcc_size;
+uint32_t gc_tcc_cache_line_size;
+};
struct gc_info_v2_0 {
struct gpu_info_header header;
......
@@ -2225,7 +2225,8 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
-bool skip_display_settings)
+bool skip_display_settings,
+bool force_update)
{
int ret = 0;
int index = 0;
@@ -2254,7 +2255,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
-if (smu_dpm_ctx->dpm_level != level) {
+if (force_update || smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2265,13 +2266,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
smu_dpm_ctx->dpm_level = level;
}
-if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
-if (smu->power_profile_mode != workload[0])
+if (force_update || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
@@ -2292,11 +2292,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
-ret = smu_adjust_power_state_dynamic(smu, level, false);
+ret = smu_adjust_power_state_dynamic(smu, level, false, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
+ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+break;
case AMD_PP_TASK_READJUST_POWER_STATE:
-ret = smu_adjust_power_state_dynamic(smu, level, true);
+ret = smu_adjust_power_state_dynamic(smu, level, true, false);
break;
default:
break;
@@ -2343,8 +2345,7 @@ static int smu_switch_power_profile(void *handle,
workload[0] = smu->workload_setting[index];
}
-if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
return 0;
......
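Note (editor's illustration, not part of the patch): the new force_update flag lets the AMD_PP_TASK_COMPLETE_INIT path reprogram the performance level and power profile even when the cached software state already matches, which is presumably the "SWSMU gaming stability fix" listed in the tag. A standalone sketch of the skip-unless-forced pattern; the function and variable names are invented for the example:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the cached SMU state. */
static int cached_level = -1;

static void program_level(int level)
{
	printf("programming performance level %d\n", level);
	cached_level = level;
}

/* Skip the reprogram when the cached state already matches, unless the
 * caller forces it -- e.g. right after init, when the cache may be stale. */
static void adjust_level(int level, bool force_update)
{
	if (force_update || cached_level != level)
		program_level(level);
}

int main(void)
{
	adjust_level(0, true);   /* COMPLETE_INIT-style call: always programmed */
	adjust_level(0, false);  /* unchanged, skipped */
	adjust_level(1, false);  /* changed, programmed */
	return 0;
}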
@@ -92,7 +92,6 @@
//Resets
#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
-#define PPSMC_MSG_Mode1Reset 0x2F
//Set SystemVirtual DramAddrHigh
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
@@ -119,11 +118,12 @@
//STB to dram log
#define PPSMC_MSG_DumpSTBtoDram 0x3D
-#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E
-#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F
+#define PPSMC_MSG_STBtoDramLogSetDramAddress 0x3E
+#define PPSMC_MSG_DummyUndefined 0x3F
#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40
#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41
+#define PPSMC_MSG_UseProfilingMode 0x42
#define PPSMC_MSG_AllowGfxDcs 0x43
#define PPSMC_MSG_DisallowGfxDcs 0x44
#define PPSMC_MSG_EnableAudioStutterWA 0x45
@@ -135,6 +135,16 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B
#define PPSMC_MSG_SetPriorityDeltaGain 0x4C
#define PPSMC_MSG_AllowIHHostInterrupt 0x4D
#define PPSMC_MSG_EnableShadowDpm 0x4E
#define PPSMC_MSG_Mode3Reset 0x4F
-#define PPSMC_Message_Count 0x50
+#define PPSMC_MSG_SetDriverDramAddr 0x50
+#define PPSMC_MSG_SetToolsDramAddr 0x51
+#define PPSMC_MSG_TransferTableSmu2DramWithAddr 0x52
+#define PPSMC_MSG_TransferTableDram2SmuWithAddr 0x53
+#define PPSMC_MSG_GetAllRunningSmuFeatures 0x54
+#define PPSMC_MSG_GetSvi3Voltage 0x55
+#define PPSMC_MSG_UpdatePolicy 0x56
+#define PPSMC_MSG_ExtPwrConnSupport 0x57
+#define PPSMC_MSG_PreloadSwPstateForUclkOverDrive 0x58
+#define PPSMC_Message_Count 0x59
#endif
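Note (editor's illustration, not part of the patch): the renumbering above moves PPSMC_Message_Count from 0x50 to 0x59 and inserts DRAM-address and query messages in between; the driver's MSG_MAP()-style table (see the smu_v14_0_2_message_map hunk further down) has to stay aligned with these firmware IDs. A toy sketch of such a driver-index-to-firmware-ID table; the enum and array below are invented for the example:

#include <stdio.h>

/* Invented driver-side message indices and the firmware IDs they map to. */
enum drv_msg {
	DRV_MSG_SET_DRIVER_DRAM_ADDR,
	DRV_MSG_GET_SVI3_VOLTAGE,
	DRV_MSG_COUNT
};

static const unsigned int fw_msg_id[DRV_MSG_COUNT] = {
	[DRV_MSG_SET_DRIVER_DRAM_ADDR] = 0x50,
	[DRV_MSG_GET_SVI3_VOLTAGE]     = 0x55,
};

int main(void)
{
	printf("SetDriverDramAddr -> firmware message 0x%02x\n",
	       fw_msg_id[DRV_MSG_SET_DRIVER_DRAM_ADDR]);
	return 0;
}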
@@ -121,6 +121,7 @@ struct mca_ras_info {
#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
+#define P2S_TABLE_ID_3 0x50325303
// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
@@ -271,14 +272,18 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
+int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
/* No need to load P2S tables in IOV mode */
if (amdgpu_sriov_vf(adev))
return 0;
-if (!(adev->flags & AMD_IS_APU))
+if (!(adev->flags & AMD_IS_APU)) {
p2s_table_id = P2S_TABLE_ID_X;
+if (var == 0x5)
+p2s_table_id = P2S_TABLE_ID_3;
+}
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
......
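Note (editor's illustration, not part of the patch): the hunk above selects a third P2S table when the low nibble of the PCI device ID is 0x5 on non-APU parts. A standalone sketch of that selection logic; the table IDs are copied from the hunk, the device IDs passed in main() are examples only:

#include <stdint.h>
#include <stdio.h>

#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
#define P2S_TABLE_ID_3 0x50325303

static uint32_t pick_p2s_table(uint16_t pci_device_id, int is_apu)
{
	uint32_t id = P2S_TABLE_ID_A;

	if (!is_apu) {
		id = P2S_TABLE_ID_X;
		/* variants whose device ID ends in 0x5 ship a third table */
		if ((pci_device_id & 0xF) == 0x5)
			id = P2S_TABLE_ID_3;
	}
	return id;
}

int main(void)
{
	printf("device 0x74a5 -> table 0x%08x\n",
	       (unsigned int)pick_p2s_table(0x74a5, 0));
	printf("device 0x74a1 -> table 0x%08x\n",
	       (unsigned int)pick_p2s_table(0x74a1, 0));
	return 0;
}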
@@ -2378,7 +2378,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
size += sysfs_emit_at(buf, size, " ");
for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i],
(i == smu->power_profile_mode) ? "* " : " ");
size += sysfs_emit_at(buf, size, "\n");
@@ -2408,7 +2408,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
do { \
size += sysfs_emit_at(buf, size, "%-30s", #field); \
for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \
size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
size += sysfs_emit_at(buf, size, "\n"); \
} while (0)
......
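Note (editor's illustration, not part of the patch): the sysfs change prefixes each power-profile name with its selectable index ("%d %-14s") and widens the per-coefficient column from 16 to 18 characters so the table stays aligned. A tiny snprintf()-based sketch of the same cumulative formatting that sysfs_emit_at() performs into the sysfs buffer; the profile names are placeholders:

#include <stdio.h>

/* Placeholder profile names; only the formatting is of interest. */
static const char * const profile_name[] = {
	"BOOTUP_DEFAULT", "3D_FULL_SCREEN", "VIDEO"
};

int main(void)
{
	char buf[256];
	int size = 0;
	int active = 1, i;

	/* "%d %-14s" puts the selectable index in front of each name, the
	 * same shape the patched header row now emits. */
	for (i = 0; i < 3; i++)
		size += snprintf(buf + size, sizeof(buf) - size, "%d %-14s%s",
				 i, profile_name[i],
				 (i == active) ? "* " : "  ");
	snprintf(buf + size, sizeof(buf) - size, "\n");
	printf("%s", buf);
	return 0;
}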
@@ -115,7 +115,6 @@ static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
-MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
@@ -1824,50 +1823,6 @@ static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
}
-static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu,
-uint32_t size)
-{
-int ret = 0;
-/* message SMU to update the bad page number on SMUBUS */
-ret = smu_cmn_send_smc_msg_with_param(smu,
-SMU_MSG_SetNumBadMemoryPagesRetired,
-size, NULL);
-if (ret)
-dev_err(smu->adev->dev,
-"[%s] failed to message SMU to update bad memory pages number\n",
-__func__);
-return ret;
-}
-static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu,
-uint32_t size)
-{
-int ret = 0;
-/* message SMU to update the bad channel info on SMUBUS */
-ret = smu_cmn_send_smc_msg_with_param(smu,
-SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
-size, NULL);
-if (ret)
-dev_err(smu->adev->dev,
-"[%s] failed to message SMU to update bad memory pages channel info\n",
-__func__);
-return ret;
-}
-static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
-void *table)
-{
-int ret = 0;
-// TODO
-return ret;
-}
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -2015,12 +1970,9 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
.set_mp1_state = smu_v14_0_2_set_mp1_state,
.set_df_cstate = smu_v14_0_2_set_df_cstate,
-.send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num,
-.send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag,
#if 0
.gpo_control = smu_v14_0_gpo_control,
#endif
-.get_ecc_info = smu_v14_0_2_get_ecc_info,
};
void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
......