Commit 7a1b3f31 authored by Dave Airlie


Merge tag 'amd-drm-fixes-6.10-2024-06-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.10-2024-06-26:

amdgpu:
- SMU 14.x fix
- vram info parsing fix
- mode1 reset fix
- LTTPR fix
- Virtual display fix
- Avoid spurious error in PSP init
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240626221408.2019633-1-alexander.deucher@amd.com
parents 5fed0854 48880f96
@@ -400,7 +400,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 			mem_channel_number = vram_info->v30.channel_num;
 			mem_channel_width = vram_info->v30.channel_width;
 			if (vram_width)
-				*vram_width = mem_channel_number * (1 << mem_channel_width);
+				*vram_width = mem_channel_number * 16;
 			break;
 		default:
 			return -EINVAL;
......
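Note on the vram_width change above: the old expression scaled the channel count by a power of two derived from the raw channel_width field, while the fix multiplies by a constant 16 bits per channel, so the two only agree when that field happens to be 4. A standalone sketch of the arithmetic (the values below are hypothetical, not taken from any real vram_info v3.0 table):

#include <stdio.h>

int main(void)
{
	unsigned int channel_num = 8;    /* hypothetical channel count */
	unsigned int channel_width = 5;  /* hypothetical raw field value */

	/* old formula: channels * (1 << channel_width) */
	unsigned int old_width = channel_num * (1u << channel_width); /* 8 * 32 = 256 */
	/* fixed formula: channels * 16 bits per channel */
	unsigned int new_width = channel_num * 16;                     /* 8 * 16 = 128 */

	printf("old: %u bits, new: %u bits\n", old_width, new_width);
	return 0;
}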
@@ -5220,11 +5220,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 
 	dev_info(adev->dev, "GPU mode1 reset\n");
 
+	/* Cache the state before bus master disable. The saved config space
+	 * values are used in other cases like restore after mode-2 reset.
+	 */
+	amdgpu_device_cache_pci_state(adev->pdev);
+
 	/* disable BM */
 	pci_clear_master(adev->pdev);
 
-	amdgpu_device_cache_pci_state(adev->pdev);
-
 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
 		dev_info(adev->dev, "GPU smu mode1 reset\n");
 		ret = amdgpu_dpm_mode1_reset(adev);
......
@@ -640,6 +640,20 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
 	}
 }
 
+static bool psp_err_warn(struct psp_context *psp)
+{
+	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
+
+	/* This response indicates reg list is already loaded */
+	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
+	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
+	    cmd->resp.status == TEE_ERROR_CANCEL)
+		return false;
+
+	return true;
+}
+
 static int
 psp_cmd_submit_buf(struct psp_context *psp,
 		   struct amdgpu_firmware_info *ucode,
@@ -699,9 +713,12 @@ psp_cmd_submit_buf(struct psp_context *psp,
 			dev_warn(psp->adev->dev,
 				 "failed to load ucode %s(0x%X) ",
 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
-		dev_warn(psp->adev->dev,
-			 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
-			 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
-			 psp->cmd_buf_mem->resp.status);
+		if (psp_err_warn(psp))
+			dev_warn(
+				psp->adev->dev,
+				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+				psp->cmd_buf_mem->cmd_id,
+				psp->cmd_buf_mem->resp.status);
 		/* If any firmware (including CAP) load fails under SRIOV, it should
 		 * return failure to stop the VF from initializing.
......
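Note on the PSP change above: psp_err_warn() suppresses the warning only for the one case the diff's comment labels benign, a reg-list load on MP0 13.0.2 that returns TEE_ERROR_CANCEL because the reg list is already loaded; every other failure still warns. A standalone sketch of that filtering decision (simplified, not kernel code; only the status value comes from the diff):

#include <stdbool.h>
#include <stdio.h>

#define TEE_ERROR_CANCEL 0xFFFF0002u

/* Warn unless this is the benign "reg list already loaded" response. */
static bool should_warn(bool is_mp0_13_0_2, bool is_reg_list_load, unsigned int status)
{
	if (is_mp0_13_0_2 && is_reg_list_load && status == TEE_ERROR_CANCEL)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", should_warn(true, true, TEE_ERROR_CANCEL));  /* 0: suppressed */
	printf("%d\n", should_warn(true, true, 0xFFFF000Au));       /* 1: still warns */
	return 0;
}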
@@ -3,6 +3,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_vblank.h>
 
 #include "amdgpu.h"
@@ -314,7 +315,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
 		return 0;
 	}
 
 	afb = to_amdgpu_framebuffer(new_state->fb);
-	obj = new_state->fb->obj[0];
+
+	obj = drm_gem_fb_get_obj(new_state->fb, 0);
+	if (!obj) {
+		DRM_ERROR("Failed to get obj from framebuffer\n");
+		return -EINVAL;
+	}
+
 	rbo = gem_to_amdgpu_bo(obj);
 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
@@ -368,12 +375,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
 				   struct drm_plane_state *old_state)
 {
 	struct amdgpu_bo *rbo;
+	struct drm_gem_object *obj;
 	int r;
 
 	if (!old_state->fb)
 		return;
 
-	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+	obj = drm_gem_fb_get_obj(old_state->fb, 0);
+	if (!obj) {
+		DRM_ERROR("Failed to get obj from framebuffer\n");
+		return;
+	}
+
+	rbo = gem_to_amdgpu_bo(obj);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (unlikely(r)) {
 		DRM_ERROR("failed to reserve rbo before unpin\n");
......
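Note on the vkms change above: drm_gem_fb_get_obj() can return NULL (for example when the requested plane slot of the framebuffer is out of range or empty), which is why both call sites now check the result instead of dereferencing fb->obj[0] directly. A simplified, standalone sketch of that contract (not the upstream helper implementation):

#include <stddef.h>
#include <stdio.h>

#define MAX_FB_PLANES 4

struct fake_gem_object { int id; };

struct fake_framebuffer {
	struct fake_gem_object *obj[MAX_FB_PLANES];
};

/* Returns the GEM object backing the given plane, or NULL if there is none. */
static struct fake_gem_object *
fake_gem_fb_get_obj(struct fake_framebuffer *fb, unsigned int plane)
{
	if (plane >= MAX_FB_PLANES)
		return NULL;
	return fb->obj[plane]; /* may legitimately be NULL */
}

int main(void)
{
	struct fake_framebuffer fb = { .obj = { NULL } }; /* no backing object */

	if (!fake_gem_fb_get_obj(&fb, 0))
		puts("no GEM object for plane 0, bail out instead of crashing");
	return 0;
}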
@@ -465,6 +465,7 @@ struct psp_gfx_rb_frame
 
 enum tee_error_code {
 	TEE_SUCCESS			= 0x00000000,
+	TEE_ERROR_CANCEL		= 0xFFFF0002,
 	TEE_ERROR_NOT_SUPPORTED		= 0xFFFF000A,
 };
......
@@ -1590,9 +1590,17 @@ static bool retrieve_link_cap(struct dc_link *link)
 			return false;
 		}
 
-	if (dp_is_lttpr_present(link))
+	if (dp_is_lttpr_present(link)) {
 		configure_lttpr_mode_transparent(link);
 
+		// Echo TOTAL_LTTPR_CNT back downstream
+		core_link_write_dpcd(
+				link,
+				DP_TOTAL_LTTPR_CNT,
+				&link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
+				sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
+	}
+
 	/* Read DP tunneling information. */
 	status = dpcd_get_tunneling_device_data(link);
......
@@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
 #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
 #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
 
+/* Remove once drm_dp_helper.h is updated upstream */
+#ifndef DP_TOTAL_LTTPR_CNT
+#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */
+#endif
+
 #endif /* __DAL_DPCD_DEFS_H__ */
@@ -324,6 +324,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_set_mall_enable(struct smu_context *smu)
+{
+	int ret = 0;
+
+	if (!smu->ppt_funcs->set_mall_enable)
+		return 0;
+
+	ret = smu->ppt_funcs->set_mall_enable(smu);
+
+	return ret;
+}
+
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
@@ -1791,6 +1803,7 @@ static int smu_hw_init(void *handle)
 		smu_dpm_set_jpeg_enable(smu, true);
 		smu_dpm_set_vpe_enable(smu, true);
 		smu_dpm_set_umsch_mm_enable(smu, true);
+		smu_set_mall_enable(smu);
 		smu_set_gfx_cgpg(smu, true);
 	}
......
@@ -1394,6 +1394,11 @@ struct pptable_funcs {
 	 */
 	int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
 
+	/**
+	 * @set_mall_enable: Init MALL power gating control.
+	 */
+	int (*set_mall_enable)(struct smu_context *smu);
+
 	/**
 	 * @notify_rlc_state: Notify RLC power state to SMU.
 	 */
......
@@ -106,8 +106,8 @@
 #define PPSMC_MSG_DisableLSdma                  0x35 ///< Disable LSDMA
 #define PPSMC_MSG_SetSoftMaxVpe                 0x36 ///<
 #define PPSMC_MSG_SetSoftMinVpe                 0x37 ///<
-#define PPSMC_MSG_AllocMALLCache                0x38 ///< Allocating MALL Cache
-#define PPSMC_MSG_ReleaseMALLCache              0x39 ///< Releasing MALL Cache
+#define PPSMC_MSG_MALLPowerController           0x38 ///< Set MALL control
+#define PPSMC_MSG_MALLPowerState                0x39 ///< Enter/Exit MALL PG
 #define PPSMC_Message_Count                     0x3A ///< Total number of PPSMC messages
 /** @}*/
......
@@ -272,7 +272,9 @@
 	__SMU_DUMMY_MAP(SetSoftMinVpe), \
 	__SMU_DUMMY_MAP(GetMetricsVersion), \
 	__SMU_DUMMY_MAP(EnableUCLKShadow), \
-	__SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
+	__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
+	__SMU_DUMMY_MAP(MALLPowerController), \
+	__SMU_DUMMY_MAP(MALLPowerState),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
......
@@ -52,6 +52,19 @@
 #define mmMP1_SMN_C2PMSG_90			0x029a
 #define mmMP1_SMN_C2PMSG_90_BASE_IDX		0
 
+/* MALLPowerController message arguments (Defines for the Cache mode control) */
+#define SMU_MALL_PMFW_CONTROL		0
+#define SMU_MALL_DRIVER_CONTROL		1
+
+/*
+ * MALLPowerState message arguments
+ * (Defines for the Allocate/Release Cache mode if in driver mode)
+ */
+#define SMU_MALL_EXIT_PG		0
+#define SMU_MALL_ENTER_PG		1
+
+#define SMU_MALL_PG_CONFIG_DEFAULT	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
 	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -66,6 +79,12 @@
 	FEATURE_MASK(FEATURE_GFX_DPM_BIT)	| \
 	FEATURE_MASK(FEATURE_VPE_DPM_BIT))
 
+enum smu_mall_pg_config {
+	SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
+	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
+	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
+};
+
 static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			1),
 	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetPmfwVersion,		1),
@@ -113,6 +132,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(PowerDownUmsch,                 PPSMC_MSG_PowerDownUmsch,		1),
 	MSG_MAP(SetSoftMaxVpe,                  PPSMC_MSG_SetSoftMaxVpe,		1),
 	MSG_MAP(SetSoftMinVpe,                  PPSMC_MSG_SetSoftMinVpe,		1),
+	MSG_MAP(MALLPowerController,            PPSMC_MSG_MALLPowerController,		1),
+	MSG_MAP(MALLPowerState,                 PPSMC_MSG_MALLPowerState,		1),
 };
 
 static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -1423,6 +1444,57 @@ static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
 	return 0;
 }
 
+static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret = 0;
+
+	if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+								SMU_MALL_PMFW_CONTROL, NULL);
+		if (ret) {
+			dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n");
+			return ret;
+		}
+	} else {
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+								SMU_MALL_DRIVER_CONTROL, NULL);
+		if (ret) {
+			dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n");
+			return ret;
+		}
+
+		if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) {
+			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+									SMU_MALL_EXIT_PG, NULL);
+			if (ret) {
+				dev_err(adev->dev, "EXIT MALL PG Failure\n");
+				return ret;
+			}
+		} else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) {
+			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+									SMU_MALL_ENTER_PG, NULL);
+			if (ret) {
+				dev_err(adev->dev, "Enter MALL PG Failure\n");
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
+{
+	enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT;
+	int ret = 0;
+
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config);
+
+	return ret;
+}
+
 static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.check_fw_status = smu_v14_0_check_fw_status,
 	.check_fw_version = smu_v14_0_check_fw_version,
@@ -1454,6 +1526,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
 	.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
 	.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
+	.set_mall_enable = smu_v14_0_common_set_mall_enable,
 };
 
 static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
......
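Note on the SMU 14.0.1 change above: the new message pair splits MALL power-gating control into two steps. MALLPowerController selects the owner (PMFW or driver), and MALLPowerState is only used in driver mode to force the cache out of power gating (EXIT_PG) or into it (ENTER_PG); the default config in the diff keeps MALL always on under driver control. A standalone sketch of that decision logic, mirroring the function above (message names reused for readability, nothing here is kernel code):

#include <stdio.h>

enum smu_mall_pg_config {
	SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
};

/* Print the SMU message sequence the driver would issue for a given config. */
static void plan_mall_messages(enum smu_mall_pg_config cfg)
{
	if (cfg == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
		puts("MALLPowerController(SMU_MALL_PMFW_CONTROL)");
		return;
	}

	puts("MALLPowerController(SMU_MALL_DRIVER_CONTROL)");

	if (cfg == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON)
		puts("MALLPowerState(SMU_MALL_EXIT_PG)");
	else if (cfg == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF)
		puts("MALLPowerState(SMU_MALL_ENTER_PG)");
}

int main(void)
{
	/* SMU_MALL_PG_CONFIG_DEFAULT in the diff is DRIVER_CONTROL_ALWAYS_ON. */
	plan_mall_messages(SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON);
	return 0;
}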