Commit 485d41b0 authored by Dave Airlie

Merge tag 'amd-drm-fixes-5.9-2020-08-12' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

amd-drm-fixes-5.9-2020-08-12:

amdgpu:
- Fix allocation size
- SR-IOV fixes
- Vega20 SMU feature state caching fix
- Fix custom pptable handling
- Arcturus golden settings update
- Several display fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200813033610.4008-1-alexander.deucher@amd.com
parents f2ea2578 f41ed88c
@@ -462,7 +462,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
unsigned int pages;
int i, r;
*sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
......
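The hunk above replaces sizeof(*sg) with sizeof(**sgt) so the allocation size matches the object the out-parameter actually points to. Below is a minimal userspace sketch of the same pattern; the struct and names are made up for illustration, not the amdgpu types:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct sg_table; only the shape matters here. */
struct sg_table_like {
	unsigned int nents;
	unsigned int orig_nents;
	void *sgl;
};

static int alloc_table(struct sg_table_like **out)
{
	/* Bug pattern: sizeof(*some_other_pointer) still compiles but can
	 * allocate the size of the wrong object (or of a bare pointer).
	 * sizeof(**out) always tracks the type that *out points to. */
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	(*out)->nents = 0;
	return 0;
}

int main(void)
{
	struct sg_table_like *table;

	if (alloc_table(&table))
		return 1;
	printf("allocated %zu bytes\n", sizeof(*table));
	free(table);
	return 0;
}
```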
@@ -691,6 +691,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
......
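The Arcturus update adds one more SOC15_REG_GOLDEN_VALUE entry (mmTCP_UTCL1_CNTL1) to the golden-settings table. For readers unfamiliar with these tables, here is a hedged sketch of how a (register, and_mask, or_value) list is typically applied as a read-modify-write loop; the simulated register file and values are invented, and the exact masking in amdgpu's soc15_program_register_sequence() may differ:

```c
#include <stdint.h>
#include <stdio.h>

struct golden_reg {
	uint32_t offset;
	uint32_t and_mask;
	uint32_t or_value;
};

static uint32_t regs[16];   /* fake register file for the sketch */

static void program_golden(const struct golden_reg *table, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		uint32_t tmp = regs[table[i].offset];

		tmp &= ~table[i].and_mask;                    /* clear the governed bits */
		tmp |= table[i].or_value & table[i].and_mask; /* force the golden value  */
		regs[table[i].offset] = tmp;
	}
}

int main(void)
{
	static const struct golden_reg table[] = {
		{ 0x3, 0x30000000, 0x30000000 },  /* e.g. force two control bits on */
	};

	regs[0x3] = 0x0000ffff;
	program_golden(table, 1);
	printf("reg 0x3 = 0x%08x\n", regs[0x3]);
	return 0;
}
```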
@@ -135,6 +135,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -190,6 +196,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
{
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -326,6 +338,13 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
......
@@ -134,6 +134,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -189,6 +195,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(MMHUB, 0,
mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
@@ -318,6 +330,13 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
......
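The GFXHUB and MMHUB hunks above all add the same guard: on an SR-IOV virtual function, skip programming registers that only the physical function (host) may touch. A standalone sketch of that guard pattern, with a made-up device structure standing in for amdgpu_device and amdgpu_sriov_vf():

```c
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	bool is_sriov_vf;   /* stand-in for amdgpu_sriov_vf(adev) */
};

static void program_l2_cache_regs(struct fake_device *dev)
{
	/* These registers are owned by the PF/host; a VF write would fault
	 * or be silently dropped, so bail out before the whole sequence. */
	if (dev->is_sriov_vf) {
		printf("VF: skipping host-owned register programming\n");
		return;
	}

	printf("PF: programming L2 cache registers\n");
	/* register writes would go here */
}

int main(void)
{
	struct fake_device pf = { .is_sriov_vf = false };
	struct fake_device vf = { .is_sriov_vf = true };

	program_l2_cache_regs(&pf);
	program_l2_cache_regs(&vf);
	return 0;
}
```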
@@ -2196,6 +2196,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
......
@@ -3286,12 +3286,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
core_link_set_avmute(pipe_ctx, true);
}
dc->hwss.blank_stream(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
......
@@ -49,7 +49,7 @@
#define DCN_PANEL_CNTL_REG_LIST()\
DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \
......
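The panel-control fix swaps one DCE_PANEL_CNTL_SR entry for DCN_PANEL_CNTL_SR, so the PWRSEQ_REF_DIV entry expands against the DCN register map rather than the DCE one. A toy illustration of how the macro prefix decides which register name (and therefore offset) a list entry resolves to; the macros and offsets below are invented, not the real DC definitions:

```c
#include <stdio.h>

/* Invented offsets: the point is only that the prefix selects the map. */
#define mmDCE_LVTMA_PWRSEQ_REF_DIV 0x1111
#define mmDCN_LVTMA_PWRSEQ_REF_DIV 0x2222

#define DCE_PANEL_CNTL_SR(reg, block) mmDCE_ ## block ## _ ## reg
#define DCN_PANEL_CNTL_SR(reg, block) mmDCN_ ## block ## _ ## reg

int main(void)
{
	/* The typo made one list entry resolve against the wrong map. */
	printf("DCE expansion: 0x%x\n", DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA));
	printf("DCN expansion: 0x%x\n", DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA));
	return 0;
}
```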
@@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
switch (packet_index) {
case 0:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC0_FRAME_UPDATE, 1);
AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
break;
case 1:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC1_FRAME_UPDATE, 1);
AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
break;
case 2:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC2_FRAME_UPDATE, 1);
AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
break;
case 3:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC3_FRAME_UPDATE, 1);
AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
break;
case 4:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC4_FRAME_UPDATE, 1);
AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
break;
case 5:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC5_FRAME_UPDATE, 1);
AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
break;
case 6:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC6_FRAME_UPDATE, 1);
AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
break;
case 7:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC7_FRAME_UPDATE, 1);
AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
break;
default:
break;
......
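The encoder change switches the generic info-packet writes from the FRAME_UPDATE to the IMMEDIATE_UPDATE trigger bits (the matching field definitions are added in the header hunks below), so a new packet is latched right away instead of waiting for the next frame. A simplified model of the two update modes, not the actual DCN double-buffering semantics:

```c
#include <stdbool.h>
#include <stdio.h>

struct generic_packet {
	unsigned int pending;   /* value written by the driver */
	unsigned int active;    /* value the hardware is sending */
	bool frame_update;      /* latch pending at the next vsync */
};

static void write_packet(struct generic_packet *p, unsigned int value, bool immediate)
{
	p->pending = value;
	if (immediate)
		p->active = p->pending;   /* IMMEDIATE_UPDATE: takes effect now */
	else
		p->frame_update = true;   /* FRAME_UPDATE: wait for vsync */
}

static void vsync(struct generic_packet *p)
{
	if (p->frame_update) {
		p->active = p->pending;
		p->frame_update = false;
	}
}

int main(void)
{
	struct generic_packet p = { 0 };

	write_packet(&p, 0xA1, false);
	printf("frame update, before vsync: 0x%x\n", p.active);  /* still 0x0 */
	vsync(&p);
	printf("frame update, after vsync:  0x%x\n", p.active);  /* now 0xA1 */

	write_packet(&p, 0xB2, true);
	printf("immediate update:           0x%x\n", p.active);  /* 0xB2 right away */
	return 0;
}
```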
@@ -281,7 +281,14 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
@@ -345,7 +352,14 @@ struct dcn10_stream_enc_registers {
type AFMT_GENERIC2_FRAME_UPDATE;\
type AFMT_GENERIC3_FRAME_UPDATE;\
type AFMT_GENERIC4_FRAME_UPDATE;\
type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_FRAME_UPDATE;\
type AFMT_GENERIC6_FRAME_UPDATE;\
type AFMT_GENERIC7_FRAME_UPDATE;\
......
@@ -3141,7 +3141,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
......
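The bandwidth-validation hunk switches the pipe-array allocation from GFP_KERNEL to GFP_ATOMIC, presumably because this path can run in a context that must not sleep (DML calculations are wrapped in preemption-disabled FPU sections). A minimal kernel-module sketch of that rule, illustrative only and built against kernel headers rather than as a userspace program:

```c
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>

/* preempt_disable() stands in for the no-sleep region; a GFP_KERNEL
 * allocation there could sleep and trigger a "sleeping function called
 * from invalid context" warning, while GFP_ATOMIC never sleeps. */
static int __init gfp_demo_init(void)
{
	void *buf;

	preempt_disable();
	buf = kzalloc(4096, GFP_ATOMIC);
	preempt_enable();

	if (!buf)
		return -ENOMEM;
	kfree(buf);
	return 0;
}

static void __exit gfp_demo_exit(void) { }

module_init(gfp_demo_init);
module_exit(gfp_demo_exit);
MODULE_LICENSE("GPL");
```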
@@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
*/
frames_to_insert = mid_point_frames_floor;
} else if (mid_point_frames_floor < 2) {
/* Check if FLOOR would result in non-LFC. In this case
* choose to use CEIL
*/
frames_to_insert = mid_point_frames_ceil;
} else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
/* If choosing CEIL results in a frame duration that is
* closer to the mid point of the range.
* Choose CEIL
*/
if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
(delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
} else {
/* If choosing FLOOR results in a frame duration that is
* closer to the mid point of the range.
* Choose FLOOR
*/
frames_to_insert = mid_point_frames_floor;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
delta_from_mid_point_in_us_2;
}
/* Prefer current frame multiplier when BTR is enabled unless it drifts
* too far from the midpoint
*/
if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
} else {
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
delta_from_mid_point_in_us_2;
}
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
......
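The freesync change reworks how many times a long frame is repeated when content runs below the panel's minimum refresh rate: CEIL is preferred only when it keeps the per-flip duration above the panel minimum and is either closer to the range midpoint or the only option that keeps LFC active, otherwise FLOOR is used, and an extra BTR_DRIFT_MARGIN check keeps the current multiplier unless it drifts too far from the midpoint. A standalone arithmetic sketch of that selection, with invented numbers and the drift check omitted:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int min_duration_us = 6944;    /* fastest the panel allows (~144 Hz) */
	unsigned int max_duration_us = 20833;   /* slowest the panel allows (~48 Hz)  */
	unsigned int frame_time_us   = 41666;   /* ~24 fps content, below the range   */

	unsigned int mid_point_us = (min_duration_us + max_duration_us) / 2;
	unsigned int frames_floor = frame_time_us / mid_point_us;
	unsigned int frames_ceil  = frames_floor + 1;

	unsigned int delta_ceil  = abs((int)(frame_time_us / frames_ceil)  - (int)mid_point_us);
	unsigned int delta_floor = abs((int)(frame_time_us / frames_floor) - (int)mid_point_us);
	unsigned int frames_to_insert;

	/* Prefer CEIL when it stays above the minimum duration and is either
	 * closer to the midpoint or the only choice that keeps LFC active. */
	if (frame_time_us / frames_ceil > min_duration_us &&
	    (delta_ceil < delta_floor || frames_floor < 2))
		frames_to_insert = frames_ceil;
	else
		frames_to_insert = frames_floor;

	printf("repeat each frame %u times -> %u us per flip (midpoint %u us)\n",
	       frames_to_insert, frame_time_us / frames_to_insert, mid_point_us);
	return 0;
}
```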
@@ -979,10 +979,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint64_t features_enabled;
int i;
bool enabled;
int ret = 0;
int i, ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
@@ -990,17 +987,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
PP_ASSERT_WITH_CODE(!ret,
"[DisableAllSMUFeatures] Failed to get enabled smc features!",
return ret);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
}
for (i = 0; i < GNLD_FEATURES_MAX; i++)
data->smu_features[i].enabled = 0;
return 0;
}
@@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
data->uvd_power_gated = true;
data->vce_power_gated = true;
if (data->smu_features[GNLD_DPM_UVD].enabled)
data->uvd_power_gated = false;
if (data->smu_features[GNLD_DPM_VCE].enabled)
data->vce_power_gated = false;
}
static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
@@ -3230,10 +3212,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
uint64_t features_enabled;
uint64_t features_to_enable;
uint64_t features_to_disable;
int ret = 0;
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint64_t features_enabled, features_to_enable, features_to_disable;
int i, ret = 0;
bool enabled;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
@@ -3262,6 +3245,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
}
/* Update the cached feature enablement state */
ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
if (ret)
return ret;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
true : false;
data->smu_features[i].enabled = enabled;
}
return 0;
}
......
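The vega20 hunks fix the driver-side cache of SMU feature state: after PPSMC_MSG_DisableAllSmuFeatures the cached enabled flags are simply cleared (no re-read is needed), and vega20_set_ppfeature_status refreshes the cache from the firmware-reported bitmap after applying a new mask. A small standalone model of that caching pattern, with invented names standing in for the hwmgr structures:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURES_MAX 64

struct feature_cache {
	uint64_t bitmap[FEATURES_MAX];  /* per-feature bit in the firmware mask */
	bool enabled[FEATURES_MAX];     /* driver-side cached state */
};

/* Stand-in for asking the firmware which features are currently enabled. */
static uint64_t query_enabled_features(void)
{
	return 0x5;     /* pretend features 0 and 2 are enabled */
}

static void disable_all_features(struct feature_cache *c)
{
	/* The firmware "disable all" message would go here; the cache can
	 * then be cleared without a round trip to re-read the mask. */
	for (int i = 0; i < FEATURES_MAX; i++)
		c->enabled[i] = false;
}

static void set_feature_mask(struct feature_cache *c, uint64_t new_mask)
{
	(void)new_mask;  /* firmware enable/disable messages would go here */

	/* Refresh the cache so later queries reflect the real state. */
	uint64_t fw = query_enabled_features();
	for (int i = 0; i < FEATURES_MAX; i++)
		c->enabled[i] = (fw & c->bitmap[i]) != 0;
}

int main(void)
{
	struct feature_cache c = { 0 };

	for (int i = 0; i < FEATURES_MAX; i++)
		c.bitmap[i] = 1ULL << i;

	set_feature_mask(&c, 0x5);
	printf("feature 2 enabled: %d\n", c.enabled[2]);
	disable_all_features(&c);
	printf("feature 2 enabled: %d\n", c.enabled[2]);
	return 0;
}
```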