Commit c6c2097a authored by Dave Airlie

Merge branch 'drm-fixes-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Fixes for 4.20:
- Fix banding regression on 6 bpc panels
- Vega20 fix for six 4k displays
- Fix LRU handling in ttm_buffer_object_transfer
- Use proper MC firmware for newer polaris variants
- Vega20 powerplay fixes
- VCN suspend/resume fix for PCO
- Misc other fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181205192934.2857-1-alexander.deucher@amd.com
parents 534c6307 0a9b89b2
......@@ -233,7 +233,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 20
#define MAX_KIQ_REG_TRY 80
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
......
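For context on how these constants fit together: the KIQ register access path polls for a response up to MAX_KIQ_REG_TRY times, sleeping MAX_KIQ_REG_BAILOUT_INTERVAL milliseconds between attempts, so raising the try count from 20 to 80 stretches the worst-case wait before the access is reported as failed. A hedged, kernel-style sketch of that retry shape (kiq_poll_response() is a hypothetical helper, not the driver's actual fence wait):

```c
/* Hedged sketch of the retry loop these constants bound; the real path
 * lives in the KIQ read/write helpers.  kiq_poll_response() is a
 * hypothetical stand-in for waiting on the KIQ fence.
 */
static int kiq_access_with_retries(struct amdgpu_device *adev, u32 reg, u32 *val)
{
	int i;

	for (i = 0; i < MAX_KIQ_REG_TRY; i++) {
		if (kiq_poll_response(adev, reg, val))	/* hypothetical helper */
			return 0;
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);	/* back off 5 ms */
	}

	return -ETIME;	/* 80 tries adds up to roughly 400 ms of backoff */
}
```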
......@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)
......
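This hunk adds a VCN_JPEG row so the JPEG ring gets a context entity like the other IP blocks. The total computed just below is presumably the sum over that table; a minimal sketch of that summation over the array shown above:

```c
/* Minimal sketch of summing the per-IP table; the driver's
 * amdgput_ctx_total_num_entities() is expected to do the equivalent.
 */
static int total_num_entities_sketch(void)
{
	unsigned int i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}
```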
......@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->late_init_work);
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
......@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->late_init_work);
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
......
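Taken together, these two hunks move the flush of late_init_work, the delayed work item that runs the deferred IB tests, out of the per-query info ioctl and into amdgpu_driver_open_kms(), so each new file handle waits for late init exactly once instead of on every query. A hedged sketch of the general schedule-then-flush workqueue pattern (the device and callback names are illustrative, not amdgpu's):

```c
/* Illustrative delayed-work pattern (my_dev/my_late_init are made-up
 * names): schedule late init once at probe time, then flush it at the
 * first point that depends on its results.
 */
struct my_dev {
	struct delayed_work late_init_work;
};

static void my_late_init(struct work_struct *work)
{
	/* run deferred self-tests, enable remaining features, ... */
}

static void my_probe(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->late_init_work, my_late_init);
	schedule_delayed_work(&dev->late_init_work, msecs_to_jiffies(2000));
}

static int my_open(struct my_dev *dev)
{
	/* Block until late init has completed before handing out state. */
	flush_delayed_work(&dev->late_init_work);
	return 0;
}
```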
......@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
......@@ -224,12 +227,38 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
(adev->pdev->revision == 0xef) ||
(adev->pdev->revision == 0xff))))
chip_name = "polaris11_k";
else if ((adev->pdev->device == 0x67ef) &&
(adev->pdev->revision == 0xe2))
chip_name = "polaris11_k";
else
chip_name = "polaris11";
break;
case CHIP_POLARIS10:
if ((adev->pdev->device == 0x67df) &&
((adev->pdev->revision == 0xe1) ||
(adev->pdev->revision == 0xf7)))
chip_name = "polaris10_k";
else
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
if (((adev->pdev->device == 0x6987) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc3))) ||
((adev->pdev->device == 0x6981) &&
((adev->pdev->revision == 0x00) ||
(adev->pdev->revision == 0x01) ||
(adev->pdev->revision == 0x10))))
chip_name = "polaris12_k";
else
chip_name = "polaris12";
break;
case CHIP_FIJI:
......@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
u32 data, vbios_version;
u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
......@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
vbios_version = data & 0xf;
if (vbios_version == 0)
return 0;
if (!adev->gmc.fw)
return -EINVAL;
......
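The new device/revision checks only select a firmware name; the surrounding init code then, as in the existing flow, formats it into an amdgpu/<chip>_mc.bin path and passes it to request_firmware(), which is why each new _k variant also needs a MODULE_FIRMWARE() line above. A hedged sketch of that load step, with validation and error paths trimmed:

```c
/* Hedged sketch of how the chosen chip_name is consumed further down in
 * gmc_v8_0_init_microcode(); not the verbatim driver code.
 */
static int load_mc_firmware_sketch(struct amdgpu_device *adev,
				   const char *chip_name)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);

	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		dev_err(adev->dev, "failed to load %s\n", fw_name);

	return err;
}
```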
......@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
......@@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_stop(adev);
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->ready = false;
......
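Calling vcn_v1_0_set_powergating_state() here instead of vcn_v1_0_stop() matters for suspend/resume: the powergating entry point also records the current PG state, so a later suspend does not try to stop a VCN block that is already gated. A simplified sketch of that state-tracking shape, not the exact vcn_v1_0 implementation:

```c
/* Simplified sketch of the powergating entry point the hunk now calls;
 * the key point is the cur_state tracking, which turns repeated gate
 * requests (hw_fini followed by suspend) into no-ops.
 */
static int vcn_set_pg_state_sketch(struct amdgpu_device *adev,
				   enum amd_powergating_state state)
{
	int ret = 0;

	if (state == adev->vcn.cur_state)
		return 0;			/* already in that state */

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
```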
......@@ -2554,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info,
cea_revision = drm_connector->display_info.cea_rev;
strncpy(audio_info->display_name,
strscpy(audio_info->display_name,
edid_caps->display_name,
AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
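strncpy() is replaced because it neither guarantees NUL termination nor reports truncation, which is why the old call had to reserve a byte with the "- 1". strscpy() always terminates the destination and returns -E2BIG when the source did not fit, so the full buffer size can be passed. A short illustration of the new pattern (the helper name is arbitrary):

```c
/* copy_display_name() is an arbitrary helper name for illustration. */
static void copy_display_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	if (n == -E2BIG)
		pr_debug("display name truncated\n");

	/* The old pattern needed the "- 1" plus a manual terminator:
	 *   strncpy(dst, src, dst_size - 1);
	 *   dst[dst_size - 1] = '\0';
	 */
}
```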
......@@ -3042,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
......@@ -3063,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->freesync_capable = state->freesync_capable;
new_state->freesync_enable = state->freesync_enable;
new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
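Both max_bpc hunks are needed because a connector state field lives in two hooks: reset gives it its initial value and duplicate_state carries it across atomic commits; setting the default in reset but skipping the copy would silently drop the value back to 0 after the first commit. A minimal sketch of that pairing, with abbreviated struct names:

```c
/* Abbreviated sketch: a driver-private connector-state field must be
 * initialised in the reset hook and copied in the duplicate hook.
 */
struct sketch_connector_state {
	/* struct drm_connector_state base; comes first in the real code */
	u8 max_bpc;
};

static void sketch_reset(struct sketch_connector_state *state)
{
	state->max_bpc = 8;		/* the default the hunk above sets */
}

static void sketch_duplicate(const struct sketch_connector_state *old,
			     struct sketch_connector_state *new)
{
	new->max_bpc = old->max_bpc;	/* skip this and the value resets */
}
```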
......@@ -3650,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
mode->hdisplay = hdisplay;
mode->vdisplay = vdisplay;
mode->type &= ~DRM_MODE_TYPE_PREFERRED;
strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
return mode;
......
......@@ -2512,6 +2512,8 @@ static void pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;
......
......@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
/* Skip for suspend/resume case */
if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
&& adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
......
......@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = phm_pre_display_configuration_changed(hwmgr);
if (ret)
return ret;
ret = phm_set_cpu_power_state(hwmgr);
if (ret)
return ret;
......
......@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;
phm_pre_display_configuration_changed(hwmgr);
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)
......
......@@ -3589,8 +3589,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= sclk_table->count) {
if (sclk > sclk_table->dpm_levels[i-1].value) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
sclk_table->dpm_levels[i-1].value = sclk;
}
} else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
......@@ -3607,9 +3609,11 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= mclk_table->count) {
if (mclk > mclk_table->dpm_levels[i-1].value) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
mclk_table->dpm_levels[i-1].value = mclk;
}
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
......
......@@ -3266,9 +3266,11 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= sclk_table->count) {
if (sclk > sclk_table->dpm_levels[i-1].value) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
sclk_table->dpm_levels[i-1].value = sclk;
}
}
for (i = 0; i < mclk_table->count; i++) {
if (mclk == mclk_table->dpm_levels[i].value)
......@@ -3276,9 +3278,11 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= mclk_table->count) {
if (mclk > mclk_table->dpm_levels[i-1].value) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
mclk_table->dpm_levels[i-1].value = mclk;
}
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
......
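The smu7 and vega10 hunks apply the same fix: when the requested clock exceeds the highest entry in the DPM table, the top level is rewritten to the new value, and the corresponding OD_UPDATE flag must be set so the table is actually re-uploaded to the SMU; the restored lines set that flag right next to the value update. A condensed sketch of the shared pattern, with simplified structures:

```c
/* Condensed sketch of the shared pattern; the structures are simplified
 * stand-ins for the smu7/vega10 DPM tables.
 */
struct dpm_level_sketch { u32 value; };
struct dpm_table_sketch { u32 count; struct dpm_level_sketch dpm_levels[8]; };

static void note_new_top_level(struct dpm_table_sketch *tbl, u32 *need_update,
			       u32 requested, u32 od_update_flag)
{
	u32 top = tbl->count - 1;

	if (requested > tbl->dpm_levels[top].value) {
		/* Mark the table dirty so the new top level is re-uploaded. */
		*need_update |= od_update_flag;
		tbl->dpm_levels[top].value = requested;
	}
}
```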
......@@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
return i;
}
static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t min_freq;
int ret = 0;
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
......@@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
if (data->smu_features[GNLD_DPM_UCLK].enabled &&
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
......@@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled) {
if (data->smu_features[GNLD_DPM_UVD].enabled &&
(feature_mask & FEATURE_DPM_UVD_MASK)) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_VCE].enabled) {
if (data->smu_features[GNLD_DPM_VCE].enabled &&
(feature_mask & FEATURE_DPM_VCE_MASK)) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret;
}
static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t max_freq;
int ret = 0;
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
if (data->smu_features[GNLD_DPM_UCLK].enabled &&
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled) {
if (data->smu_features[GNLD_DPM_UVD].enabled &&
(feature_mask & FEATURE_DPM_UVD_MASK)) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_VCE].enabled) {
if (data->smu_features[GNLD_DPM_VCE].enabled &&
(feature_mask & FEATURE_DPM_VCE_MASK)) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
......@@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
......@@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
......@@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
int ret = 0;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
......@@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
......@@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
ret = vega20_upload_dpm_min_level(hwmgr);
ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega20_upload_dpm_max_level(hwmgr);
ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
......
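The vega20 change threads a feature_mask through vega20_upload_dpm_min_level() and vega20_upload_dpm_max_level() so callers can re-send soft limits only for the clock domains they actually touched: the forced-level paths keep the old behaviour by passing 0xFFFFFFFF, while vega20_force_clock_level() now restricts itself to GFXCLK or UCLK. A hedged sketch of that bitmask-gating pattern, with simplified stand-in names:

```c
/* Hedged sketch of the feature_mask gating added above; the structures
 * and smu_send_soft_min() are simplified stand-ins, not the vega20 code.
 */
#define SKETCH_DPM_GFXCLK_MASK	(1U << 0)
#define SKETCH_DPM_UCLK_MASK	(1U << 1)

enum sketch_clk { SKETCH_GFXCLK, SKETCH_UCLK };

struct sketch_hwmgr {
	bool gfxclk_dpm_enabled, uclk_dpm_enabled;
	u32 gfx_soft_min, mem_soft_min;
};

static int smu_send_soft_min(struct sketch_hwmgr *h, enum sketch_clk clk,
			     u32 freq)
{
	return 0;	/* stands in for the PPSMC_MSG_SetSoftMinByFreq message */
}

static int upload_dpm_min_level_sketch(struct sketch_hwmgr *h, u32 feature_mask)
{
	int ret = 0;

	/* Only touch the clock domains the caller flagged as changed. */
	if (h->gfxclk_dpm_enabled && (feature_mask & SKETCH_DPM_GFXCLK_MASK))
		ret = smu_send_soft_min(h, SKETCH_GFXCLK, h->gfx_soft_min);
	if (ret)
		return ret;

	if (h->uclk_dpm_enabled && (feature_mask & SKETCH_DPM_UCLK_MASK))
		ret = smu_send_soft_min(h, SKETCH_UCLK, h->mem_soft_min);

	return ret;
}
```

Callers that changed every domain pass 0xFFFFFFFF; a request that only forced sclk levels passes just the GFXCLK mask, avoiding redundant SMU messages for the other clocks.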
......@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
ttm_bo_get(bo);
fbo->base = *bo;
fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
ttm_bo_get(bo);
fbo->bo = bo;
/**
......