Commit 52e81b69 authored by Dave Airlie

Merge tag 'amd-drm-fixes-5.16-2021-12-01' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.16-2021-12-01:

amdgpu:
- IP discovery based enumeration fixes
- vkms fixes
- DSC fixes for DP MST
- Audio fix for hotplug with tiled displays
- Misc display fixes
- DP tunneling fix
- DP fix
- Aldebaran fix

amdkfd:
- Locking fix
- Static checker fix
- Fix double free
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211201232802.5801-1-alexander.deucher@amd.com
parents 8b233a83 3abfe30d
@@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
     struct sg_table *sg = NULL;
     uint64_t user_addr = 0;
     struct amdgpu_bo *bo;
-    struct drm_gem_object *gobj;
+    struct drm_gem_object *gobj = NULL;
     u32 domain, alloc_domain;
     u64 alloc_flags;
     int ret;
@@ -1506,14 +1506,16 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
     remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
     drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
-    drm_gem_object_put(gobj);
     /* Don't unreserve system mem limit twice */
     goto err_reserve_limit;
 err_bo_create:
     unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
     mutex_destroy(&(*mem)->lock);
-    kfree(*mem);
+    if (gobj)
+        drm_gem_object_put(gobj);
+    else
+        kfree(*mem);
 err:
     if (sg) {
         sg_free_table(sg);
...
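
The amdkfd hunks above are the double-free fix: once the allocation has been wrapped in a GEM object, the object's put() path owns and frees *mem, so the error path must not kfree() it a second time. A compilable sketch of the pattern with hypothetical stand-in types (not the real amdgpu structures):

#include <stdio.h>
#include <stdlib.h>

struct kgd_mem { int dummy; };
struct gem_obj { struct kgd_mem *mem; };

/* The put() path owns `mem` and frees it exactly once. */
static void gem_obj_put(struct gem_obj *gobj)
{
    free(gobj->mem);
    free(gobj);
}

static int alloc_memory(struct kgd_mem **mem)
{
    struct gem_obj *gobj = NULL;   /* NULL until ownership transfers */
    int ret = -12;                 /* stand-in for -ENOMEM */

    *mem = calloc(1, sizeof(**mem));
    if (!*mem)
        return ret;

    gobj = calloc(1, sizeof(*gobj));
    if (!gobj)
        goto err;
    gobj->mem = *mem;              /* ownership transferred to gobj */

    goto err;                      /* simulate a later failure */

err:
    if (gobj)
        gem_obj_put(gobj);         /* frees *mem through the object */
    else
        free(*mem);                /* never wrapped: free it directly */
    *mem = NULL;
    return ret;
}

int main(void)
{
    struct kgd_mem *mem;

    printf("alloc_memory() = %d\n", alloc_memory(&mem));
    return 0;
}
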
@@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
     /* disable all interrupts */
     amdgpu_irq_disable_all(adev);
     if (adev->mode_info.mode_config_initialized){
-        if (!amdgpu_device_has_dc_support(adev))
+        if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
             drm_helper_force_disable_all(adev_to_drm(adev));
         else
             drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 {
     int r;

+    amdgpu_amdkfd_pre_reset(adev);
+
     if (from_hypervisor)
         r = amdgpu_virt_request_full_gpu(adev, true);
     else
@@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
     amdgpu_irq_gpu_reset_resume_helper(adev);
     r = amdgpu_ib_ring_tests(adev);
+    amdgpu_amdkfd_post_reset(adev);

 error:
     if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

-        amdgpu_amdkfd_pre_reset(tmp_adev);
+        if (!amdgpu_sriov_vf(tmp_adev))
+            amdgpu_amdkfd_pre_reset(tmp_adev);

         /*
          * Mark these ASICs to be reseted as untracked first
@@ -5129,7 +5133,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
             drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
         }

-        if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+        if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
             drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
         }
@@ -5148,9 +5152,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 skip_sched_resume:
     list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-        /* unlock kfd */
-        if (!need_emergency_restart)
+        /* unlock kfd: SRIOV would do it separately */
+        if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
             amdgpu_amdkfd_post_reset(tmp_adev);
         /* kfd_post_reset will do nothing if kfd device is not initialized,
          * need to bring up kfd here if it's not be initialized before
...
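
These amdgpu_device hunks rebalance KFD quiescing around GPU reset: the SR-IOV path now brackets KFD inside amdgpu_device_reset_sriov(), while the generic recovery loop skips the bracket for VFs. A compile-and-run sketch of that ordering, with stand-in names rather than the amdgpu API:

#include <stdbool.h>
#include <stdio.h>

static bool sriov_vf = true;

static void kfd_pre_reset(void)  { puts("kfd quiesced"); }
static void kfd_post_reset(void) { puts("kfd resumed"); }
static void do_hw_reset(void)    { puts("hw reset"); }

/* VF path: brackets KFD itself (the hunks at 4289/4318 above). */
static void reset_sriov(void)
{
    kfd_pre_reset();
    do_hw_reset();
    kfd_post_reset();
}

/* Generic recovery: skips the bracket for VFs (hunks at 5033/5152),
 * so KFD is quiesced and resumed exactly once on either path. */
static void gpu_recover(void)
{
    if (!sriov_vf)
        kfd_pre_reset();

    if (sriov_vf)
        reset_sriov();
    else
        do_hw_reset();

    if (!sriov_vf)
        kfd_post_reset();
}

int main(void)
{
    gpu_recover();
    return 0;
}
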
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
     [HDP_HWIP]    = HDP_HWID,
     [SDMA0_HWIP]  = SDMA0_HWID,
     [SDMA1_HWIP]  = SDMA1_HWID,
+    [SDMA2_HWIP]  = SDMA2_HWID,
+    [SDMA3_HWIP]  = SDMA3_HWID,
     [MMHUB_HWIP]  = MMHUB_HWID,
     [ATHUB_HWIP]  = ATHUB_HWID,
     [NBIO_HWIP]   = NBIF_HWID,
@@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
     case IP_VERSION(3, 0, 64):
     case IP_VERSION(3, 1, 1):
     case IP_VERSION(3, 0, 2):
+    case IP_VERSION(3, 0, 192):
         amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
         if (!amdgpu_sriov_vf(adev))
             amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
...
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
         break;
     case IP_VERSION(3, 0, 0):
     case IP_VERSION(3, 0, 64):
+    case IP_VERSION(3, 0, 192):
         if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
             fw_name = FIRMWARE_SIENNA_CICHLID;
         else
...
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
     int i = 0;

     for (i = 0; i < adev->mode_info.num_crtc; i++)
-        if (adev->mode_info.crtcs[i])
-            hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+        if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
+            hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);

     kfree(adev->mode_info.bios_hardcoded_edid);
     kfree(adev->amdgpu_vkms_output);
...
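
The vkms fix above stops dereferencing mode_info.crtcs[] and instead cancels the hrtimer embedded in each vkms output, using its callback pointer to detect whether the timer was ever initialized. A small stand-in sketch of that guard (not the kernel hrtimer API):

#include <stddef.h>
#include <stdio.h>

struct fake_hrtimer { void (*function)(struct fake_hrtimer *); };
struct vkms_output  { struct fake_hrtimer vblank_hrtimer; };

static void fake_cb(struct fake_hrtimer *t) { (void)t; }
static void fake_hrtimer_cancel(struct fake_hrtimer *t) { t->function = NULL; }

static void sw_fini(struct vkms_output *out, int num_crtc)
{
    for (int i = 0; i < num_crtc; i++)
        if (out[i].vblank_hrtimer.function)   /* was it initialized? */
            fake_hrtimer_cancel(&out[i].vblank_hrtimer);
}

int main(void)
{
    struct vkms_output outs[2] = { { { fake_cb } }, { { NULL } } };

    sw_fini(outs, 2);    /* cancels only the armed timer */
    puts("teardown ok");
    return 0;
}
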
@@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)

     gfx_v9_0_cp_enable(adev, false);

-    /* Skip suspend with A+A reset */
-    if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
-        dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+    /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+    if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+        (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+        dev_dbg(adev->dev, "Skipping RLC halt\n");
         return 0;
     }
...
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
     switch (adev->ip_versions[UVD_HWIP][0]) {
     case IP_VERSION(3, 0, 0):
     case IP_VERSION(3, 0, 64):
+    case IP_VERSION(3, 0, 192):
         if (amdgpu_sriov_vf(adev)) {
             if (encode)
                 *codecs = &sriov_sc_video_codecs_encode;
...
@@ -1574,7 +1574,6 @@ svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
 static void svm_range_restore_work(struct work_struct *work)
 {
     struct delayed_work *dwork = to_delayed_work(work);
-    struct amdkfd_process_info *process_info;
     struct svm_range_list *svms;
     struct svm_range *prange;
     struct kfd_process *p;
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
      * the lifetime of this thread, kfd_process and mm will be valid.
      */
     p = container_of(svms, struct kfd_process, svms);
-    process_info = p->kgd_process_info;
     mm = p->mm;
     if (!mm)
         return;

-    mutex_lock(&process_info->lock);
     svm_range_list_lock_and_flush_work(svms, mm);
     mutex_lock(&svms->lock);
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
     mutex_unlock(&svms->lock);
     mmap_write_unlock(mm);
-    mutex_unlock(&process_info->lock);

     /* If validation failed, reschedule another attempt */
     if (evicted_ranges) {
@@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
     if (atomic_read(&svms->drain_pagefaults)) {
         pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+        r = 0;
         goto out;
     }
@@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
     mm = get_task_mm(p->lead_thread);
     if (!mm) {
         pr_debug("svms 0x%p failed to get mm\n", svms);
+        r = 0;
         goto out;
     }
@@ -2660,6 +2658,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
     if (svm_range_skip_recover(prange)) {
         amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+        r = 0;
         goto out_unlock_range;
     }
@@ -2668,6 +2667,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
     if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
         pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
              svms, prange->start, prange->last);
+        r = 0;
         goto out_unlock_range;
     }
@@ -3177,7 +3177,6 @@ static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
            uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
 {
-    struct amdkfd_process_info *process_info = p->kgd_process_info;
     struct mm_struct *mm = current->mm;
     struct list_head update_list;
     struct list_head insert_list;
@@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
     svms = &p->svms;

-    mutex_lock(&process_info->lock);
-
     svm_range_list_lock_and_flush_work(svms, mm);

     r = svm_range_is_valid(p, start, size);
@@ -3273,8 +3270,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
     mutex_unlock(&svms->lock);
     mmap_read_unlock(mm);
 out:
-    mutex_unlock(&process_info->lock);
-
     pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
          &p->svms, start, start + size - 1, r);
...
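
The r = 0 additions above are the static-checker fix: each early-out that means "fault handled or intentionally dropped" must set the return code explicitly, so the function cannot return an uninitialized or stale value. A minimal sketch of the pattern:

#include <stdbool.h>
#include <stdio.h>

static bool draining = true;

static int restore_pages(void)
{
    int r = -14; /* stand-in for a stale error (-EFAULT) */

    if (draining) {
        r = 0;   /* fault dropped on purpose: report success */
        goto out;
    }

    r = 0;       /* actual recovery path */
out:
    return r;
}

int main(void)
{
    printf("restore_pages() = %d\n", restore_pages());
    return 0;
}
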
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
             ret = -EINVAL;
             goto cleanup;
         }

+        if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+            (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+            DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+            ret = -EINVAL;
+            goto cleanup;
+        }
+
     }

 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
...
@@ -36,6 +36,8 @@
 #include "dm_helpers.h"

 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
 #include "i2caux_interface.h"
 #include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };

 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+    if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+        (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+        link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+        return true;
+
+    return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
     struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
     u8 *dsc_branch_dec_caps = NULL;

     aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
     /*
      * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
      * because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
      * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
      *
      */
-    if (!aconnector->dsc_aux && !port->parent->port_parent)
+    if (!aconnector->dsc_aux && !port->parent->port_parent &&
+        needs_dsc_aux_workaround(aconnector->dc_link))
         aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
     if (!aconnector->dsc_aux)
         return false;
...
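
The MST hunks above replace the compile-time CONFIG_HP_HOOK_WORKAROUND guard with needs_dsc_aux_workaround(), a runtime quirk check keyed on the dock's DPCD branch ID, revision, and sink count. A sketch of that predicate with simplified stand-in fields (not the real dc_link layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BRANCH_DEV_ID_90CC24 0x90CC24
#define DPCD_REV_12 0x12
#define DPCD_REV_14 0x14

struct dpcd_caps {
    uint32_t branch_dev_id;
    uint8_t  dpcd_rev;
    uint8_t  sink_count;
};

static bool needs_dsc_aux_workaround(const struct dpcd_caps *c)
{
    return c->branch_dev_id == BRANCH_DEV_ID_90CC24 &&
           (c->dpcd_rev == DPCD_REV_14 || c->dpcd_rev == DPCD_REV_12) &&
           c->sink_count >= 2;   /* dock driving at least two sinks */
}

int main(void)
{
    struct dpcd_caps dock = { BRANCH_DEV_ID_90CC24, DPCD_REV_14, 2 };

    printf("workaround needed: %d\n", needs_dsc_aux_workaround(&dock));
    return 0;
}
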
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
         dal_ddc_service_set_transaction_type(link->ddc,
                              sink_caps->transaction_type);

+#if defined(CONFIG_DRM_AMD_DC_DCN)
+        /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+         * reports DSC support.
+         */
+        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+            link->type == dc_connection_mst_branch &&
+            link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+            link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+            !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+            link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
         /* In case of fallback to SST when topology discovery below fails
          * HDCP caps will be querried again later by the upper layer (caller
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
             LINK_INFO("link=%d, mst branch is now Disconnected\n",
                   link->link_index);

+            /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+            if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+                link->wa_flags.dpia_mst_dsc_always_on = false;
+
             dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);

             link->mst_stream_alloc_table.stream_count = 0;
...
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
         }

         for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
-            lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+            lt_settings->dpcd_lane_settings[lane].raw = 0;
     }

     if (status == LINK_TRAINING_SUCCESS) {
...
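
The link-training fix clears the whole DPCD lane-setting byte via the union's .raw member instead of resetting only VOLTAGE_SWING_SET, so pre-emphasis and the MAX_*_REACHED flags are cleared too. A stand-in sketch of the union-with-raw pattern (simplified layout, not the exact DPCD register definition):

#include <stdint.h>
#include <stdio.h>

union lane_setting {
    struct {
        uint8_t VOLTAGE_SWING_SET        : 2;
        uint8_t MAX_SWING_REACHED        : 1;
        uint8_t PRE_EMPHASIS_SET         : 2;
        uint8_t MAX_PRE_EMPHASIS_REACHED : 1;
        uint8_t reserved                 : 2;
    } bits;
    uint8_t raw;
};

int main(void)
{
    union lane_setting lanes[4] = { 0 };

    /* this bit would survive a field-by-field reset of the swing only */
    lanes[0].bits.MAX_SWING_REACHED = 1;

    for (int lane = 0; lane < 4; lane++)
        lanes[lane].raw = 0;   /* clears every field at once */

    printf("lane0 raw = 0x%02x\n", lanes[0].raw);
    return 0;
}
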
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
     if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
         return false;

+    // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
+    if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+        return false;
+
     return true;
 }
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
     if (!new_ctx)
         return DC_ERROR_UNEXPECTED;

-#if defined(CONFIG_DRM_AMD_DC_DCN)
-    /*
-     * Update link encoder to stream assignment.
-     * TODO: Split out reason allocation from validation.
-     */
-    if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
-        dc->res_pool->funcs->link_encs_assign(
-            dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
-
     if (dc->res_pool->funcs->validate_global) {
         result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
         if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
             result = DC_FAIL_BANDWIDTH_VALIDATE;

+#if defined(CONFIG_DRM_AMD_DC_DCN)
+    /*
+     * Only update link encoder to stream assignment after bandwidth validation passed.
+     * TODO: Split out assignment and validation.
+     */
+    if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+        dc->res_pool->funcs->link_encs_assign(
+            dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
     return result;
 }
...
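
The dc_validate_global_state() hunks move the link-encoder assignment from before validation to after it, gated on result == DC_OK, so the side effect only happens for contexts that passed bandwidth validation. A compile-only sketch of that validate-then-commit ordering (stand-in names):

#include <stdbool.h>
#include <stdio.h>

typedef enum { DC_OK, DC_FAIL_BANDWIDTH_VALIDATE } dc_status;

static bool validate_bandwidth(void)    { return true; }
static void assign_link_encoders(void)  { puts("encoders assigned"); }

static dc_status validate_global_state(bool fast_validate)
{
    dc_status result = DC_OK;

    if (!validate_bandwidth())
        result = DC_FAIL_BANDWIDTH_VALIDATE;

    /* previously ran unconditionally before any validation */
    if (result == DC_OK && !fast_validate)
        assign_link_encoders();

    return result;
}

int main(void)
{
    return validate_global_state(false) == DC_OK ? 0 : 1;
}
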
@@ -508,7 +508,8 @@ union dpia_debug_options {
         uint32_t disable_dpia:1;
         uint32_t force_non_lttpr:1;
         uint32_t extend_aux_rd_interval:1;
-        uint32_t reserved:29;
+        uint32_t disable_mst_dsc_work_around:1;
+        uint32_t reserved:28;
     } bits;
     uint32_t raw;
 };
...
@@ -191,6 +191,8 @@ struct dc_link {
         bool dp_skip_DID2;
         bool dp_skip_reset_segment;
         bool dp_mot_reset_segment;
+        /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+        bool dpia_mst_dsc_always_on;
     } wa_flags;
     struct link_mst_stream_allocation_table mst_stream_alloc_table;
...
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
             dev_err(adev->dev, "Failed to disable smu features.\n");
     }

-    if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+    if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
         adev->gfx.rlc.funcs->stop)
         adev->gfx.rlc.funcs->stop(adev);
...