Commit 2a1fe39a authored by Mario Limonciello, committed by Alex Deucher

drm/amd: Fix logic error in sienna_cichlid_update_pcie_parameters()

While aligning SMU11 with the SMU13 implementation, an assumption was made that
`dpm_context->dpm_tables.pcie_table` was populated during dpm table initialization,
as it is in SMU13, but it isn't.

So restore some of the original logic and instead just check
amdgpu_device_pcie_dynamic_switching_supported() to decide whether to hardcode
values, erring on the side of performance.
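
The restored decision can be sketched in isolation as a minimal user-space
model. This is an illustration, not the kernel code itself: pptable_gen and
pptable_lane stand in for the PcieGenSpeed/PcieLaneCount PPTable entries, and
dynamic_switching for the result of
amdgpu_device_pcie_dynamic_switching_supported().

	#include <stdint.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))
	#define MAX(a, b)	((a) > (b) ? (a) : (b))
	#define NUM_LINK_LEVELS	2

	/* Clamp the PPTable bounds against the platform caps, then pick
	 * per-level values the way the fixed function does. */
	static void pick_pcie_levels(const uint8_t pptable_gen[2],
				     const uint8_t pptable_lane[2],
				     uint32_t pcie_gen_cap,
				     uint32_t pcie_width_cap,
				     int dynamic_switching,
				     uint32_t pcie_gen[NUM_LINK_LEVELS],
				     uint32_t pcie_lane[NUM_LINK_LEVELS])
	{
		uint32_t min_gen  = MAX(0, pptable_gen[0]);
		uint32_t max_gen  = MIN(pcie_gen_cap, pptable_gen[1]);
		uint32_t min_lane = MAX(1, pptable_lane[0]);
		uint32_t max_lane = MIN(pcie_width_cap, pptable_lane[1]);

		/* Keep min <= max even when the table and caps disagree. */
		min_gen  = MIN(min_gen, max_gen);
		min_lane = MIN(min_lane, max_lane);

		/* Level 0: minimum when dynamic switching works, otherwise
		 * hardcode the maximum. Level 1 always gets the maximum. */
		pcie_gen[0]  = dynamic_switching ? min_gen : max_gen;
		pcie_lane[0] = dynamic_switching ? min_lane : max_lane;
		pcie_gen[1]  = max_gen;
		pcie_lane[1] = max_lane;
	}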

Cc: stable@vger.kernel.org # 6.1+
Reported-and-tested-by: Umio Yasuno <coelacanth_dream@protonmail.com>
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/1447#note_2101382
Fixes: e701156c ("drm/amd: Align SMU11 SMU_MSG_OverridePcieParameters implementation with SMU13")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5d061675
@@ -2082,36 +2082,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 	return ret;
 }
 
+#define MAX(a, b)	((a) > (b) ? (a) : (b))
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 						 uint32_t pcie_gen_cap,
 						 uint32_t pcie_width_cap)
 {
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
-	u32 smu_pcie_arg;
+	uint8_t *table_member1, *table_member2;
+	uint32_t min_gen_speed, max_gen_speed;
+	uint32_t min_lane_width, max_lane_width;
+	uint32_t smu_pcie_arg;
 	int ret, i;
 
-	/* PCIE gen speed and lane width override */
-	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
-		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
-			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
+	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
 
-		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
-			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
+	min_gen_speed = MAX(0, table_member1[0]);
+	max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+	min_gen_speed = min_gen_speed > max_gen_speed ?
+			max_gen_speed : min_gen_speed;
+	min_lane_width = MAX(1, table_member2[0]);
+	max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+	min_lane_width = min_lane_width > max_lane_width ?
+			max_lane_width : min_lane_width;
 
-		/* Force all levels to use the same settings */
-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			pcie_table->pcie_gen[i] = pcie_gen_cap;
-			pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
+	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+		pcie_table->pcie_gen[0] = max_gen_speed;
+		pcie_table->pcie_lane[0] = max_lane_width;
 	} else {
-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-				pcie_table->pcie_gen[i] = pcie_gen_cap;
-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
-				pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
+		pcie_table->pcie_gen[0] = min_gen_speed;
+		pcie_table->pcie_lane[0] = min_lane_width;
 	}
+	pcie_table->pcie_gen[1] = max_gen_speed;
+	pcie_table->pcie_lane[1] = max_lane_width;
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
 		smu_pcie_arg = (i << 16 |
...
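
The loop truncated above packs each link level into one argument word for
SMU_MSG_OverridePcieParameters. A sketch of that encoding, assuming the usual
SMU11 layout (link level in bits 16 and up, gen-speed code in bits 8-15,
lane-width code in bits 0-7), consistent with the "i << 16 |" visible at the
cut-off:

	#include <stdint.h>

	/* Assumed argument layout for SMU_MSG_OverridePcieParameters. */
	static uint32_t smu_pcie_arg_encode(uint32_t level, uint32_t gen,
					    uint32_t lane)
	{
		return level << 16 | gen << 8 | lane;
	}

	/* Example: smu_pcie_arg_encode(1, 3, 6) == 0x10306, i.e. link level 1
	 * with (assumed) gen-speed code 3 and lane-width code 6. */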