Commit 05656e5e authored by Alex Deucher

drm/amdgpu: used cached pcie gen info for SI (v2)

Rather than querying it every time we need it.
Also fixes a crash in VM pass-through when there is no
root bridge, since the cached value fetch already checks
for this properly.

v2: fix includes

Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=105244
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Rex Zhu <rezhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
parent a8d0fb2f
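The gist of the change: read the PCIe link-speed capability once at init time and keep it in the cached adev->pm.pcie_gen_mask, instead of calling drm_pcie_get_speed_cap_mask() every time the gen info is needed; the cached path already handles the case where no root bridge exists (VM pass-through). Below is a minimal user-space sketch of that caching pattern, using simplified stand-in types and hypothetical helper names rather than the driver's real ones:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the driver state (hypothetical names). */
    #define SPEED_SUPPORT_GEN2 (1 << 1)
    #define SPEED_SUPPORT_GEN3 (1 << 2)

    struct device_state {
    	unsigned int pcie_gen_mask;	/* cached once at init */
    	bool has_root_bridge;
    };

    /* Done once during init: probe the bus, tolerating a missing root bridge. */
    static void cache_pcie_gen_mask(struct device_state *dev)
    {
    	if (!dev->has_root_bridge) {
    		dev->pcie_gen_mask = 0;	/* pass-through: nothing to probe */
    		return;
    	}
    	/* The real driver derives this from the root port's link capabilities. */
    	dev->pcie_gen_mask = SPEED_SUPPORT_GEN2 | SPEED_SUPPORT_GEN3;
    }

    /* Hot path: consult the cached mask instead of re-querying the bus. */
    static void enable_fast_link(const struct device_state *dev)
    {
    	if (!(dev->pcie_gen_mask & (SPEED_SUPPORT_GEN2 | SPEED_SUPPORT_GEN3)))
    		return;	/* nothing faster than gen1 supported; bail out safely */
    	printf("enabling faster PCIe link speeds\n");
    }

    int main(void)
    {
    	struct device_state dev = { .pcie_gen_mask = 0, .has_root_bridge = false };

    	cache_pcie_gen_mask(&dev);
    	enable_fast_link(&dev);	/* does nothing when no root bridge was found */
    	return 0;
    }

In the patch itself the same idea shows up as checks against CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2/GEN3 bits in adev->pm.pcie_gen_mask, as the diff below shows.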
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,6 +31,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include "amdgpu_powerplay.h"
 #include "sid.h"
 #include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
 	int bridge_pos, gpu_pos;
-	u32 speed_cntl, mask, current_data_rate;
-	int ret, i;
+	u32 speed_cntl, current_data_rate;
+	int i;
 	u16 tmp16;
 
 	if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate == 2) {
 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
 			return;
 		}
 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-	} else if (mask & DRM_PCIE_SPEED_50) {
+	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
 		if (current_data_rate == 1) {
 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
 			return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!gpu_pos)
 		return;
 
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate != 2) {
 			u16 bridge_cfg, gpu_cfg;
 			u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
-	if (mask & DRM_PCIE_SPEED_80)
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 		tmp16 |= 3;
-	else if (mask & DRM_PCIE_SPEED_50)
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 		tmp16 |= 2;
 	else
 		tmp16 |= 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #include "sid.h"
 #include "r600_dpm.h"
 #include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
 	}
 }
 
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
-						       u32 sys_mask,
-						       enum amdgpu_pcie_gen asic_gen,
-						       enum amdgpu_pcie_gen default_gen)
-{
-	switch (asic_gen) {
-	case AMDGPU_PCIE_GEN1:
-		return AMDGPU_PCIE_GEN1;
-	case AMDGPU_PCIE_GEN2:
-		return AMDGPU_PCIE_GEN2;
-	case AMDGPU_PCIE_GEN3:
-		return AMDGPU_PCIE_GEN3;
-	default:
-		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
-			return AMDGPU_PCIE_GEN3;
-		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
-			return AMDGPU_PCIE_GEN2;
-		else
-			return AMDGPU_PCIE_GEN1;
-	}
-	return AMDGPU_PCIE_GEN1;
-}
-
 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 				   u32 *p, u32 *u)
 {
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
 							table->ACPIState.levels[0].vddc.index,
 							&table->ACPIState.levels[0].std_vddc);
 	}
-	table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
-									     si_pi->sys_pcie_mask,
-									     si_pi->boot_pcie_gen,
-									     AMDGPU_PCIE_GEN1);
+	table->ACPIState.levels[0].gen2PCIE =
+		(u8)amdgpu_get_pcie_gen_support(adev,
+						si_pi->sys_pcie_mask,
+						si_pi->boot_pcie_gen,
+						AMDGPU_PCIE_GEN1);
 
 	if (si_pi->vddc_phase_shed_control)
 		si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
 	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
 	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
 	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-	pl->pcie_gen = r600_get_pcie_gen_support(adev,
-						 si_pi->sys_pcie_mask,
-						 si_pi->boot_pcie_gen,
-						 clock_info->si.ucPCIEGen);
+	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+						   si_pi->sys_pcie_mask,
+						   si_pi->boot_pcie_gen,
+						   clock_info->si.ucPCIEGen);
 
 	/* patch up vddc if necessary */
 	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
 	int ret;
-	u32 mask;
 
 	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
 	if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		si_pi->sys_pcie_mask = 0;
-	else
-		si_pi->sys_pcie_mask = mask;
+	si_pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
 	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);