Commit 9530273e authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: cover the powerplay implementation details V3

This saves users a lot of trouble, as they no longer
need to care whether the swSMU or the traditional
powerplay routine should be used.

V2: apply the fixes to vi.c and cik.c also
V3: squash in oops fix
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a434b94c
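
The pattern being consolidated is easiest to see outside the kernel tree. Below is a minimal, self-contained C sketch of the same idea: one wrapper owns the swSMU-versus-powerplay check, so callers issue a single call instead of open-coding the branch at every site. All names prefixed with fake_ are hypothetical stand-ins for illustration only; the real wrappers added by this patch are the amdgpu_dpm_*() functions in the amdgpu_dpm.c hunk below.

/* Standalone sketch; fake_* names are hypothetical, not amdgpu symbols. */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pp_funcs {
	int (*set_asic_baco_state)(void *pp_handle, int state);
};

struct fake_adev {
	bool has_sw_smu;                      /* models is_support_sw_smu(adev) */
	const struct fake_pp_funcs *pp_funcs; /* models adev->powerplay.pp_funcs */
	void *pp_handle;                      /* models adev->powerplay.pp_handle */
};

static int fake_smu_baco_enter(struct fake_adev *adev)
{
	(void)adev;
	printf("swSMU path: entering BACO\n");
	return 0;
}

static int legacy_set_baco_state(void *pp_handle, int state)
{
	(void)pp_handle;
	printf("powerplay path: set BACO state %d\n", state);
	return 0;
}

/* The wrapper owns the backend check, the way the new amdgpu_dpm_baco_enter()
 * below hides swSMU vs. traditional powerplay from its callers. */
static int fake_dpm_baco_enter(struct fake_adev *adev)
{
	if (adev->has_sw_smu)
		return fake_smu_baco_enter(adev);

	if (!adev->pp_funcs || !adev->pp_funcs->set_asic_baco_state)
		return -ENOENT;

	return adev->pp_funcs->set_asic_baco_state(adev->pp_handle, 1);
}

int main(void)
{
	const struct fake_pp_funcs funcs = {
		.set_asic_baco_state = legacy_set_baco_state,
	};
	struct fake_adev swsmu_dev  = { .has_sw_smu = true };
	struct fake_adev legacy_dev = { .has_sw_smu = false, .pp_funcs = &funcs };

	/* Callers no longer branch on the backend themselves. */
	fake_dpm_baco_enter(&swsmu_dev);
	fake_dpm_baco_enter(&legacy_dev);
	return 0;
}

Built as an ordinary userspace program, the two calls in main() exercise the swSMU and the legacy function-table paths respectively.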
@@ -613,12 +613,6 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-	if (is_support_sw_smu(adev))
-		smu_switch_power_profile(&adev->smu,
-					 PP_SMC_POWER_PROFILE_COMPUTE,
-					 !idle);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->switch_power_profile)
-		amdgpu_dpm_switch_power_profile(adev,
-						PP_SMC_POWER_PROFILE_COMPUTE,
-						!idle);
+	amdgpu_dpm_switch_power_profile(adev,
+					PP_SMC_POWER_PROFILE_COMPUTE,
+					!idle);
@@ -2345,14 +2345,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.hw = false;
 		/* handle putting the SMC in the appropriate state */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			if (is_support_sw_smu(adev)) {
-				r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
-			} else if (adev->powerplay.pp_funcs &&
-				   adev->powerplay.pp_funcs->set_mp1_state) {
-				r = adev->powerplay.pp_funcs->set_mp1_state(
-					adev->powerplay.pp_handle,
-					adev->mp1_state);
-			}
+			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
 			if (r) {
 				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
 					  adev->mp1_state, r);
@@ -4359,56 +4352,22 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	if (ras && ras->supported)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-		int ret;
-
-		ret = smu_baco_enter(smu);
-		if (ret)
-			return ret;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-			return -ENOENT;
-
-		/* enter BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
-			return -EIO;
-	}
-
-	return 0;
+	return amdgpu_dpm_baco_enter(adev);
 }
 
 int amdgpu_device_baco_exit(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	int ret = 0;
 
 	if (!amdgpu_device_supports_baco(adev->ddev))
 		return -ENOTSUPP;
 
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-		int ret;
-
-		ret = smu_baco_exit(smu);
-		if (ret)
-			return ret;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-			return -ENOENT;
-
-		/* exit BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
-			return -EIO;
-	}
+	ret = amdgpu_dpm_baco_exit(adev);
+	if (ret)
+		return ret;
 
 	if (ras && ras->supported)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
@@ -983,3 +983,163 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 
 	return ret;
 }
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_enter(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+	}
+
+	return ret;
+}
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_exit(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* exit BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+	}
+
+	return ret;
+}
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+			     enum pp_mp1_state mp1_state)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_set_mp1_state(&adev->smu, mp1_state);
+	} else if (adev->powerplay.pp_funcs &&
+		   adev->powerplay.pp_funcs->set_mp1_state) {
+		ret = adev->powerplay.pp_funcs->set_mp1_state(
+				adev->powerplay.pp_handle,
+				mp1_state);
+	}
+
+	return ret;
+}
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	bool baco_cap;
+
+	if (is_support_sw_smu(adev)) {
+		return smu_baco_is_support(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+			return false;
+
+		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+			return false;
+
+		return baco_cap ? true : false;
+	}
+}
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+
+	if (is_support_sw_smu(adev)) {
+		return smu_mode2_reset(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+			return -ENOENT;
+
+		return pp_funcs->asic_reset_mode_2(pp_handle);
+	}
+}
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	dev_info(adev->dev, "GPU BACO reset\n");
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_enter(smu);
+		if (ret)
+			return ret;
+
+		ret = smu_baco_exit(smu);
+		if (ret)
+			return ret;
+	} else {
+		if (!pp_funcs
+		    || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+		if (ret)
+			return ret;
+
+		/* exit BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu(adev))
+		ret = smu_switch_power_profile(&adev->smu, type, en);
+	else if (adev->powerplay.pp_funcs &&
+		 adev->powerplay.pp_funcs->switch_power_profile)
+		ret = adev->powerplay.pp_funcs->switch_power_profile(
+			adev->powerplay.pp_handle, type, en);
+
+	return ret;
+}
+
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+			       uint32_t pstate)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu_xgmi(adev))
+		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+	else if (adev->powerplay.pp_funcs &&
+		 adev->powerplay.pp_funcs->set_xgmi_pstate)
+		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+								pstate);
+
+	return ret;
+}
@@ -341,10 +341,6 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->reset_power_profile_state(\
 			(adev)->powerplay.pp_handle, request))
 
-#define amdgpu_dpm_switch_power_profile(adev, type, en) \
-		((adev)->powerplay.pp_funcs->switch_power_profile(\
-			(adev)->powerplay.pp_handle, type, en))
-
 #define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
@@ -517,4 +513,24 @@ extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
 
 extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
 
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+			       uint32_t pstate);
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en);
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+			     enum pp_mp1_state mp1_state);
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+
 #endif
@@ -543,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 		return;
 
-	if (!is_support_sw_smu(adev) &&
-	    (!adev->powerplay.pp_funcs ||
-	     !adev->powerplay.pp_funcs->set_powergating_by_smu))
-		return;
-
 	mutex_lock(&adev->gfx.gfx_off_mutex);
 
 	if (!enable)
@@ -291,13 +291,7 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 
 	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
 
-	if (is_support_sw_smu_xgmi(adev))
-		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->set_xgmi_pstate)
-		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
-								pstate);
-
+	ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
 	if (ret) {
 		dev_err(adev->dev,
 			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
@@ -1312,19 +1312,13 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
 
 static bool cik_asic_supports_baco(struct amdgpu_device *adev)
 {
-	bool baco_support;
-
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
-		smu7_asic_get_baco_capability(adev, &baco_support);
-		break;
+		return amdgpu_dpm_is_baco_supported(adev);
 	default:
-		baco_support = false;
-		break;
+		return false;
 	}
-
-	return baco_support;
 }
 
 static enum amd_reset_method
@@ -1366,7 +1360,7 @@ static int cik_asic_reset(struct amdgpu_device *adev)
 	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
-		r = smu7_asic_baco_reset(adev);
+		r = amdgpu_dpm_baco_reset(adev);
 	} else {
 		r = cik_asic_pci_config_reset(adev);
 	}
@@ -31,7 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
 
 int cik_set_ip_blocks(struct amdgpu_device *adev);
 void legacy_doorbell_index_init(struct amdgpu_device *adev);
 
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
 #endif
@@ -478,7 +478,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -489,7 +489,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
@@ -502,7 +502,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -513,7 +513,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
@@ -479,63 +479,19 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
 	return ret;
 }
 
-static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-
-		*cap = smu_baco_is_support(smu);
-		return 0;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
-			*cap = false;
-			return -ENOENT;
-		}
-
-		return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-	}
-}
-
 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	int ret = 0;
 
 	/* avoid NBIF got stuck when do RAS recovery in BACO reset */
 	if (ras && ras->supported)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
-	dev_info(adev->dev, "GPU BACO reset\n");
-
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-		int ret;
-
-		ret = smu_baco_enter(smu);
-		if (ret)
-			return ret;
-
-		ret = smu_baco_exit(smu);
-		if (ret)
-			return ret;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-			return -ENOENT;
-
-		/* enter BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
-			return -EIO;
-
-		/* exit BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
-			return -EIO;
-	}
+	ret = amdgpu_dpm_baco_reset(adev);
+	if (ret)
+		return ret;
 
 	/* re-enable doorbell interrupt after BACO exit */
 	if (ras && ras->supported)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
@@ -543,17 +499,6 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int soc15_mode2_reset(struct amdgpu_device *adev)
-{
-	if (is_support_sw_smu(adev))
-		return smu_mode2_reset(&adev->smu);
-	if (!adev->powerplay.pp_funcs ||
-	    !adev->powerplay.pp_funcs->asic_reset_mode_2)
-		return -ENOENT;
-
-	return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
-}
-
 static enum amd_reset_method
 soc15_asic_reset_method(struct amdgpu_device *adev)
 {
@@ -567,11 +512,11 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_ARCTURUS:
-		soc15_asic_get_baco_capability(adev, &baco_reset);
+		baco_reset = amdgpu_dpm_is_baco_supported(adev);
 		break;
 	case CHIP_VEGA20:
 		if (adev->psp.sos_fw_version >= 0x80067)
-			soc15_asic_get_baco_capability(adev, &baco_reset);
+			baco_reset = amdgpu_dpm_is_baco_supported(adev);
 
 		/*
 		 * 1. PMFW version > 0x284300: all cases use baco
@@ -598,7 +543,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 			amdgpu_inc_vram_lost(adev);
 		return soc15_asic_baco_reset(adev);
 	case AMD_RESET_METHOD_MODE2:
-		return soc15_mode2_reset(adev);
+		return amdgpu_dpm_mode2_reset(adev);
 	default:
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
@@ -608,25 +553,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 
 static bool soc15_supports_baco(struct amdgpu_device *adev)
 {
-	bool baco_support;
-
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_ARCTURUS:
-		soc15_asic_get_baco_capability(adev, &baco_support);
-		break;
+		return amdgpu_dpm_is_baco_supported(adev);
 	case CHIP_VEGA20:
 		if (adev->psp.sos_fw_version >= 0x80067)
-			soc15_asic_get_baco_capability(adev, &baco_support);
-		else
-			baco_support = false;
-		break;
+			return amdgpu_dpm_is_baco_supported(adev);
+		return false;
 	default:
 		return false;
 	}
-
-	return baco_support;
 }
 
 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -846,7 +784,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
 			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
-		if (is_support_sw_smu(adev))
-			amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
@@ -689,40 +689,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return -EINVAL;
 }
 
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
-	void *pp_handle = adev->powerplay.pp_handle;
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
-		*cap = false;
-		return -ENOENT;
-	}
-
-	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-}
-
-int smu7_asic_baco_reset(struct amdgpu_device *adev)
-{
-	void *pp_handle = adev->powerplay.pp_handle;
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-	if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-		return -ENOENT;
-
-	/* enter BACO state */
-	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
-		return -EIO;
-
-	/* exit BACO state */
-	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
-		return -EIO;
-
-	dev_info(adev->dev, "GPU BACO reset\n");
-
-	return 0;
-}
-
 /**
  * vi_asic_pci_config_reset - soft reset GPU
  *
@@ -747,8 +713,6 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
 
 static bool vi_asic_supports_baco(struct amdgpu_device *adev)
 {
-	bool baco_support;
-
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
 	case CHIP_TONGA:
@@ -756,14 +720,10 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_TOPAZ:
-		smu7_asic_get_baco_capability(adev, &baco_support);
-		break;
+		return amdgpu_dpm_is_baco_supported(adev);
 	default:
-		baco_support = false;
-		break;
+		return false;
 	}
-
-	return baco_support;
 }
 
 static enum amd_reset_method
@@ -778,7 +738,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_TOPAZ:
-		smu7_asic_get_baco_capability(adev, &baco_reset);
+		baco_reset = amdgpu_dpm_is_baco_supported(adev);
 		break;
 	default:
 		baco_reset = false;
@@ -807,7 +767,7 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
-		r = smu7_asic_baco_reset(adev);
+		r = amdgpu_dpm_baco_reset(adev);
 	} else {
 		r = vi_asic_pci_config_reset(adev);
 	}
@@ -31,7 +31,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
 
 int vi_set_ip_blocks(struct amdgpu_device *adev);
 void legacy_doorbell_index_init(struct amdgpu_device *adev);
 
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
 #endif