Commit bc7d6c12 authored by Darren Powell, committed by Alex Deucher

amdgpu/pm: Powerplay API for smu, changed 4 dpm functions to use API

v2: fix errors and warnings flagged by checkpatch
v3: Context mismatch with revision v3 to patch 0003

New Functions
  smu_get_mclk        - implementation of the Powerplay API function get_mclk
  smu_get_sclk        - implementation of the Powerplay API function get_sclk
  smu_handle_dpm_task - implementation of the Powerplay API function dispatch_tasks

Modified Functions
  smu_dpm_set_power_gate - modified arg0 to match Powerplay API set_powergating_by_smu

Other Changes
  removed special smu handling in dpm functions and called through Powerplay API
  call to smu_dpm_set_power_gate via Powerplay API now locks mutex for UVD and VCE
Signed-off-by: Darren Powell <darren.powell@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2ea092e5
...@@ -911,50 +911,28 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx) ...@@ -911,50 +911,28 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{ {
uint32_t clk_freq; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL,
!low ? &clk_freq : NULL);
if (ret)
return 0;
return clk_freq * 100;
} else { return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
}
} }
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{ {
uint32_t clk_freq; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL,
!low ? &clk_freq : NULL);
if (ret)
return 0;
return clk_freq * 100;
} else { return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
}
} }
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{ {
int ret = 0; int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
bool swsmu = is_support_sw_smu(adev); bool swsmu = is_support_sw_smu(adev);
switch (block_type) { switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE: case AMD_IP_BLOCK_TYPE_VCE:
if (swsmu) { if (pp_funcs && pp_funcs->set_powergating_by_smu) {
ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
} else if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* /*
* TODO: need a better lock mechanism * TODO: need a better lock mechanism
* *
...@@ -982,7 +960,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block ...@@ -982,7 +960,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
* amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu] * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
*/ */
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate)); (adev)->powerplay.pp_handle, block_type, gate));
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
} }
...@@ -990,12 +968,10 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block ...@@ -990,12 +968,10 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_GFX: case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_VCN: case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA: case AMD_IP_BLOCK_TYPE_SDMA:
if (swsmu) if (pp_funcs && pp_funcs->set_powergating_by_smu) {
ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); ret = (pp_funcs->set_powergating_by_smu(
else if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu)
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate)); (adev)->powerplay.pp_handle, block_type, gate));
}
break; break;
case AMD_IP_BLOCK_TYPE_JPEG: case AMD_IP_BLOCK_TYPE_JPEG:
if (swsmu) if (swsmu)
...@@ -1003,10 +979,10 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block ...@@ -1003,10 +979,10 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
break; break;
case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP: case AMD_IP_BLOCK_TYPE_ACP:
if (adev->powerplay.pp_funcs && if (pp_funcs && pp_funcs->set_powergating_by_smu) {
adev->powerplay.pp_funcs->set_powergating_by_smu) ret = (pp_funcs->set_powergating_by_smu(
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate)); (adev)->powerplay.pp_handle, block_type, gate));
}
break; break;
default: default:
break; break;
...@@ -1512,36 +1488,30 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) ...@@ -1512,36 +1488,30 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring); amdgpu_fence_wait_empty(ring);
} }
if (is_support_sw_smu(adev)) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; if (!amdgpu_device_has_dc_support(adev)) {
smu_handle_task(&adev->smu,
smu_dpm->dpm_level,
AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
true);
} else {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
if (adev->pm.pm_display_cfg.vrefresh > 120)
adev->pm.pm_display_cfg.min_vblank_time = 0;
if (adev->powerplay.pp_funcs->display_configuration_change)
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
mutex_unlock(&adev->pm.mutex);
}
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
} else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev); amdgpu_dpm_get_active_displays(adev);
amdgpu_dpm_change_power_state_locked(adev); adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with
* refresh rates over 120 hz on the non-DC code.
*/
if (adev->pm.pm_display_cfg.vrefresh > 120)
adev->pm.pm_display_cfg.min_vblank_time = 0;
if (adev->powerplay.pp_funcs->display_configuration_change)
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
} }
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
} else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
amdgpu_dpm_change_power_state_locked(adev);
mutex_unlock(&adev->pm.mutex);
} }
} }
......
...@@ -1303,16 +1303,21 @@ int smu_set_watermarks_for_clock_ranges( ...@@ -1303,16 +1303,21 @@ int smu_set_watermarks_for_clock_ranges(
extern int smu_display_configuration_change(struct smu_context *smu, const extern int smu_display_configuration_change(struct smu_context *smu, const
struct amd_pp_display_configuration struct amd_pp_display_configuration
*display_config); *display_config);
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate); extern int smu_dpm_set_power_gate(void *handle, uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu, extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level, enum amd_dpm_forced_level level,
enum amd_pp_task task_id, enum amd_pp_task task_id,
bool lock_needed); bool lock_needed);
extern int smu_handle_dpm_task(void *handle,
enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
int smu_switch_power_profile(void *handle, int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, enum PP_SMC_POWER_PROFILE type,
bool en); bool en);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max); uint32_t *min, uint32_t *max);
u32 smu_get_mclk(void *handle, bool low);
u32 smu_get_sclk(void *handle, bool low);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max); uint32_t min, uint32_t max);
enum amd_dpm_forced_level smu_get_performance_level(void *handle); enum amd_dpm_forced_level smu_get_performance_level(void *handle);
......
...@@ -141,6 +141,34 @@ int smu_get_dpm_freq_range(struct smu_context *smu, ...@@ -141,6 +141,34 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret; return ret;
} }
/*
 * smu_get_mclk - Powerplay API get_mclk: report the memory clock in 10 kHz
 * units. @low selects the low end of the DPM range, otherwise the high end.
 * Returns 0 if the frequency query fails.
 */
u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t freq = 0;

	if (smu_get_dpm_freq_range(smu, SMU_UCLK,
				   low ? &freq : NULL,
				   low ? NULL : &freq))
		return 0;

	return freq * 100;
}
/*
 * smu_get_sclk - Powerplay API get_sclk: report the graphics clock in 10 kHz
 * units. @low selects the low end of the DPM range, otherwise the high end.
 * Returns 0 if the frequency query fails.
 */
u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t freq = 0;

	if (smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				   low ? &freq : NULL,
				   low ? NULL : &freq))
		return 0;

	return freq * 100;
}
static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
bool enable) bool enable)
{ {
...@@ -216,7 +244,7 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu, ...@@ -216,7 +244,7 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
/** /**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block * smu_dpm_set_power_gate - power gate/ungate the specific IP block
* *
* @smu: smu_context pointer * @handle: smu_context pointer
* @block_type: the IP block to power gate/ungate * @block_type: the IP block to power gate/ungate
* @gate: to power gate if true, ungate otherwise * @gate: to power gate if true, ungate otherwise
* *
...@@ -227,9 +255,10 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu, ...@@ -227,9 +255,10 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
* Under this case, the smu->mutex lock protection is already enforced on * Under this case, the smu->mutex lock protection is already enforced on
* the parent API smu_force_performance_level of the call path. * the parent API smu_force_performance_level of the call path.
*/ */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
bool gate) bool gate)
{ {
struct smu_context *smu = handle;
int ret = 0; int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
...@@ -1678,6 +1707,18 @@ int smu_handle_task(struct smu_context *smu, ...@@ -1678,6 +1707,18 @@ int smu_handle_task(struct smu_context *smu,
return ret; return ret;
} }
/*
 * smu_handle_dpm_task - Powerplay API dispatch_tasks: forward @task_id to
 * smu_handle_task at the current DPM forced-performance level, taking the
 * smu lock. @user_state is part of the Powerplay API signature but is not
 * consumed here.
 */
int smu_handle_dpm_task(void *handle,
			enum amd_pp_task task_id,
			enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;

	return smu_handle_task(smu, smu->smu_dpm.dpm_level, task_id, true);
}
int smu_switch_power_profile(void *handle, int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, enum PP_SMC_POWER_PROFILE type,
bool en) bool en)
...@@ -2918,9 +2959,13 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { ...@@ -2918,9 +2959,13 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_pp_table = smu_sys_get_pp_table, .get_pp_table = smu_sys_get_pp_table,
.switch_power_profile = smu_switch_power_profile, .switch_power_profile = smu_switch_power_profile,
/* export to amdgpu */ /* export to amdgpu */
.dispatch_tasks = smu_handle_dpm_task,
.set_powergating_by_smu = smu_dpm_set_power_gate,
.set_power_limit = smu_set_power_limit, .set_power_limit = smu_set_power_limit,
.set_mp1_state = smu_set_mp1_state, .set_mp1_state = smu_set_mp1_state,
/* export to DC */ /* export to DC */
.get_sclk = smu_get_sclk,
.get_mclk = smu_get_mclk,
.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
.get_asic_baco_capability = smu_get_baco_capability, .get_asic_baco_capability = smu_get_baco_capability,
.set_asic_baco_state = smu_baco_set_state, .set_asic_baco_state = smu_baco_set_state,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment