Commit 7dbf7805 authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: move ppfeature mask setting to smu_cmn.c

These helpers are shared by all ASICs, and we are moving to centralize
all feature enablement/support checking and setting APIs in smu_cmn.c.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 28251d72
......@@ -59,147 +59,33 @@ const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type
return __smu_message_names[type];
}
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char* __smu_feature_names[] = {
SMU_FEATURE_MASKS
};
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
if (feature < 0 || feature >= SMU_FEATURE_COUNT)
return "unknown smu feature";
return __smu_feature_names[feature];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
size_t size = 0;
int ret = 0, i = 0;
uint32_t feature_mask[2] = { 0 };
int32_t feature_index = 0;
uint32_t count = 0;
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
goto failed;
size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
feature_mask[1], feature_mask[0]);
for (i = 0; i < SMU_FEATURE_COUNT; i++) {
feature_index = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
i);
if (feature_index < 0)
continue;
sort_feature[feature_index] = i;
hw_feature_count++;
}
for (i = 0; i < hw_feature_count; i++) {
size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
count++,
smu_get_feature_name(smu, sort_feature[i]),
i,
!!smu_feature_is_enabled(smu, sort_feature[i]) ?
"enabled" : "disabled");
}
size = smu_get_pp_feature_mask(smu, buf);
failed:
mutex_unlock(&smu->mutex);
return size;
}
static int smu_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (enabled) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_EnableSmuFeaturesLow,
lower_32_bits(feature_mask),
NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_EnableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
if (ret)
return ret;
} else {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesLow,
lower_32_bits(feature_mask),
NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
if (ret)
return ret;
}
mutex_lock(&feature->mutex);
if (enabled)
bitmap_or(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
else
bitmap_andnot(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
return ret;
}
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
uint32_t feature_mask[2] = { 0 };
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
goto out;
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
feature_2_enabled = ~feature_enables & new_mask;
feature_2_disabled = feature_enables & ~new_mask;
ret = smu_set_pp_feature_mask(smu, new_mask);
if (feature_2_enabled) {
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
if (ret)
goto out;
}
if (feature_2_disabled) {
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
if (ret)
goto out;
}
out:
mutex_unlock(&smu->mutex);
return ret;
......@@ -540,25 +426,6 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
mask);
if (feature_id < 0)
return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
return smu_feature_update_enable_state(smu,
1ULL << feature_id,
enable);
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
......
......@@ -1838,7 +1838,7 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
if (ret) {
dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n");
return ret;
......@@ -1847,7 +1847,7 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
if (ret) {
dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n");
return ret;
......@@ -2312,6 +2312,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_df_cstate = arcturus_set_df_cstate,
.allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
......
......@@ -584,6 +584,8 @@ struct pptable_funcs {
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
void (*log_thermal_throttling_event)(struct smu_context *smu);
size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
};
typedef enum {
......@@ -729,9 +731,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern int smu_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask, bool enable);
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu);
......@@ -771,7 +770,6 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count);
int smu_set_ac_dc(struct smu_context *smu);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
int smu_force_clk_levels(struct smu_context *smu,
......
......@@ -2329,6 +2329,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.run_btc = navi10_run_btc,
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
......
......@@ -1025,6 +1025,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
.set_driver_table_location = smu_v12_0_set_driver_table_location,
.is_dpm_running = renoir_is_dpm_running,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
......
......@@ -2477,6 +2477,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
......
......@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "smu_internal.h"
/*
* DO NOT use these for err/warn/info/debug messages.
......@@ -192,3 +193,165 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
return ret;
}
static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (enabled) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_EnableSmuFeaturesLow,
lower_32_bits(feature_mask),
NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_EnableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
if (ret)
return ret;
} else {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesLow,
lower_32_bits(feature_mask),
NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
if (ret)
return ret;
}
mutex_lock(&feature->mutex);
if (enabled)
bitmap_or(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
else
bitmap_andnot(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
return ret;
}
int smu_cmn_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
mask);
if (feature_id < 0)
return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
return smu_cmn_feature_update_enable_state(smu,
1ULL << feature_id,
enable);
}
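[Editor's note] For reference, a minimal standalone sketch (plain userspace C, not kernel code) of the mask handling above: a single feature bit is built from the ASIC-specific feature index, and the 64-bit mask is split into the two 32-bit halves that the paired ...FeaturesLow/...FeaturesHigh messages take. lower_32_bits()/upper_32_bits() are modeled here with plain shifts and masks.

/*
 * Standalone model of the mask handling in
 * smu_cmn_feature_update_enable_state()/smu_cmn_feature_set_enabled().
 * Userspace sketch only: lower_32_bits()/upper_32_bits() are
 * re-implemented with plain shifts; the real code passes the two
 * halves to the Enable/DisableSmuFeaturesLow/High messages.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t)(v & 0xffffffffULL); }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	int feature_id = 37;                        /* hypothetical ASIC-specific bit */
	uint64_t feature_mask = 1ULL << feature_id; /* as in smu_cmn_feature_set_enabled() */

	printf("mask         : 0x%016llx\n", (unsigned long long)feature_mask);
	printf("low  32 bits : 0x%08x\n", lower_32(feature_mask));
	printf("high 32 bits : 0x%08x\n", upper_32(feature_mask));
	return 0;
}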
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char* __smu_feature_names[] = {
SMU_FEATURE_MASKS
};
static const char *smu_get_feature_name(struct smu_context *smu,
enum smu_feature_mask feature)
{
if (feature < 0 || feature >= SMU_FEATURE_COUNT)
return "unknown smu feature";
return __smu_feature_names[feature];
}
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
uint32_t feature_mask[2] = { 0 };
int32_t feature_index = 0;
uint32_t count = 0;
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
size_t size = 0;
int ret = 0, i;
ret = smu_cmn_get_enabled_mask(smu,
feature_mask,
2);
if (ret)
return 0;
size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
feature_mask[1], feature_mask[0]);
for (i = 0; i < SMU_FEATURE_COUNT; i++) {
feature_index = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
i);
if (feature_index < 0)
continue;
sort_feature[feature_index] = i;
hw_feature_count++;
}
for (i = 0; i < hw_feature_count; i++) {
size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
count++,
smu_get_feature_name(smu, sort_feature[i]),
i,
!!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
"enabled" : "disabled");
}
return size;
}
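[Editor's note] A minimal standalone sketch of the sort_feature[] indirection used above, with a hypothetical common-to-ASIC index map standing in for smu_cmn_to_asic_specific_index(): unsupported features map to -1 and are skipped, while the rest are listed in hardware bit order rather than enum order. Names and mappings are illustrative only.

/*
 * Standalone model of the sort_feature[] indirection in
 * smu_cmn_get_pp_feature_mask(). The asic_index[] table is a
 * hypothetical stand-in for smu_cmn_to_asic_specific_index().
 */
#include <stdio.h>

#define FEATURE_COUNT 4

static const char *feature_names[FEATURE_COUNT] = {
	"DPM_GFXCLK", "DPM_UCLK", "VCN_PG", "FAN_CONTROL"     /* illustrative */
};
static const int asic_index[FEATURE_COUNT] = { 2, 0, -1, 1 }; /* -1: unsupported */

int main(void)
{
	int sort_feature[FEATURE_COUNT];
	int hw_feature_count = 0, i;

	for (i = 0; i < FEATURE_COUNT; i++) {
		if (asic_index[i] < 0)
			continue;                  /* ASIC does not implement it */
		sort_feature[asic_index[i]] = i;   /* index by hardware bit */
		hw_feature_count++;
	}
	for (i = 0; i < hw_feature_count; i++)
		printf("%02d. %-12s (%2d)\n", i, feature_names[sort_feature[i]], i);
	return 0;
}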
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask)
{
int ret = 0;
uint32_t feature_mask[2] = { 0 };
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
ret = smu_cmn_get_enabled_mask(smu,
feature_mask,
2);
if (ret)
return ret;
feature_enables = ((uint64_t)feature_mask[1] << 32 |
(uint64_t)feature_mask[0]);
feature_2_enabled = ~feature_enables & new_mask;
feature_2_disabled = feature_enables & ~new_mask;
if (feature_2_enabled) {
ret = smu_cmn_feature_update_enable_state(smu,
feature_2_enabled,
true);
if (ret)
return ret;
}
if (feature_2_disabled) {
ret = smu_cmn_feature_update_enable_state(smu,
feature_2_disabled,
false);
if (ret)
return ret;
}
return ret;
}
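[Editor's note] A minimal standalone sketch of the delta computation above: starting from the mask of currently enabled features (fetched through smu_cmn_get_enabled_mask() in the real code), only the bits that actually change state become enable/disable requests. The mask values here are illustrative.

/*
 * Standalone model of the delta computation in
 * smu_cmn_set_pp_feature_mask(). Values are illustrative; the real
 * code sends the two deltas via smu_cmn_feature_update_enable_state().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t feature_enables = 0x00000000f0f0f0f0ULL; /* currently enabled */
	uint64_t new_mask        = 0x00000000ff00f0f0ULL; /* requested by the user */

	uint64_t feature_2_enabled  = ~feature_enables & new_mask; /* off -> on  */
	uint64_t feature_2_disabled = feature_enables & ~new_mask; /* on  -> off */

	printf("enable : 0x%016llx\n", (unsigned long long)feature_2_enabled);  /* 0x0f000000 */
	printf("disable: 0x%016llx\n", (unsigned long long)feature_2_disabled); /* 0x00f00000 */
	return 0;
}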
......@@ -39,4 +39,14 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num);
int smu_cmn_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask,
bool enable);
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf);
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask);
#endif
......@@ -92,5 +92,7 @@
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
#define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu)
#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
#endif
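[Editor's note] The two new smu_internal.h macros above route the sysfs handlers to whichever callbacks the ASIC registered in its pptable_funcs table. Below is a reduced, standalone model of that dispatch; the demo_* types, names and the fallback value are simplified assumptions for illustration, not the real smu_ppt_funcs() macro.

/*
 * Reduced, standalone model of the per-ASIC callback dispatch behind
 * smu_get_pp_feature_mask()/smu_set_pp_feature_mask(). Simplified:
 * the demo_* types and the fallback value are illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_smu;

struct demo_ppt_funcs {
	size_t (*get_pp_feature_mask)(struct demo_smu *smu, char *buf);
	int (*set_pp_feature_mask)(struct demo_smu *smu, uint64_t new_mask);
};

struct demo_smu {
	const struct demo_ppt_funcs *ppt_funcs;
};

/* stands in for smu_cmn_get_pp_feature_mask() */
static size_t demo_cmn_get_pp_feature_mask(struct demo_smu *smu, char *buf)
{
	(void)smu;
	return (size_t)sprintf(buf, "features high: 0x%08x low: 0x%08x\n",
			       0x0u, 0xf0f0f0f0u);
}

/* wrapper: call the registered callback, or fall back to 0 if none */
static size_t demo_get_pp_feature_mask(struct demo_smu *smu, char *buf)
{
	if (smu->ppt_funcs && smu->ppt_funcs->get_pp_feature_mask)
		return smu->ppt_funcs->get_pp_feature_mask(smu, buf);
	return 0;
}

static const struct demo_ppt_funcs demo_asic_funcs = {
	.get_pp_feature_mask = demo_cmn_get_pp_feature_mask, /* as the ppt_funcs tables above do */
};

int main(void)
{
	struct demo_smu smu = { .ppt_funcs = &demo_asic_funcs };
	char buf[128];

	if (demo_get_pp_feature_mask(&smu, buf))
		fputs(buf, stdout);
	return 0;
}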
......@@ -1204,7 +1204,7 @@ smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
if (ret)
dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
__func__, (auto_fan_control ? "Start" : "Stop"));
......