Commit 8264ee69 authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: drop unused code

This code was obsoleted by the new common API
smu_cmn_to_asic_specific_index().
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6c339f37
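
For context, each of the per-ASIC index-lookup callbacks removed below is replaced by direct use of the common translation helper named in the commit message. The sketch that follows is illustrative only and not part of the patch: it assumes the helper signature and the CMN2ASIC_MAPPING_* / SMU_MSG_* identifiers from smu_cmn.h and smu_types.h, and shows roughly how a caller would resolve an ASIC-specific message index without the per-ASIC wrappers.

/* Illustrative sketch (not part of this patch); the helper signature and
 * the mapping/message enums are assumed from smu_cmn.h / smu_types.h. */
static int example_resolve_msg_index(struct smu_context *smu)
{
	int index;

	/* The common helper walks the per-ASIC cmn2asic_mapping tables
	 * (e.g. arcturus_message_map) that each ppt file still registers. */
	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetSmuVersion);
	if (index < 0)
		return index; /* unmapped, or not permitted (e.g. under SR-IOV) */

	/* index is now the ASIC-specific message ID to program into the SMU. */
	return index;
}

Because one helper covers message, clock, feature, table, power-source and workload lookups, the six per-ASIC callbacks and their pptable_funcs hooks below can all be dropped.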
@@ -207,118 +207,6 @@ static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_msg_mapping mapping;
if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
mapping = arcturus_message_map[index];
if (!(mapping.valid_mapping))
return -EINVAL;
if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
return -EACCES;
return mapping.map_to;
#endif
return 0;
}
static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_CLK_COUNT)
return -EINVAL;
mapping = arcturus_clk_map[index];
if (!(mapping.valid_mapping)) {
dev_warn(smc->adev->dev, "Unsupported SMU clk: %d\n", index);
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
mapping = arcturus_feature_mask_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
mapping = arcturus_table_map[index];
if (!(mapping.valid_mapping)) {
dev_warn(smc->adev->dev, "Unsupported SMU table: %d\n", index);
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
mapping = arcturus_pwr_src_map[index];
if (!(mapping.valid_mapping)) {
dev_warn(smc->adev->dev, "Unsupported SMU power source: %d\n", index);
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
mapping = arcturus_workload_map[profile];
if (!(mapping.valid_mapping))
return -EINVAL;
return mapping.map_to;
#endif
return 0;
}
static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -2349,13 +2237,6 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
}
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
.get_smu_clk_index = arcturus_get_smu_clk_index,
.get_smu_feature_index = arcturus_get_smu_feature_index,
.get_smu_table_index = arcturus_get_smu_table_index,
.get_smu_power_index= arcturus_get_pwr_src_index,
.get_workload_type = arcturus_get_workload_type,
/* internal structurs allocations */
.tables_init = arcturus_tables_init,
.alloc_dpm_context = arcturus_allocate_dpm_context,
@@ -452,12 +452,6 @@ struct i2c_adapter;
struct pptable_funcs {
int (*alloc_dpm_context)(struct smu_context *smu);
int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_clk_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_feature_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
@@ -62,17 +62,6 @@ static const struct smu_temperature_range smu11_thermal_policy[] =
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
struct smu_11_0_msg_mapping {
int valid_mapping;
int map_to;
int valid_in_vf;
};
struct smu_11_0_cmn2aisc_mapping {
int valid_mapping;
int map_to;
};
struct smu_11_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
@@ -31,12 +31,6 @@
#define MP1_Public 0x03b00000
#define MP1_SRAM 0x03c00004
struct smu_12_0_cmn2aisc_mapping {
int valid_mapping;
int map_to;
};
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg);
@@ -227,118 +227,6 @@ static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_msg_mapping mapping;
if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
mapping = navi10_message_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
return -EACCES;
return mapping.map_to;
#endif
return 0;
}
static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_CLK_COUNT)
return -EINVAL;
mapping = navi10_clk_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
mapping = navi10_feature_mask_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
mapping = navi10_table_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
mapping = navi10_pwr_src_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
mapping = navi10_workload_map[profile];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static bool is_asic_secure(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2368,12 +2256,6 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
.get_smu_msg_index = navi10_get_smu_msg_index,
.get_smu_clk_index = navi10_get_smu_clk_index,
.get_smu_feature_index = navi10_get_smu_feature_index,
.get_smu_table_index = navi10_get_smu_table_index,
.get_smu_power_index = navi10_get_pwr_src_index,
.get_workload_type = navi10_get_workload_type,
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
@@ -127,58 +127,6 @@ static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_12_0_cmn2aisc_mapping mapping;
if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
mapping = renoir_message_map[index];
if (!(mapping.valid_mapping))
return -EINVAL;
return mapping.map_to;
#endif
return 0;
}
static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_12_0_cmn2aisc_mapping mapping;
if (index >= SMU_CLK_COUNT)
return -EINVAL;
mapping = renoir_clk_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_12_0_cmn2aisc_mapping mapping;
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
mapping = renoir_table_map[index];
if (!(mapping.valid_mapping))
return -EINVAL;
return mapping.map_to;
#endif
return 0;
}
static int renoir_get_metrics_table(struct smu_context *smu,
SmuMetrics_t *metrics_table)
{
@@ -684,35 +632,6 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
return 0;
}
static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
{
uint32_t pplib_workload = 0;
switch (profile) {
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
case PP_SMC_POWER_PROFILE_CUSTOM:
pplib_workload = WORKLOAD_PPLIB_COUNT;
break;
case PP_SMC_POWER_PROFILE_VIDEO:
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
break;
case PP_SMC_POWER_PROFILE_VR:
pplib_workload = WORKLOAD_PPLIB_VR_BIT;
break;
case PP_SMC_POWER_PROFILE_COMPUTE:
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
break;
default:
return -EINVAL;
}
return pplib_workload;
}
/**
* This interface get dpm clock table for dc
*/
@@ -1076,16 +995,12 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
}
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_clk_index = renoir_get_smu_clk_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
.print_clk_levels = renoir_print_clk_levels,
.get_current_power_state = renoir_get_current_power_state,
.dpm_set_vcn_enable = renoir_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
.get_workload_type = renoir_get_workload_type,
.force_clk_levels = renoir_force_clk_levels,
.set_power_profile_mode = renoir_set_power_profile_mode,
.set_performance_level = renoir_set_performance_level,
@@ -208,114 +208,6 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
static int sienna_cichlid_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
mapping = sienna_cichlid_message_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int sienna_cichlid_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_CLK_COUNT)
return -EINVAL;
mapping = sienna_cichlid_clk_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int sienna_cichlid_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
mapping = sienna_cichlid_feature_mask_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int sienna_cichlid_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
mapping = sienna_cichlid_table_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int sienna_cichlid_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
mapping = sienna_cichlid_pwr_src_map[index];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int sienna_cichlid_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
#if 0
struct smu_11_0_cmn2aisc_mapping mapping;
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
mapping = sienna_cichlid_workload_map[profile];
if (!(mapping.valid_mapping)) {
return -EINVAL;
}
return mapping.map_to;
#endif
return 0;
}
static int
sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
@@ -2516,12 +2408,6 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.tables_init = sienna_cichlid_tables_init,
.alloc_dpm_context = sienna_cichlid_allocate_dpm_context,
.get_smu_msg_index = sienna_cichlid_get_smu_msg_index,
.get_smu_clk_index = sienna_cichlid_get_smu_clk_index,
.get_smu_feature_index = sienna_cichlid_get_smu_feature_index,
.get_smu_table_index = sienna_cichlid_get_smu_table_index,
.get_smu_power_index = sienna_cichlid_get_pwr_src_index,
.get_workload_type = sienna_cichlid_get_workload_type,
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
.dpm_set_vcn_enable = sienna_cichlid_dpm_set_vcn_enable,
@@ -69,12 +69,6 @@
#define smu_apply_clocks_adjust_rules(smu) smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu)
#define smu_notify_smc_display_config(smu) smu_ppt_funcs(notify_smc_display_config, 0, smu)
#define smu_set_cpu_power_state(smu) smu_ppt_funcs(set_cpu_power_state, 0, smu)
#define smu_msg_get_index(smu, msg) smu_ppt_funcs(get_smu_msg_index, -EINVAL, smu, msg)
#define smu_clk_get_index(smu, clk) smu_ppt_funcs(get_smu_clk_index, -EINVAL, smu, clk)
#define smu_feature_get_index(smu, fea) smu_ppt_funcs(get_smu_feature_index, -EINVAL, smu, fea)
#define smu_table_get_index(smu, tab) smu_ppt_funcs(get_smu_table_index, -EINVAL, smu, tab)
#define smu_power_get_index(smu, src) smu_ppt_funcs(get_smu_power_index, -EINVAL, smu, src)
#define smu_workload_get_type(smu, type) smu_ppt_funcs(get_workload_type, -EINVAL, smu, type)
#define smu_run_btc(smu) smu_ppt_funcs(run_btc, 0, smu)
#define smu_get_allowed_feature_mask(smu, feature_mask, num) smu_ppt_funcs(get_allowed_feature_mask, 0, smu, feature_mask, num)
#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) smu_ppt_funcs(store_cc6_data, 0, smu, st, cc6_dis, pst_dis, pst_sw_dis)