Commit 1f2cf08a authored by Evan Quan, committed by Alex Deucher

drm/amd/pm: drop unneeded feature->mutex

All of the related APIs are already well protected by adev->pm.mutex, so the per-feature mutex is no longer needed.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1c4dba5e
......@@ -949,7 +949,6 @@ static int smu_sw_init(void *handle)
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
mutex_init(&smu->smu_feature.mutex);
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
......
......@@ -391,7 +391,6 @@ struct smu_feature
DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
struct mutex mutex;
};
struct smu_clocks {
......
......@@ -722,25 +722,21 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
mutex_lock(&feature->mutex);
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
feature->feature_num < 64)
return -EINVAL;
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
goto failed;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
feature_mask[0], NULL);
if (ret)
goto failed;
return ret;
failed:
mutex_unlock(&feature->mutex);
return ret;
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetAllowedFeaturesMaskLow,
feature_mask[0],
NULL);
}
int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
......
......@@ -481,7 +481,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
int ret = 0;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
......@@ -491,11 +490,7 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
ret = test_bit(feature_id, feature->supported);
mutex_unlock(&feature->mutex);
return ret;
return test_bit(feature_id, feature->supported);
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
......@@ -504,7 +499,6 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
int feature_id;
int ret = 0;
if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
return 1;
......@@ -517,11 +511,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
ret = test_bit(feature_id, feature->enabled);
mutex_unlock(&feature->mutex);
return ret;
return test_bit(feature_id, feature->enabled);
}
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
......@@ -666,14 +656,12 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
return ret;
}
mutex_lock(&feature->mutex);
if (enabled)
bitmap_or(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
else
bitmap_andnot(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
return ret;
}
......@@ -843,11 +831,8 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
}
if (no_hw_disablement) {
mutex_lock(&feature->mutex);
bitmap_andnot(feature->enabled, feature->enabled,
(unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
return 0;
} else {
return smu_cmn_feature_update_enable_state(smu,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment