Commit 7e0357be authored by Yang Wang, committed by Alex Deucher

drm/amdgpu: remove unused MCA driver code

- remove unused callback functions.
- make some of the MCA functions static and refine the function order (see the usage sketch after the commit metadata).
Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 66eba12a
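For orientation, a minimal usage sketch (not part of the patch; the wrapper name and the UMC/CE values are illustrative assumptions): after this change, callers harvest SMU MCA errors only through amdgpu_mca_smu_log_ras_error(), while the bank-set helpers and per-entry accessors shown in the diff become file-local static helpers behind it.

static int example_log_umc_ce_errors(struct amdgpu_device *adev,
				     struct ras_err_data *err_data,
				     struct ras_query_context *qctx)
{
	/* Harvest correctable MCA banks for the UMC block via the SMU.
	 * The bank-set init/iterate/release cycle now happens inside
	 * amdgpu_mca_smu_log_ras_error() rather than in the caller.
	 */
	return amdgpu_mca_smu_log_ras_error(adev, AMDGPU_RAS_BLOCK__UMC,
					    AMDGPU_MCA_ERROR_TYPE_CE,
					    err_data, qctx);
}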
@@ -153,7 +153,7 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
static void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
{
if (!mca_set)
return;
@@ -162,7 +162,7 @@ void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
INIT_LIST_HEAD(&mca_set->list);
}
int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
{
struct mca_bank_node *node;
@@ -183,7 +183,7 @@ int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_
return 0;
}
void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
struct mca_bank_node *node, *tmp;
@@ -228,6 +228,84 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st
idx, entry->regs[MCA_REG_IDX_SYND]);
}
static int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!count)
return -EINVAL;
if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
return mca_funcs->mca_get_valid_mca_count(adev, type, count);
return -EOPNOTSUPP;
}
static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
int idx, struct mca_bank_entry *entry)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
int count;
if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
return -EOPNOTSUPP;
switch (type) {
case AMDGPU_MCA_ERROR_TYPE_UE:
count = mca_funcs->max_ue_count;
break;
case AMDGPU_MCA_ERROR_TYPE_CE:
count = mca_funcs->max_ce_count;
break;
default:
return -EINVAL;
}
if (idx >= count)
return -EINVAL;
return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}
static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
struct mca_bank_entry entry;
uint32_t count = 0, i;
int ret;
if (!mca_set)
return -EINVAL;
ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
if (ret)
return ret;
for (i = 0; i < count; i++) {
memset(&entry, 0, sizeof(entry));
ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, &entry);
if (ret)
return ret;
amdgpu_mca_bank_set_add_entry(mca_set, &entry);
}
return 0;
}
static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!count || !entry)
return -EINVAL;
if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
return -EOPNOTSUPP;
return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct ras_err_data *err_data, struct ras_query_context *qctx)
{
@@ -241,7 +319,7 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
amdgpu_mca_bank_set_init(&mca_set);
ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set);
ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set);
if (ret)
goto out_mca_release;
@@ -286,119 +364,6 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
return ret;
}
int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!count)
return -EINVAL;
if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
return mca_funcs->mca_get_valid_mca_count(adev, type, count);
return -EOPNOTSUPP;
}
int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, uint32_t *total)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
struct mca_bank_set mca_set;
struct mca_bank_node *node;
struct mca_bank_entry *entry;
uint32_t count;
int ret;
if (!total)
return -EINVAL;
if (!mca_funcs)
return -EOPNOTSUPP;
if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count)
return -EOPNOTSUPP;
amdgpu_mca_bank_set_init(&mca_set);
ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set);
if (ret)
goto err_mca_set_release;
*total = 0;
list_for_each_entry(node, &mca_set.list, node) {
entry = &node->entry;
count = 0;
ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count);
if (ret)
goto err_mca_set_release;
*total += count;
}
err_mca_set_release:
amdgpu_mca_bank_set_release(&mca_set);
return ret;
}
int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!count || !entry)
return -EINVAL;
if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
return -EOPNOTSUPP;
return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}
int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!mca_set)
return -EINVAL;
if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set)
return -EOPNOTSUPP;
WARN_ON(!list_empty(&mca_set->list));
return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set);
}
int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
int idx, struct mca_bank_entry *entry)
{
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
int count;
if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
return -EOPNOTSUPP;
switch (type) {
case AMDGPU_MCA_ERROR_TYPE_UE:
count = mca_funcs->max_ue_count;
break;
case AMDGPU_MCA_ERROR_TYPE_CE:
count = mca_funcs->max_ce_count;
break;
default:
return -EINVAL;
}
if (idx >= count)
return -EINVAL;
return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
......
@@ -122,8 +122,6 @@ struct amdgpu_mca_smu_funcs {
int max_ue_count;
int max_ce_count;
int (*mca_set_debug_mode)(struct amdgpu_device *adev, bool enable);
int (*mca_get_ras_mca_set)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_set *mca_set);
int (*mca_parse_mca_error_count)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_entry *entry, uint32_t *count);
int (*mca_get_valid_mca_count)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
@@ -152,23 +150,9 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs);
int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable);
int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count);
int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, uint32_t *total);
int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, uint32_t *count);
int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set);
int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
int idx, struct mca_bank_entry *entry);
void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set);
int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry);
void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set);
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct ras_err_data *err_data, struct ras_query_context *qctx);
......
@@ -2928,55 +2928,6 @@ static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_i
return true;
}
static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
struct mca_bank_entry entry;
uint32_t mca_cnt;
int i, ret;
ret = mca_get_valid_mca_count(adev, type, &mca_cnt);
if (ret)
return ret;
/* if valid mca bank count is 0, the driver can return 0 directly */
if (!mca_cnt)
return 0;
for (i = 0; i < mca_cnt; i++) {
memset(&entry, 0, sizeof(entry));
ret = mca_get_mca_entry(adev, type, i, &entry);
if (ret)
return ret;
if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry))
continue;
ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry);
if (ret)
return ret;
}
return 0;
}
static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
const struct mca_ras_info *mca_ras = NULL;
if (!mca_set)
return -EINVAL;
if (blk != AMDGPU_RAS_BLOCK_COUNT) {
mca_ras = mca_get_mca_ras_info(adev, blk);
if (!mca_ras)
return -EOPNOTSUPP;
}
return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set);
}
static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_entry *entry, uint32_t *count)
{
@@ -3013,7 +2964,6 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
.max_ue_count = 12,
.max_ce_count = 12,
.mca_set_debug_mode = mca_smu_set_debug_mode,
.mca_get_ras_mca_set = mca_smu_get_ras_mca_set,
.mca_parse_mca_error_count = mca_smu_parse_mca_error_count,
.mca_get_mca_entry = mca_smu_get_mca_entry,
.mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
......