Commit d02ddefc authored by Lijo Lazar, committed by Alex Deucher

drm/amdgpu: Initialize VF partition mode

For SOCs with GFX v9.4.3, a VF may have multiple compute partitions.
Fetch the partition information during init and initialize partition
nodes. There is no support for switching the partition mode in VF mode, so
disable it.
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5d64af40
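
For context, the sketch below mirrors the derivation this patch adds on the VF side: the driver reads how many XCCs belong to each partition and maps the resulting partition count to a mode (SPX/DPX/TPX/QPX, or CPX when each partition holds a single XCC). This is a standalone illustration with stand-in enum and function names, not driver code.

#include <stdio.h>

/* Stand-in for the amdgpu compute partition modes (illustrative only). */
enum partition_mode { SPX, DPX, TPX, QPX, CPX, UNKNOWN };

/*
 * Derive the partition mode from the total XCC count and the
 * XCCs-per-partition value reported by the hardware, mirroring the
 * logic of __aqua_vanjaram_calc_xcp_mode() introduced by this patch.
 */
static enum partition_mode derive_mode(int num_xcc, int num_xcc_per_xcp)
{
        int num_xcp;

        if (num_xcc_per_xcp <= 0 || num_xcc % num_xcc_per_xcp)
                return UNKNOWN;

        if (num_xcc_per_xcp == 1)
                return CPX;     /* one XCC per partition */

        num_xcp = num_xcc / num_xcc_per_xcp;
        switch (num_xcp) {
        case 1: return SPX;
        case 2: return DPX;
        case 3: return TPX;
        case 4: return QPX;
        default: return UNKNOWN;
        }
}

int main(void)
{
        /* e.g. 8 XCCs split 4 per partition -> 2 partitions -> DPX */
        printf("%d\n", derive_mode(8, 4) == DPX);
        return 0;
}
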
@@ -297,6 +297,7 @@ struct amdgpu_gfx_funcs {
         int (*switch_partition_mode)(struct amdgpu_device *adev,
                                      int num_xccs_per_xcp);
         int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
+        int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
 };
 
 struct sq_work {
@@ -219,7 +219,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
 {
         int mode;
 
-        if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+        if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
+            xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                 return xcp_mgr->mode;
 
         if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
@@ -228,6 +229,12 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
         if (!(flags & AMDGPU_XCP_FL_LOCKED))
                 mutex_lock(&xcp_mgr->xcp_lock);
         mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
+
+        /* First time query for VF, set the mode here */
+        if (amdgpu_sriov_vf(xcp_mgr->adev) &&
+            xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+                xcp_mgr->mode = mode;
+
         if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
                 dev_WARN(
                         xcp_mgr->adev->dev,
@@ -282,8 +289,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
 {
         struct amdgpu_xcp_mgr *xcp_mgr;
 
-        if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
-            !xcp_funcs->get_ip_details)
+        if (!xcp_funcs || !xcp_funcs->get_ip_details)
                 return -EINVAL;
 
         xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
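
The amdgpu_xcp.c changes above let a VF fall through to the backend query even while the manager is still in AMDGPU_XCP_MODE_NONE, and cache whatever the first query returns. A condensed, self-contained sketch of that control flow, using simplified stand-in types rather than struct amdgpu_xcp_mgr:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real code works on struct amdgpu_xcp_mgr. */
#define MODE_NONE (-1)

struct xcp_mgr_sketch {
        bool is_vf;
        int mode;                       /* MODE_NONE until known */
        int (*query_backend)(void);     /* e.g. the SOC-specific query callback */
};

/*
 * Mirrors the control flow of amdgpu_xcp_query_partition_mode() after this
 * patch: a PF with no mode configured returns early, while a VF falls
 * through to the backend query and caches the result the first time.
 */
static int query_partition_mode(struct xcp_mgr_sketch *mgr)
{
        int mode;

        if (!mgr->is_vf && mgr->mode == MODE_NONE)
                return mgr->mode;

        mode = mgr->query_backend();

        /* First time query for VF, set the mode here */
        if (mgr->is_vf && mgr->mode == MODE_NONE)
                mgr->mode = mode;

        return mode;
}

static int fake_backend(void) { return 2; }     /* pretend the SOC reports DPX */

int main(void)
{
        struct xcp_mgr_sketch mgr = { .is_vf = true, .mode = MODE_NONE,
                                      .query_backend = fake_backend };
        int mode = query_partition_mode(&mgr);

        printf("queried mode %d, cached mode %d\n", mode, mgr.mode);
        return 0;
}
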
@@ -304,13 +304,56 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
         return ext_offset;
 }
 
+static enum amdgpu_gfx_partition
+__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+        struct amdgpu_device *adev = xcp_mgr->adev;
+        int num_xcc, num_xcc_per_xcp = 0, mode = 0;
+
+        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+        if (adev->gfx.funcs->get_xccs_per_xcp)
+                num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
+        if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
+                mode = num_xcc / num_xcc_per_xcp;
+
+        if (num_xcc_per_xcp == 1)
+                return AMDGPU_CPX_PARTITION_MODE;
+
+        switch (mode) {
+        case 1:
+                return AMDGPU_SPX_PARTITION_MODE;
+        case 2:
+                return AMDGPU_DPX_PARTITION_MODE;
+        case 3:
+                return AMDGPU_TPX_PARTITION_MODE;
+        case 4:
+                return AMDGPU_QPX_PARTITION_MODE;
+        default:
+                return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+        }
+
+        return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
 static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 {
-        enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+        enum amdgpu_gfx_partition derv_mode,
+                mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
         struct amdgpu_device *adev = xcp_mgr->adev;
 
-        if (adev->nbio.funcs->get_compute_partition_mode)
-                mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+        derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);
+
+        if (amdgpu_sriov_vf(adev))
+                return derv_mode;
+
+        if (adev->nbio.funcs->get_compute_partition_mode) {
+                mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+                if (mode != derv_mode)
+                        dev_warn(
+                                adev->dev,
+                                "Mismatch in compute partition mode - reported : %d derived : %d",
+                                mode, derv_mode);
+        }
 
         return mode;
 }
@@ -624,6 +667,9 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
 {
         int ret;
 
+        if (amdgpu_sriov_vf(adev))
+                aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;
+
         ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
                                   &aqua_vanjaram_xcp_funcs);
         if (ret)
@@ -652,6 +652,15 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
         soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
 }
 
+static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
+{
+        u32 xcp_ctl;
+
+        /* Value is expected to be the same on all, fetch from first instance */
+        xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
+
+        return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
+}
+
 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                                int num_xccs_per_xcp)
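
gfx_v9_4_3_get_xccs_per_xcp() above reads CP_HYP_XCP_CTL from the first XCC instance and extracts the NUM_XCC_IN_XCP field; REG_GET_FIELD() boils down to masking the field and shifting it into place. The sketch below shows only that mask-and-shift mechanism; the mask and shift values are placeholders, since the real CP_HYP_XCP_CTL field layout is not part of this diff.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical field layout used only for illustration; the real
 * CP_HYP_XCP_CTL NUM_XCC_IN_XCP mask/shift come from the generated
 * register headers and are not shown in this patch.
 */
#define XCP_CTL__NUM_XCC_IN_XCP_MASK    0x0000000FU
#define XCP_CTL__NUM_XCC_IN_XCP__SHIFT  0

/* Same shape as a register field accessor: mask the field, shift it down. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t xcp_ctl = 0x4; /* pretend the register reads back 4 XCCs per XCP */

        printf("NUM_XCC_IN_XCP = %u\n",
               get_field(xcp_ctl, XCP_CTL__NUM_XCC_IN_XCP_MASK,
                         XCP_CTL__NUM_XCC_IN_XCP__SHIFT));
        return 0;
}
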
@@ -706,6 +715,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
         .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
         .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
         .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
+        .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
 };
 
 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
@@ -2050,18 +2060,31 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 {
-        int r = 0, i, num_xcc;
+        int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
 
-        if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
-                                            AMDGPU_XCP_FL_NONE) ==
-            AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
-                r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
-                                                     amdgpu_user_partt_mode);
+        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+        if (amdgpu_sriov_vf(adev)) {
+                enum amdgpu_gfx_partition mode;
+
+                mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+                                                       AMDGPU_XCP_FL_NONE);
+                if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+                        return -EINVAL;
+
+                num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
+                adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
+                num_xcp = num_xcc / num_xcc_per_xcp;
+                r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
+        } else {
+                if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+                                                    AMDGPU_XCP_FL_NONE) ==
+                    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+                        r = amdgpu_xcp_switch_partition_mode(
+                                adev->xcp_mgr, amdgpu_user_partt_mode);
+        }
 
         if (r)
                 return r;
 
-        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
         for (i = 0; i < num_xcc; i++) {
                 r = gfx_v9_4_3_xcc_cp_resume(adev, i);
                 if (r)
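
Taken together, the cp_resume() change splits partition setup by function type: a PF that finds an unknown mode requests a switch to the user-selected mode, while a VF (which cannot switch) derives the layout it was given and only initializes partition nodes. A compressed standalone sketch of that decision, with placeholder helpers standing in for the amdgpu_xcp_* calls shown in the diff:

#include <stdbool.h>
#include <stdio.h>

#define MODE_UNKNOWN    (-1)
#define ERR_INVAL       (-22)

/* Stand-in for the mode-switch request a PF would make. */
static int switch_partition_mode(int requested_mode)
{
        printf("PF: switching to mode %d\n", requested_mode);
        return 0;
}

/* Stand-in for initializing the VF's partition nodes. */
static int init_partitions(int num_xcp, int mode)
{
        printf("VF: init %d partition(s) for mode %d\n", num_xcp, mode);
        return 0;
}

/*
 * Sketch of the branch added to gfx_v9_4_3_cp_resume(): a PF with an
 * unknown mode asks for a switch to the user-requested mode, while a VF
 * fails on an unknown mode and otherwise derives the partition count
 * and initializes the nodes it was given.
 */
static int cp_resume_partition_setup(bool is_vf, int queried_mode,
                                     int user_mode, int num_xcc,
                                     int num_xcc_per_xcp)
{
        if (!is_vf) {
                if (queried_mode == MODE_UNKNOWN)
                        return switch_partition_mode(user_mode);
                return 0;
        }

        if (queried_mode == MODE_UNKNOWN || num_xcc_per_xcp <= 0)
                return ERR_INVAL;

        return init_partitions(num_xcc / num_xcc_per_xcp, queried_mode);
}

int main(void)
{
        /* VF example: 8 XCCs, 4 per partition, mode already fixed by the host. */
        return cp_resume_partition_setup(true, 2, 0, 8, 4);
}
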