Commit aa53bc2e authored by Monk Liu, committed by Alex Deucher

drm/amdgpu: introduce new request and its function

1) modify xgpu_nv_send_access_requests to support the
new idh request

2) introduce a new function, req_gpu_init_data(), which
notifies the host to prepare the vbios/ip-discovery/pfvf
exchange data
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c27cbdd2
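Taken together, the patch gives the guest VF a three-step early-init handshake: write IDH_REQ_GPU_INIT_DATA into the mailbox, poll for the host's IDH_REQ_GPU_INIT_DATA_READY ack, then read the handshake version back from the receive buffer. A minimal sketch of a hypothetical call site follows; the patch itself only adds the helper, so the surrounding function and its placement are assumptions for illustration:

/* hypothetical early-init call site, not part of this patch */
static void example_early_init_handshake(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev))
		return;

	/* ask the host to stage vbios/ip-discovery/pfvf exchange data */
	amdgpu_virt_request_init_data(adev);

	if (adev->virt.req_init_data_ver >= 1) {
		/* host acked: the staged init data can be consumed */
	}
}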
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -153,6 +153,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
 	return 0;
 }
 
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (virt->ops && virt->ops->req_init_data)
+		virt->ops->req_init_data(adev);
+
+	if (adev->virt.req_init_data_ver > 0)
+		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+	else
+		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
 /**
  * amdgpu_virt_wait_reset() - wait for reset gpu completed
  * @amdgpu: amdgpu device.
...
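Because req_init_data_ver stays 0 unless the host acks the request, the field doubles as a support flag as well as a version number. A guard built on that convention might look like the sketch below; the helper name is invented for illustration, the kernel code reads the field directly:

/* hypothetical convenience guard, not part of this patch */
static inline bool example_init_data_supported(struct amdgpu_device *adev)
{
	/* 0 = host never acked IDH_REQ_GPU_INIT_DATA, >= 1 = handshake version */
	return adev->virt.req_init_data_ver > 0;
}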
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -59,6 +59,7 @@ struct amdgpu_vf_error_buffer {
 struct amdgpu_virt_ops {
 	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+	int (*req_init_data)(struct amdgpu_device *adev);
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -263,6 +264,7 @@ struct amdgpu_virt {
 	struct amdgpu_virt_fw_reserve fw_reserve;
 	uint32_t gim_feature;
 	uint32_t reg_access_mode;
+	int req_init_data_ver;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -303,6 +305,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
...
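The new callback is dispatched through the amdgpu_virt_ops table, and amdgpu_virt_request_init_data() tolerates backends that leave it NULL. The standalone sketch below models just that dispatch pattern in plain userspace C; the struct and function names mirror the kernel ones, but nothing here is kernel code:

#include <stdio.h>

struct example_dev;

struct example_virt_ops {
	int (*req_init_data)(struct example_dev *d); /* optional; may be NULL */
};

struct example_dev {
	const struct example_virt_ops *ops;
	int req_init_data_ver;
};

/* backend implementation: pretend the host acked with version 1 */
static int example_req_init_data(struct example_dev *d)
{
	d->req_init_data_ver = 1;
	return 0;
}

/* generic helper: call through the table only when the hook exists */
static void example_request_init_data(struct example_dev *d)
{
	if (d->ops && d->ops->req_init_data)
		d->ops->req_init_data(d);

	printf(d->req_init_data_ver > 0 ? "host supports handshake\n"
					: "host doesn't support handshake\n");
}

int main(void)
{
	static const struct example_virt_ops ops = {
		.req_init_data = example_req_init_data,
	};
	struct example_dev d = { .ops = &ops, .req_init_data_ver = 0 };

	example_request_init_data(&d);
	return 0;
}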
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -109,7 +109,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 		timeout -= 10;
 	} while (timeout > 1);
 
-	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
 	return -ETIME;
 }
@@ -163,18 +162,45 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
 					enum idh_request req)
 {
 	int r;
+	enum idh_event event = -1;
 
 	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
 
-	/* start to check msg if request is idh_req_gpu_init_access */
-	if (req == IDH_REQ_GPU_INIT_ACCESS ||
-		req == IDH_REQ_GPU_FINI_ACCESS ||
-		req == IDH_REQ_GPU_RESET_ACCESS) {
-		r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+	switch (req) {
+	case IDH_REQ_GPU_INIT_ACCESS:
+	case IDH_REQ_GPU_FINI_ACCESS:
+	case IDH_REQ_GPU_RESET_ACCESS:
+		event = IDH_READY_TO_ACCESS_GPU;
+		break;
+	case IDH_REQ_GPU_INIT_DATA:
+		event = IDH_REQ_GPU_INIT_DATA_READY;
+		break;
+	default:
+		break;
+	}
+
+	if (event != -1) {
+		r = xgpu_nv_poll_msg(adev, event);
 		if (r) {
-			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
-			return r;
+			if (req != IDH_REQ_GPU_INIT_DATA) {
+				pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+				return r;
+			}
+			else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+				adev->virt.req_init_data_ver = 0;
+		} else {
+			if (req == IDH_REQ_GPU_INIT_DATA)
+			{
+				adev->virt.req_init_data_ver =
+					RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+						mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW1));
+
+				/* assume V1 in case host doesn't set version number */
+				if (adev->virt.req_init_data_ver < 1)
+					adev->virt.req_init_data_ver = 1;
+			}
 		}
 
 		/* Retrieve checksum from mailbox2 */
 		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
 			adev->virt.fw_reserve.checksum_key =
@@ -212,6 +238,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
 	return r;
 }
 
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
 static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
 				   struct amdgpu_irq_src *source,
 				   struct amdgpu_iv_entry *entry)
@@ -377,6 +408,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
 	.req_full_gpu = xgpu_nv_request_full_gpu_access,
 	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
+	.req_init_data = xgpu_nv_request_init_data,
 	.reset_gpu = xgpu_nv_request_reset,
 	.wait_reset = NULL,
 	.trans_msg = xgpu_nv_mailbox_trans_msg,
...
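Worth noting in xgpu_nv_send_access_requests() above is the asymmetric error handling: a poll timeout is fatal for the access/reset requests, but for IDH_REQ_GPU_INIT_DATA it only means the host predates the handshake, so req_init_data_ver is recorded as 0 and the call still succeeds. A condensed standalone model of that policy, with enum and function names invented for the sketch:

/* condensed model of the timeout policy, not kernel code */
enum example_req { EXAMPLE_REQ_ACCESS, EXAMPLE_REQ_INIT_DATA };

int example_handle_poll(enum example_req req, int poll_err, int *init_data_ver)
{
	if (poll_err) {
		if (req != EXAMPLE_REQ_INIT_DATA)
			return poll_err;	/* fatal: access/reset must be acked */
		*init_data_ver = 0;		/* old host: no handshake, carry on */
		return 0;
	}
	if (req == EXAMPLE_REQ_INIT_DATA && *init_data_ver < 1)
		*init_data_ver = 1;		/* host acked: assume at least V1 */
	return 0;
}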
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#define __MXGPU_NV_H__ #define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 #define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 #define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
enum idh_request { enum idh_request {
......
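xgpu_nv_poll_msg() burns its budget in 10 ms steps, so dropping NV_MAILBOX_POLL_MSG_TIMEDOUT from 12000 to 6000 halves the worst-case wait to roughly 600 probes, about six seconds. The loop shape, sketched standalone; read_event() and sleep_ms() are stand-ins for the real mailbox register read and msleep():

#include <errno.h>

extern int read_event(void);	/* stand-in for the mailbox RCV register read */
extern void sleep_ms(int ms);	/* stand-in for msleep() */

int example_poll_msg(int expected_event)
{
	int timeout = 6000;	/* NV_MAILBOX_POLL_MSG_TIMEDOUT, in milliseconds */

	do {
		if (read_event() == expected_event)
			return 0;
		sleep_ms(10);	/* one probe every 10 ms -> at most ~600 probes */
		timeout -= 10;
	} while (timeout > 1);

	return -ETIME;		/* timed out; the caller decides if that is fatal */
}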