Commit c1299461 authored by Wenhui Sheng, committed by Alex Deucher

drm/amdgpu: request init data in virt detection

Move the GPU_INIT_DATA request into the virt detection function, so
that the request for full GPU access can be inserted between the init
data request and IP block setup.
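As a rough illustration of the ordering this enables, here is a minimal sketch of an early-probe path. The wrapper function below and its call site are hypothetical; only amdgpu_detect_virtualization(), the *_set_virt_ops() helpers and amdgpu_virt_request_init_data() are touched by this patch, and amdgpu_virt_request_full_gpu() is the driver's existing full-access request.

/* Sketch only, not part of this patch: a hypothetical early-probe helper
 * showing where the full-access request can now be placed. */
#include "amdgpu.h"
#include "amdgpu_virt.h"
#include "nv.h"

static int example_sriov_early_probe(struct amdgpu_device *adev)
{
	int r;

	/* Detect SR-IOV, install adev->virt.ops and (on Navi-class parts)
	 * send the GPU_INIT_DATA request to the host. */
	amdgpu_detect_virtualization(adev);

	/* The full GPU access request can now sit here: after the
	 * init-data request, but before any IP blocks are set up. */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	/* Only then select the IP blocks (per-ASIC; nv shown as example). */
	return nv_set_ip_blocks(adev);
}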
Signed-off-by: Wenhui Sheng <Wenhui.Sheng@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 81659b20
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -27,6 +27,9 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
+#include "vi.h"
+#include "soc15.h"
+#include "nv.h"
 
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
@@ -513,6 +516,31 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
 		if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
 			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 	}
+
+	/* we have the ability to check now */
+	if (amdgpu_sriov_vf(adev)) {
+		switch (adev->asic_type) {
+		case CHIP_TONGA:
+		case CHIP_FIJI:
+			vi_set_virt_ops(adev);
+			break;
+		case CHIP_VEGA10:
+		case CHIP_VEGA20:
+		case CHIP_ARCTURUS:
+			soc15_set_virt_ops(adev);
+			break;
+		case CHIP_NAVI10:
+		case CHIP_NAVI12:
+		case CHIP_SIENNA_CICHLID:
+			nv_set_virt_ops(adev);
+			/* try send GPU_INIT_DATA request to host */
+			amdgpu_virt_request_init_data(adev);
+			break;
+		default: /* other chip doesn't support SRIOV */
+			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+			break;
+		}
+	}
 }
 
 static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/nv.c
@@ -420,6 +420,11 @@ static int nv_reg_base_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+void nv_set_virt_ops(struct amdgpu_device *adev)
+{
+	adev->virt.ops = &xgpu_nv_virt_ops;
+}
+
 int nv_set_ip_blocks(struct amdgpu_device *adev)
 {
 	int r;
@@ -427,12 +432,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 	adev->nbio.funcs = &nbio_v2_3_funcs;
 	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-	if (amdgpu_sriov_vf(adev)) {
-		adev->virt.ops = &xgpu_nv_virt_ops;
-		/* try send GPU_INIT_DATA request to host */
-		amdgpu_virt_request_init_data(adev);
-	}
-
 	/* Set IP register base before any HW register access */
 	r = nv_reg_base_init(adev);
 	if (r)
drivers/gpu/drm/amd/amdgpu/nv.h
@@ -28,6 +28,7 @@
 void nv_grbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
+void nv_set_virt_ops(struct amdgpu_device *adev);
 int nv_set_ip_blocks(struct amdgpu_device *adev);
 int navi10_reg_base_init(struct amdgpu_device *adev);
 int navi14_reg_base_init(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -669,6 +669,11 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 	return adev->nbio.funcs->get_rev_id(adev);
 }
 
+void soc15_set_virt_ops(struct amdgpu_device *adev)
+{
+	adev->virt.ops = &xgpu_ai_virt_ops;
+}
+
 int soc15_set_ip_blocks(struct amdgpu_device *adev)
 {
 	int r;
@@ -722,9 +727,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	adev->rev_id = soc15_get_rev_id(adev);
 
-	if (amdgpu_sriov_vf(adev))
-		adev->virt.ops = &xgpu_ai_virt_ops;
-
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -90,6 +90,7 @@ struct soc15_ras_field_entry {
 void soc15_grbm_select(struct amdgpu_device *adev,
 		       u32 me, u32 pipe, u32 queue, u32 vmid);
+void soc15_set_virt_ops(struct amdgpu_device *adev);
 int soc15_set_ip_blocks(struct amdgpu_device *adev);
 void soc15_program_register_sequence(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1705,11 +1705,13 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
 	.funcs = &vi_common_ip_funcs,
 };
 
-int vi_set_ip_blocks(struct amdgpu_device *adev)
+void vi_set_virt_ops(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev))
-		adev->virt.ops = &xgpu_vi_virt_ops;
+	adev->virt.ops = &xgpu_vi_virt_ops;
+}
 
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 		/* topaz has no DCE, UVD, VCE */
drivers/gpu/drm/amd/amdgpu/vi.h
@@ -28,6 +28,7 @@
 void vi_srbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
+void vi_set_virt_ops(struct amdgpu_device *adev);
 int vi_set_ip_blocks(struct amdgpu_device *adev);
 void legacy_doorbell_index_init(struct amdgpu_device *adev);