Commit 6127896f authored by Huang Rui, committed by Alex Deucher

drm/amdkfd: implement the dGPU fallback path for apu (v6)

We still have a few IOMMU issues that need to be addressed, so force Raven
down the "dgpu" path for the moment.

This adds a fallback path that bypasses the IOMMU when IOMMU v2 is disabled
or the ACPI CRAT table is not correct.
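For orientation, here is a minimal sketch of the decision this fallback implements. It is not driver code; every name in it is made up for illustration:

#include <stdbool.h>

/* Minimal sketch, assuming three yes/no inputs: whether IOMMU v2 support
 * is compiled into the kernel, whether the ACPI CRAT is usable (present,
 * sane, and not ignored via the module parameter), and whether an APU
 * topology node is still unclaimed.  Only when all three hold does the
 * APU keep the IOMMU v2 path; otherwise it falls back to the "dgpu" path.
 */
static bool apu_keeps_iommu_v2_path(bool iommu_v2_compiled_in,
				    bool crat_usable,
				    bool apu_node_unclaimed)
{
	if (!iommu_v2_compiled_in)
		return false;		/* no IOMMU v2 in this kernel build */
	if (!crat_usable)
		return false;		/* broken or ignored CRAT: treat as dGPU */
	return apu_node_unclaimed;	/* the APU node must still be free */
}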

v2: Use the ignore_crat parameter to decide whether to go through IOMMU v2.
v3: Align with the existing Thunk; don't change the behavior of Raven, only
    Renoir will use the "dgpu" path by default.
v4: Don't update the global ignore_crat in the driver, and revise the
    fallback function for the case where the CRAT is broken.
v5: Refine the case where the ACPI CRAT is good but there is no IOMMU
    support, and rename the title.
v6: Fix the case where a dGPU is initialized first by only modifying the
    value reported in node_show().
Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent bfcc0c16
@@ -680,11 +680,14 @@ MODULE_PARM_DESC(debug_largebar,
  * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
  * table to get information about AMD APUs. This option can serve as a workaround on
  * systems with a broken CRAT table.
+ *
+ * Default is auto (according to asic type, iommu_v2, and crat table, to decide
+ * whether to use CRAT)
  */
 int ignore_crat;
 module_param(ignore_crat, int, 0444);
 MODULE_PARM_DESC(ignore_crat,
-	"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+	"Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
 
 /**
  * DOC: halt_if_hws_hang (int)
...
@@ -1254,7 +1254,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
 		return true;
 	}
 
-	if (dev->device_info->needs_iommu_device)
+	if (dev->use_iommu_v2)
 		return false;
 
 	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
...
@@ -742,6 +742,22 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 	return 0;
 }
 
+static bool kfd_ignore_crat(void)
+{
+	bool ret;
+
+	if (ignore_crat)
+		return true;
+
+#ifndef KFD_SUPPORT_IOMMU_V2
+	ret = true;
+#else
+	ret = false;
+#endif
+
+	return ret;
+}
+
 /*
  * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
  * copies CRAT from ACPI (if available).
@@ -776,7 +792,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 		return -EINVAL;
 	}
 
-	if (ignore_crat) {
+	if (kfd_ignore_crat()) {
 		pr_info("CRAT table disabled by module option\n");
 		return -ENODATA;
 	}
...
@@ -116,6 +116,7 @@ static const struct kfd_device_info carrizo_device_info = {
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 2,
 };
+#endif
 
 static const struct kfd_device_info raven_device_info = {
 	.asic_family = CHIP_RAVEN,
@@ -134,7 +135,6 @@ static const struct kfd_device_info raven_device_info = {
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 2,
 };
-#endif
 
 static const struct kfd_device_info hawaii_device_info = {
 	.asic_family = CHIP_HAWAII,
@@ -738,6 +738,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto gws_error;
 	}
 
+	/* If the CRAT is broken, IOMMU support will not be enabled */
+	kfd_double_confirm_iommu_support(kfd);
+
 	if (kfd_iommu_device_init(kfd)) {
 		dev_err(kfd_device, "Error initializing iommuv2\n");
 		goto device_iommu_error;
...
@@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
 				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
 				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
 
 		if (amdgpu_noretry &&
-		    !dqm->dev->device_info->needs_iommu_device)
+		    !dqm->dev->use_iommu_v2)
 			qpd->sh_mem_config |=
 				1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
...
@@ -321,7 +321,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
 	pdd->lds_base = MAKE_LDS_APP_BASE_VI();
 	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
 
-	if (!pdd->dev->device_info->needs_iommu_device) {
+	if (!pdd->dev->use_iommu_v2) {
 		/* dGPUs: SVM aperture starting at 0
 		 * with small reserved space for kernel.
 		 * Set them to CANONICAL addresses.
@@ -425,7 +425,7 @@ int kfd_init_apertures(struct kfd_process *process)
 				return -EINVAL;
 			}
 
-			if (!dev->device_info->needs_iommu_device) {
+			if (!dev->use_iommu_v2) {
 				/* dGPUs: the reserved space for kernel
 				 * before SVM
 				 */
...
@@ -41,7 +41,7 @@ int kfd_iommu_check_device(struct kfd_dev *kfd)
 	struct amd_iommu_device_info iommu_info;
 	int err;
 
-	if (!kfd->device_info->needs_iommu_device)
+	if (!kfd->use_iommu_v2)
 		return -ENODEV;
 
 	iommu_info.flags = 0;
@@ -63,7 +63,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
 	unsigned int pasid_limit;
 	int err;
 
-	if (!kfd->device_info->needs_iommu_device)
+	if (!kfd->use_iommu_v2)
 		return 0;
 
 	iommu_info.flags = 0;
@@ -109,7 +109,7 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
 	struct kfd_process *p = pdd->process;
 	int err;
 
-	if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
+	if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND)
 		return 0;
 
 	if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
@@ -284,7 +284,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
  */
 void kfd_iommu_suspend(struct kfd_dev *kfd)
 {
-	if (!kfd->device_info->needs_iommu_device)
+	if (!kfd->use_iommu_v2)
 		return;
 
 	kfd_unbind_processes_from_device(kfd);
@@ -304,7 +304,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
 	unsigned int pasid_limit;
 	int err;
 
-	if (!kfd->device_info->needs_iommu_device)
+	if (!kfd->use_iommu_v2)
 		return 0;
 
 	pasid_limit = kfd_get_pasid_limit();
...
@@ -297,6 +297,9 @@ struct kfd_dev {
 	bool pci_atomic_requested;
 
+	/* Use IOMMU v2 flag */
+	bool use_iommu_v2;
+
 	/* SRAM ECC flag */
 	atomic_t sram_ecc_flag;
@@ -892,6 +895,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
 struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
 int kfd_numa_node_to_apic_id(int numa_node_id);
+void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
 
 /* Interrupts */
 int kfd_interrupt_init(struct kfd_dev *dev);
...
@@ -446,7 +446,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 	sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
 			      dev->node_props.cpu_cores_count);
 	sysfs_show_32bit_prop(buffer, offs, "simd_count",
-			      dev->node_props.simd_count);
+			      dev->gpu ? dev->node_props.simd_count : 0);
 	sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
 			      dev->node_props.mem_banks_count);
 	sysfs_show_32bit_prop(buffer, offs, "caches_count",
@@ -1139,7 +1139,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 		/* Discrete GPUs need their own topology device list
 		 * entries. Don't assign them to CPU/APU nodes.
 		 */
-		if (!gpu->device_info->needs_iommu_device &&
+		if (!gpu->use_iommu_v2 &&
 		    dev->node_props.cpu_cores_count)
 			continue;
@@ -1388,7 +1388,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 * Overwrite ATS capability according to needs_iommu_device to fix
 	 * potential missing corresponding bit in CRAT of BIOS.
 	 */
-	if (dev->gpu->device_info->needs_iommu_device)
+	if (dev->gpu->use_iommu_v2)
 		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
 	else
 		dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
@@ -1515,6 +1515,29 @@ int kfd_numa_node_to_apic_id(int numa_node_id)
 	return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
 }
 
+void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
+{
+	struct kfd_topology_device *dev;
+
+	gpu->use_iommu_v2 = false;
+
+	if (!gpu->device_info->needs_iommu_device)
+		return;
+
+	down_read(&topology_lock);
+
+	/* Only use IOMMUv2 if there is an APU topology node with no GPU
+	 * assigned yet. This GPU will be assigned to it.
+	 */
+	list_for_each_entry(dev, &topology_device_list, list)
+		if (dev->node_props.cpu_cores_count &&
+		    dev->node_props.simd_count &&
+		    !dev->gpu)
+			gpu->use_iommu_v2 = true;
+
+	up_read(&topology_lock);
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
...
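Read together, the hunks above amount to the following flow at device initialization. The sketch below is a standalone toy (struct fake_kfd_dev and both functions are invented for illustration), not the driver's kgd2kfd_device_init():

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the bits of struct kfd_dev this patch touches. */
struct fake_kfd_dev {
	bool needs_iommu_device;	/* ASIC could use IOMMU v2 (an APU) */
	bool apu_node_unclaimed;	/* CRAT yielded an unclaimed APU node */
	bool use_iommu_v2;		/* mirrors the new flag in kfd_priv.h */
};

/* Mirrors the ordering added to kgd2kfd_device_init(): first double-confirm
 * IOMMU v2 support against the topology, then let IOMMU init (and the other
 * kfd_iommu_*() helpers) return early whenever the flag ended up false.
 */
static void fake_device_init(struct fake_kfd_dev *kfd)
{
	kfd->use_iommu_v2 = kfd->needs_iommu_device && kfd->apu_node_unclaimed;

	if (!kfd->use_iommu_v2) {
		printf("dGPU fallback path: own topology node, no ATS bit\n");
		return;
	}
	printf("APU path: IOMMU v2 enabled, attached to the CPU node\n");
}

int main(void)
{
	/* e.g. an APU whose CRAT was broken, so no APU node was created */
	struct fake_kfd_dev apu = { .needs_iommu_device = true,
				    .apu_node_unclaimed = false };

	fake_device_init(&apu);		/* prints the dGPU fallback line */
	return 0;
}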