Commit 244511f3 authored by Christoph Hellwig, committed by Alex Deucher

drm/amdgpu: simplify and cleanup setting the dma mask

Use dma_set_mask_and_coherent to set both masks in one go, and remove
the no longer required fallback, as the kernel now always accepts
larger than required DMA masks.  Fail the driver probe if we can't
set the DMA mask, as that means the system can only support a
smaller mask.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 03127c58
...@@ -789,7 +789,6 @@ struct amdgpu_device { ...@@ -789,7 +789,6 @@ struct amdgpu_device {
int usec_timeout; int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs; const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown; bool shutdown;
bool need_dma32;
bool need_swiotlb; bool need_swiotlb;
bool accel_working; bool accel_working;
struct notifier_block acpi_nb; struct notifier_block acpi_nb;
......
...@@ -592,7 +592,6 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -592,7 +592,6 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v10_0_sw_init(void *handle) static int gmc_v10_0_sw_init(void *handle)
{ {
int r; int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v2_0_init(adev); gfxhub_v2_0_init(adev);
...@@ -642,26 +641,10 @@ static int gmc_v10_0_sw_init(void *handle) ...@@ -642,26 +641,10 @@ static int gmc_v10_0_sw_init(void *handle)
else else
adev->gmc.stolen_size = 9 * 1024 *1024; adev->gmc.stolen_size = 9 * 1024 *1024;
/* r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
* Set DMA mask + need_dma32 flags.
* PCIE - can handle 44-bits.
* IGP - can handle 44-bits
* PCI - dma32 for legacy pci gart, 44 bits on navi10
*/
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 44;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) { if (r) {
adev->need_dma32 = true;
dma_bits = 32;
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
} return r;
r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
} }
r = gmc_v10_0_mc_init(adev); r = gmc_v10_0_mc_init(adev);
......
...@@ -839,7 +839,6 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -839,7 +839,6 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v6_0_sw_init(void *handle) static int gmc_v6_0_sw_init(void *handle)
{ {
int r; int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
...@@ -862,20 +861,12 @@ static int gmc_v6_0_sw_init(void *handle) ...@@ -862,20 +861,12 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.mc_mask = 0xffffffffffULL; adev->gmc.mc_mask = 0xffffffffffULL;
adev->need_dma32 = false; r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
dma_bits = adev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) { if (r) {
adev->need_dma32 = true;
dma_bits = 32;
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
return r;
} }
r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); adev->need_swiotlb = drm_need_swiotlb(44);
if (r) {
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
}
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v6_0_init_microcode(adev); r = gmc_v6_0_init_microcode(adev);
if (r) { if (r) {
......
...@@ -959,7 +959,6 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -959,7 +959,6 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v7_0_sw_init(void *handle) static int gmc_v7_0_sw_init(void *handle)
{ {
int r; int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
...@@ -990,25 +989,12 @@ static int gmc_v7_0_sw_init(void *handle) ...@@ -990,25 +989,12 @@ static int gmc_v7_0_sw_init(void *handle)
*/ */
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
/* set DMA mask + need_dma32 flags. r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
* PCI - dma32 for legacy pci gart, 40 bits on newer asics
*/
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) { if (r) {
adev->need_dma32 = true;
dma_bits = 32;
pr_warn("amdgpu: No suitable DMA available\n"); pr_warn("amdgpu: No suitable DMA available\n");
return r;
} }
r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); adev->need_swiotlb = drm_need_swiotlb(40);
if (r) {
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v7_0_init_microcode(adev); r = gmc_v7_0_init_microcode(adev);
if (r) { if (r) {
......
...@@ -1079,7 +1079,6 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -1079,7 +1079,6 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v8_0_sw_init(void *handle) static int gmc_v8_0_sw_init(void *handle)
{ {
int r; int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) { if (adev->flags & AMD_IS_APU) {
...@@ -1116,25 +1115,12 @@ static int gmc_v8_0_sw_init(void *handle) ...@@ -1116,25 +1115,12 @@ static int gmc_v8_0_sw_init(void *handle)
*/ */
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
/* set DMA mask + need_dma32 flags. r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
* PCI - dma32 for legacy pci gart, 40 bits on newer asics
*/
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) { if (r) {
adev->need_dma32 = true;
dma_bits = 32;
pr_warn("amdgpu: No suitable DMA available\n"); pr_warn("amdgpu: No suitable DMA available\n");
return r;
} }
r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); adev->need_swiotlb = drm_need_swiotlb(40);
if (r) {
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v8_0_init_microcode(adev); r = gmc_v8_0_init_microcode(adev);
if (r) { if (r) {
......
...@@ -1090,7 +1090,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) ...@@ -1090,7 +1090,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle) static int gmc_v9_0_sw_init(void *handle)
{ {
int r; int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v1_0_init(adev); gfxhub_v1_0_init(adev);
...@@ -1174,25 +1173,12 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -1174,25 +1173,12 @@ static int gmc_v9_0_sw_init(void *handle)
*/ */
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
/* set DMA mask + need_dma32 flags. r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
* PCIE - can handle 44-bits.
* IGP - can handle 44-bits
* PCI - dma32 for legacy pci gart, 44 bits on vega10
*/
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 44;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) { if (r) {
adev->need_dma32 = true;
dma_bits = 32;
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
return r;
} }
r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); adev->need_swiotlb = drm_need_swiotlb(44);
if (r) {
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
}
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
if (adev->gmc.xgmi.supported) { if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev); r = gfxhub_v1_1_get_xgmi_info(adev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment