Commit a5fb8b91 authored by Jordan Crouse, committed by Rob Clark

drm/msm/a6xx: Use the DMA API for GMU memory objects

The GMU has very few memory allocations and uses a flat memory space, so
there is no good reason to go out of our way to bypass the DMA APIs, which
were basically designed for this exact scenario.

v7: Check return value of dma_set_mask_and_coherent
v4: Use dma_alloc_wc()
v3: Set the dma mask correctly and use dma_addr_t for the iova type
v2: Pass force_dma false to of_dma_configure to require that the DMA
region be set up and return error from of_dma_configure to fail probe.
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent a168b512
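
As a reading aid for the diff below, here is a minimal, hypothetical sketch of the pattern the commit moves to, not code from the patch itself: configure DMA for the device from its DT node with force_dma disabled, set a 31-bit mask so coherent allocations stay inside the GMU's uncached window, and let dma_alloc_wc() perform the page allocation and IOMMU mapping that was previously done by hand. The helper name example_gmu_alloc() is invented for illustration.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/of_device.h>

/* Hypothetical helper, for illustration only; not part of the patch. */
static void *example_gmu_alloc(struct device *dev, struct device_node *node,
			       size_t size, dma_addr_t *iova)
{
	void *virt;
	int ret;

	/* force_dma = false: the DT must describe the DMA region */
	ret = of_dma_configure(dev, node, false);
	if (ret)
		return ERR_PTR(ret);

	/* A 31-bit mask keeps buffers inside the GMU's uncached address range */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (ret)
		return ERR_PTR(ret);

	/* One call replaces the old alloc_page() + iommu_map() + vmap() sequence */
	virt = dma_alloc_wc(dev, PAGE_ALIGN(size), iova, GFP_KERNEL);
	return virt ? virt : ERR_PTR(-ENOMEM);
}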
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 {
-	int count, i;
-	u64 iova;
-
 	if (IS_ERR_OR_NULL(bo))
 		return;
 
-	count = bo->size >> PAGE_SHIFT;
-	iova = bo->iova;
-
-	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
-		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
-		__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
+	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
 
 	kfree(bo);
 }
@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 		size_t size)
 {
 	struct a6xx_gmu_bo *bo;
-	int ret, count, i;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 
 	bo->size = PAGE_ALIGN(size);
 
-	count = bo->size >> PAGE_SHIFT;
+	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
 
-	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
-	if (!bo->pages) {
+	if (!bo->virt) {
 		kfree(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	for (i = 0; i < count; i++) {
-		bo->pages[i] = alloc_page(GFP_KERNEL);
-		if (!bo->pages[i])
-			goto err;
-	}
-
-	bo->iova = gmu->uncached_iova_base;
-
-	for (i = 0; i < count; i++) {
-		ret = iommu_map(gmu->domain,
-			bo->iova + (PAGE_SIZE * i),
-			page_to_phys(bo->pages[i]), PAGE_SIZE,
-			IOMMU_READ | IOMMU_WRITE);
-
-		if (ret) {
-			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
-			for (i = i - 1 ; i >= 0; i--)
-				iommu_unmap(gmu->domain,
-					bo->iova + (PAGE_SIZE * i),
-					PAGE_SIZE);
-
-			goto err;
-		}
-	}
-
-	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
-		pgprot_writecombine(PAGE_KERNEL));
-	if (!bo->virt)
-		goto err;
-
-	/* Align future IOVA addresses on 1MB boundaries */
-	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
 	return bo;
-
-err:
-	for (i = 0; i < count; i++) {
-		if (bo->pages[i])
-			__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
-	kfree(bo);
-
-	return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
-	int ret;
-
-	/*
-	 * The GMU address space is hardcoded to treat the range
-	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
-	 * between the GMU and the CPU will live in this space
-	 */
-	gmu->uncached_iova_base = 0x60000000;
-
-	gmu->domain = iommu_domain_alloc(&platform_bus_type);
-	if (!gmu->domain)
-		return -ENODEV;
-
-	ret = iommu_attach_device(gmu->domain, gmu->dev);
-
-	if (ret) {
-		iommu_domain_free(gmu->domain);
-		gmu->domain = NULL;
-	}
-
-	return ret;
 }
 
 /* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	iommu_detach_device(gmu->domain, gmu->dev);
-
-	iommu_domain_free(gmu->domain);
-
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	gmu->dev = &pdev->dev;
 
-	of_dma_configure(gmu->dev, node, true);
+	/* Pass force_dma false to require the DT to set the dma region */
+	ret = of_dma_configure(gmu->dev, node, false);
+	if (ret)
+		return ret;
+
+	/* Set the mask after the of_dma_configure() */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+	if (ret)
+		return ret;
 
 	/* Fow now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	/* Set up the IOMMU context bank */
-	ret = a6xx_gmu_memory_probe(gmu);
-	if (ret)
-		goto err_put_device;
-
 	/* Allocate memory for for the HFI queues */
 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
 	if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 err_memory:
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	if (gmu->domain) {
-		iommu_detach_device(gmu->domain, gmu->dev);
-		iommu_domain_free(gmu->domain);
-	}
-
 	ret = -ENODEV;
 
 err_put_device:
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -12,8 +12,7 @@
 struct a6xx_gmu_bo {
 	void *virt;
 	size_t size;
-	u64 iova;
-	struct page **pages;
+	dma_addr_t iova;
 };
 
 /*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
 	int hfi_irq;
 	int gmu_irq;
 
-	struct iommu_domain *domain;
-	u64 uncached_iova_base;
-
 	struct device *gxpd;
 
 	int idle_level;