Commit 3740b081 authored by Robin Murphy's avatar Robin Murphy Committed by Steven Price

drm/panfrost: Update io-pgtable API

Convert to io-pgtable's bulk {map,unmap}_pages() APIs, to help the old
single-page interfaces eventually go away. Unmapping heap BOs still
wants to be done a page at a time, but everything else can get the full
benefit of the more efficient interface.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Alyssa Rosenzweig <alyssa@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/daef7f8c134d989c55636a5790d8c0fcaca1bae3.1661205687.git.robin.murphy@arm.com
parent e6545831
...@@ -248,11 +248,15 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev) ...@@ -248,11 +248,15 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
mmu_write(pfdev, MMU_INT_MASK, ~0); mmu_write(pfdev, MMU_INT_MASK, ~0);
} }
/*
 * Choose the largest IOMMU page size usable at @addr for a run of @size
 * bytes, and report through @count how many pages of that size fit before
 * either the alignment or the remaining length forces a different size.
 */
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/* Bytes until the next 2MB boundary (0 when already aligned). */
	size_t off = -addr % SZ_2M;

	if (!off && size >= SZ_2M) {
		/* 2MB-aligned with at least one whole block: use 2MB pages. */
		*count = size / SZ_2M;
		return SZ_2M;
	}

	/*
	 * Fall back to 4K pages, covering up to the next 2MB boundary —
	 * or the whole remaining run when it ends before the boundary.
	 */
	*count = min_not_zero(off, size) / SZ_4K;
	return SZ_4K;
}
...@@ -287,12 +291,16 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, ...@@ -287,12 +291,16 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len); dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
while (len) { while (len) {
size_t pgsize = get_pgsize(iova | paddr, len); size_t pgcount, mapped = 0;
size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
iova += pgsize; ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
paddr += pgsize; GFP_KERNEL, &mapped);
len -= pgsize; /* Don't get stuck if things have gone wrong */
mapped = max(mapped, pgsize);
iova += mapped;
paddr += mapped;
len -= mapped;
} }
} }
...@@ -344,15 +352,17 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) ...@@ -344,15 +352,17 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
mapping->mmu->as, iova, len); mapping->mmu->as, iova, len);
while (unmapped_len < len) { while (unmapped_len < len) {
size_t unmapped_page; size_t unmapped_page, pgcount;
size_t pgsize = get_pgsize(iova, len - unmapped_len); size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
if (ops->iova_to_phys(ops, iova)) { if (bo->is_heap)
unmapped_page = ops->unmap(ops, iova, pgsize, NULL); pgcount = 1;
WARN_ON(unmapped_page != pgsize); if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
WARN_ON(unmapped_page != pgsize * pgcount);
} }
iova += pgsize; iova += pgsize * pgcount;
unmapped_len += pgsize; unmapped_len += pgsize * pgcount;
} }
panfrost_mmu_flush_range(pfdev, mapping->mmu, panfrost_mmu_flush_range(pfdev, mapping->mmu,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment