Commit 329d8d3b authored by Laurent Pinchart, committed by Joerg Roedel

iommu/omap-iovmm: support non page-aligned buffers in iommu_vmap

omap_iovmm requires page-aligned buffers, which causes omap3isp failures
whenever the buffer passed from userspace is not page-aligned.

Remove this limitation by rounding the address of the first page entry
down to a page boundary and adding the offset back to the returned
device address.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
[ohad@wizery.com: rebased, but tested only with aligned buffers]
[ohad@wizery.com: slightly edited the commit log]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 024ae884
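
The heart of the patch is a small piece of address arithmetic, sketched below outside the diff context. This is an illustrative rewrite, not code from the patch: map_unaligned_entry() and map_range() are hypothetical stand-ins for the real mapping path, and only the first scatterlist entry of a table may carry a non-zero offset.

/* Illustrative sketch of the alignment arithmetic (not part of the patch).
 * The buffer may start mid-page, so mapping begins at the page boundary
 * below it and the mapped length grows by the same amount. */
static u32 map_unaligned_entry(struct scatterlist *sg, u32 da)
{
        u32 pa = sg_phys(sg) - sg->offset;      /* round down to page start */
        size_t bytes = sg->length + sg->offset; /* widen to cover the offset */

        map_range(da, pa, bytes);   /* hypothetical IOMMU mapping call */
        return da + sg->offset;     /* device address the caller sees */
}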
@@ -27,6 +27,15 @@

 static struct kmem_cache *iovm_area_cachep;

+/* return the offset of the first scatterlist entry in a sg table */
+static unsigned int sgtable_offset(const struct sg_table *sgt)
+{
+        if (!sgt || !sgt->nents)
+                return 0;
+
+        return sgt->sgl->offset;
+}
+
 /* return total bytes of sg buffers */
 static size_t sgtable_len(const struct sg_table *sgt)
 {
@@ -39,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                 size_t bytes;

-                bytes = sg->length;
+                bytes = sg->length + sg->offset;

                 if (!iopgsz_ok(bytes)) {
-                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
-                               __func__, i, bytes);
+                        pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
+                               __func__, i, bytes, sg->offset);
+                        return 0;
+                }
+
+                if (i && sg->offset) {
+                        pr_err("%s: sg[%d] offset not allowed in internal "
+                                        "entries\n", __func__, i);
                         return 0;
                 }
@@ -164,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt)
                 u32 pa;
                 int err;

-                pa = sg_phys(sg);
-                bytes = sg->length;
+                pa = sg_phys(sg) - sg->offset;
+                bytes = sg->length + sg->offset;

                 BUG_ON(bytes != PAGE_SIZE);
@@ -405,8 +420,8 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                 u32 pa;
                 size_t bytes;

-                pa = sg_phys(sg);
-                bytes = sg->length;
+                pa = sg_phys(sg) - sg->offset;
+                bytes = sg->length + sg->offset;

                 flags &= ~IOVMF_PGSZ_MASK;
@@ -432,7 +447,7 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
         for_each_sg(sgt->sgl, sg, i, j) {
                 size_t bytes;

-                bytes = sg->length;
+                bytes = sg->length + sg->offset;
                 order = get_order(bytes);

                 /* ignore failures.. we're already handling one */
@@ -461,7 +476,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
                 size_t bytes;
                 int order;

-                bytes = sg->length;
+                bytes = sg->length + sg->offset;
                 order = get_order(bytes);

                 err = iommu_unmap(domain, start, order);
@@ -600,7 +615,7 @@ u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
         if (IS_ERR_VALUE(da))
                 vunmap_sg(va);

-        return da;
+        return da + sgtable_offset(sgt);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_vmap);
@@ -620,6 +635,7 @@ omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
          * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
          * Just returns 'sgt' to the caller to free
          */
+        da &= PAGE_MASK;
         sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                                 IOVMF_DISCONT | IOVMF_MMIO);
         if (!sgt)
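
Why omap_iommu_vunmap() now masks the device address: the address returned by omap_iommu_vmap() carries the first entry's in-page offset, so it no longer matches the page-aligned start of the mapped area. A minimal sketch of the round-trip from a caller's point of view (illustrative only; 'domain', 'obj', 'sgt' and 'flags' stand in for the caller's real values):

/* Illustrative caller, showing the offset round-trip (not from the patch). */
u32 da = omap_iommu_vmap(domain, obj, 0, sgt, flags);
/* 'da' now includes sgt->sgl->offset: it points at the first byte of the
 * buffer, not at the page boundary where the mapping actually starts. */
omap_iommu_vunmap(domain, obj, da);
/* vunmap applies da &= PAGE_MASK first, recovering the aligned area base. */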