Commit 9a40401c authored by Jason Gunthorpe

lib/scatterlist: Do not limit max_segment to PAGE_ALIGNED values

The main intention of the max_segment argument to
__sg_alloc_table_from_pages() is to match the DMA layer segment size set
by dma_set_max_seg_size().

Restricting the input to be page aligned makes it impossible to just
connect the DMA layer to this API.
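For illustration, a minimal driver-side sketch (not part of this commit; dev,
sgt, pages, n_pages and size are assumed to already be in scope) of what the
lifted restriction permits: the DMA layer's limit is passed straight through,
even when the device set a value that is not a multiple of PAGE_SIZE, e.g.
via dma_set_max_seg_size(dev, SZ_64K - 1):

	struct scatterlist *sg;

	/* Feed the device's real DMA segment limit directly to the API. */
	sg = __sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
					 dma_get_max_seg_size(dev),
					 NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);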

The only reason for the page-alignment restriction is that the algorithm will
overshoot max_segment if it is not a multiple of PAGE_SIZE. Simply fix
the alignment before starting and don't expose this implementation detail
to the callers.
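As a quick sanity check of the clamp (a standalone userspace demo, using the
power-of-two equivalent of the kernel's ALIGN_DOWN()):

	#include <stdio.h>

	/* Equivalent to the kernel's ALIGN_DOWN() for power-of-two 'a'. */
	#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

	int main(void)
	{
		unsigned int page_size = 4096;

		/* A 64K - 1 DMA limit rounds down to 15 whole pages, so
		 * coalescing page-sized chunks can never overshoot it. */
		printf("%u\n", ALIGN_DOWN(65535u, page_size)); /* 61440 */

		/* A UINT_MAX "no limit" stays huge but page aligned. */
		printf("%u\n", ALIGN_DOWN(4294967295u, page_size));
		return 0;
	}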

A future patch will completely remove SCATTERLIST_MAX_SEGMENT.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 16e7483e
@@ -404,7 +404,7 @@ static struct scatterlist *get_next_sg(struct sg_table *table,
  * @n_pages:     Number of pages in the pages array
  * @offset:      Offset from start of the first page to the start of a buffer
  * @size:        Number of valid bytes in the buffer (after offset)
- * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
+ * @max_segment: Maximum size of a scatterlist element in bytes
  * @prv:         Last populated sge in sgt
  * @left_pages:  Left pages caller have to set after this call
  * @gfp_mask:    GFP allocation mask
@@ -435,7 +435,12 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
 	unsigned int added_nents = 0;
 	struct scatterlist *s = prv;
 
-	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
+	/*
+	 * The algorithm below requires max_segment to be aligned to PAGE_SIZE
+	 * otherwise it can overshoot.
+	 */
+	max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
+	if (WARN_ON(max_segment < PAGE_SIZE))
 		return ERR_PTR(-EINVAL);
 
 	if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
@@ -542,8 +547,7 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 			      unsigned long size, gfp_t gfp_mask)
 {
 	return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
-			offset, size, SCATTERLIST_MAX_SEGMENT,
-			NULL, 0, gfp_mask));
+			offset, size, UINT_MAX, NULL, 0, gfp_mask));
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);