Commit 1b1d3710 authored by Niranjana Vishwanathapura, committed by Rodrigo Vivi

drm/xe: Apply upper limit to sg element size

The iommu_dma_map_sg() function ensures that an iova allocation doesn't
cross a dma segment boundary by padding some sg elements. This padding
can overflow the 32-bit sg->length field, leaving it set to 0. Avoid
this by halving the maximum segment size (rounded down to PAGE_SIZE).

Specify the maximum segment size for sg elements by using
sg_alloc_table_from_pages_segment() to allocate the sg_table.
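
For reference, sg_alloc_table_from_pages_segment() is the upstream
scatterlist helper (see lib/scatterlist.c) that takes the per-element
cap as an explicit max_segment argument; its signature, quoted here
for context, is:

    int sg_alloc_table_from_pages_segment(struct sg_table *sgt,
                                          struct page **pages,
                                          unsigned int n_pages,
                                          unsigned int offset,
                                          unsigned long size,
                                          unsigned int max_segment,
                                          gfp_t gfp_mask);

Each sg element built from the pinned pages is then guaranteed to be
no longer than max_segment bytes.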

v2: Use correct max segment size in dma_set_max_seg_size() call
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Bruce Chang <yu.bruce.chang@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 6ed6ba32
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -251,9 +251,11 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 
 	if (xe_tt->sg)
 		return 0;
 
-	ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages, num_pages,
-					0, (u64)num_pages << PAGE_SHIFT,
-					GFP_KERNEL);
+	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
+						num_pages, 0,
+						(u64)num_pages << PAGE_SHIFT,
+						xe_sg_segment_size(xe_tt->dev),
+						GFP_KERNEL);
 	if (ret)
 		return ret;
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -296,6 +296,30 @@ void xe_bo_put_commit(struct llist_head *deferred);
 
 struct sg_table *xe_bo_get_sg(struct xe_bo *bo);
 
+/*
+ * xe_sg_segment_size() - Provides upper limit for sg segment size.
+ * @dev: device pointer
+ *
+ * Returns the maximum segment size for the 'struct scatterlist'
+ * elements.
+ */
+static inline unsigned int xe_sg_segment_size(struct device *dev)
+{
+	struct scatterlist __maybe_unused sg;
+	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
+
+	max = min_t(size_t, max, dma_max_mapping_size(dev));
+
+	/*
+	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
+	 * cross dma segment boundary. It does so by padding some sg elements.
+	 * This can cause overflow, ending up with sg->length being set to 0.
+	 * Avoid this by ensuring maximum segment size is half of 'max'
+	 * rounded down to PAGE_SIZE.
+	 */
+	return round_down(max / 2, PAGE_SIZE);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 /**
  * xe_bo_is_mem_type - Whether the bo currently resides in the given
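The halving in xe_sg_segment_size() guards against iommu_dma_map_sg()
padding an element up to the dma segment boundary: a padded length that
no longer fits the 32-bit sg->length field would wrap to 0. A minimal
userspace sketch of the resulting arithmetic, assuming 4 KiB pages and
an effectively unbounded dma_max_mapping_size() (round_down_to() and
DEMO_PAGE_SIZE are stand-ins for the kernel's round_down() and
PAGE_SIZE):

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096ULL

    static uint64_t round_down_to(uint64_t v, uint64_t align)
    {
            return v - (v % align);
    }

    int main(void)
    {
            /* sg->length is 32 bits wide, so the hard ceiling is UINT32_MAX. */
            uint64_t max = UINT32_MAX;

            /* Halve and round down to the page size so that even a fully
             * padded segment cannot wrap the 32-bit length field to 0. */
            uint64_t limit = round_down_to(max / 2, DEMO_PAGE_SIZE);

            printf("ceiling %#llx -> capped segment size %#llx\n",
                   (unsigned long long)max, (unsigned long long)limit);
            return 0;
    }

With these numbers the cap comes out to 0x7ffff000 bytes, comfortably
below the point where boundary padding could overflow the length field.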
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -11,6 +11,7 @@
 #include "regs/xe_engine_regs.h"
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_regs.h"
+#include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_mcr.h"
@@ -26,11 +27,7 @@ static int xe_set_dma_info(struct xe_device *xe)
 	unsigned int mask_size = xe->info.dma_mask_size;
 	int err;
 
-	/*
-	 * We don't have a max segment size, so set it to the max so sg's
-	 * debugging layer doesn't complain
-	 */
-	dma_set_max_seg_size(xe->drm.dev, UINT_MAX);
+	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
 
 	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
 	if (err)
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -117,9 +117,11 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	if (ret)
 		goto out;
 
-	ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
-					0, (u64)pinned << PAGE_SHIFT,
-					GFP_KERNEL);
+	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+						pinned, 0,
+						(u64)pinned << PAGE_SHIFT,
+						xe_sg_segment_size(xe->drm.dev),
+						GFP_KERNEL);
 	if (ret) {
 		vma->userptr.sg = NULL;
 		goto out;