Commit 1144e0df authored by Rodrigo Vivi

drm/xe: Introduce xe_ggtt_largest_hole

Introduce a new xe_ggtt_largest_hole helper that covers the SRIOV
demand and continues toward the goal of limiting drm_mm access to xe_ggtt.

v2: Fix a typo (Michal)

Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240821193842.352557-8-rodrigo.vivi@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 8b5ccc97
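The diff below adds the helper to xe_ggtt.c, exports it from xe_ggtt.h, and converts the SRIOV PF configuration code to use it. As a hedged sketch of the intended calling pattern (mirroring the pf_get_max_ggtt() change in the last hunk): a caller passes the required alignment plus an optional spare size to keep aside, and gets back the largest usable GGTT span. The wrapper name example_max_fair_ggtt() and the SZ_64M spare value are made up for illustration only:

/*
 * Illustrative sketch, not part of the patch: how a caller is expected
 * to use the new helper. example_max_fair_ggtt() and the SZ_64M spare
 * are hypothetical; the real user is pf_get_max_ggtt() in the last hunk.
 */
static u64 example_max_fair_ggtt(struct xe_ggtt *ggtt, u64 alignment)
{
	u64 spare = SZ_64M;	/* desired size to keep aside (illustrative) */
	u64 max_hole;

	/*
	 * Walks the GGTT holes under ggtt->lock; on return, spare holds
	 * the portion that the smaller holes could not absorb.
	 */
	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	/* Whatever spare remains must still come out of the largest hole. */
	return max_hole > spare ? max_hole - spare : 0;
}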
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -584,6 +584,41 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 			 bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
 }
 
+/**
+ * xe_ggtt_largest_hole - Largest GGTT hole
+ * @ggtt: the &xe_ggtt that will be inspected
+ * @alignment: minimum alignment
+ * @spare: If not NULL: in: desired memory size to be spared / out: Adjusted possible spare
+ *
+ * Return: size of the largest continuous GGTT region
+ */
+u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
+{
+	const struct drm_mm *mm = &ggtt->mm;
+	const struct drm_mm_node *entry;
+	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
+	u64 hole_start, hole_end, hole_size;
+	u64 max_hole = 0;
+
+	mutex_lock(&ggtt->lock);
+
+	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+		hole_start = max(hole_start, hole_min_start);
+		hole_start = ALIGN(hole_start, alignment);
+		hole_end = ALIGN_DOWN(hole_end, alignment);
+		if (hole_start >= hole_end)
+			continue;
+		hole_size = hole_end - hole_start;
+		if (spare)
+			*spare -= min3(*spare, hole_size, max_hole);
+		max_hole = max(max_hole, hole_size);
+	}
+
+	mutex_unlock(&ggtt->lock);
+
+	return max_hole;
+}
+
 #ifdef CONFIG_PCI_IOV
 static u64 xe_encode_vfid_pte(u16 vfid)
 {
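To make the @spare accounting above concrete, here is a small stand-alone user-space sketch (not kernel code; the hole sizes and the spare value are invented) that replays the loop's arithmetic:

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MIN3(a, b, c)	MIN(MIN(a, b), c)

int main(void)
{
	/* Illustrative hole sizes in MiB; real values come from the GGTT drm_mm. */
	uint64_t holes[] = { 64, 256, 512 };
	uint64_t spare = 128;	/* desired spare, MiB */
	uint64_t max_hole = 0;

	for (int i = 0; i < 3; i++) {
		uint64_t hole_size = holes[i];

		/*
		 * spare shrinks only by what fits in a hole that is not
		 * (yet) the largest one, as in the kernel loop.
		 */
		spare -= MIN3(spare, hole_size, max_hole);
		max_hole = max_hole > hole_size ? max_hole : hole_size;
	}

	/* Prints: max_hole=512 MiB, remaining spare=0 MiB */
	printf("max_hole=%llu MiB, remaining spare=%llu MiB\n",
	       (unsigned long long)max_hole, (unsigned long long)spare);
	return 0;
}

The remaining spare tells the caller how much of the largest hole still has to be reserved; in this example the smaller holes absorb the whole 128 MiB, so the full 512 MiB hole stays available.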
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -29,6 +29,7 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
 int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
 			 u64 start, u64 end);
 void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -590,30 +590,11 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
 static u64 pf_get_max_ggtt(struct xe_gt *gt)
 {
 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
-	const struct drm_mm *mm = &ggtt->mm;
-	const struct drm_mm_node *entry;
 	u64 alignment = pf_get_ggtt_alignment(gt);
 	u64 spare = pf_get_spare_ggtt(gt);
-	u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
-	u64 hole_start, hole_end, hole_size;
-	u64 max_hole = 0;
-
-	mutex_lock(&ggtt->lock);
-
-	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
-		hole_start = max(hole_start, hole_min_start);
-		hole_start = ALIGN(hole_start, alignment);
-		hole_end = ALIGN_DOWN(hole_end, alignment);
-		if (hole_start >= hole_end)
-			continue;
-		hole_size = hole_end - hole_start;
-		xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
-					hole_start, hole_size / SZ_1K);
-		spare -= min3(spare, hole_size, max_hole);
-		max_hole = max(max_hole, hole_size);
-	}
-
-	mutex_unlock(&ggtt->lock);
+	u64 max_hole;
+
+	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
 
 	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
 				max_hole / SZ_1K, spare / SZ_1K);