Commit 2eb412aa authored by Arunpravin Paneer Selvam, committed by Christian König

drm/amdgpu: Move the size computations to drm buddy

- Move the roundup_pow_of_two() and IS_ALIGNED() computations into the
  drm buddy code to support the new try-harder mechanism for contiguous
  allocation (sketched after the commit metadata below).

- Move the trim function call into drm_buddy_alloc_blocks().
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230909160902.15644-2-Arunpravin.PaneerSelvam@amd.com
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
parent 0a1844bf
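
The drm_buddy.c side of this change is not shown in the hunks below. As
orientation only, here is a minimal C sketch of how drm_buddy_alloc_blocks()
might absorb these computations. The flag DRM_BUDDY_CONTIGUOUS_ALLOCATION and
the helpers roundup_pow_of_two(), IS_ALIGNED(), round_up() and
drm_buddy_block_trim() all appear in the diff; the function name
buddy_alloc_sketch(), the local original_size and the stand-in alloc_blocks()
are hypothetical:

    /*
     * Sketch only, not the verbatim drm_buddy.c change: the contiguous
     * rounding that used to live in amdgpu_vram_mgr_new() runs before the
     * buddy allocation, and the trim runs after it.
     */
    static int buddy_alloc_sketch(struct drm_buddy *mm, u64 start, u64 end,
                                  u64 size, u64 min_block_size,
                                  struct list_head *blocks, unsigned long flags)
    {
            u64 original_size = size;   /* hypothetical local */
            int err;

            if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
                    /* a single power-of-two block can hold the whole request */
                    size = roundup_pow_of_two(size);
                    min_block_size = size;
            } else if (!IS_ALIGNED(size, min_block_size)) {
                    /* keep the request a multiple of the minimum block size */
                    size = round_up(size, min_block_size);
            }

            /* alloc_blocks() stands in for the real allocation loop */
            err = alloc_blocks(mm, start, end, size, min_block_size,
                               blocks, flags);
            if (err)
                    return err;

            /* trim any over-allocation back to the caller's original size */
            if (original_size != size)
                    drm_buddy_block_trim(mm, original_size, blocks);

            return 0;
    }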
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -424,9 +424,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       const struct ttm_place *place,
 			       struct ttm_resource **res)
 {
-	u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	u64 vis_usage = 0, max_bytes, min_block_size;
 	struct amdgpu_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
 	struct drm_buddy *mm = &mgr->mm;
@@ -474,6 +474,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
 
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
 	if (fpfn || lpfn != mgr->mm.size)
 		/* Allocate blocks in desired range */
 		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -496,25 +499,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
 
-		cur_size = size;
-
-		if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
-			/*
-			 * Except for actual range allocation, modify the size and
-			 * min_block_size conforming to continuous flag enablement
-			 */
-			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-				size = roundup_pow_of_two(size);
-				min_block_size = size;
-			/*
-			 * Modify the size value if size is not
-			 * aligned with min_block_size
-			 */
-			} else if (!IS_ALIGNED(size, min_block_size)) {
-				size = round_up(size, min_block_size);
-			}
-		}
-
 		r = drm_buddy_alloc_blocks(mm, fpfn,
 					   lpfn,
 					   size,
@@ -531,40 +515,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	}
 	mutex_unlock(&mgr->lock);
 
-	if (cur_size != size) {
-		struct drm_buddy_block *block;
-		struct list_head *trim_list;
-		u64 original_size;
-		LIST_HEAD(temp);
-
-		trim_list = &vres->blocks;
-		original_size = (u64)vres->base.size;
-
-		/*
-		 * If size value is rounded up to min_block_size, trim the last
-		 * block to the required size
-		 */
-		if (!list_is_singular(&vres->blocks)) {
-			block = list_last_entry(&vres->blocks, typeof(*block), link);
-			list_move_tail(&block->link, &temp);
-			trim_list = &temp;
-			/*
-			 * Compute the original_size value by subtracting the
-			 * last block size with (aligned size - original size)
-			 */
-			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
-		}
-
-		mutex_lock(&mgr->lock);
-		drm_buddy_block_trim(mm,
-				     original_size,
-				     trim_list);
-		mutex_unlock(&mgr->lock);
-
-		if (!list_empty(&temp))
-			list_splice_tail(trim_list, &vres->blocks);
-	}
-
 	vres->base.start = 0;
 	list_for_each_entry(block, &vres->blocks, link) {
 		unsigned long start;
...
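
Net effect on the driver, per the hunks above: amdgpu_vram_mgr_new() no longer
rounds the request or trims the tail block itself; it only translates the TTM
placement flag into a buddy flag and calls the allocator. A condensed view of
the surviving driver-side flow (argument order taken from the existing
drm_buddy_alloc_blocks() call in the diff):

    if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
            vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;

    /* rounding and trimming now happen inside the buddy allocator */
    r = drm_buddy_alloc_blocks(mm, fpfn, lpfn, size,
                               min_block_size, &vres->blocks,
                               vres->flags);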