Commit 5640e816 authored by Arunpravin Paneer Selvam, committed by Christian König

drm: Optimize drm buddy top-down allocation method

We are observing a performance drop in many use cases, including
games and 3D benchmark applications. To solve this problem, we
strictly disallow allocations with the top-down flag enabled from
stealing memory space from the CPU-visible region.
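For context, here is a hedged driver-side sketch of how such an
allocation is requested. The drm_buddy calls follow the kernel API as
of this patch; the pool size, chunk size, and allocation size are
illustrative values, not taken from this commit:

#include <drm/drm_buddy.h>
#include <linux/sizes.h>

static int example_topdown_alloc(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	/* Illustrative pool: 4 GiB managed in 4 KiB chunks. */
	err = drm_buddy_init(&mm, SZ_4G, SZ_4K);
	if (err)
		return err;

	/*
	 * Top-down request: with this patch it is served from the
	 * highest free offsets, leaving the low (CPU visible) range
	 * untouched for buffers that must be CPU accessible.
	 */
	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_8M, SZ_4K,
				     &blocks, DRM_BUDDY_TOPDOWN_ALLOCATION);
	if (!err)
		drm_buddy_free_list(&mm, &blocks);

	drm_buddy_fini(&mm);
	return err;
}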

The idea is to keep the entries of each order list sorted in
ascending offset order, then compare the last entry of each order
list in the freelist and return the block with the maximum offset.
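As a standalone sketch of that idea (plain userspace C, not the
kernel code below; arrays stand in for the kernel's linked lists):
keeping every per-order list sorted in ascending offset order means
the last entry of each list is that order's highest block, so the
top-most candidate costs one comparison per order instead of a walk
over every free block:

#include <stdio.h>

#define MAX_ORDER	4
#define MAX_BLOCKS	8

/* Per-order free "lists": offsets kept in ascending order. */
static unsigned long long free_list[MAX_ORDER + 1][MAX_BLOCKS];
static int count[MAX_ORDER + 1];

/* Insert keeping ascending offset order (cf. list_insert_sorted()). */
static void insert_sorted(unsigned int order, unsigned long long offset)
{
	int i = count[order]++;

	while (i > 0 && free_list[order][i - 1] > offset) {
		free_list[order][i] = free_list[order][i - 1];
		i--;
	}
	free_list[order][i] = offset;
}

/* Top-down pick: compare only the last (highest) entry per order. */
static int get_max(unsigned int order, unsigned long long *out)
{
	int found = 0;
	unsigned int i;

	for (i = order; i <= MAX_ORDER; ++i) {
		unsigned long long tail;

		if (!count[i])
			continue;
		tail = free_list[i][count[i] - 1];
		if (!found || tail > *out) {
			*out = tail;
			found = 1;
		}
	}
	return found;
}

int main(void)
{
	unsigned long long max;

	insert_sorted(0, 0x9000);
	insert_sorted(0, 0x1000);
	insert_sorted(2, 0x4000);

	if (get_max(0, &max))
		printf("top-down pick: 0x%llx\n", max); /* prints 0x9000 */
	return 0;
}

In the kernel the free lists are doubly linked, so list_last_entry()
reaches the tail in O(1), as in get_maxblock() below.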

This patch improves the 3D benchmark scores and solves the
fragmentation issues.

All drm buddy selftests are verified.
drm_buddy: pass:6 fail:0 skip:0 total:6
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230112120027.3072-1-Arunpravin.PaneerSelvam@amd.com
Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: stable@vger.kernel.org # 5.18+
parent 040b35c1
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
 	kmem_cache_free(slab_blocks, block);
 }
 
+static void list_insert_sorted(struct drm_buddy *mm,
+			       struct drm_buddy_block *block)
+{
+	struct drm_buddy_block *node;
+	struct list_head *head;
+
+	head = &mm->free_list[drm_buddy_block_order(block)];
+	if (list_empty(head)) {
+		list_add(&block->link, head);
+		return;
+	}
+
+	list_for_each_entry(node, head, link)
+		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+			break;
+
+	__list_add(&block->link, node->link.prev, &node->link);
+}
+
 static void mark_allocated(struct drm_buddy_block *block)
 {
 	block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
 	block->header &= ~DRM_BUDDY_HEADER_STATE;
 	block->header |= DRM_BUDDY_FREE;
 
-	list_add(&block->link,
-		 &mm->free_list[drm_buddy_block_order(block)]);
+	list_insert_sorted(mm, block);
 }
 
 static void mark_split(struct drm_buddy_block *block)
@@ -387,21 +405,27 @@ alloc_range_bias(struct drm_buddy *mm,
 }
 
 static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
 {
 	struct drm_buddy_block *max_block = NULL, *node;
+	unsigned int i;
 
-	max_block = list_first_entry_or_null(head,
-					     struct drm_buddy_block,
-					     link);
-	if (!max_block)
-		return NULL;
+	for (i = order; i <= mm->max_order; ++i) {
+		if (!list_empty(&mm->free_list[i])) {
+			node = list_last_entry(&mm->free_list[i],
+					       struct drm_buddy_block,
+					       link);
+			if (!max_block) {
+				max_block = node;
+				continue;
+			}
 
-	list_for_each_entry(node, head, link) {
-		if (drm_buddy_block_offset(node) >
-		    drm_buddy_block_offset(max_block))
-			max_block = node;
+			if (drm_buddy_block_offset(node) >
+			    drm_buddy_block_offset(max_block)) {
+				max_block = node;
+			}
+		}
 	}
 
 	return max_block;
 }
@@ -412,40 +436,43 @@ alloc_from_freelist(struct drm_buddy *mm,
 		    unsigned long flags)
 {
 	struct drm_buddy_block *block = NULL;
-	unsigned int i;
+	unsigned int tmp;
 	int err;
 
-	for (i = order; i <= mm->max_order; ++i) {
-		if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
-			block = get_maxblock(&mm->free_list[i]);
-			if (block)
-				break;
-		} else {
-			block = list_first_entry_or_null(&mm->free_list[i],
-							 struct drm_buddy_block,
-							 link);
-			if (block)
-				break;
-		}
-	}
+	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+		block = get_maxblock(mm, order);
+		if (block)
+			/* Store the obtained block order */
+			tmp = drm_buddy_block_order(block);
+	} else {
+		for (tmp = order; tmp <= mm->max_order; ++tmp) {
+			if (!list_empty(&mm->free_list[tmp])) {
+				block = list_last_entry(&mm->free_list[tmp],
+							struct drm_buddy_block,
+							link);
+				if (block)
+					break;
+			}
+		}
+	}
 
 	if (!block)
 		return ERR_PTR(-ENOSPC);
 
 	BUG_ON(!drm_buddy_block_is_free(block));
 
-	while (i != order) {
+	while (tmp != order) {
 		err = split_block(mm, block);
 		if (unlikely(err))
 			goto err_undo;
 
 		block = block->right;
-		i--;
+		tmp--;
 	}
 	return block;
 
 err_undo:
-	if (i != order)
+	if (tmp != order)
 		__drm_buddy_free(mm, block);
 	return ERR_PTR(err);
 }
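A standalone illustration (again plain C, not kernel code) of the
split loop at the end of alloc_from_freelist() above: each split
halves the block and descends into block->right, the higher-offset
child, so a top-down pick stays at the high end of the range while
the low halves return to the free lists:

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0x0;	/* free block found at order 3 */
	unsigned long long chunk = 0x1000;	/* order-0 block size */
	unsigned int order = 3, target = 0;	/* caller wants order 0 */

	while (order != target) {
		order--;
		/* the right child starts halfway into its parent */
		offset += chunk << order;
		printf("split -> order %u child at 0x%llx\n", order, offset);
	}
	/* ends at 0x7000: the last order-0 chunk of the 0x0-0x8000 block */
	return 0;
}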