Commit c9cad937 authored by Arunpravin Paneer Selvam, committed by Christian König

drm/amdgpu: add drm buddy support to amdgpu

- Switch to drm buddy allocator
- Add resource cursor support for drm buddy

v2(Matthew Auld):
  - replace the spinlock with a mutex, as we call
    kmem_cache_zalloc(..., GFP_KERNEL) in the drm_buddy_alloc() function

  - lock the drm_buddy_block_trim() function, as the mark_free/mark_split
    helpers it calls operate on globally visible state

v3(Matthew Auld):
  - remove the trim method's error handling, as the failure case is
    addressed in the drm_buddy_block_trim() function

v4:
  - fix warnings reported by kernel test robot <lkp@intel.com>

v5:
  - fix merge conflict issue

v6:
  - fix warnings reported by kernel test robot <lkp@intel.com>

v7:
  - remove DRM_BUDDY_RANGE_ALLOCATION flag usage

v8:
  - keep DRM_BUDDY_RANGE_ALLOCATION flag usage
  - resolve conflicts created by drm/amdgpu: remove VRAM accounting v2

v9(Christian):
  - merged the below patch
     - drm/amdgpu: move vram inline functions into a header
  - rename label name as fallback
  - move struct amdgpu_vram_mgr to amdgpu_vram_mgr.h
  - remove unnecessary flags from struct amdgpu_vram_reservation
  - rewrite block NULL check condition
  - change else style as per coding standard
  - rewrite the node max size
  - add a helper function to fetch the first entry from the list

v10(Christian):
   - rename amdgpu_get_node() function name as amdgpu_vram_mgr_first_block

v11:
   - if size is not aligned with min_page_size, enable the is_contiguous
     flag, so that the size is rounded up to a power of two and then
     trimmed back to the original size (see the sketch after this list)
v12:
   - rename the function names having prefix as amdgpu_vram_mgr_*()
   - modify the round_up() logic to conform to contiguous flag enablement,
     or to the case where size is not aligned to min_block_size
   - modify the trim logic
   - rename node as block wherever applicable
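
For illustration, a minimal sketch of the v11/v12 sizing logic described above (names follow the patch; the snippet is an annotated extract for readers, not additional patch content):

	/* Contiguous requests must be satisfiable by a single buddy block,
	 * so the size is first rounded up to a power of two and the
	 * allocation is later trimmed back to the requested size.
	 */
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		size = roundup_pow_of_two(size);	/* e.g. 768 KiB -> 1 MiB */
		min_block_size = size;			/* one block only */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);	/* e.g. 5 MiB -> 6 MiB */
	}

	/* after a successful allocation, the last block is cut back down */
	drm_buddy_block_trim(mm, original_size, trim_list);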
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407224843.2416-1-Arunpravin.PaneerSelvam@amd.com
Signed-off-by: Christian König <christian.koenig@amd.com>
parent 17b048d4
drivers/gpu/drm/Kconfig
@@ -280,6 +280,7 @@ config DRM_AMDGPU
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
 	select INTERVAL_TREE
+	select DRM_BUDDY
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -30,12 +30,15 @@
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_range_manager.h>

+#include "amdgpu_vram_mgr.h"
+
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
 	uint64_t		start;
 	uint64_t		size;
 	uint64_t		remaining;
-	struct drm_mm_node	*node;
+	void			*node;
+	uint32_t		mem_type;
 };
 /**
@@ -52,19 +55,41 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 				    uint64_t start, uint64_t size,
 				    struct amdgpu_res_cursor *cur)
 {
+	struct drm_buddy_block *block;
+	struct list_head *head, *next;
 	struct drm_mm_node *node;

-	if (!res || res->mem_type == TTM_PL_SYSTEM) {
-		cur->start = start;
-		cur->size = size;
-		cur->remaining = size;
-		cur->node = NULL;
-		WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
-		return;
-	}
+	if (!res)
+		goto fallback;

 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+	cur->mem_type = res->mem_type;
+
+	switch (cur->mem_type) {
+	case TTM_PL_VRAM:
+		head = &to_amdgpu_vram_mgr_resource(res)->blocks;
+
+		block = list_first_entry_or_null(head,
+						 struct drm_buddy_block,
+						 link);
+		if (!block)
+			goto fallback;
+
+		while (start >= amdgpu_vram_mgr_block_size(block)) {
+			start -= amdgpu_vram_mgr_block_size(block);
+
+			next = block->link.next;
+			if (next != head)
+				block = list_entry(next, struct drm_buddy_block, link);
+		}
+
+		cur->start = amdgpu_vram_mgr_block_start(block) + start;
+		cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
+		cur->remaining = size;
+		cur->node = block;
+		break;
+	case TTM_PL_TT:
 		node = to_ttm_range_mgr_node(res)->mm_nodes;
 		while (start >= node->size << PAGE_SHIFT)
 			start -= node++->size << PAGE_SHIFT;
@@ -73,6 +98,20 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 		cur->size = min((node->size << PAGE_SHIFT) - start, size);
 		cur->remaining = size;
 		cur->node = node;
+		break;
+	default:
+		goto fallback;
+	}
+
+	return;
+
+fallback:
+	cur->start = start;
+	cur->size = size;
+	cur->remaining = size;
+	cur->node = NULL;
+	WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+	return;
 }
 /**
@@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
  */
 static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
 {
-	struct drm_mm_node *node = cur->node;
+	struct drm_buddy_block *block;
+	struct drm_mm_node *node;
+	struct list_head *next;

 	BUG_ON(size > cur->remaining);

@@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
 		return;
 	}
+	switch (cur->mem_type) {
+	case TTM_PL_VRAM:
+		block = cur->node;
+
+		next = block->link.next;
+		block = list_entry(next, struct drm_buddy_block, link);
+
+		cur->node = block;
+		cur->start = amdgpu_vram_mgr_block_start(block);
+		cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
+		break;
+	case TTM_PL_TT:
+		node = cur->node;
 		cur->node = ++node;
 		cur->start = node->start << PAGE_SHIFT;
 		cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+		break;
+	default:
+		return;
+	}
 }

 #endif
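
For context, a hedged sketch of how the reworked cursor is consumed. The loop shape mirrors existing amdgpu users such as the SG-table export path further down; process_chunk() is a hypothetical placeholder, not a real helper:

	struct amdgpu_res_cursor cursor;

	/* walk [offset, offset + length) of a VRAM or GTT resource */
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		/* cursor.start/cursor.size describe one contiguous chunk,
		 * whether it is backed by a buddy block or a drm_mm node
		 */
		process_chunk(cursor.start, cursor.size);
		amdgpu_res_next(&cursor, cursor.size);
	}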
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,6 +26,7 @@
 #include <linux/dma-direction.h>
 #include <drm/gpu_scheduler.h>

+#include "amdgpu_vram_mgr.h"
 #include "amdgpu.h"

 #define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
@@ -38,15 +39,6 @@

 #define AMDGPU_POISON	0xd0bed0be

-struct amdgpu_vram_mgr {
-	struct ttm_resource_manager manager;
-	struct drm_mm mm;
-	spinlock_t lock;
-	struct list_head reservations_pending;
-	struct list_head reserved_pages;
-	atomic64_t vis_usage;
-};
-
 struct amdgpu_gtt_mgr {
 	struct ttm_resource_manager manager;
 	struct drm_mm mm;
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -32,8 +32,10 @@
 #include "atom.h"

 struct amdgpu_vram_reservation {
-	struct list_head node;
-	struct drm_mm_node mm_node;
+	u64 start;
+	u64 size;
+	struct list_head allocated;
+	struct list_head blocks;
 };

 static inline struct amdgpu_vram_mgr *
@@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
 };

 /**
- * amdgpu_vram_mgr_vis_size - Calculate visible node size
+ * amdgpu_vram_mgr_vis_size - Calculate visible block size
  *
  * @adev: amdgpu_device pointer
- * @node: MM node structure
+ * @block: DRM BUDDY block structure
  *
- * Calculate how many bytes of the MM node are inside visible VRAM
+ * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
  */
 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
-				    struct drm_mm_node *node)
+				    struct drm_buddy_block *block)
 {
-	uint64_t start = node->start << PAGE_SHIFT;
-	uint64_t end = (node->size + node->start) << PAGE_SHIFT;
+	u64 start = amdgpu_vram_mgr_block_start(block);
+	u64 end = start + amdgpu_vram_mgr_block_size(block);

 	if (start >= adev->gmc.visible_vram_size)
 		return 0;
@@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct ttm_resource *res = bo->tbo.resource;
-	unsigned pages = res->num_pages;
-	struct drm_mm_node *mm;
-	u64 usage;
+	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+	struct drm_buddy_block *block;
+	u64 usage = 0;

 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
@@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;

-	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
-	for (usage = 0; pages; pages -= mm->size, mm++)
-		usage += amdgpu_vram_mgr_vis_size(adev, mm);
+	list_for_each_entry(block, &vres->blocks, link)
+		usage += amdgpu_vram_mgr_vis_size(adev, block);

 	return usage;
 }
@@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
 {
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct drm_mm *mm = &mgr->mm;
+	struct drm_buddy *mm = &mgr->mm;
 	struct amdgpu_vram_reservation *rsv, *temp;
+	struct drm_buddy_block *block;
 	uint64_t vis_usage;

-	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
-		if (drm_mm_reserve_node(mm, &rsv->mm_node))
+	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
+		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
+					   rsv->size, mm->chunk_size, &rsv->allocated,
+					   DRM_BUDDY_RANGE_ALLOCATION))
+			continue;
+
+		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
+		if (!block)
 			continue;

 		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
-			rsv->mm_node.start, rsv->mm_node.size);
+			rsv->start, rsv->size);

-		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
+		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
 		atomic64_add(vis_usage, &mgr->vis_usage);
 		spin_lock(&man->bdev->lru_lock);
-		man->usage += rsv->mm_node.size << PAGE_SHIFT;
+		man->usage += rsv->size;
 		spin_unlock(&man->bdev->lru_lock);
-		list_move(&rsv->node, &mgr->reserved_pages);
+		list_move(&rsv->blocks, &mgr->reserved_pages);
 	}
 }
@@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 	if (!rsv)
 		return -ENOMEM;

-	INIT_LIST_HEAD(&rsv->node);
-	rsv->mm_node.start = start >> PAGE_SHIFT;
-	rsv->mm_node.size = size >> PAGE_SHIFT;
+	INIT_LIST_HEAD(&rsv->allocated);
+	INIT_LIST_HEAD(&rsv->blocks);
+
+	rsv->start = start;
+	rsv->size = size;

-	spin_lock(&mgr->lock);
-	list_add_tail(&rsv->node, &mgr->reservations_pending);
+	mutex_lock(&mgr->lock);
+	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
 	amdgpu_vram_mgr_do_reserve(&mgr->manager);
-	spin_unlock(&mgr->lock);
+	mutex_unlock(&mgr->lock);

 	return 0;
 }
@@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 	struct amdgpu_vram_reservation *rsv;
 	int ret;

-	spin_lock(&mgr->lock);
+	mutex_lock(&mgr->lock);

-	list_for_each_entry(rsv, &mgr->reservations_pending, node) {
-		if ((rsv->mm_node.start <= start) &&
-		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
+		if (rsv->start <= start &&
+		    (start < (rsv->start + rsv->size))) {
 			ret = -EBUSY;
 			goto out;
 		}
 	}

-	list_for_each_entry(rsv, &mgr->reserved_pages, node) {
-		if ((rsv->mm_node.start <= start) &&
-		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
+		if (rsv->start <= start &&
+		    (start < (rsv->start + rsv->size))) {
 			ret = 0;
 			goto out;
 		}
@@ -327,32 +337,10 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 	ret = -ENOENT;
 out:
-	spin_unlock(&mgr->lock);
+	mutex_unlock(&mgr->lock);
 	return ret;
 }
-/**
- * amdgpu_vram_mgr_virt_start - update virtual start address
- *
- * @mem: ttm_resource to update
- * @node: just allocated node
- *
- * Calculate a virtual BO start address to easily check if everything is CPU
- * accessible.
- */
-static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
-				       struct drm_mm_node *node)
-{
-	unsigned long start;
-
-	start = node->start + node->size;
-	if (start > mem->num_pages)
-		start -= mem->num_pages;
-	else
-		start = 0;
-
-	mem->start = max(mem->start, start);
-}
-
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       const struct ttm_place *place,
 			       struct ttm_resource **res)
 {
-	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
+	u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	uint64_t vis_usage = 0, mem_bytes, max_bytes;
-	struct ttm_range_mgr_node *node;
-	struct drm_mm *mm = &mgr->mm;
-	enum drm_mm_insert_mode mode;
-	unsigned i;
+	struct amdgpu_vram_mgr_resource *vres;
+	u64 size, remaining_size, lpfn, fpfn;
+	struct drm_buddy *mm = &mgr->mm;
+	struct drm_buddy_block *block;
+	unsigned long pages_per_block;
 	int r;

-	lpfn = place->lpfn;
+	lpfn = place->lpfn << PAGE_SHIFT;
 	if (!lpfn)
-		lpfn = man->size >> PAGE_SHIFT;
+		lpfn = man->size;
+
+	fpfn = place->fpfn << PAGE_SHIFT;

 	max_bytes = adev->gmc.mc_vram_size;
 	if (tbo->type != ttm_bo_type_kernel)
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

-	mem_bytes = tbo->base.size;
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		pages_per_node = ~0ul;
-		num_nodes = 1;
+		pages_per_block = ~0ul;
 	} else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		pages_per_node = HPAGE_PMD_NR;
+		pages_per_block = HPAGE_PMD_NR;
 #else
 		/* default to 2MB */
-		pages_per_node = 2UL << (20UL - PAGE_SHIFT);
+		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
 #endif
-		pages_per_node = max_t(uint32_t, pages_per_node,
-				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
+		pages_per_block = max_t(uint32_t, pages_per_block,
+					tbo->page_alignment);
 	}

-	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
-			GFP_KERNEL | __GFP_ZERO);
-	if (!node)
+	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
+	if (!vres)
 		return -ENOMEM;

-	ttm_resource_init(tbo, place, &node->base);
+	ttm_resource_init(tbo, place, &vres->base);

 	/* bail out quickly if there's likely not enough VRAM for this BO */
 	if (ttm_resource_manager_usage(man) > max_bytes) {
@@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		goto error_fini;
 	}

-	mode = DRM_MM_INSERT_BEST;
+	INIT_LIST_HEAD(&vres->blocks);
+
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
-		mode = DRM_MM_INSERT_HIGH;
+		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+	if (fpfn || lpfn != man->size)
+		/* Allocate blocks in desired range */
+		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

-	pages_left = node->base.num_pages;
+	remaining_size = vres->base.num_pages << PAGE_SHIFT;

-	/* Limit maximum size to 2GB due to SG table limitations */
-	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
+	mutex_lock(&mgr->lock);
+	while (remaining_size) {
+		if (tbo->page_alignment)
+			min_block_size = tbo->page_alignment << PAGE_SHIFT;
+		else
+			min_block_size = mgr->default_page_size;

-	i = 0;
-	spin_lock(&mgr->lock);
-	while (pages_left) {
-		uint32_t alignment = tbo->page_alignment;
+		BUG_ON(min_block_size < mm->chunk_size);

-		if (pages >= pages_per_node)
-			alignment = pages_per_node;
+		/* Limit maximum size to 2GiB due to SG table limitations */
+		size = min(remaining_size, 2ULL << 30);

-		r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
-						alignment, 0, place->fpfn,
-						lpfn, mode);
-		if (unlikely(r)) {
-			if (pages > pages_per_node) {
-				if (is_power_of_2(pages))
-					pages = pages / 2;
-				else
-					pages = rounddown_pow_of_two(pages);
-				continue;
-			}
-			goto error_free;
-		}
+		if (size >= pages_per_block << PAGE_SHIFT)
+			min_block_size = pages_per_block << PAGE_SHIFT;

-		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
-		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
-		pages_left -= pages;
-		++i;
+		cur_size = size;

-		if (pages > pages_left)
-			pages = pages_left;
+		if (fpfn + size != place->lpfn << PAGE_SHIFT) {
+			/*
+			 * Except for actual range allocation, modify the size and
+			 * min_block_size conforming to continuous flag enablement
+			 */
+			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+				size = roundup_pow_of_two(size);
+				min_block_size = size;
+			/*
+			 * Modify the size value if size is not
+			 * aligned with min_block_size
+			 */
+			} else if (!IS_ALIGNED(size, min_block_size)) {
+				size = round_up(size, min_block_size);
+			}
+		}
+
+		r = drm_buddy_alloc_blocks(mm, fpfn,
+					   lpfn,
+					   size,
+					   min_block_size,
+					   &vres->blocks,
+					   vres->flags);
+		if (unlikely(r))
+			goto error_free_blocks;
+
+		if (size > remaining_size)
+			remaining_size = 0;
+		else
+			remaining_size -= size;
 	}
-	spin_unlock(&mgr->lock);
+	mutex_unlock(&mgr->lock);

-	if (i == 1)
-		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+	if (cur_size != size) {
+		struct drm_buddy_block *block;
+		struct list_head *trim_list;
+		u64 original_size;
+		LIST_HEAD(temp);
+
+		trim_list = &vres->blocks;
+		original_size = vres->base.num_pages << PAGE_SHIFT;
+
+		/*
+		 * If size value is rounded up to min_block_size, trim the last
+		 * block to the required size
+		 */
+		if (!list_is_singular(&vres->blocks)) {
+			block = list_last_entry(&vres->blocks, typeof(*block), link);
+			list_move_tail(&block->link, &temp);
+			trim_list = &temp;
+
+			/*
+			 * Compute the original_size value by subtracting the
+			 * last block size with (aligned size - original size)
+			 */
+			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
+		}
+
+		mutex_lock(&mgr->lock);
+		drm_buddy_block_trim(mm,
+				     original_size,
+				     trim_list);
+		mutex_unlock(&mgr->lock);
+
+		if (!list_empty(&temp))
+			list_splice_tail(trim_list, &vres->blocks);
+	}
+
+	list_for_each_entry(block, &vres->blocks, link)
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+
+	block = amdgpu_vram_mgr_first_block(&vres->blocks);
+	if (!block) {
+		r = -EINVAL;
+		goto error_fini;
+	}
+
+	vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+
+	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
+		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

 	if (adev->gmc.xgmi.connected_to_cpu)
-		node->base.bus.caching = ttm_cached;
+		vres->base.bus.caching = ttm_cached;
 	else
-		node->base.bus.caching = ttm_write_combined;
+		vres->base.bus.caching = ttm_write_combined;

 	atomic64_add(vis_usage, &mgr->vis_usage);
-	*res = &node->base;
+	*res = &vres->base;
 	return 0;

-error_free:
-	while (i--)
-		drm_mm_remove_node(&node->mm_nodes[i]);
-	spin_unlock(&mgr->lock);
+error_free_blocks:
+	drm_buddy_free_list(mm, &vres->blocks);
+	mutex_unlock(&mgr->lock);
 error_fini:
-	ttm_resource_fini(man, &node->base);
-	kvfree(node);
+	ttm_resource_fini(man, &vres->base);
+	kfree(vres);
 	return r;
 }
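
As a worked example of the sizing loop above (numbers illustrative, not from the patch): a 300 MiB TTM_PL_FLAG_CONTIGUOUS request is rounded up to roundup_pow_of_two(300 MiB) = 512 MiB with min_block_size = 512 MiB, served by a single buddy block, after which drm_buddy_block_trim() shrinks that block so that original_size = 512 MiB - (512 MiB - 300 MiB) = 300 MiB. A non-contiguous 5 MiB request with a 2 MiB min_block_size is instead rounded up to 6 MiB and served by three 2 MiB blocks; since cur_size (5 MiB) != size (6 MiB), the last block is moved to the temp list and trimmed to 2 MiB - (6 MiB - 5 MiB) = 1 MiB.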
@@ -490,27 +540,26 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 				struct ttm_resource *res)
 {
-	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
+	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	struct drm_buddy *mm = &mgr->mm;
+	struct drm_buddy_block *block;
 	uint64_t vis_usage = 0;
-	unsigned i, pages;

-	spin_lock(&mgr->lock);
-	for (i = 0, pages = res->num_pages; pages;
-	     pages -= node->mm_nodes[i].size, ++i) {
-		struct drm_mm_node *mm = &node->mm_nodes[i];
-
-		drm_mm_remove_node(mm);
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
-	}
+	mutex_lock(&mgr->lock);
+	list_for_each_entry(block, &vres->blocks, link)
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+
 	amdgpu_vram_mgr_do_reserve(man);
-	spin_unlock(&mgr->lock);
+
+	drm_buddy_free_list(mm, &vres->blocks);
+	mutex_unlock(&mgr->lock);

 	atomic64_sub(vis_usage, &mgr->vis_usage);

 	ttm_resource_fini(man, res);
-	kvfree(node);
+	kfree(vres);
 }
 /**
@@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	if (!*sgt)
 		return -ENOMEM;

-	/* Determine the number of DRM_MM nodes to export */
+	/* Determine the number of DRM_BUDDY blocks to export */
 	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
@@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		sg->length = 0;

 	/*
-	 * Walk down DRM_MM nodes to populate scatterlist nodes
-	 * @note: Use iterator api to get first the DRM_MM node
+	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
+	 * @note: Use iterator api to get first the DRM_BUDDY block
 	 * and the number of bytes from it. Access the following
-	 * DRM_MM node(s) if more buffer needs to exported
+	 * DRM_BUDDY block(s) if more buffer needs to exported
 	 */
 	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
@@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 			     struct drm_printer *printer)
 {
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+	struct drm_buddy *mm = &mgr->mm;
+	struct drm_buddy_block *block;

 	drm_printf(printer, "  vis usage:%llu\n",
 		   amdgpu_vram_mgr_vis_usage(mgr));

-	spin_lock(&mgr->lock);
-	drm_mm_print(&mgr->mm, printer);
-	spin_unlock(&mgr->lock);
+	mutex_lock(&mgr->lock);
+	drm_printf(printer, "default_page_size: %lluKiB\n",
+		   mgr->default_page_size >> 10);
+
+	drm_buddy_print(mm, printer);
+
+	drm_printf(printer, "reserved:\n");
+	list_for_each_entry(block, &mgr->reserved_pages, link)
+		drm_buddy_block_print(mm, block, printer);
+	mutex_unlock(&mgr->lock);
 }

 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
 	struct ttm_resource_manager *man = &mgr->manager;
+	int err;

 	ttm_resource_manager_init(man, &adev->mman.bdev,
 				  adev->gmc.real_vram_size);

 	man->func = &amdgpu_vram_mgr_func;

-	drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
-	spin_lock_init(&mgr->lock);
+	err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+	if (err)
+		return err;
+
+	mutex_init(&mgr->lock);
 	INIT_LIST_HEAD(&mgr->reservations_pending);
 	INIT_LIST_HEAD(&mgr->reserved_pages);
+	mgr->default_page_size = PAGE_SIZE;

 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
 	ttm_resource_manager_set_used(man, true);
@@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
 	if (ret)
 		return;

-	spin_lock(&mgr->lock);
-	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
+	mutex_lock(&mgr->lock);
+	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
 		kfree(rsv);

-	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
-		drm_mm_remove_node(&rsv->mm_node);
+	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
+		drm_buddy_free_list(&mgr->mm, &rsv->blocks);
 		kfree(rsv);
 	}

-	drm_mm_takedown(&mgr->mm);
-	spin_unlock(&mgr->lock);
+	drm_buddy_fini(&mgr->mm);
+	mutex_unlock(&mgr->lock);

 	ttm_resource_manager_cleanup(man);
 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
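
Worth noting on the spinlock-to-mutex change throughout this file (see the v2 note in the commit message): drm_buddy_alloc_blocks() allocates its block metadata with GFP_KERNEL, which may sleep, so the manager lock must be a sleepable mutex rather than a spinlock. A minimal sketch of the resulting pattern, restating calls from the patch:

	mutex_lock(&mgr->lock);		/* sleepable: GFP_KERNEL below may block */
	r = drm_buddy_alloc_blocks(mm, fpfn, lpfn, size,
				   min_block_size, &vres->blocks, vres->flags);
	mutex_unlock(&mgr->lock);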
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h (new file)
/* SPDX-License-Identifier: MIT
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __AMDGPU_VRAM_MGR_H__
#define __AMDGPU_VRAM_MGR_H__
#include <drm/drm_buddy.h>
struct amdgpu_vram_mgr {
struct ttm_resource_manager manager;
struct drm_buddy mm;
/* protects access to buffer objects */
struct mutex lock;
struct list_head reservations_pending;
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
{
return drm_buddy_block_offset(block);
}
static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
{
return PAGE_SIZE << drm_buddy_block_order(block);
}
static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
return list_first_entry_or_null(list, struct drm_buddy_block, link);
}
static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
struct drm_buddy_block *block;
u64 start, size;
block = amdgpu_vram_mgr_first_block(head);
if (!block)
return false;
while (head != block->link.next) {
start = amdgpu_vram_mgr_block_start(block);
size = amdgpu_vram_mgr_block_size(block);
block = list_entry(block->link.next, struct drm_buddy_block, link);
if (start + size != amdgpu_vram_mgr_block_start(block))
return false;
}
return true;
}
static inline struct amdgpu_vram_mgr_resource *
to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
{
return container_of(res, struct amdgpu_vram_mgr_resource, base);
}
#endif
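
For illustration, a small hedged sketch of the helpers above being used together; it assumes vres->blocks was populated by amdgpu_vram_mgr_new() and simply restates the header's API:

	struct drm_buddy_block *block;
	u64 total = 0, start;

	/* sum the sizes of all buddy blocks backing a resource */
	list_for_each_entry(block, &vres->blocks, link)
		total += amdgpu_vram_mgr_block_size(block);

	/* a contiguous list means one physical span beginning at the
	 * first block's offset
	 */
	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
		start = amdgpu_vram_mgr_block_start(
			amdgpu_vram_mgr_first_block(&vres->blocks));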