Commit 62347f9e authored by Lauri Kasanen, committed by Dave Airlie

drm: Add support for two-ended allocation, v3

Clients like i915 need to segregate cache domains within the GTT which
can lead to small amounts of fragmentation. By allocating the uncached
buffers from the bottom and the cacheable buffers from the top, we can
reduce the amount of wasted space and also optimize allocation of the
mappable portion of the GTT to only those buffers that require CPU
access through the GTT.

For other drivers, allocating small bos from one end and large ones
from the other helps reduce fragmentation.

Based on drm_mm work by Chris Wilson.

v3: Changed to use a TTM placement flag
v2: Updated kerneldoc

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <ben@bwidawsk.net>
Cc: Christian König <deathsimple@vodafone.de>
Signed-off-by: Lauri Kasanen <cand@gmx.com>
Signed-off-by: David Airlie <airlied@redhat.com>
parent 2614dc66
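
The change splits the old single flags argument into search flags (how to find a hole) and allocator flags (which end of the hole the node is carved from). A minimal caller sketch of the resulting API, using the DRM_MM_TOPDOWN convenience macro added in drm_mm.h below (the helper name, range, and sizes are hypothetical):

/* Hypothetical helper: allocate @size bytes near the top of the first
 * 1 GiB of @mm. DRM_MM_TOPDOWN expands to
 * "DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP": the hole search walks the
 * hole list backwards and the node is carved from the high end of the
 * hole that is found. */
static int place_topdown(struct drm_mm *mm, struct drm_mm_node *node,
                         unsigned long size, unsigned alignment)
{
        memset(node, 0, sizeof(*node));        /* node must be cleared to 0 */
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
                                                   0,            /* color */
                                                   0, 1UL << 30, /* range */
                                                   DRM_MM_TOPDOWN);
}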
drivers/gpu/drm/drm_mm.c
@@ -82,6 +82,10 @@
  * this to implement guard pages between incompatible caching domains in the
  * graphics TT.
  *
+ * Two behaviors are supported for searching and allocating: bottom-up and top-down.
+ * The default is bottom-up. Top-down allocation can be used if the memory area
+ * has different restrictions, or just to reduce fragmentation.
+ *
  * Finally iteration helpers to walk all nodes and all holes are provided as are
  * some basic allocator dumpers for debugging.
  */
@@ -102,7 +106,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                  struct drm_mm_node *node,
                                  unsigned long size, unsigned alignment,
-                                 unsigned long color)
+                                 unsigned long color,
+                                 enum drm_mm_allocator_flags flags)
 {
         struct drm_mm *mm = hole_node->mm;
         unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -115,12 +120,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
         if (mm->color_adjust)
                 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+        if (flags & DRM_MM_CREATE_TOP)
+                adj_start = adj_end - size;
+
         if (alignment) {
                 unsigned tmp = adj_start % alignment;
-                if (tmp)
-                        adj_start += alignment - tmp;
+                if (tmp) {
+                        if (flags & DRM_MM_CREATE_TOP)
+                                adj_start -= tmp;
+                        else
+                                adj_start += alignment - tmp;
+                }
         }
 
+        BUG_ON(adj_start < hole_start);
+        BUG_ON(adj_end > hole_end);
+
         if (adj_start == hole_start) {
                 hole_node->hole_follows = 0;
                 list_del(&hole_node->hole_stack);
@@ -205,7 +220,8 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
- * @flags: flags to fine-tune the allocation
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
  *
  * The preallocated node must be cleared to 0.
  *
@@ -215,16 +231,17 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long color,
-                               enum drm_mm_search_flags flags)
+                               enum drm_mm_search_flags sflags,
+                               enum drm_mm_allocator_flags aflags)
 {
         struct drm_mm_node *hole_node;
 
         hole_node = drm_mm_search_free_generic(mm, size, alignment,
-                                               color, flags);
+                                               color, sflags);
         if (!hole_node)
                 return -ENOSPC;
 
-        drm_mm_insert_helper(hole_node, node, size, alignment, color);
+        drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
         return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
@@ -233,7 +250,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                        struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment,
                                        unsigned long color,
-                                       unsigned long start, unsigned long end)
+                                       unsigned long start, unsigned long end,
+                                       enum drm_mm_allocator_flags flags)
 {
         struct drm_mm *mm = hole_node->mm;
         unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -248,13 +266,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
         if (adj_end > end)
                 adj_end = end;
 
+        if (flags & DRM_MM_CREATE_TOP)
+                adj_start = adj_end - size;
+
         if (mm->color_adjust)
                 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
         if (alignment) {
                 unsigned tmp = adj_start % alignment;
-                if (tmp)
-                        adj_start += alignment - tmp;
+                if (tmp) {
+                        if (flags & DRM_MM_CREATE_TOP)
+                                adj_start -= tmp;
+                        else
+                                adj_start += alignment - tmp;
+                }
         }
 
         if (adj_start == hole_start) {
@@ -271,6 +296,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
         INIT_LIST_HEAD(&node->hole_stack);
         list_add(&node->node_list, &hole_node->node_list);
 
+        BUG_ON(node->start < start);
+        BUG_ON(node->start < adj_start);
         BUG_ON(node->start + node->size > adj_end);
         BUG_ON(node->start + node->size > end);
 
@@ -290,7 +317,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * @color: opaque tag value to use for this node
  * @start: start of the allowed range for this node
  * @end: end of the allowed range for this node
- * @flags: flags to fine-tune the allocation
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
  *
  * The preallocated node must be cleared to 0.
  *
@@ -298,21 +326,23 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-                                        unsigned long size, unsigned alignment, unsigned long color,
+                                        unsigned long size, unsigned alignment,
+                                        unsigned long color,
                                         unsigned long start, unsigned long end,
-                                        enum drm_mm_search_flags flags)
+                                        enum drm_mm_search_flags sflags,
+                                        enum drm_mm_allocator_flags aflags)
 {
         struct drm_mm_node *hole_node;
 
         hole_node = drm_mm_search_free_in_range_generic(mm,
                                                         size, alignment, color,
-                                                        start, end, flags);
+                                                        start, end, sflags);
         if (!hole_node)
                 return -ENOSPC;
 
         drm_mm_insert_helper_range(hole_node, node,
                                    size, alignment, color,
-                                   start, end);
+                                   start, end, aflags);
         return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
@@ -391,7 +421,8 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
         best = NULL;
         best_size = ~0UL;
 
-        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+                               flags & DRM_MM_SEARCH_BELOW) {
                 if (mm->color_adjust) {
                         mm->color_adjust(entry, color, &adj_start, &adj_end);
                         if (adj_end <= adj_start)
@@ -432,7 +463,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
         best = NULL;
         best_size = ~0UL;
 
-        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+                               flags & DRM_MM_SEARCH_BELOW) {
                 if (adj_start < start)
                         adj_start = start;
                 if (adj_end > end)
...
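
A quick worked example of the top-down alignment handling in drm_mm_insert_helper() above, with hypothetical numbers: for a hole spanning [0, 90) with size = 20, alignment = 16 and DRM_MM_CREATE_TOP set, adj_start first becomes 90 - 20 = 70; then tmp = 70 % 16 = 6, so adj_start is rounded down to 64 and the node occupies [64, 84). Rounding down only moves the node deeper into a hole the search already found large enough, whereas the bottom-up round-up would risk pushing it past adj_end; the new BUG_ON checks assert that the chosen span stays inside the hole.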
drivers/gpu/drm/i915/i915_gem.c
@@ -3264,7 +3264,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                   size, alignment,
                                                   obj->cache_level, 0, gtt_max,
-                                                  DRM_MM_SEARCH_DEFAULT);
+                                                  DRM_MM_SEARCH_DEFAULT,
+                                                  DRM_MM_CREATE_DEFAULT);
         if (ret) {
                 ret = i915_gem_evict_something(dev, vm, size, alignment,
                                                obj->cache_level, flags);
...
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1074,7 +1074,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
                                                   &ppgtt->node, GEN6_PD_SIZE,
                                                   GEN6_PD_ALIGN, 0,
                                                   0, dev_priv->gtt.base.total,
-                                                  DRM_MM_SEARCH_DEFAULT);
+                                                  DRM_MM_SEARCH_DEFAULT,
+                                                  DRM_MM_CREATE_DEFAULT);
         if (ret == -ENOSPC && !retried) {
                 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
                                                GEN6_PD_SIZE, GEN6_PD_ALIGN,
...
drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
         struct drm_mm *mm = &rman->mm;
         struct drm_mm_node *node = NULL;
+        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
         unsigned long lpfn;
         int ret;
 
@@ -66,11 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         if (!node)
                 return -ENOMEM;
 
+        if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+                aflags = DRM_MM_CREATE_TOP;
+
         spin_lock(&rman->lock);
-        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-                                          mem->page_alignment,
-                                          placement->fpfn, lpfn,
-                                          DRM_MM_SEARCH_BEST);
+        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+                                                  mem->page_alignment, 0,
+                                                  placement->fpfn, lpfn,
+                                                  DRM_MM_SEARCH_BEST,
+                                                  aflags);
         spin_unlock(&rman->lock);
 
         if (unlikely(ret)) {
...
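
At the TTM level the new behavior is opt-in via a single placement flag. A sketch of how a driver might request top-down placement for a buffer (illustrative only; TTM_PL_FLAG_VRAM and TTM_PL_FLAG_UNCACHED are pre-existing flags, and placements in this kernel era are plain uint32_t flag words):

/* Hypothetical placement list: TTM_PL_FLAG_TOPDOWN makes
 * ttm_bo_man_get_node() above pass DRM_MM_CREATE_TOP to drm_mm,
 * so the buffer is carved from the top of the VRAM range. */
static const uint32_t vram_topdown_placements[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TOPDOWN,
};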
include/drm/drm_mm.h
@@ -47,8 +47,17 @@
 enum drm_mm_search_flags {
         DRM_MM_SEARCH_DEFAULT = 0,
         DRM_MM_SEARCH_BEST =    1 << 0,
+        DRM_MM_SEARCH_BELOW =   1 << 1,
 };
 
+enum drm_mm_allocator_flags {
+        DRM_MM_CREATE_DEFAULT = 0,
+        DRM_MM_CREATE_TOP =     1 << 0,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+
 struct drm_mm_node {
         struct list_head node_list;
         struct list_head hole_stack;
@@ -186,6 +195,9 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * Implementation Note:
  * We need to inline list_for_each_entry in order to be able to set hole_start
  * and hole_end on each iteration while keeping the macro sane.
+ *
+ * The __drm_mm_for_each_hole version is similar, but with added support for
+ * going backwards.
  */
 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
         for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
@@ -195,6 +207,14 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
              1 : 0; \
              entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
 
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+        for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+             &entry->hole_stack != &(mm)->hole_stack ? \
+             hole_start = drm_mm_hole_node_start(entry), \
+             hole_end = drm_mm_hole_node_end(entry), \
+             1 : 0; \
+             entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
@@ -205,7 +225,8 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
                                unsigned long size,
                                unsigned alignment,
                                unsigned long color,
-                               enum drm_mm_search_flags flags);
+                               enum drm_mm_search_flags sflags,
+                               enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node - search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -228,7 +249,8 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
                                      unsigned alignment,
                                      enum drm_mm_search_flags flags)
 {
-        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
+                                          DRM_MM_CREATE_DEFAULT);
 }
 
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
@@ -238,7 +260,8 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                         unsigned long color,
                                         unsigned long start,
                                         unsigned long end,
-                                        enum drm_mm_search_flags flags);
+                                        enum drm_mm_search_flags sflags,
+                                        enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -266,7 +289,8 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                               enum drm_mm_search_flags flags)
 {
         return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-                                                   0, start, end, flags);
+                                                   0, start, end, flags,
+                                                   DRM_MM_CREATE_DEFAULT);
 }
 
 void drm_mm_remove_node(struct drm_mm_node *node);
...
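
The backwards parameter is what lets DRM_MM_SEARCH_BELOW reuse the existing search loops unchanged. A small usage sketch (a debug-style hole dump; the surrounding variables are hypothetical):

struct drm_mm_node *entry;
unsigned long hole_start, hole_end;

/* last argument = 1: walk the hole list in reverse, as the
 * DRM_MM_SEARCH_BELOW path in drm_mm.c now does */
__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 1)
        printk(KERN_DEBUG "hole: %#lx-%#lx (%lu bytes)\n",
               hole_start, hole_end, hole_end - hole_start);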
include/drm/ttm/ttm_placement.h
@@ -65,6 +65,8 @@
  *      reference the buffer.
  * TTM_PL_FLAG_NO_EVICT means that the buffer may never
  *      be evicted to make room for other buffers.
+ * TTM_PL_FLAG_TOPDOWN requests to be placed from the
+ *      top of the memory area, instead of the bottom.
  */
 
 #define TTM_PL_FLAG_CACHED      (1 << 16)
@@ -72,6 +74,7 @@
 #define TTM_PL_FLAG_WC          (1 << 18)
 #define TTM_PL_FLAG_SHARED      (1 << 20)
 #define TTM_PL_FLAG_NO_EVICT    (1 << 21)
+#define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
 #define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
                                  TTM_PL_FLAG_UNCACHED | \
...