Commit cf6c467d authored by Christian König, committed by Alex Deucher

drm/ttm: add BO priorities for the LRUs

This way the driver can specify a priority for a BO, with the effect that a BO
is only evicted once all other BOs with a lower priority have been evicted
first.
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger.He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2ee7fc92
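Before the diff, a minimal driver-side sketch of how the new field is meant to be used (illustration only, not part of this commit; the helper name is hypothetical). Eviction and swap-out walk the per-priority LRUs from index 0 upward, so a higher bo->priority keeps a buffer resident longer.

/*
 * Hypothetical driver-side helper, assuming only what this patch adds:
 * bo->priority and TTM_MAX_BO_PRIORITY.  Every BO starts at priority 0
 * after ttm_bo_init(); a driver may raise the value (typically while
 * the BO is reserved) so the BO lands on a later LRU and is only
 * considered for eviction once the lower-priority LRUs have nothing
 * left to evict.
 */
static void my_driver_set_bo_priority(struct ttm_buffer_object *bo,
				      unsigned prio)
{
	/* priority indexes man->lru[] and glob->swap_lru[], so clamp it */
	if (prio >= TTM_MAX_BO_PRIORITY)
		prio = TTM_MAX_BO_PRIORITY - 1;
	bo->priority = prio;
}

Clamping rather than rejecting an out-of-range value is only a choice made to keep the sketch self-contained.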
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1166,8 +1166,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
 
 		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
-			lru->lru[j] = &adev->mman.bdev.man[j].lru;
-		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
+			lru->lru[j] = &adev->mman.bdev.man[j].lru[0];
+		lru->swap_lru = &adev->mman.bdev.glob->swap_lru[0];
 	}
 
 	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
drivers/gpu/drm/ttm/ttm_bo.c
@@ -242,13 +242,13 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
 struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
 {
-	return bo->bdev->man[bo->mem.mem_type].lru.prev;
+	return bo->bdev->man[bo->mem.mem_type].lru[bo->priority].prev;
 }
 EXPORT_SYMBOL(ttm_bo_default_lru_tail);
 
 struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
 {
-	return bo->glob->swap_lru.prev;
+	return bo->glob->swap_lru[bo->priority].prev;
 }
 EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
@@ -741,14 +741,17 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY, put_count;
+	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &man->lru, lru) {
-		ret = __ttm_bo_reserve(bo, false, true, NULL);
-		if (ret)
-			continue;
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(bo, &man->lru[i], lru) {
+			ret = __ttm_bo_reserve(bo, false, true, NULL);
+			if (ret)
+				continue;
 
-		if (place && !bdev->driver->eviction_valuable(bo, place)) {
-			__ttm_bo_unreserve(bo);
-			ret = -EBUSY;
-			continue;
+			if (place && !bdev->driver->eviction_valuable(bo,
+								      place)) {
+				__ttm_bo_unreserve(bo);
+				ret = -EBUSY;
+				continue;
@@ -757,6 +760,10 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
-		break;
-	}
+			break;
+		}
 
+		if (!ret)
+			break;
+	}
+
 	if (ret) {
 		spin_unlock(&glob->lru_lock);
 		return ret;
@@ -1197,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
+	bo->priority = 0;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1297,19 +1305,22 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct dma_fence *fence;
 	int ret;
+	unsigned i;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-	while (!list_empty(&man->lru)) {
-		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
-		if (ret)
-			return ret;
-		spin_lock(&glob->lru_lock);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		while (!list_empty(&man->lru[i])) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+			if (ret)
+				return ret;
+			spin_lock(&glob->lru_lock);
+		}
 	}
 	spin_unlock(&glob->lru_lock);
 
 	spin_lock(&man->move_lock);
@@ -1385,6 +1396,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
@@ -1410,7 +1422,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	man->use_type = true;
 	man->size = p_size;
 
-	INIT_LIST_HEAD(&man->lru);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&man->lru[i]);
 	man->move = NULL;
 
 	return 0;
@@ -1442,6 +1455,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		container_of(ref, struct ttm_bo_global_ref, ref);
 	struct ttm_bo_global *glob = ref->object;
 	int ret;
+	unsigned i;
 
 	mutex_init(&glob->device_list_mutex);
 	spin_lock_init(&glob->lru_lock);
@@ -1453,7 +1467,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		goto out_no_drp;
 	}
 
-	INIT_LIST_HEAD(&glob->swap_lru);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&glob->swap_lru[i]);
 	INIT_LIST_HEAD(&glob->device_list);
 
 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
@@ -1512,8 +1527,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 	if (list_empty(&bdev->ddestroy))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
-	if (list_empty(&bdev->man[0].lru))
-		TTM_DEBUG("Swap list was clean\n");
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		if (list_empty(&bdev->man[0].lru[0]))
+			TTM_DEBUG("Swap list %d was clean\n", i);
 	spin_unlock(&glob->lru_lock);
 
 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1665,13 +1681,18 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	int ret = -EBUSY;
 	int put_count;
 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = __ttm_bo_reserve(bo, false, true, NULL);
-		if (!ret)
-			break;
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
+			ret = __ttm_bo_reserve(bo, false, true, NULL);
+			if (!ret)
+				break;
+		}
+		if (!ret)
+			break;
 	}
 
 	if (ret) {
 		spin_unlock(&glob->lru_lock);
include/drm/ttm/ttm_bo_api.h
@@ -215,6 +215,8 @@ struct ttm_buffer_object {
 
 	struct drm_vma_offset_node vma_node;
 
+	unsigned priority;
+
 	/**
 	 * Special members that are protected by the reserve lock
 	 * and the bo::lock when written to. Can be read with
include/drm/ttm/ttm_bo_driver.h
@@ -42,6 +42,8 @@
 #include <linux/spinlock.h>
 #include <linux/reservation.h>
 
+#define TTM_MAX_BO_PRIORITY	16
+
 struct ttm_backend_func {
 	/**
 	 * struct ttm_backend_func member bind
@@ -298,7 +300,7 @@ struct ttm_mem_type_manager {
 	 * Protected by the global->lru_lock.
 	 */
 
-	struct list_head lru;
+	struct list_head lru[TTM_MAX_BO_PRIORITY];
 
 	/*
 	 * Protected by @move_lock.
@@ -518,7 +520,7 @@ struct ttm_bo_global {
 	/**
 	 * Protected by the lru_lock.
 	 */
-	struct list_head swap_lru;
+	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
 
 	/**
 	 * Internal protection.