Commit 4c44f89c authored by Thomas Hellström, committed by Christian König

drm/ttm, drm/amdgpu, drm/xe: Consider hitch moves within bulk sublist moves

To address the problem with hitches moving when bulk move
sublists are lru-bumped, register the list cursors with the
ttm_lru_bulk_move structure when traversing its list, and
when lru-bumping the list, move the cursor hitch to the tail.
This also means it's mandatory for drivers to call
ttm_lru_bulk_move_init() and ttm_lru_bulk_move_fini() when
initializing and finalizing the bulk move structure, so add
those calls to the amdgpu and xe drivers.

Compared to v1 this is slightly more code but less fragile
and hopefully easier to understand.
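
For illustration, a minimal sketch of the driver-side contract this
introduces (hypothetical code, not from this patch; the my_vm names
are made up and only the two ttm_lru_bulk_move_*() calls are real):

    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_resource.h>

    /* Hypothetical per-VM state embedding a bulk move structure. */
    struct my_vm {
            struct ttm_lru_bulk_move lru_bulk_move;
            /* ... */
    };

    static int my_vm_init(struct ttm_device *bdev, struct my_vm *vm)
    {
            /* Now mandatory: initializes bulk->cursor_list so that
             * cursors can register while traversing the sublists. */
            ttm_lru_bulk_move_init(&vm->lru_bulk_move);
            return 0;
    }

    static void my_vm_fini(struct ttm_device *bdev, struct my_vm *vm)
    {
            /* Now mandatory counterpart: detaches any cursors still
             * registered on the bulk move, taking bdev->lru_lock
             * internally. */
            ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
    }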

Changes in previous series:
- Completely rework the functionality
- Avoid a NULL pointer dereference assigning manager->mem_type
- Remove some leftover code causing build problems
v2:
- For hitch bulk tail moves, store the mem_type in the cursor
  instead of with the manager.
v3:
- Remove leftover mem_type member from change in v2.
v6:
- Add some lockdep asserts (Matthew Brost)
- Avoid NULL pointer dereference (Matthew Brost)
- No need to check bo->resource before dereferencing
  bo->bulk_move (Matthew Brost)

Cc: Christian König <christian.koenig@amd.com>
Cc: Somalapuram Amaranath <Amaranath.Somalapuram@amd.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240705153206.68526-5-thomas.hellstrom@linux.intel.com
Signed-off-by: Christian König <christian.koenig@amd.com>
parent 8e9bf0fb
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2422,6 +2422,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		return r;
 
+	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
 	vm->is_compute_context = false;
 
 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2486,6 +2488,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 error_free_delayed:
 	dma_fence_put(vm->last_tlb_flush);
 	dma_fence_put(vm->last_unlocked);
+	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
 	amdgpu_vm_fini_entities(vm);
 
 	return r;
@@ -2642,6 +2645,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		}
 	}
 
+	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
 }
 
 /**
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,53 @@
 #include <drm/drm_util.h>
 
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+	lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+	cursor->bulk = NULL;
+	list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+					       struct ttm_resource_cursor *cursor)
+{
+	struct ttm_lru_bulk_move_pos *pos;
+
+	lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+		list_del_init(&cursor->bulk_link);
+		return;
+	}
+
+	pos = &bulk->pos[cursor->mem_type][cursor->priority];
+	if (pos->last)
+		list_move(&cursor->hitch.link, &pos->last->lru.link);
+	ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+	struct ttm_resource_cursor *cursor, *next;
+
+	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove a cursor from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+	struct ttm_resource_cursor *cursor, *next;
+
+	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+		ttm_resource_cursor_clear_bulk(cursor);
+}
+
 /**
  * ttm_resource_cursor_fini_locked() - Finalize the LRU list cursor usage
  * @cursor: The struct ttm_resource_cursor to finalize.
@@ -45,6 +92,7 @@ void ttm_resource_cursor_fini_locked(struct ttm_resource_cursor *cursor)
 {
 	lockdep_assert_held(&cursor->man->bdev->lru_lock);
 	list_del_init(&cursor->hitch.link);
+	ttm_resource_cursor_clear_bulk(cursor);
 }
 
 /**
@@ -73,9 +121,27 @@ void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
 {
 	memset(bulk, 0, sizeof(*bulk));
+	INIT_LIST_HEAD(&bulk->cursor_list);
 }
 EXPORT_SYMBOL(ttm_lru_bulk_move_init);
 
+/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+			    struct ttm_lru_bulk_move *bulk)
+{
+	spin_lock(&bdev->lru_lock);
+	ttm_bulk_move_drop_cursors(bulk);
+	spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
 /**
  * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
  *
@@ -88,6 +154,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
 {
 	unsigned i, j;
 
+	ttm_bulk_move_adjust_cursors(bulk);
 	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
 		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
 			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -515,6 +582,28 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 }
 EXPORT_SYMBOL(ttm_resource_manager_debug);
 
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+			       struct ttm_lru_item *next_lru)
+{
+	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+	struct ttm_lru_bulk_move *bulk = NULL;
+	struct ttm_buffer_object *bo = next->bo;
+
+	lockdep_assert_held(&cursor->man->bdev->lru_lock);
+	bulk = bo->bulk_move;
+
+	if (cursor->bulk != bulk) {
+		if (bulk) {
+			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+			cursor->mem_type = next->mem_type;
+		} else {
+			list_del_init(&cursor->bulk_link);
+		}
+		cursor->bulk = bulk;
+	}
+}
+
 /**
  * ttm_resource_manager_first() - Start iterating over the resources
  * of a resource manager
@@ -535,6 +624,7 @@ ttm_resource_manager_first(struct ttm_resource_manager *man,
 	cursor->priority = 0;
 	cursor->man = man;
 	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+	INIT_LIST_HEAD(&cursor->bulk_link);
 	list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
 
 	return ttm_resource_manager_next(cursor);
@@ -559,6 +649,7 @@ ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
 		lru = &cursor->hitch;
 		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
 			if (ttm_lru_item_is_res(lru)) {
+				ttm_resource_cursor_check_bulk(cursor, lru);
 				list_move(&cursor->hitch.link, &lru->link);
 				return ttm_lru_item_to_res(lru);
 			}
@@ -568,6 +659,7 @@ ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
 			break;
 
 		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+		ttm_resource_cursor_clear_bulk(cursor);
 	}
 
 	ttm_resource_cursor_fini_locked(cursor);
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1252,6 +1252,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	init_rwsem(&vm->userptr.notifier_lock);
 	spin_lock_init(&vm->userptr.invalidated_lock);
 
+	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
 	vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
@@ -1369,6 +1371,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	mutex_destroy(&vm->snap_mutex);
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
+	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
 	kfree(vm);
 	if (!(flags & XE_VM_FLAG_MIGRATION))
 		xe_pm_runtime_put(xe);
@@ -1511,6 +1514,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
 		XE_WARN_ON(vm->pt_root[id]);
 
 	trace_xe_vm_free(vm);
+	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
 	kfree(vm);
 }
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -269,26 +269,6 @@ ttm_lru_item_to_res(struct ttm_lru_item *item)
 	return container_of(item, struct ttm_resource, lru);
 }
 
-/**
- * struct ttm_resource_cursor
- *
- * @man: The resource manager currently being iterated over.
- * @hitch: A hitch list node inserted before the next resource
- * to iterate over.
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
- */
-struct ttm_resource_cursor {
-	struct ttm_resource_manager *man;
-	struct ttm_lru_item hitch;
-	unsigned int priority;
-};
-
-void ttm_resource_cursor_fini_locked(struct ttm_resource_cursor *cursor);
-
-void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
-
 /**
  * struct ttm_lru_bulk_move_pos
  *
@@ -304,8 +284,9 @@ struct ttm_lru_bulk_move_pos {
 /**
  * struct ttm_lru_bulk_move
- *
 * @pos: first/last lru entry for resources in the each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
 *
 * Container for the current bulk move state. Should be used with
 * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -315,8 +296,39 @@ struct ttm_lru_bulk_move_pos {
 */
 struct ttm_lru_bulk_move {
 	struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+	struct list_head cursor_list;
 };
 
+/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted to. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+	struct ttm_resource_manager *man;
+	struct ttm_lru_item hitch;
+	struct list_head bulk_link;
+	struct ttm_lru_bulk_move *bulk;
+	unsigned int mem_type;
+	unsigned int priority;
+};
+
+void ttm_resource_cursor_fini_locked(struct ttm_resource_cursor *cursor);
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
 /**
  * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
  * struct sg_table backed struct ttm_resource.
@@ -405,6 +417,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 
 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+			    struct ttm_lru_bulk_move *bulk);
 
 void ttm_resource_add_bulk_move(struct ttm_resource *res,
 				struct ttm_buffer_object *bo);
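
For context, this is roughly how a locked LRU walk interacts with the
cursor after this change: the hitch keeps the walk restartable even
when a concurrent ttm_lru_bulk_move_tail() bumps the sublist being
traversed, because the bump now moves registered hitches to the
sublist tail. A sketch under that assumption (my_evict_walk() is a
hypothetical caller; the iteration API is the one shown in the
ttm_resource.c hunks above):

    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_resource.h>

    static void my_evict_walk(struct ttm_resource_manager *man)
    {
            struct ttm_resource_cursor cursor;
            struct ttm_resource *res;

            spin_lock(&man->bdev->lru_lock);
            for (res = ttm_resource_manager_first(man, &cursor); res;
                 res = ttm_resource_manager_next(&cursor)) {
                    /* Consider res for eviction. A bulk LRU bump that
                     * races with this walk moves the cursor's hitch to
                     * the bumped sublist's tail instead of leaving it
                     * stranded mid-sublist. */
            }
            /* Idempotent: ttm_resource_manager_next() also calls this
             * internally once the LRU lists are exhausted. */
            ttm_resource_cursor_fini_locked(&cursor);
            spin_unlock(&man->bdev->lru_lock);
    }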