Commit 7169f0df authored by Oded Gabbay

habanalabs: don't free phys_pg_pack inside lock

Freeing phys_pg_pack includes calling the device's memory-scrubbing
functions, taking locks and possibly even triggering a reset.

This is not something that should be done while holding a device-wide
spinlock.

Therefore, save the relevant objects on a local linked-list and after
releasing the spinlock, traverse that list and free the phys_pg_pack
objects.
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent 1dc6cc4b
...@@ -1738,6 +1738,8 @@ struct hl_vm_hw_block_list_node { ...@@ -1738,6 +1738,8 @@ struct hl_vm_hw_block_list_node {
* @pages: the physical page array. * @pages: the physical page array.
* @npages: num physical pages in the pack. * @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list. * @total_size: total size of all the pages in this list.
* @node: used to attach to deletion list that is used when all the allocations are cleared
* at the teardown of the context.
* @mapping_cnt: number of shared mappings. * @mapping_cnt: number of shared mappings.
* @exporting_cnt: number of dma-buf exporting. * @exporting_cnt: number of dma-buf exporting.
* @asid: the context related to this list. * @asid: the context related to this list.
...@@ -1753,6 +1755,7 @@ struct hl_vm_phys_pg_pack { ...@@ -1753,6 +1755,7 @@ struct hl_vm_phys_pg_pack {
u64 *pages; u64 *pages;
u64 npages; u64 npages;
u64 total_size; u64 total_size;
struct list_head node;
atomic_t mapping_cnt; atomic_t mapping_cnt;
u32 exporting_cnt; u32 exporting_cnt;
u32 asid; u32 asid;
......
...@@ -2607,11 +2607,12 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) ...@@ -2607,11 +2607,12 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
*/ */
void hl_vm_ctx_fini(struct hl_ctx *ctx) void hl_vm_ctx_fini(struct hl_ctx *ctx)
{ {
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_list; struct hl_vm_phys_pg_pack *phys_pg_list;
struct hl_device *hdev = ctx->hdev;
struct hl_vm_hash_node *hnode; struct hl_vm_hash_node *hnode;
struct hl_vm *vm = &hdev->vm;
struct hlist_node *tmp_node; struct hlist_node *tmp_node;
struct list_head free_list;
struct hl_mem_in args; struct hl_mem_in args;
int i; int i;
...@@ -2644,19 +2645,24 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) ...@@ -2644,19 +2645,24 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
mutex_unlock(&ctx->mmu_lock); mutex_unlock(&ctx->mmu_lock);
INIT_LIST_HEAD(&free_list);
spin_lock(&vm->idr_lock); spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) { if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev, dev_dbg(hdev->dev,
"page list 0x%px of asid %d is still alive\n", "page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid); phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size,
&hdev->dram_used_mem); atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
free_phys_pg_pack(hdev, phys_pg_list);
idr_remove(&vm->phys_pg_pack_handles, i); idr_remove(&vm->phys_pg_pack_handles, i);
list_add(&phys_pg_list->node, &free_list);
} }
spin_unlock(&vm->idr_lock); spin_unlock(&vm->idr_lock);
list_for_each_entry(phys_pg_list, &free_list, node)
free_phys_pg_pack(hdev, phys_pg_list);
va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]); va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]); va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment