Commit 65d3c635 authored by Tomer Tayar's avatar Tomer Tayar Committed by Oded Gabbay

habanalabs: fix H/W block handling for partial unmappings

Several munmap() calls can be done on a mapped H/W block that has a
larger size than a page size.
Releasing the object should be done only when the entire mapped range is
unmapped.
Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent 07ecaa0d
...@@ -291,14 +291,16 @@ static int vm_show(struct seq_file *s, void *data) ...@@ -291,14 +291,16 @@ static int vm_show(struct seq_file *s, void *data)
if (ctx->asid != HL_KERNEL_ASID_ID && if (ctx->asid != HL_KERNEL_ASID_ID &&
!list_empty(&ctx->hw_block_mem_list)) { !list_empty(&ctx->hw_block_mem_list)) {
seq_puts(s, "\nhw_block mappings:\n\n"); seq_puts(s, "\nhw_block mappings:\n\n");
seq_puts(s, " virtual address size HW block id\n"); seq_puts(s,
seq_puts(s, "-------------------------------------------\n"); " virtual address block size mapped size HW block id\n");
seq_puts(s,
"---------------------------------------------------------------\n");
mutex_lock(&ctx->hw_block_list_lock); mutex_lock(&ctx->hw_block_list_lock);
list_for_each_entry(lnode, &ctx->hw_block_mem_list, list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
node) {
seq_printf(s, seq_printf(s,
" 0x%-14lx %-6u %-9u\n", " 0x%-14lx %-6u %-6u %-9u\n",
lnode->vaddr, lnode->size, lnode->id); lnode->vaddr, lnode->block_size, lnode->mapped_size,
lnode->id);
} }
mutex_unlock(&ctx->hw_block_list_lock); mutex_unlock(&ctx->hw_block_list_lock);
} }
......
...@@ -2063,14 +2063,16 @@ struct hl_vm_hash_node { ...@@ -2063,14 +2063,16 @@ struct hl_vm_hash_node {
* @node: node to hang on the list in context object. * @node: node to hang on the list in context object.
* @ctx: the context this node belongs to. * @ctx: the context this node belongs to.
* @vaddr: virtual address of the HW block. * @vaddr: virtual address of the HW block.
* @size: size of the block. * @block_size: size of the block.
* @mapped_size: size of the block which is mapped. May change if partial un-mappings are done.
* @id: HW block id (handle). * @id: HW block id (handle).
*/ */
struct hl_vm_hw_block_list_node { struct hl_vm_hw_block_list_node {
struct list_head node; struct list_head node;
struct hl_ctx *ctx; struct hl_ctx *ctx;
unsigned long vaddr; unsigned long vaddr;
u32 size; u32 block_size;
u32 mapped_size;
u32 id; u32 id;
}; };
......
...@@ -1442,6 +1442,13 @@ static void hw_block_vm_close(struct vm_area_struct *vma) ...@@ -1442,6 +1442,13 @@ static void hw_block_vm_close(struct vm_area_struct *vma)
struct hl_vm_hw_block_list_node *lnode = struct hl_vm_hw_block_list_node *lnode =
(struct hl_vm_hw_block_list_node *) vma->vm_private_data; (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
struct hl_ctx *ctx = lnode->ctx; struct hl_ctx *ctx = lnode->ctx;
long new_mmap_size;
new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
if (new_mmap_size > 0) {
lnode->mapped_size = new_mmap_size;
return;
}
mutex_lock(&ctx->hw_block_list_lock); mutex_lock(&ctx->hw_block_list_lock);
list_del(&lnode->node); list_del(&lnode->node);
...@@ -1502,7 +1509,8 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) ...@@ -1502,7 +1509,8 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
lnode->ctx = ctx; lnode->ctx = ctx;
lnode->vaddr = vma->vm_start; lnode->vaddr = vma->vm_start;
lnode->size = block_size; lnode->block_size = block_size;
lnode->mapped_size = lnode->block_size;
lnode->id = block_id; lnode->id = block_id;
vma->vm_private_data = lnode; vma->vm_private_data = lnode;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment