Commit babe8e7c authored by Yuri Nudelman, committed by Greg Kroah-Hartman

habanalabs: unified memory manager infrastructure

This is part of an overall refactoring effort to separate the nic and the
core drivers.
Currently there are 4 different flows that contain very similar code:
the ts, nic, hwblocks and cb alloc/map flows. What these flows have in
common is that they all contain a central store of memory buffers,
supporting the following set of operations:

- Allocate buffer and return handle
- Get buffer from the store with handle
- Put the buffer (last put releases the buffer)
- Map the buffer to the user

This patch contains a generic data structure used to implement the above
memory buffer store interface. Conversion of the existing code to use
the new data structure will follow.
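For orientation, below is a minimal usage sketch of the new interface. It is
not part of this patch; the my_flow_* names are hypothetical placeholders for
one of the flows that will be converted in follow-up patches, and "ops" stands
for an hl_mmap_mem_buf_ops instance supplied by that flow.

/* Hypothetical sketch only - not part of this patch. */
static int my_flow_create_buffer(struct hl_mem_mgr *mmg,
                                 struct hl_mmap_mem_buf_ops *ops,
                                 void *args, u32 *handle)
{
        struct hl_mmap_mem_buf *buf;

        /* Allocate buffer and return handle */
        buf = hl_mmap_mem_buf_alloc(mmg, ops, GFP_KERNEL, args);
        if (!buf)
                return -ENOMEM;

        /* The initial reference is dropped later by the flow's destroy path
         * (not shown).
         */
        *handle = buf->handle;
        return 0;
}

static void my_flow_use_buffer(struct hl_mem_mgr *mmg, u32 handle)
{
        struct hl_mmap_mem_buf *buf;

        /* Get buffer from the store with handle (takes a reference) */
        buf = hl_mmap_mem_buf_get(mmg, handle);
        if (!buf)
                return;

        /* ... use buf->private here ... */

        /* Put the buffer (last put releases the buffer) */
        hl_mmap_mem_buf_put(buf);
}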
Signed-off-by: Yuri Nudelman <ynudelman@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b75cce27
@@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
                common/command_buffer.o common/hw_queue.o common/irq.o \
                common/sysfs.o common/hwmon.o common/memory.o \
                common/command_submission.o common/firmware_if.o \
-               common/state_dump.o
+               common/state_dump.o common/memory_mgr.o
@@ -741,6 +741,57 @@ struct hl_ts_buff {
        u32 user_buff_size;
};
struct hl_mmap_mem_buf;

/**
 * struct hl_mem_mgr - describes unified memory manager for mappable memory chunks.
 * @dev: back pointer to the owning device
 * @lock: protects handles
 * @handles: an idr holding all active handles to the memory buffers in the system.
 */
struct hl_mem_mgr {
        struct device *dev;
        spinlock_t lock;
        struct idr handles;
};
/**
 * struct hl_mmap_mem_buf_ops - describes unified memory manager buffer behavior
 * @alloc: callback executed on buffer allocation, shall allocate the memory,
 *         set it under buffer private, and set mappable size.
 * @mmap: callback executed on mmap, must map the buffer to vma
 * @release: callback executed on release, must free the resources used by the buffer
 */
struct hl_mmap_mem_buf_ops {
        int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args);
        int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args);
        void (*release)(struct hl_mmap_mem_buf *buf);
};
/**
 * struct hl_mmap_mem_buf - describes a single unified memory buffer
 * @ops: buffer behavior
 * @mmg: back pointer to the unified memory manager
 * @refcount: reference counter for buffer users
 * @private: pointer to buffer behavior private data
 * @mmap: atomic boolean indicating whether or not the buffer is mapped right now
 * @real_mapped_size: the actual size of buffer mapped, after part of it may be released,
 *                    may change at runtime.
 * @mappable_size: the original mappable size of the buffer, does not change after
 *                 the allocation.
 * @handle: the buffer id in mmg handles store
 */
struct hl_mmap_mem_buf {
        struct hl_mmap_mem_buf_ops *ops;
        struct hl_mem_mgr *mmg;
        struct kref refcount;
        void *private;
        atomic_t mmap;
        u64 real_mapped_size;
        u64 mappable_size;
        u32 handle;
};
/**
 * struct hl_cb - describes a Command Buffer.
 * @refcount: reference counter for usage of the CB.
...
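To make the callback contract above concrete, here is a hedged, hypothetical
example of an hl_mmap_mem_buf_ops implementation backed by plain page
allocations. None of the example_* names exist in this patch; the snippet only
illustrates what each callback is expected to do.

/* Hypothetical example only - a simple ops implementation backed by
 * __get_free_pages(), not part of this patch.
 */
static int example_buf_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
        u64 size = PAGE_ALIGN(*(u64 *)args); /* caller passes the requested size via args */

        buf->private = (void *)__get_free_pages(gfp, get_order(size));
        if (!buf->private)
                return -ENOMEM;

        buf->mappable_size = size; /* alloc must set the mappable size */
        return 0;
}

static int example_buf_mmap(struct hl_mmap_mem_buf *buf,
                            struct vm_area_struct *vma, void *args)
{
        /* mmap must map the backing memory into the user vma */
        return remap_pfn_range(vma, vma->vm_start,
                               virt_to_phys(buf->private) >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static void example_buf_release(struct hl_mmap_mem_buf *buf)
{
        /* release must free whatever alloc set up */
        free_pages((unsigned long)buf->private, get_order(buf->mappable_size));
}

static struct hl_mmap_mem_buf_ops example_buf_ops = {
        .alloc = example_buf_alloc,
        .mmap = example_buf_mmap,
        .release = example_buf_release,
};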
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 *                       the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store, increase its refcount and return a pointer to
 * its descriptor. If no buffer matches the handle - return NULL.
 */
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u32 handle)
{
        struct hl_mmap_mem_buf *buf;

        spin_lock(&mmg->lock);
        buf = idr_find(&mmg->handles, handle);
        if (!buf) {
                spin_unlock(&mmg->lock);
                dev_warn(mmg->dev,
                         "Buff get failed, no match to handle %u\n", handle);
                return NULL;
        }
        kref_get(&buf->refcount);
        spin_unlock(&mmg->lock);

        return buf;
}
/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used as a kref release callback, when the last user of
 * the buffer is released. Shall be called from an interrupt context.
 */
static void hl_mmap_mem_buf_release(struct kref *kref)
{
        struct hl_mmap_mem_buf *buf =
                container_of(kref, struct hl_mmap_mem_buf, refcount);

        spin_lock(&buf->mmg->lock);
        idr_remove(&buf->mmg->handles, buf->handle);
        spin_unlock(&buf->mmg->lock);

        if (buf->ops->release)
                buf->ops->release(buf);

        kfree(buf);
}
/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall be called from an interrupt context.
 */
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
        return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}
/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the given memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
                      struct hl_mmap_mem_buf_ops *behavior, gfp_t gfp,
                      void *args)
{
        struct hl_mmap_mem_buf *buf;
        int rc;

        buf = kzalloc(sizeof(*buf), gfp);
        if (!buf)
                return NULL;

        spin_lock(&mmg->lock);
        rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
        spin_unlock(&mmg->lock);
        if (rc < 0) {
                dev_err(mmg->dev,
                        "Failed to allocate IDR for a new buffer, rc=%d\n", rc);
                goto free_buf;
        }

        buf->handle = rc;
        buf->mmg = mmg;
        buf->ops = behavior;
        kref_init(&buf->refcount);

        rc = buf->ops->alloc(buf, gfp, args);
        if (rc) {
                dev_err(mmg->dev, "Failure in buffer alloc callback %d\n",
                        rc);
                goto remove_idr;
        }

        dev_dbg(mmg->dev, "Created buff object handle %u\n", buf->handle);
        return buf;

remove_idr:
        spin_lock(&mmg->lock);
        idr_remove(&mmg->handles, buf->handle);
        spin_unlock(&mmg->lock);
free_buf:
        kfree(buf);
        return NULL;
}
/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * @vma: the vma object for which mmap was closed.
 *
 * Put the memory buffer if it is no longer mapped.
 */
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
        struct hl_mmap_mem_buf *buf =
                (struct hl_mmap_mem_buf *)vma->vm_private_data;
        long new_mmap_size;

        new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
        if (new_mmap_size > 0) {
                buf->real_mapped_size = new_mmap_size;
                return;
        }

        atomic_set(&buf->mmap, 0);
        hl_mmap_mem_buf_put(buf);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
        .close = hl_mmap_mem_buf_vm_close
};
/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 *
 * @mmg: unified memory manager
 * @vma: the vma object for which mmap was requested
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma.
 */
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
                    void *args)
{
        struct hl_mmap_mem_buf *buf;
        u64 user_mem_size;
        u32 handle;
        int rc;

        /* We use the page offset to hold the idr and thus we need to clear
         * it before doing the mmap itself
         */
        handle = vma->vm_pgoff;
        vma->vm_pgoff = 0;

        /* Reference was taken here */
        buf = hl_mmap_mem_buf_get(mmg, handle);
        if (!buf) {
                dev_err(mmg->dev,
                        "Memory mmap failed, no match to handle %u\n", handle);
                return -EINVAL;
        }

        /* Validation check */
        user_mem_size = vma->vm_end - vma->vm_start;
        if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
                dev_err(mmg->dev,
                        "Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
                        user_mem_size, buf->mappable_size);
                rc = -EINVAL;
                goto put_mem;
        }

#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
        if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
                       user_mem_size)) {
#else
        if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
                       user_mem_size)) {
#endif
                dev_err(mmg->dev, "user pointer is invalid - 0x%lx\n",
                        vma->vm_start);
                rc = -EINVAL;
                goto put_mem;
        }

        if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
                dev_err(mmg->dev,
                        "Memory mmap failed, already mmaped to user\n");
                rc = -EINVAL;
                goto put_mem;
        }

        vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

        /* Note: We're transferring the memory reference to vma->vm_private_data here. */
        vma->vm_private_data = buf;

        rc = buf->ops->mmap(buf, vma, args);
        if (rc) {
                atomic_set(&buf->mmap, 0);
                goto put_mem;
        }

        buf->real_mapped_size = buf->mappable_size;
        vma->vm_pgoff = handle;

        return 0;

put_mem:
        hl_mmap_mem_buf_put(buf);
        return rc;
}
/**
 * hl_mem_mgr_init - initialize unified memory manager
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of unified memory manager
 */
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
        mmg->dev = dev;
        spin_lock_init(&mmg->lock);
        idr_init(&mmg->handles);
}
/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 *
 * Release the unified memory manager. Shall be called from an interrupt context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
{
        struct hl_mmap_mem_buf *buf;
        struct idr *idp;
        u32 id;

        idp = &mmg->handles;

        idr_for_each_entry(idp, buf, id) {
                if (hl_mmap_mem_buf_put(buf) != 1)
                        dev_err(mmg->dev,
                                "Buff handle %u for CTX is still alive\n", id);
        }

        /* TODO: can it happen that some buffer is still in use at this point? */
        idr_destroy(&mmg->handles);
}
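Finally, a hedged sketch of how a manager instance might be wired up by a
caller. The example_* names and the idea of stashing the manager in the file's
private data are illustrative assumptions, not something this patch defines.

/* Hypothetical wiring sketch - not part of this patch. */
static void example_setup(struct device *dev, struct hl_mem_mgr *mmg)
{
        hl_mem_mgr_init(dev, mmg); /* prepares the idr and the lock */
}

static int example_device_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct hl_mem_mgr *mmg = filp->private_data; /* assumed to be stored at open() */

        /* vma->vm_pgoff carries the buffer handle returned at allocation time */
        return hl_mem_mgr_mmap(mmg, vma, NULL);
}

static void example_teardown(struct hl_mem_mgr *mmg)
{
        hl_mem_mgr_fini(mmg); /* drops the store's reference on every remaining buffer */
}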