Commit b2458726 authored by Christian König

drm/ttm: give resource functions their own [ch] files

This is a separate object we work with in TTM.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/384338/?series=80346&rev=1
parent e92ae67d
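
All of the driver hunks below follow the same shape: the old ttm_bo_mem_get()/ttm_bo_mem_put() pair becomes ttm_resource_alloc()/ttm_resource_free(), and the implementation moves out of ttm_bo.c into the new ttm_resource.c. As a reading aid only (this is not part of the patch), here is a minimal driver-side sketch of the temporary-placement pattern the amdgpu/nouveau/radeon hunks exercise; the function name and the empty placement are placeholders a real driver would fill in.

#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical example, not from this patch: move a BO through a temporary
 * placement and release it with the renamed helper. */
static int example_move_via_tmp(struct ttm_buffer_object *bo,
				struct ttm_operation_ctx *ctx,
				struct ttm_resource *new_mem)
{
	struct ttm_resource tmp_mem;
	struct ttm_placement placement = { 0 };	/* a real driver fills this in */
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;

	/* Ask TTM for space in the intermediate domain. */
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (r)
		return r;

	/* ... bind and copy through tmp_mem here ... */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);

	/* Was ttm_bo_mem_put(bo, &tmp_mem) before this patch. */
	ttm_resource_free(bo, &tmp_mem);
	return r;
}
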
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -381,7 +381,7 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -578,7 +578,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
/* move BO (in tmp_mem) to new_mem */
r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
ttm_resource_free(bo, &tmp_mem);
return r;
}
@@ -625,7 +625,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
}
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
ttm_resource_free(bo, &tmp_mem);
return r;
}
@@ -1203,11 +1203,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
gtt->offset = (u64)tmp.start << PAGE_SHIFT;
r = amdgpu_ttm_gart_bind(adev, bo, flags);
if (unlikely(r)) {
ttm_bo_mem_put(bo, &tmp);
ttm_resource_free(bo, &tmp);
return r;
}
ttm_bo_mem_put(bo, &bo->mem);
ttm_resource_free(bo, &bo->mem);
bo->mem = tmp;
}
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1191,7 +1191,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
ttm_bo_mem_put(bo, &tmp_reg);
ttm_resource_free(bo, &tmp_reg);
return ret;
}
@@ -1227,7 +1227,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
out:
ttm_bo_mem_put(bo, &tmp_reg);
ttm_resource_free(bo, &tmp_reg);
return ret;
}
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -271,7 +271,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
}
r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
ttm_resource_free(bo, &tmp_mem);
return r;
}
@@ -309,7 +309,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
ttm_resource_free(bo, &tmp_mem);
return r;
}
drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o
ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \
ttm_resource.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
drivers/gpu/drm/ttm/ttm_bo.c
@@ -77,19 +77,6 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
return 0;
}
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p)
{
drm_printf(p, " use_type: %d\n", man->use_type);
drm_printf(p, " use_tt: %d\n", man->use_tt);
drm_printf(p, " size: %llu\n", man->size);
drm_printf(p, " available_caching: 0x%08X\n", man->available_caching);
drm_printf(p, " default_caching: 0x%08X\n", man->default_caching);
if (man->func && man->func->debug)
(*man->func->debug)(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
@@ -363,7 +350,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
ttm_bo_mem_put(bo, &bo->mem);
ttm_resource_free(bo, &bo->mem);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -678,7 +665,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
ttm_resource_free(bo, &evict_mem);
goto out;
}
bo->evicted = true;
@@ -767,11 +754,11 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return r == -EDEADLK ? -EBUSY : r;
}
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket)
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
bool locked = false;
@@ -839,32 +826,6 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *mem)
{
struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type);
mem->mm_node = NULL;
if (!man->func || !man->func->alloc)
return 0;
return man->func->alloc(man, bo, place, mem);
}
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem)
{
struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type);
if (!man->func || !man->func->free)
return;
man->func->free(man, mem);
mem->mm_node = NULL;
mem->mem_type = TTM_PL_SYSTEM;
}
EXPORT_SYMBOL(ttm_bo_mem_put);
/**
* Add the last move fence to the BO and reserve a new shared slot.
*/
@@ -915,7 +876,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = ttm_bo_mem_get(bo, place, mem);
ret = ttm_resource_alloc(bo, place, mem);
if (likely(!ret))
break;
if (unlikely(ret != -ENOSPC))
@@ -1056,7 +1017,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
goto error;
type_found = true;
ret = ttm_bo_mem_get(bo, place, mem);
ret = ttm_resource_alloc(bo, place, mem);
if (ret == -ENOSPC)
continue;
if (unlikely(ret))
@@ -1065,7 +1026,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
man = ttm_manager_type(bdev, mem->mem_type);
ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
ttm_bo_mem_put(bo, mem);
ttm_resource_free(bo, mem);
if (ret == -EBUSY)
continue;
@@ -1132,7 +1093,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
if (ret)
ttm_bo_mem_put(bo, &mem);
ttm_resource_free(bo, &mem);
return ret;
}
@@ -1404,52 +1365,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_create);
int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
NULL);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
}
}
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
ret = dma_fence_wait(fence, false);
dma_fence_put(fence);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
@@ -1468,23 +1383,6 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size)
{
unsigned i;
man->use_io_reserve_lru = false;
mutex_init(&man->io_reserve_mutex);
spin_lock_init(&man->move_lock);
INIT_LIST_HEAD(&man->io_reserve_lru);
man->size = p_size;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
struct ttm_bo_global *glob =
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -47,7 +47,7 @@ struct ttm_transfer_obj {
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
ttm_resource_free(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -398,7 +398,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* On error, keep the mm node!
*/
if (!ret)
ttm_bo_mem_put(bo, &old_copy);
ttm_resource_free(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

drivers/gpu/drm/ttm/ttm_resource.c (new file)

/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, res->mem_type);
res->mm_node = NULL;
if (!man->func || !man->func->alloc)
return 0;
return man->func->alloc(man, bo, place, res);
}
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, res->mem_type);
if (man->func && man->func->free)
man->func->free(man, res);
res->mm_node = NULL;
res->mem_type = TTM_PL_SYSTEM;
}
EXPORT_SYMBOL(ttm_resource_free);
/**
* ttm_resource_manager_init
*
* @man: memory manager object to init
* @p_size: size managed area in pages.
*
* Initialise core parts of a manager object.
*/
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size)
{
unsigned i;
man->use_io_reserve_lru = false;
mutex_init(&man->io_reserve_mutex);
spin_lock_init(&man->move_lock);
INIT_LIST_HEAD(&man->io_reserve_lru);
man->size = p_size;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
/*
* ttm_resource_manager_force_list_clean
*
* @bdev - device to use
* @man - manager to use
*
* Force all the objects out of a memory manager until clean.
* Part of memory manager cleanup sequence.
*/
int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
NULL);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
}
}
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
ret = dma_fence_wait(fence, false);
dma_fence_put(fence);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
/**
* ttm_resource_manager_debug
*
* @man: manager type to dump.
* @p: printer to use for debug.
*/
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p)
{
drm_printf(p, " use_type: %d\n", man->use_type);
drm_printf(p, " use_tt: %d\n", man->use_tt);
drm_printf(p, " size: %llu\n", man->size);
drm_printf(p, " available_caching: 0x%08X\n", man->available_caching);
drm_printf(p, " default_caching: 0x%08X\n", man->default_caching);
if (man->func && man->func->debug)
(*man->func->debug)(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
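
Besides the alloc/free pair, ttm_resource.c now also carries the manager lifecycle helpers (init, force_list_clean, debug) that used to live in ttm_bo.c. Purely as an illustration (not from the patch), a driver-side manager teardown would typically use them in this order; the function name is a placeholder.

#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical example: shut down one resource manager. */
static int example_man_fini(struct ttm_bo_device *bdev,
			    struct ttm_resource_manager *man)
{
	int ret;

	/* Stop new placements from landing in this manager. */
	ttm_resource_manager_set_used(man, false);

	/* Evict everything that is still resident. */
	ret = ttm_resource_manager_force_list_clean(bdev, man);
	if (ret)
		return ret;

	/* Drop the pipelined-move fence before freeing driver state. */
	ttm_resource_manager_cleanup(man);
	return 0;
}
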
include/drm/ttm/ttm_bo_api.h
@@ -42,6 +42,8 @@
#include <linux/bitmap.h>
#include <linux/dma-resv.h>
#include "ttm_resource.h"
struct ttm_bo_global;
struct ttm_bo_device;
@@ -54,57 +56,6 @@ struct ttm_place;
struct ttm_lru_bulk_move;
struct ttm_resource_manager;
/**
* struct ttm_bus_placement
*
* @addr: mapped virtual address
* @base: bus base address
* @is_iomem: is this io memory ?
* @size: size in byte
* @offset: offset from the base address
* @io_reserved_vm: The VM system has a refcount in @io_reserved_count
* @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
*
* Structure indicating the bus placement of an object.
*/
struct ttm_bus_placement {
void *addr;
phys_addr_t base;
unsigned long size;
unsigned long offset;
bool is_iomem;
bool io_reserved_vm;
uint64_t io_reserved_count;
};
/**
* struct ttm_resource
*
* @mm_node: Memory manager node.
* @size: Requested size of memory region.
* @num_pages: Actual size of memory region in pages.
* @page_alignment: Page alignment.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
* Structure indicating the placement and space resources used by a
* buffer object.
*/
struct ttm_resource {
void *mm_node;
unsigned long start;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
};
/**
* enum ttm_bo_type
*
@@ -533,17 +484,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
uint32_t page_alignment, bool interruptible,
struct ttm_buffer_object **p_bo);
/**
* ttm_resource_manager_init
*
* @man: memory manager object to init
* @p_size: size managed area in pages.
*
* Initialise core parts of a manager object.
*/
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
/**
* ttm_bo_evict_mm
*
@@ -680,6 +620,12 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
return bo->base.dev != NULL;
}
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket);
/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16
include/drm/ttm/ttm_bo_driver.h
@@ -43,131 +43,6 @@
#include "ttm_placement.h"
#include "ttm_tt.h"
#define TTM_MAX_BO_PRIORITY 4U
struct ttm_resource_manager;
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
* @placement: Placement details.
* @flags: Additional placement flags.
* @mem: Pointer to a struct ttm_resource to be filled in.
*
* This function should allocate space in the memory type managed
* by @man. Placement details if
* applicable are given by @placement. If successful,
* @mem::mm_node should be set to a non-null value, and
* @mem::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
* If the memory region accommodate the buffer object, @mem::mm_node
* should be set to NULL, and the function should return 0.
* If a system error occurred, preventing the request to be fulfilled,
* the function should return a negative error code.
*
* Note that @mem::mm_node will only be dereferenced by
* struct ttm_resource_manager functions and optionally by the driver,
* which has knowledge of the underlying type.
*
* This function may not be called from within atomic context, so
* an implementation can and must use either a mutex or a spinlock to
* protect any data structures managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *mem);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
* @mem: Pointer to a struct ttm_resource to be filled in.
*
* This function frees memory type resources previously allocated
* and that are identified by @mem::mm_node and @mem::start. May not
* be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
struct ttm_resource *mem);
/**
* struct ttm_resource_manager_func member debug
*
* @man: Pointer to a memory type manager.
* @printer: Prefix to be used in printout to identify the caller.
*
* This function is called to print out the state of the memory
* type manager to aid debugging of out-of-memory conditions.
* It may not be called from within atomic context.
*/
void (*debug)(struct ttm_resource_manager *man,
struct drm_printer *printer);
};
/**
* struct ttm_resource_manager
*
* @use_type: The memory type is enabled.
* @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
* managed by this memory type.
* @gpu_offset: If used, the GPU offset of the first managed page of
* fixed memory or the first managed location in an aperture.
* @size: Size of the managed region.
* @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
* as defined in ttm_placement_common.h
* @default_caching: The default caching policy used for a buffer object
* placed in this memory type if the user doesn't provide one.
* @func: structure pointer implementing the range manager. See above
* @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
* @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
* @move_lock: lock for move fence
* static information. bdev::driver::io_mem_free is never used.
* @lru: The lru list for this memory type.
* @move: The fence of the last pipelined move operation.
*
* This structure is used to identify and manage memory types for a device.
*/
struct ttm_resource_manager {
/*
* No protection. Constant from start.
*/
bool use_type;
bool use_tt;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
const struct ttm_resource_manager_func *func;
struct mutex io_reserve_mutex;
bool use_io_reserve_lru;
spinlock_t move_lock;
/*
* Protected by @io_reserve_mutex:
*/
struct list_head io_reserve_lru;
/*
* Protected by the global->lru_lock.
*/
struct list_head lru[TTM_MAX_BO_PRIORITY];
/*
* Protected by @move_lock.
*/
struct dma_fence *move;
};
/**
* struct ttm_bo_driver
*
@@ -537,8 +412,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_operation_ctx *ctx);
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem);
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -675,59 +548,6 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
dma_resv_unlock(bo->base.resv);
}
/**
* ttm_resource_manager_set_used
*
* @man: A memory manager object.
* @used: usage state to set.
*
* Set the manager in use flag. If disabled the manager is no longer
* used for object placement.
*/
static inline void ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
{
man->use_type = used;
}
/**
* ttm_resource_manager_used
*
* @man: Manager to get used state for
*
* Get the in use flag for a manager.
* Returns:
* true is used, false if not.
*/
static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
{
return man->use_type;
}
/**
* ttm_resource_manager_cleanup
*
* @man: A memory manager object.
*
* Cleanup the move fences from the memory manager object.
*/
static inline void ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
{
dma_fence_put(man->move);
man->move = NULL;
}
/*
* ttm_resource_manager_force_list_clean
*
* @bdev - device to use
* @man - manager to use
*
* Force all the objects out of a memory manager until clean.
* Part of memory manager cleanup sequence.
*/
int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man);
/*
* ttm_bo_util.c
*/
@@ -874,13 +694,4 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
int ttm_range_man_fini(struct ttm_bo_device *bdev,
unsigned type);
/**
* ttm_resource_manager_debug
*
* @man: manager type to dump.
* @p: printer to use for debug.
*/
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
#endif

include/drm/ttm/ttm_resource.h (new file)

/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef _TTM_RESOURCE_H_
#define _TTM_RESOURCE_H_
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
#define TTM_MAX_BO_PRIORITY 4U
struct ttm_bo_device;
struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
* @placement: Placement details.
* @flags: Additional placement flags.
* @mem: Pointer to a struct ttm_resource to be filled in.
*
* This function should allocate space in the memory type managed
* by @man. Placement details if
* applicable are given by @placement. If successful,
* @mem::mm_node should be set to a non-null value, and
* @mem::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
* If the memory region accommodate the buffer object, @mem::mm_node
* should be set to NULL, and the function should return 0.
* If a system error occurred, preventing the request to be fulfilled,
* the function should return a negative error code.
*
* Note that @mem::mm_node will only be dereferenced by
* struct ttm_resource_manager functions and optionally by the driver,
* which has knowledge of the underlying type.
*
* This function may not be called from within atomic context, so
* an implementation can and must use either a mutex or a spinlock to
* protect any data structures managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *mem);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
* @mem: Pointer to a struct ttm_resource to be filled in.
*
* This function frees memory type resources previously allocated
* and that are identified by @mem::mm_node and @mem::start. May not
* be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
struct ttm_resource *mem);
/**
* struct ttm_resource_manager_func member debug
*
* @man: Pointer to a memory type manager.
* @printer: Prefix to be used in printout to identify the caller.
*
* This function is called to print out the state of the memory
* type manager to aid debugging of out-of-memory conditions.
* It may not be called from within atomic context.
*/
void (*debug)(struct ttm_resource_manager *man,
struct drm_printer *printer);
};
/**
* struct ttm_resource_manager
*
* @use_type: The memory type is enabled.
* @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
* managed by this memory type.
* @gpu_offset: If used, the GPU offset of the first managed page of
* fixed memory or the first managed location in an aperture.
* @size: Size of the managed region.
* @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
* as defined in ttm_placement_common.h
* @default_caching: The default caching policy used for a buffer object
* placed in this memory type if the user doesn't provide one.
* @func: structure pointer implementing the range manager. See above
* @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
* @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
* @move_lock: lock for move fence
* static information. bdev::driver::io_mem_free is never used.
* @lru: The lru list for this memory type.
* @move: The fence of the last pipelined move operation.
*
* This structure is used to identify and manage memory types for a device.
*/
struct ttm_resource_manager {
/*
* No protection. Constant from start.
*/
bool use_type;
bool use_tt;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
const struct ttm_resource_manager_func *func;
struct mutex io_reserve_mutex;
bool use_io_reserve_lru;
spinlock_t move_lock;
/*
* Protected by @io_reserve_mutex:
*/
struct list_head io_reserve_lru;
/*
* Protected by the global->lru_lock.
*/
struct list_head lru[TTM_MAX_BO_PRIORITY];
/*
* Protected by @move_lock.
*/
struct dma_fence *move;
};
/**
* struct ttm_bus_placement
*
* @addr: mapped virtual address
* @base: bus base address
* @is_iomem: is this io memory ?
* @size: size in byte
* @offset: offset from the base address
* @io_reserved_vm: The VM system has a refcount in @io_reserved_count
* @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
*
* Structure indicating the bus placement of an object.
*/
struct ttm_bus_placement {
void *addr;
phys_addr_t base;
unsigned long size;
unsigned long offset;
bool is_iomem;
bool io_reserved_vm;
uint64_t io_reserved_count;
};
/**
* struct ttm_resource
*
* @mm_node: Memory manager node.
* @size: Requested size of memory region.
* @num_pages: Actual size of memory region in pages.
* @page_alignment: Page alignment.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
* Structure indicating the placement and space resources used by a
* buffer object.
*/
struct ttm_resource {
void *mm_node;
unsigned long start;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
};
/**
* ttm_resource_manager_set_used
*
* @man: A memory manager object.
* @used: usage state to set.
*
* Set the manager in use flag. If disabled the manager is no longer
* used for object placement.
*/
static inline void
ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
{
man->use_type = used;
}
/**
* ttm_resource_manager_used
*
* @man: Manager to get used state for
*
* Get the in use flag for a manager.
* Returns:
* true is used, false if not.
*/
static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
{
return man->use_type;
}
/**
* ttm_resource_manager_cleanup
*
* @man: A memory manager object.
*
* Cleanup the move fences from the memory manager object.
*/
static inline void
ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
{
dma_fence_put(man->move);
man->move = NULL;
}
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
#endif
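
The header keeps the three-hook ttm_resource_manager_func interface (alloc/free/debug) unchanged, only relocated. For illustration only (not part of the patch), the smallest conceivable backend, for a domain that needs no per-BO allocation state, could look like the sketch below; all "example_" names are made up.

#include <drm/ttm/ttm_resource.h>

static int example_man_alloc(struct ttm_resource_manager *man,
			     struct ttm_buffer_object *bo,
			     const struct ttm_place *place,
			     struct ttm_resource *mem)
{
	mem->mm_node = NULL;	/* nothing to track, as the alloc kerneldoc allows */
	mem->start = 0;
	return 0;
}

static void example_man_free(struct ttm_resource_manager *man,
			     struct ttm_resource *mem)
{
	/* nothing was allocated in example_man_alloc() */
}

static void example_man_debug(struct ttm_resource_manager *man,
			      struct drm_printer *printer)
{
	drm_printf(printer, "  example manager, nothing to report\n");
}

static const struct ttm_resource_manager_func example_man_func = {
	.alloc = example_man_alloc,
	.free = example_man_free,
	.debug = example_man_debug,
};

A driver would typically pair such a table with ttm_resource_manager_init() and ttm_resource_manager_set_used(man, true) before exposing the memory type.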