Commit 819a3e9a authored by Rex Zhu, committed by Alex Deucher

drm/amdgpu: Delete cgs wrapper functions for gpu memory manager

Delete these cgs interfaces (a sketch of the direct replacement pattern follows the list):
amdgpu_cgs_alloc_gpu_mem
amdgpu_cgs_free_gpu_mem
amdgpu_cgs_gmap_gpu_mem
amdgpu_cgs_gunmap_gpu_mem
amdgpu_cgs_kmap_gpu_mem
amdgpu_cgs_kunmap_gpu_mem
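
Callers that still need on-GPU buffers are expected to use the amdgpu_bo helpers directly. Below is a minimal sketch of the direct replacement pattern, assuming the existing amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() helpers; the size and domain are illustrative, and this commit itself only removes the wrappers:

	/* Illustrative only: create, pin and kernel-map a GTT buffer in
	 * one call, replacing the cgs_alloc_gpu_mem + cgs_gmap_gpu_mem +
	 * cgs_kmap_gpu_mem sequence. */
	struct amdgpu_bo *bo = NULL;
	u64 mc_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &mc_addr, &cpu_ptr);
	if (r)
		return r;

	/* ... use cpu_ptr and mc_addr ... */

	/* Unpin, unmap and free; replaces cgs_free_gpu_mem. */
	amdgpu_bo_free_kernel(&bo, &mc_addr, &cpu_ptr);
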
Reviewed-by: Alex Deucher <alexdeucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ecc124b0
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,131 +42,6 @@ struct amdgpu_cgs_device {
	((struct amdgpu_cgs_device *)cgs_device)->adev
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
			       NULL, NULL, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, true);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
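
Note while reading the deleted wrappers above that cgs_handle_t carried no extra state; it was simply the struct amdgpu_bo pointer cast back and forth, so callers can hold the BO pointer directly once the wrappers are gone. A two-line illustration (both lines appear verbatim in the deleted code):

	/* alloc side (amdgpu_cgs_alloc_gpu_mem): hand the BO out as a handle */
	*handle = (cgs_handle_t)obj;
	/* use side (the map/unmap wrappers): recover the BO from the handle */
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
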
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
@@ -906,12 +781,6 @@ static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool ena
}
static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
drivers/gpu/drm/amd/include/cgs_common.h
@@ -28,18 +28,6 @@
struct cgs_device;
/**
 * enum cgs_gpu_mem_type - GPU memory types
 */
enum cgs_gpu_mem_type {
	CGS_GPU_MEM_TYPE__VISIBLE_FB,
	CGS_GPU_MEM_TYPE__INVISIBLE_FB,
	CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
	CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
	CGS_GPU_MEM_TYPE__GART_CACHEABLE,
	CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
};

/**
 * enum cgs_ind_reg - Indirect register spaces
 */
@@ -130,89 +118,6 @@ struct cgs_display_info {
typedef unsigned long cgs_handle_t;
/**
 * cgs_alloc_gpu_mem() - Allocate GPU memory
 * @cgs_device: opaque device handle
 * @type:       memory type
 * @size:       size in bytes
 * @align:      alignment in bytes
 * @handle:     memory handle (output)
 *
 * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
 * memory allocation. This guarantees that the MC address returned by
 * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
 * FB memory types may be GART mapped depending on memory
 * fragmentation and memory allocator policies.
 *
 * If min/max_offset are non-0, the allocation will be forced to
 * reside between these offsets in its respective memory heap. The
 * base address that the offset relates to depends on the memory
 * type.
 *
 * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
 * - CGS_GPU_MEM_TYPE__GART_*:      GART aperture base address
 * - others:                        undefined, don't use with max_offset
 *
 * Return: 0 on success, -errno otherwise
 */
typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t size, uint64_t align,
				   cgs_handle_t *handle);

/**
 * cgs_free_gpu_mem() - Free GPU memory
 * @cgs_device: opaque device handle
 * @handle:     memory handle returned by alloc or import
 *
 * Return: 0 on success, -errno otherwise
 */
typedef int (*cgs_free_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);

/**
 * cgs_gmap_gpu_mem() - GPU-map GPU memory
 * @cgs_device: opaque device handle
 * @handle:     memory handle returned by alloc or import
 * @mcaddr:     MC address (output)
 *
 * Ensures that a buffer is GPU accessible and returns its MC address.
 *
 * Return: 0 on success, -errno otherwise
 */
typedef int (*cgs_gmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
				  uint64_t *mcaddr);

/**
 * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
 * @cgs_device: opaque device handle
 * @handle:     memory handle returned by alloc or import
 *
 * Allows the buffer to be migrated while it's not used by the GPU.
 *
 * Return: 0 on success, -errno otherwise
 */
typedef int (*cgs_gunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);

/**
 * cgs_kmap_gpu_mem() - Kernel-map GPU memory
 *
 * @cgs_device: opaque device handle
 * @handle:     memory handle returned by alloc or import
 * @map:        Kernel virtual address the memory was mapped to (output)
 *
 * Return: 0 on success, -errno otherwise
 */
typedef int (*cgs_kmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
				  void **map);

/**
 * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
 * @cgs_device: opaque device handle
 * @handle:     memory handle returned by alloc or import
 *
 * Return: 0 on success, -errno otherwise
 */
typedef int (*cgs_kunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
 * cgs_read_register() - Read an MMIO register
 * @cgs_device: opaque device handle
@@ -355,13 +260,6 @@ typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
struct cgs_ops {
	/* memory management calls (similar to KFD interface) */
	cgs_alloc_gpu_mem_t alloc_gpu_mem;
	cgs_free_gpu_mem_t free_gpu_mem;
	cgs_gmap_gpu_mem_t gmap_gpu_mem;
	cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
	cgs_kmap_gpu_mem_t kmap_gpu_mem;
	cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
	/* MMIO access */
	cgs_read_register_t read_register;
	cgs_write_register_t write_register;
	cgs_read_ind_register_t read_ind_register;
@@ -404,19 +302,6 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))

#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \
	CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
#define cgs_free_gpu_mem(dev,handle) \
	CGS_CALL(free_gpu_mem,dev,handle)
#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
	CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
#define cgs_gunmap_gpu_mem(dev,handle) \
	CGS_CALL(gunmap_gpu_mem,dev,handle)
#define cgs_kmap_gpu_mem(dev,handle,map) \
	CGS_CALL(kmap_gpu_mem,dev,handle,map)
#define cgs_kunmap_gpu_mem(dev,handle) \
	CGS_CALL(kunmap_gpu_mem,dev,handle)
#define cgs_read_register(dev,offset) \
	CGS_CALL(read_register,dev,offset)
#define cgs_write_register(dev,offset,value) \
	CGS_CALL(write_register,dev,offset,value)
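
For context on what the removed #defines did: they dispatched through CGS_CALL, which, by analogy with CGS_OS_CALL shown above but presumably going through ->ops rather than ->os_ops, expands roughly as sketched here:

	/* Assumed expansion, mirroring CGS_OS_CALL: */
	cgs_alloc_gpu_mem(dev, type, size, align, handle)
	/* becomes */
	(((struct cgs_device *)dev)->ops->alloc_gpu_mem(dev, type, size, align, handle))

With both the struct cgs_ops entries and these macros removed, nothing references the cgs_*_gpu_mem_t typedefs any longer.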