Commit ab2f7a5c authored by Felix Kuehling, committed by Alex Deucher

drm/amdgpu: Implement VRAM wipe on release

Wipe VRAM memory containing sensitive data when moving or releasing
BOs. Clearing the memory is pipelined to minimize any impact on
subsequent memory allocation latency. Use of a poison value should
help debug future use-after-free bugs.

When moving BOs, the existing ttm_bo_pipeline_move ensures that the
memory won't be reused before being wiped.

When releasing BOs, the BO is fenced with the memory fill operation,
which results in queuing the BO for a delayed delete.
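
For context, "fenced with the memory fill operation" means the wipe fence
becomes the exclusive fence on the BO's reservation object, so TTM's
delayed-delete worker cannot free the VRAM node until the fill signals. A
minimal sketch of that step, assuming the reservation_object API this patch
already uses; the helper name is illustrative, not part of the patch:

	/* Sketch only: attach @fence as the exclusive fence so all later
	 * users of the memory, including delayed delete, wait for it.
	 * Caller must hold the reservation lock. */
	static void example_fence_bo_exclusive(struct amdgpu_bo *bo,
					       struct dma_fence *fence)
	{
		struct reservation_object *resv = bo->tbo.resv;

		reservation_object_add_excl_fence(resv, fence);
	}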

v2: Move amdgpu_amdkfd_unreserve_memory_limit into
amdgpu_bo_release_notify so that KFD can use memory that's still
being cleared in the background.
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d8f4981e
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -80,9 +80,6 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 	if (bo->pin_count > 0)
 		amdgpu_bo_subtract_pin_size(bo);
 
-	if (bo->kfd_bo)
-		amdgpu_amdkfd_unreserve_memory_limit(bo);
-
 	amdgpu_bo_kunmap(bo);
 
 	if (bo->gem_base.import_attach)
@@ -1220,6 +1217,42 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
+/**
+ * amdgpu_bo_release_notify - notification about a BO being released
+ * @bo: pointer to a buffer object
+ *
+ * Wipes VRAM buffers whose contents should not be leaked before the
+ * memory is released.
+ */
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+{
+	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *abo;
+	int r;
+
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
+		return;
+
+	abo = ttm_to_amdgpu_bo(bo);
+
+	if (abo->kfd_bo)
+		amdgpu_amdkfd_unreserve_memory_limit(abo);
+
+	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+		return;
+
+	reservation_object_lock(bo->resv, NULL);
+
+	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->resv, &fence);
+	if (!WARN_ON(r)) {
+		amdgpu_bo_fence(abo, fence, false);
+		dma_fence_put(fence);
+	}
+
+	reservation_object_unlock(bo->resv);
+}
+
 /**
  * amdgpu_bo_fault_reserve_notify - notification about a memory fault
  * @bo: pointer to a buffer object
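
For context, a buffer opts into this behavior by carrying
AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE from creation. A hypothetical
in-kernel sketch (the helper name and parameters are illustrative, assuming
the usual amdgpu_bo_param/amdgpu_bo_create interface of this kernel
generation):

	/* Hypothetical example: create a VRAM BO whose contents are
	 * poisoned before the memory can be reused. */
	static int example_create_wiped_bo(struct amdgpu_device *adev,
					   unsigned long size,
					   struct amdgpu_bo **bo)
	{
		struct amdgpu_bo_param bp;

		memset(&bp, 0, sizeof(bp));
		bp.size = size;
		bp.byte_align = PAGE_SIZE;
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
		bp.flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		bp.type = ttm_bo_type_device;
		bp.resv = NULL;

		return amdgpu_bo_create(adev, &bp, bo);
	}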
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -265,6 +265,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   bool evict,
 			   struct ttm_mem_reg *new_mem);
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -444,6 +444,22 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	if (r)
 		goto error;
 
+	/* clear the space being freed */
+	if (old_mem->mem_type == TTM_PL_VRAM &&
+	    (ttm_to_amdgpu_bo(bo)->flags &
+	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+		struct dma_fence *wipe_fence = NULL;
+
+		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
+				       NULL, &wipe_fence);
+		if (r) {
+			goto error;
+		} else if (wipe_fence) {
+			dma_fence_put(fence);
+			fence = wipe_fence;
+		}
+	}
+
 	/* Always block for VM page tables before committing the new location */
 	if (bo->type == ttm_bo_type_kernel)
 		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
@@ -1599,6 +1615,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.move = &amdgpu_bo_move,
 	.verify_access = &amdgpu_verify_access,
 	.move_notify = &amdgpu_bo_move_notify,
+	.release_notify = &amdgpu_bo_release_notify,
 	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_free = &amdgpu_ttm_io_mem_free,
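
Note that the move path and the release path gate on the same condition:
VRAM placement plus the opt-in flag (the release path additionally requires
bo->mem.mm_node, so only backed VRAM is filled). A hypothetical helper,
illustration only and not part of the patch, makes the shared predicate
explicit:

	/* Illustration: the condition both wipe paths test before
	 * scheduling a fill of the old VRAM. */
	static bool example_bo_needs_wipe(struct amdgpu_bo *abo)
	{
		return abo->tbo.mem.mem_type == TTM_PL_VRAM &&
		       (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE);
	}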
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -38,6 +38,8 @@
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
 
+#define AMDGPU_POISON	0xd0bed0be
+
 struct amdgpu_mman {
 	struct ttm_bo_device		bdev;
 	bool				mem_global_referenced;
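
The poison value 0xd0bed0be is what makes stale reads recognizable when
debugging use-after-free. A hypothetical debug-only helper (illustrative;
assumes the caller already has a CPU mapping of the buffer) shows how a
suspicion could be confirmed:

	/* Illustration: report whether a mapped buffer still holds the
	 * wipe pattern, i.e. it was freed and poisoned but read again. */
	static bool example_buf_is_poisoned(const u32 *cpu_addr, size_t ndw)
	{
		size_t i;

		for (i = 0; i < ndw; i++)
			if (cpu_addr[i] != AMDGPU_POISON)
				return false;
		return true;
	}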