Commit 21c355b0 authored by Dave Airlie

Merge tag 'drm-misc-next-fixes-2021-07-01' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Short summary of fixes pull:

 * amdgpu: TTM fixes
 * dma-buf: Doc fixes
 * gma500: Fix potential BO leaks in error handling
 * radeon: Fix NULL-ptr deref
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YN2GK2SH64yqXqh9@linux-uq9g
parents 5cebdea6 f18f5801
@@ -919,7 +919,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+	if (bo_mem->mem_type != TTM_PL_TT ||
+	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
 		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 		return 0;
 	}
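In the amdgpu hunk, the bind callback now defers GART binding (gtt->offset = AMDGPU_BO_INVALID_OFFSET) unless the resource is TTM_PL_TT and already has a GART address. A minimal sketch of that combined check as a standalone helper, purely for illustration; the helper name is made up, and only the two conditions come from the hunk above:

static bool example_defer_gart_bind(struct ttm_resource *bo_mem)
{
	/* Same test the patch adds in amdgpu_ttm_backend_bind(): anything
	 * that is not a TTM_PL_TT resource with a valid GART address is
	 * bound later.
	 */
	return bo_mem->mem_type != TTM_PL_TT ||
	       !amdgpu_gtt_mgr_has_gart_addr(bo_mem);
}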
@@ -352,6 +352,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
 			 const struct drm_mode_fb_cmd2 *cmd)
 {
 	struct drm_gem_object *obj;
+	struct drm_framebuffer *fb;
 
 	/*
 	 *	Find the GEM object and thus the gtt range object that is
@@ -362,7 +363,11 @@ static struct drm_framebuffer *psb_user_framebuffer_create
 		return ERR_PTR(-ENOENT);
 
 	/* Let the core code do all the work */
-	return psb_framebuffer_create(dev, cmd, obj);
+	fb = psb_framebuffer_create(dev, cmd, obj);
+	if (IS_ERR(fb))
+		drm_gem_object_put(obj);
+
+	return fb;
 }
 
 static int psbfb_probe(struct drm_fb_helper *fb_helper,
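The gma500 hunks are the BO-leak fix from the summary: psb_user_framebuffer_create() takes a reference through drm_gem_object_lookup() but used to return the result of psb_framebuffer_create() directly, so a failed framebuffer creation leaked that reference. A rough sketch of the put-on-error pattern, with made-up example_* names standing in for the driver functions; the drm_gem_object_lookup()/drm_gem_object_put() calls are the real DRM core APIs:

#include <drm/drm_gem.h>
#include <drm/drm_framebuffer.h>
#include <linux/err.h>

static struct drm_framebuffer *
example_user_fb_create(struct drm_device *dev, struct drm_file *filp,
		       const struct drm_mode_fb_cmd2 *cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;

	obj = drm_gem_object_lookup(filp, cmd->handles[0]); /* takes a reference */
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = example_framebuffer_create(dev, cmd, obj); /* hypothetical helper */
	if (IS_ERR(fb))
		drm_gem_object_put(obj); /* drop the lookup reference on failure */

	return fb;
}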
@@ -49,23 +49,23 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
-static void radeon_update_memory_usage(struct radeon_bo *bo,
-				       unsigned mem_type, int sign)
+static void radeon_update_memory_usage(struct ttm_buffer_object *bo,
+				       unsigned int mem_type, int sign)
 {
-	struct radeon_device *rdev = bo->rdev;
+	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
 
 	switch (mem_type) {
 	case TTM_PL_TT:
 		if (sign > 0)
-			atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
+			atomic64_add(bo->base.size, &rdev->gtt_usage);
 		else
-			atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
+			atomic64_sub(bo->base.size, &rdev->gtt_usage);
 		break;
 	case TTM_PL_VRAM:
 		if (sign > 0)
-			atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
+			atomic64_add(bo->base.size, &rdev->vram_usage);
 		else
-			atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
+			atomic64_sub(bo->base.size, &rdev->vram_usage);
 		break;
 	}
 }
 
@@ -76,8 +76,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	bo = container_of(tbo, struct radeon_bo, tbo);
 
-	radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
-
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
@@ -727,24 +725,21 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   bool evict,
+			   unsigned int old_type,
 			   struct ttm_resource *new_mem)
 {
 	struct radeon_bo *rbo;
 
+	radeon_update_memory_usage(bo, old_type, -1);
+	if (new_mem)
+		radeon_update_memory_usage(bo, new_mem->mem_type, 1);
+
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
 		return;
 
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 1);
 	radeon_vm_bo_invalidate(rbo->rdev, rbo);
-
-	/* update statistics */
-	if (!new_mem)
-		return;
-
-	radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
-	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
 vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -161,7 +161,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-				  bool evict,
+				  unsigned int old_type,
 				  struct ttm_resource *new_mem);
 extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
@@ -199,7 +199,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	struct ttm_resource *old_mem = bo->resource;
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
-	int r;
+	int r, old_type;
 
 	if (new_mem->mem_type == TTM_PL_TT) {
 		r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
@@ -216,6 +216,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
 		return -EINVAL;
 
+	/* Save old type for statistics update */
+	old_type = old_mem->mem_type;
+
 	rdev = radeon_get_rdev(bo->bdev);
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		ttm_bo_move_null(bo, new_mem);
@@ -261,7 +264,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 out:
 	/* update statistics */
 	atomic64_add(bo->base.size, &rdev->num_bytes_moved);
-	radeon_bo_move_notify(bo, evict, new_mem);
+	radeon_bo_move_notify(bo, old_type, new_mem);
 	return 0;
 }
 
@@ -682,7 +685,11 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
 static void
 radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 {
-	radeon_bo_move_notify(bo, false, NULL);
+	unsigned int old_type = TTM_PL_SYSTEM;
+
+	if (bo->resource)
+		old_type = bo->resource->mem_type;
+	radeon_bo_move_notify(bo, old_type, NULL);
 }
 
 static struct ttm_device_funcs radeon_bo_driver = {
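The radeon hunks are the NULL-ptr deref fix from the summary: radeon_bo_move_notify() used to read bo->resource->mem_type to update the usage statistics, but by the time radeon_bo_delete_mem_notify() runs the resource can already be gone, so the old memory type is now passed in by the caller and falls back to TTM_PL_SYSTEM when no resource is attached. A tiny illustrative helper (not part of the patch; the name is made up) restating that NULL-safe lookup:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static unsigned int example_current_mem_type(struct ttm_buffer_object *bo)
{
	/* Fall back to TTM_PL_SYSTEM when the BO no longer has a resource,
	 * mirroring what radeon_bo_delete_mem_notify() does above.
	 */
	return bo->resource ? bo->resource->mem_type : TTM_PL_SYSTEM;
}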
@@ -212,7 +212,7 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_exclusive - return the object's exclusive fence
+ * dma_resv_excl_fence - return the object's exclusive fence
  * @obj: the reservation object
  *
  * Returns the exclusive fence (if any). Caller must either hold the objects
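The dma-buf hunk is a documentation-only fix: the kerneldoc heading now matches the helper it describes, dma_resv_excl_fence(). As a rough usage sketch, not taken from the patch (example_excl_signaled() is a made-up name; the locking rule is the one stated in the comment above):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static bool example_excl_signaled(struct dma_resv *obj)
{
	struct dma_fence *fence;
	bool signaled = true;

	/* The kerneldoc requires holding the reservation lock (or RCU);
	 * take the lock here for simplicity.
	 */
	if (dma_resv_lock(obj, NULL))
		return false; /* can only fail with a ww_acquire_ctx deadlock */

	fence = dma_resv_excl_fence(obj);
	if (fence)
		signaled = dma_fence_is_signaled(fence);

	dma_resv_unlock(obj);
	return signaled;
}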