Commit aba07b9a authored by Zack Rusin

drm/vmwgfx: Prevent unmapping active read buffers

The kms paths keep a persistent map active to read and compare the cursor
buffer. These maps can race with each other in simple scenario where:
a) buffer "a" mapped for update
b) buffer "a" mapped for compare
c) do the compare
d) unmap "a" for compare
e) update the cursor
f) unmap "a" for update
At step "e" the buffer has been unmapped and the read contents is bogus.

Prevent unmapping of active read buffers by simply keeping a count of
how many paths have currently active maps and unmap only when the count
reaches 0.

Fixes: 485d98d4 ("drm/vmwgfx: Add support for CursorMob and CursorBypass 4")
Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
Cc: dri-devel@lists.freedesktop.org
Cc: <stable@vger.kernel.org> # v5.19+
Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240816183332.31961-2-zack.rusin@broadcom.com
Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
parent c358a809
...@@ -360,6 +360,8 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size) ...@@ -360,6 +360,8 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
void *virtual; void *virtual;
int ret; int ret;
atomic_inc(&vbo->map_count);
virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used); virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
if (virtual) if (virtual)
return virtual; return virtual;
...@@ -383,11 +385,17 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size) ...@@ -383,11 +385,17 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
*/ */
void vmw_bo_unmap(struct vmw_bo *vbo) void vmw_bo_unmap(struct vmw_bo *vbo)
{ {
int map_count;
if (vbo->map.bo == NULL) if (vbo->map.bo == NULL)
return; return;
ttm_bo_kunmap(&vbo->map); map_count = atomic_dec_return(&vbo->map_count);
vbo->map.bo = NULL;
if (!map_count) {
ttm_bo_kunmap(&vbo->map);
vbo->map.bo = NULL;
}
} }
...@@ -421,6 +429,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv, ...@@ -421,6 +429,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo->tbo.priority = 3; vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT; vmw_bo->res_tree = RB_ROOT;
xa_init(&vmw_bo->detached_resources); xa_init(&vmw_bo->detached_resources);
atomic_set(&vmw_bo->map_count, 0);
params->size = ALIGN(params->size, PAGE_SIZE); params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size); drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
......
...@@ -71,6 +71,8 @@ struct vmw_bo_params { ...@@ -71,6 +71,8 @@ struct vmw_bo_params {
* @map: Kmap object for semi-persistent mappings * @map: Kmap object for semi-persistent mappings
* @res_tree: RB tree of resources using this buffer object as a backing MOB * @res_tree: RB tree of resources using this buffer object as a backing MOB
* @res_prios: Eviction priority counts for attached resources * @res_prios: Eviction priority counts for attached resources
* @map_count: The number of currently active maps. Will differ from the
* cpu_writers because it includes kernel maps.
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation. * increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
...@@ -90,6 +92,7 @@ struct vmw_bo { ...@@ -90,6 +92,7 @@ struct vmw_bo {
u32 res_prios[TTM_MAX_BO_PRIORITY]; u32 res_prios[TTM_MAX_BO_PRIORITY];
struct xarray detached_resources; struct xarray detached_resources;
atomic_t map_count;
atomic_t cpu_writers; atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */ /* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx; struct vmw_resource *dx_query_ctx;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment