Commit b4b0193e authored by Thomas Zimmermann

drm/fbdev-generic: Fix locking with drm_client_buffer_vmap_local()

Temporarily lock the fbdev buffer object during updates to prevent
memory managers from evicting/moving the buffer. Moving a buffer
object while updating its content results in undefined behaviour.

Fbdev-generic updates its buffer object from a shadow buffer. Gem-shmem
and gem-dma helpers do not move buffer objects, so they are safe to use
with fbdev-generic. Gem-vram and qxl are based on TTM, but pin buffer
objects as part of the vmap operation. So both are also safe to use
with fbdev-generic.

Amdgpu and nouveau do not pin or lock the buffer object during an
update. Their TTM-based memory management could move the buffer object
while the update is ongoing.

The new vmap_local and vunmap_local helpers hold the buffer object's
reservation lock during the buffer update, as sketched below. This
prevents moving the buffer object on all memory managers.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> # virtio-gpu
Acked-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240227113853.8464-11-tzimmermann@suse.de
parent a7802784
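For orientation, the update path that this patch converts to the new helpers
follows the pattern below. This is a condensed, editorial sketch of the
fbdev-generic damage-blit path as it looks after the change in the diff that
follows; the function name is illustrative only.

static int damage_blit_sketch(struct drm_fb_helper *fb_helper,
                              struct drm_clip_rect *clip)
{
        struct drm_client_buffer *buffer = fb_helper->buffer;
        struct iosys_map map, dst;
        int ret;

        mutex_lock(&fb_helper->lock);

        /*
         * Takes the BO's reservation lock and maps the buffer into the
         * kernel's address space. While the lock is held, no memory
         * manager can evict or move the buffer object.
         */
        ret = drm_client_buffer_vmap_local(buffer, &map);
        if (ret)
                goto out;

        /* Blit the shadow buffer into the mapped client buffer. */
        dst = map;
        drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);

        /* Unmaps the buffer and drops the reservation lock again. */
        drm_client_buffer_vunmap_local(buffer);

out:
        mutex_unlock(&fb_helper->lock);
        return ret;
}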
@@ -304,6 +304,66 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
        return ERR_PTR(ret);
}

/**
 * drm_client_buffer_vmap_local - Map DRM client buffer into address space
 * @buffer: DRM client buffer
 * @map_copy: Returns the mapped memory's address
 *
 * This function maps a client buffer into kernel address space. If the
 * buffer is already mapped, it returns the existing mapping's address.
 *
 * Client buffer mappings are not ref'counted. Each call to
 * drm_client_buffer_vmap_local() should be closely followed by a call to
 * drm_client_buffer_vunmap_local(). See drm_client_buffer_vmap() for
 * long-term mappings.
 *
 * The returned address is a copy of the internal value. In contrast to
 * other vmap interfaces, you don't need it for the client's vunmap
 * function. So you can modify it at will during blit and draw operations.
 *
 * Returns:
 *      0 on success, or a negative errno code otherwise.
 */
int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
                                 struct iosys_map *map_copy)
{
        struct drm_gem_object *gem = buffer->gem;
        struct iosys_map *map = &buffer->map;
        int ret;

        drm_gem_lock(gem);

        ret = drm_gem_vmap(gem, map);
        if (ret)
                goto err_drm_gem_vmap_unlocked;
        *map_copy = *map;

        return 0;

err_drm_gem_vmap_unlocked:
        drm_gem_unlock(gem);
        return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap_local);
/**
 * drm_client_buffer_vunmap_local - Unmap DRM client buffer
 * @buffer: DRM client buffer
 *
 * This function removes a client buffer's memory mapping established
 * with drm_client_buffer_vmap_local(). Calling this function is only
 * required by clients that manage their buffer mappings by themselves.
 */
void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
{
        struct drm_gem_object *gem = buffer->gem;
        struct iosys_map *map = &buffer->map;

        drm_gem_vunmap(gem, map);
        drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
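Outside of fbdev-generic, a client that manages its own short-term mappings
would pair the two helpers in the same way. A minimal illustrative snippet
(the function name is hypothetical, and the iosys_map accessor is only an
example of touching the mapping):

static int client_clear_start_of_buffer(struct drm_client_buffer *buffer)
{
        struct iosys_map map;
        int ret;

        ret = drm_client_buffer_vmap_local(buffer, &map);
        if (ret)
                return ret;

        /* The BO's reservation lock is held here; the buffer cannot move. */
        iosys_map_memset(&map, 0, 0, 64); /* example access of the mapping */

        drm_client_buffer_vunmap_local(buffer);

        return 0;
}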
/**
 * drm_client_buffer_vmap - Map DRM client buffer into address space
 * @buffer: DRM client buffer
@@ -331,14 +391,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
        struct iosys_map *map = &buffer->map;
        int ret;

        /*
         * FIXME: The dependency on GEM here isn't required, we could
         * convert the driver handle to a dma-buf instead and use the
         * backend-agnostic dma-buf vmap support instead. This would
         * require that the handle2fd prime ioctl is reworked to pull the
         * fd_install step out of the driver backend hooks, to make that
         * final step optional for internal users.
         */
        ret = drm_gem_vmap_unlocked(buffer->gem, map);
        if (ret)
                return ret;
@@ -197,14 +197,14 @@ static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
         */
        mutex_lock(&fb_helper->lock);

        ret = drm_client_buffer_vmap(buffer, &map);
        ret = drm_client_buffer_vmap_local(buffer, &map);
        if (ret)
                goto out;

        dst = map;
        drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);

        drm_client_buffer_vunmap(buffer);
        drm_client_buffer_vunmap_local(buffer);

out:
        mutex_unlock(&fb_helper->lock);
@@ -1227,6 +1227,18 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap);
void drm_gem_lock(struct drm_gem_object *obj)
{
        dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;
@@ -141,6 +141,13 @@ struct drm_client_buffer {
        /**
         * @gem: GEM object backing this buffer
         *
         * FIXME: The dependency on GEM here isn't required, we could
         * convert the driver handle to a dma-buf instead and use the
         * backend-agnostic dma-buf vmap support instead. This would
         * require that the handle2fd prime ioctl is reworked to pull the
         * fd_install step out of the driver backend hooks, to make that
         * final step optional for internal users.
         */
        struct drm_gem_object *gem;
@@ -159,6 +166,9 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
                                 struct iosys_map *map_copy);
void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer);
int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
                           struct iosys_map *map);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
@@ -527,6 +527,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed);
void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);