Commit 17acb9f3 authored by Rob Herring

drm/shmem: Add madvise state and purge helpers

Add support to the shmem GEM helpers for tracking madvise state and
purging pages. This is based on the msm implementation.
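
For illustration only (not part of this patch), a driver could expose the hint
through a madvise ioctl that simply forwards to the new helper; the "foo"
names and the uAPI struct below are made-up placeholders:

/*
 * Hypothetical driver sketch: report back whether the pages were
 * still resident (i.e. the BO has not been purged yet).
 */
static int foo_ioctl_madvise(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_foo_madvise *args = data;	/* assumed: handle, madv, retained */
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Non-zero means the BO still has its backing pages. */
	args->retained = drm_gem_shmem_madvise(obj, args->madv);

	drm_gem_object_put_unlocked(obj);

	return 0;
}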

The BO provides a list_head, but the list management is handled outside
of the shmem helpers as there are different locking requirements.
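
For example, a driver would typically add a BO to its own shrinker list
(under a driver-private lock) when userspace marks it as unneeded, and walk
that list from its shrinker callback, purging whatever
drm_gem_shmem_is_purgeable() allows. A rough, hypothetical sketch; the "foo"
device, its shrinker_lock/shrinker_list fields, and the freed-page accounting
are assumptions:

static unsigned long
foo_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct foo_device *fdev =
		container_of(shrinker, struct foo_device, shrinker);
	struct drm_gem_shmem_object *shmem, *tmp;
	unsigned long freed = 0;

	/* Driver lock protects the list; not the shmem pages_lock. */
	if (!mutex_trylock(&fdev->shrinker_lock))
		return SHRINK_STOP;

	list_for_each_entry_safe(shmem, tmp, &fdev->shrinker_list, madv_list) {
		if (freed >= sc->nr_to_scan)
			break;

		if (drm_gem_shmem_is_purgeable(shmem)) {
			drm_gem_shmem_purge(&shmem->base);
			freed += shmem->base.size >> PAGE_SHIFT;
			list_del_init(&shmem->madv_list);
		}
	}

	mutex_unlock(&fdev->shrinker_lock);

	return freed;
}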

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <maxime.ripard@bootlin.com>
Cc: Sean Paul <sean@poorly.run>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Eric Anholt <eric@anholt.net>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190805143358.21245-1-robh@kernel.org
parent 3551a9fa
@@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	/*
	 * Our buffers are kept pinned, so allocating them
@@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

void drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
...
@@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
	 */
	unsigned int pages_use_count;

	int madv;
	struct list_head madv_list;

	/**
	 * @pages_mark_dirty_on_put:
	 *
@@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);

int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);

static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
	return (shmem->madv > 0) &&
		!shmem->vmap_use_count && shmem->sgt &&
		!shmem->base.dma_buf && !shmem->base.import_attach;
}

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
void drm_gem_shmem_purge(struct drm_gem_object *obj);

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
...