Commit 25ed38b3 authored by Rob Clark

drm/msm: Drop mm_lock in scan loop

lock_stat + mmm_donut[1] say that this reduces contention on mm_lock
significantly (~350x lower waittime-max, and ~100x lower waittime-avg)

[1] https://chromium.googlesource.com/chromiumos/platform/microbenchmarks/+/refs/heads/main/mmm_donut.py

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/20210402211226.875726-1-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 528107c8
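
The change below is an instance of a general contention-reduction pattern: pin each object with a reference that fails if the object is already being freed, park it on a private cursor list so the scan can drop the shared lock while it works on the object, and splice the survivors back afterwards. What follows is a minimal userspace C analogue of that pattern, not driver code: the names (struct node, scan(), get_unless_zero()) are made up for illustration, and a pthread mutex plus C11 atomics stand in for the kernel's mutex and kref.

/*
 * Sketch of the scan-loop pattern: park each node on a private
 * cursor list, pin it with a "get unless zero" reference, then
 * drop the shared lock for the body of the loop.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;	/* circular, list_head style */
	atomic_int refcount;		/* 0 means "being freed": skip */
	unsigned long pages;
};

static struct node inactive = { &inactive, &inactive };	/* empty list head */
static pthread_mutex_t mm_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Analogue of kref_get_unless_zero(): pin the node only if still live. */
static int get_unless_zero(struct node *n)
{
	int old = atomic_load(&n->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&n->refcount, &old, old + 1))
			return 1;
	return 0;
}

static unsigned long scan(unsigned long nr_to_scan)
{
	struct node still_in_list = { &still_in_list, &still_in_list };
	unsigned long freed = 0;

	pthread_mutex_lock(&mm_lock);
	while (freed < nr_to_scan && inactive.next != &inactive) {
		struct node *n = inactive.next;

		/* Park the node so this pass never revisits it. */
		list_del(n);
		list_add_tail(n, &still_in_list);

		if (!get_unless_zero(n))
			continue;	/* concurrently being freed: skip */

		/* The reference keeps *n alive: safe to drop the lock. */
		pthread_mutex_unlock(&mm_lock);
		freed += n->pages;	/* stand-in for the purge work */
		pthread_mutex_lock(&mm_lock);

		atomic_fetch_sub(&n->refcount, 1);	/* put the reference */
	}
	/* Splice survivors back onto the shared list. */
	while (still_in_list.next != &still_in_list) {
		struct node *n = still_in_list.next;

		list_del(n);
		list_add_tail(n, &inactive);
	}
	pthread_mutex_unlock(&mm_lock);

	return freed;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		atomic_init(&n->refcount, 1);
		n->pages = 8;
		list_add_tail(n, &inactive);
	}
	printf("freed %lu pages\n", scan(16));
	return 0;
}

The cursor list is the key move: because each visited node is taken off the shared list before the lock is dropped, a concurrent scanner can never revisit it, and the final splice restores anything that was not freed.
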
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -184,7 +184,8 @@ struct msm_drm_private {
 	/**
 	 * Lists of inactive GEM objects.  Every bo is either in one of the
 	 * inactive lists (depending on whether or not it is shrinkable) or
-	 * gpu->active_list (for the gpu it is active on[1])
+	 * gpu->active_list (for the gpu it is active on[1]), or transiently
+	 * on a temporary list as the shrinker is running.
 	 *
 	 * These lists are protected by mm_lock (which should be acquired
 	 * before per GEM object lock).  One should *not* hold mm_lock in
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -719,7 +719,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	put_iova_vmas(obj);
 
 	msm_obj->madv = __MSM_MADV_PURGED;
-	mark_unpurgable(msm_obj);
+	update_inactive(msm_obj);
 
 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 	drm_gem_free_mmap_offset(obj);
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -22,26 +22,62 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	struct msm_gem_object *msm_obj;
+	struct list_head still_in_list;
 	unsigned long freed = 0;
 
+	INIT_LIST_HEAD(&still_in_list);
+
 	mutex_lock(&priv->mm_lock);
 
-	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
-		if (freed >= sc->nr_to_scan)
+	while (freed < sc->nr_to_scan) {
+		struct msm_gem_object *msm_obj = list_first_entry_or_null(
+				&priv->inactive_dontneed, typeof(*msm_obj), mm_list);
+
+		if (!msm_obj)
 			break;
-		/* Use trylock, because we cannot block on a obj that
-		 * might be trying to acquire mm_lock
+
+		list_move_tail(&msm_obj->mm_list, &still_in_list);
+
+		/*
+		 * If it is in the process of being freed, msm_gem_free_object
+		 * can be blocked on mm_lock waiting to remove it.  So just
+		 * skip it.
 		 */
-		if (!msm_gem_trylock(&msm_obj->base))
+		if (!kref_get_unless_zero(&msm_obj->base.refcount))
 			continue;
+
+		/*
+		 * Now that we own a reference, we can drop mm_lock for the
+		 * rest of the loop body, to reduce contention with the
+		 * retire_submit path (which could make more objects purgable)
+		 */
+		mutex_unlock(&priv->mm_lock);
+
+		/*
+		 * Note that this still needs to be trylock, since we can
+		 * hit shrinker in response to trying to get backing pages
+		 * for this obj (ie. while it's lock is already held)
+		 */
+		if (!msm_gem_trylock(&msm_obj->base))
+			goto tail;
+
 		if (is_purgeable(msm_obj)) {
+			/*
+			 * This will move the obj out of still_in_list to
+			 * the purged list
+			 */
 			msm_gem_purge(&msm_obj->base);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}
+
 		msm_gem_unlock(&msm_obj->base);
+
+tail:
+		drm_gem_object_put(&msm_obj->base);
+		mutex_lock(&priv->mm_lock);
 	}
 
+	list_splice_tail(&still_in_list, &priv->inactive_dontneed);
 	mutex_unlock(&priv->mm_lock);
 
 	if (freed > 0) {
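
One detail worth calling out from the hunk above: the per-object lock must still be taken with trylock because the shrinker can be entered recursively, e.g. while allocating backing pages for an object whose lock is already held; a blocking lock there would self-deadlock. A tiny illustrative sketch (hypothetical names, a pthread mutex standing in for the GEM object lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the shrinker visiting one object. */
static void shrink_one(void)
{
	if (pthread_mutex_trylock(&obj_lock) != 0) {
		puts("object busy, skipped");	/* the 'goto tail' case */
		return;
	}
	puts("object purged");
	pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
	pthread_mutex_lock(&obj_lock);	/* e.g. getting backing pages */
	shrink_one();	/* shrinker reentered with obj_lock held: skips */
	pthread_mutex_unlock(&obj_lock);

	shrink_one();	/* uncontended: the purge path runs */
	return 0;
}
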