Commit 0951dce6 authored by Jonathan Cavitt, committed by Nirmoy Das

drm/i915/gem: Make i915_gem_shrinker multi-gt aware

Where applicable, use for_each_gt instead of to_gt in the
i915_gem_shrinker functions so that they apply to more than just the
primary GT.  Specifically, this ensures that i915_gem_shrink_all retires
requests across all GTs, and that i915_gem_shrinker_vmap unmaps VMAs
from all GTs.
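
For context, for_each_gt (from "gt/intel_gt.h", hence the new include)
iterates over every GT present on the device, whereas to_gt(i915)
returns only the primary GT.  A minimal sketch of the before/after
pattern on the retire path, mirroring the hunks below (a fragment, not
a standalone program; the macro arguments are as used in this patch):

	struct intel_gt *gt;
	int i;

	/* Before: only the primary GT gets its requests retired */
	intel_gt_retire_requests(to_gt(i915));

	/* After: every GT present on the device gets the same kick */
	for_each_gt(gt, i915, i)
		intel_gt_retire_requests(gt);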

v2: Pass correct GT to intel_gt_retire_requests (Andrzej).
v3: Remove unnecessary braces (Andi).
v4: Undo v3 to fix a build failure.
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230926093028.23614-1-nirmoy.das@intel.com
parent 37d62359
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "i915_trace.h"
 
@@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	int err = 0;
+	int err = 0, i = 0;
+	struct intel_gt *gt;
 
 	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
 	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * what we can do is give them a kick so that we do not keep idle
 	 * contexts around longer than is necessary.
 	 */
-	if (shrink & I915_SHRINK_ACTIVE)
-		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(to_gt(i915));
+	if (shrink & I915_SHRINK_ACTIVE) {
+		for_each_gt(gt, i915, i)
+			/* Retire requests to unpin all idle contexts */
+			intel_gt_retire_requests(gt);
+	}
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
+	struct intel_gt *gt;
+	int i;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
-	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (!vma->iomap || i915_vma_is_active(vma))
-			continue;
-
-		if (!i915_gem_object_trylock(obj, NULL))
-			continue;
-
-		if (__i915_vma_unbind(vma) == 0)
-			freed_pages += count;
-
-		i915_gem_object_unlock(obj);
-	}
-	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
+	for_each_gt(gt, i915, i) {
+		mutex_lock(&gt->ggtt->vm.mutex);
+		list_for_each_entry_safe(vma, next,
+					 &gt->ggtt->vm.bound_list, vm_link) {
+			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
+			struct drm_i915_gem_object *obj = vma->obj;
+
+			if (!vma->iomap || i915_vma_is_active(vma))
+				continue;
+
+			if (!i915_gem_object_trylock(obj, NULL))
+				continue;
+
+			if (__i915_vma_unbind(vma) == 0)
+				freed_pages += count;
+
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&gt->ggtt->vm.mutex);
+	}
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;