Commit 64e3d12f authored by Kuo-Hsin Yang, committed by Chris Wilson

mm, drm/i915: mark pinned shmemfs pages as unevictable

The i915 driver uses shmemfs to allocate backing storage for gem
objects. These shmemfs pages can be pinned (increased ref count) by
shmem_read_mapping_page_gfp(). When a lot of pages are pinned, vmscan
wastes a lot of time scanning these pinned pages. In an extreme case, all
pages in the inactive anon lru are pinned, and only the inactive anon lru is
scanned due to inactive_ratio; the system then cannot swap and invokes the
oom-killer. Mark these pinned pages as unevictable to speed up vmscan.

Export pagevec API check_move_unevictable_pages().

This patch was inspired by Chris Wilson's change [1].

[1]: https://patchwork.kernel.org/patch/9768741/

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Kuo-Hsin Yang <vovoy@chromium.org>
Acked-by: Michal Hocko <mhocko@suse.com> # mm part
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20181106132324.17390-1-chris@chris-wilson.co.uk
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent f45a7977
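The change brackets the lifetime of pinned shmemfs pages with the mapping-level
unevictable flag, then repairs lru placement once the pin is dropped. A minimal
sketch of the pattern, assuming a hypothetical driver
(pin_backing_store()/unpin_backing_store() are illustrative names; the mm and
pagevec calls are the ones used in the diff below):

	/* Sketch only; needs <linux/pagemap.h>, <linux/pagevec.h>, <linux/swap.h>. */
	static void pin_backing_store(struct address_space *mapping)
	{
		/* Pages of an unevictable mapping are culled to the
		 * unevictable lru, which vmscan does not scan.
		 */
		mapping_set_unevictable(mapping);
		/* ... pin pages via shmem_read_mapping_page_gfp() ... */
	}

	static void unpin_backing_store(struct address_space *mapping,
					struct pagevec *pvec)
	{
		/* Pages faulted in from now on are evictable again. */
		mapping_clear_unevictable(mapping);
		/* Move pages parked on the unevictable lru back to an
		 * evictable lru, then drop the pin references.
		 */
		check_move_unevictable_pages(pvec);
		__pagevec_release(pvec);
	}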
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -143,7 +143,7 @@ using a number of wrapper functions:
 	Query the address space, and return true if it is completely
 	unevictable.
 
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
 
  (1) By ramfs to mark the address spaces of its inodes when they are created,
      and this mark remains for the life of the inode.
@@ -154,6 +154,10 @@ These are currently used in two places in the kernel:
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
 
+ (3) By the i915 driver to mark pinned address space until it's unpinned. The
+     amount of unevictable memory marked by i915 driver is roughly the bounded
+     object size in debugfs/dri/0/i915_gem_objects.
+
 Detecting Unevictable Pages
 ---------------------------
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2382,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }
 
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+	check_move_unevictable_pages(pvec);
+	__pagevec_release(pvec);
+	cond_resched();
+}
+
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 			      struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
+	struct pagevec pvec;
 	struct page *page;
 
 	__i915_gem_object_release_shmem(obj, pages, true);
@@ -2396,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj, pages);
 
+	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+	pagevec_init(&pvec);
 	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
 			set_page_dirty(page);
@@ -2403,9 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 		if (obj->mm.madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
-		put_page(page);
-		cond_resched();
+		if (!pagevec_add(&pvec, page))
+			check_release_pagevec(&pvec);
 	}
+	if (pagevec_count(&pvec))
+		check_release_pagevec(&pvec);
 	obj->mm.dirty = false;
 
 	sg_free_table(pages);
@@ -2526,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	unsigned int max_segment = i915_sg_segment_size();
 	unsigned int sg_page_sizes;
+	struct pagevec pvec;
 	gfp_t noreclaim;
 	int ret;
 
@@ -2561,6 +2579,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * Fail silently without starting the shrinker
 	 */
 	mapping = obj->base.filp->f_mapping;
+	mapping_set_unevictable(mapping);
 	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
 	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
 
@@ -2675,8 +2694,14 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_sg:
 	sg_mark_end(sg);
 err_pages:
-	for_each_sgt_page(page, sgt_iter, st)
-		put_page(page);
+	mapping_clear_unevictable(mapping);
+	pagevec_init(&pvec);
+	for_each_sgt_page(page, sgt_iter, st) {
+		if (!pagevec_add(&pvec, page))
+			check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		check_release_pagevec(&pvec);
 	sg_free_table(st);
 	kfree(st);
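A note on the release paths above: pagevec_add() returns the number of slots
still free after adding, so a zero return means the pagevec just filled and is
drained by check_release_pagevec(); the trailing pagevec_count() check drains
the final partial batch. Compared with the old per-page put_page()/
cond_resched() loop, lru fixup, release, and rescheduling now happen once per
PAGEVEC_SIZE batch. The idiom in isolation (same calls as the diff above):

	struct pagevec pvec;

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		/* pagevec_add() returns the remaining capacity; 0 == full. */
		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);	/* drain a full batch */
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);		/* drain the tail */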
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
 
 struct bio;
 
+struct pagevec;
+
 #define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK	0x7fff
 #define SWAP_FLAG_PRIO_SHIFT	0
@@ -373,7 +375,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
 #endif
 
 extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
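The bare struct pagevec forward declaration added above is sufficient because
swap.h uses the type only through a pointer; files that dereference it pull in
the full definition themselves, as the vmscan.c hunk below does with its new
include. A minimal illustration of the idiom (example.h/example.c and
rescue_lru_pages() are hypothetical):

	/* example.h: pointer-only use, so a forward declaration is enough */
	struct pagevec;
	void rescue_lru_pages(struct pagevec *pvec);

	/* example.c: dereferencing the pagevec requires the full definition */
	#include <linux/pagevec.h>
	#include <linux/swap.h>

	void rescue_lru_pages(struct pagevec *pvec)
	{
		check_move_unevictable_pages(pvec);	/* fix lru placement */
		pagevec_release(pvec);			/* drop page refs */
	}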
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -781,7 +781,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 			break;
 		index = indices[pvec.nr - 1] + 1;
 		pagevec_remove_exceptionals(&pvec);
-		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		check_move_unevictable_pages(&pvec);
 		pagevec_release(&pvec);
 		cond_resched();
 	}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,6 +46,7 @@
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
 #include <linux/oom.h>
+#include <linux/pagevec.h>
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
@@ -4162,17 +4163,16 @@ int page_evictable(struct page *page)
 	return ret;
 }
 
-#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages:	array of pages to check
- * @nr_pages:	number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec:	pagevec with lru pages to check
  *
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability, if an evictable page is in the unevictable
+ * lru list, moves it to the appropriate evictable lru list. This function
+ * should be only used for lru pages.
  */
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
 {
 	struct lruvec *lruvec;
 	struct pglist_data *pgdat = NULL;
@@ -4180,8 +4180,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 	int pgrescued = 0;
 	int i;
 
-	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pages[i];
+	for (i = 0; i < pvec->nr; i++) {
+		struct page *page = pvec->pages[i];
 		struct pglist_data *pagepgdat = page_pgdat(page);
 
 		pgscanned++;
@@ -4213,4 +4213,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		spin_unlock_irq(&pgdat->lru_lock);
 	}
 }
-#endif	/* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);