Commit 054f1d1f authored by Huang Ying's avatar Huang Ying Committed by Linus Torvalds

mm/swap_state.c: simplify total_swapcache_pages() with get_swap_device()

total_swapcache_pages() may race with swapper_spaces[] allocation and
freeing.  Previously, this is protected with a swapper_spaces[] specific
RCU mechanism.  To simplify the logic/code complexity, it is replaced with
get/put_swap_device().  The code line number is reduced too.  Although not
so important, the swapoff() performance improves too because one
synchronize_rcu() call during swapoff() is deleted.

[ying.huang@intel.com: fix bad swap file entry warning]
  Link: http://lkml.kernel.org/r/20190531024102.21723-1-ying.huang@intel.com
Link: http://lkml.kernel.org/r/20190527082714.12151-1-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Andrea Parri <andrea.parri@amarulasolutions.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent eb085574
@@ -73,23 +73,24 @@ unsigned long total_swapcache_pages(void)
 	unsigned int i, j, nr;
 	unsigned long ret = 0;
 	struct address_space *spaces;
+	struct swap_info_struct *si;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_SWAPFILES; i++) {
-		/*
-		 * The corresponding entries in nr_swapper_spaces and
-		 * swapper_spaces will be reused only after at least
-		 * one grace period.  So it is impossible for them
-		 * belongs to different usage.
-		 */
-		nr = nr_swapper_spaces[i];
-		spaces = rcu_dereference(swapper_spaces[i]);
-		if (!nr || !spaces)
+		swp_entry_t entry = swp_entry(i, 1);
+
+		/* Avoid get_swap_device() to warn for bad swap entry */
+		if (!swp_swap_info(entry))
+			continue;
+		/* Prevent swapoff to free swapper_spaces */
+		si = get_swap_device(entry);
+		if (!si)
 			continue;
+		nr = nr_swapper_spaces[i];
+		spaces = swapper_spaces[i];
 		for (j = 0; j < nr; j++)
 			ret += spaces[j].nrpages;
+		put_swap_device(si);
 	}
-	rcu_read_unlock();
 	return ret;
 }
@@ -611,20 +612,16 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
 		mapping_set_no_writeback_tags(space);
 	}
 	nr_swapper_spaces[type] = nr;
-	rcu_assign_pointer(swapper_spaces[type], spaces);
+	swapper_spaces[type] = spaces;
 
 	return 0;
 }
 
 void exit_swap_address_space(unsigned int type)
 {
-	struct address_space *spaces;
-
-	spaces = swapper_spaces[type];
+	kvfree(swapper_spaces[type]);
 	nr_swapper_spaces[type] = 0;
-	rcu_assign_pointer(swapper_spaces[type], NULL);
-	synchronize_rcu();
-	kvfree(spaces);
+	swapper_spaces[type] = NULL;
 }
 
 static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment