Commit 07d80269 authored by John Hubbard, committed by Linus Torvalds

mm: devmap: refactor 1-based refcounting for ZONE_DEVICE pages

An upcoming patch changes and complicates the refcounting and especially
the "put page" aspects of it.  In order to keep everything clean,
refactor the devmap page release routines:

* Rename put_devmap_managed_page() to page_is_devmap_managed(), and
  limit the functionality to "read only": return a bool, with no side
  effects.

* Add a new routine, put_devmap_managed_page(), to handle decrementing
  the refcount for ZONE_DEVICE pages.

* Change callers (just release_pages() and put_page()) to check
  page_is_devmap_managed() before calling the new
  put_devmap_managed_page() routine.  This is a performance point:
  put_page() is a hot path, so we need to avoid non-inline function calls
  where possible.

* Rename __put_devmap_managed_page() to free_devmap_managed_page(), and
  limit the functionality to unconditionally freeing a devmap page.

This is originally based on a separate patch by Ira Weiny, which applied
to an early version of the put_user_page() experiments.  Since then,
Jérôme Glisse suggested the refactoring described above.

Link: http://lkml.kernel.org/r/20200107224558.2362728-5-jhubbard@nvidia.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Suggested-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 429589d6
...@@ -947,9 +947,10 @@ static inline bool is_zone_device_page(const struct page *page) ...@@ -947,9 +947,10 @@ static inline bool is_zone_device_page(const struct page *page)
#endif #endif
#ifdef CONFIG_DEV_PAGEMAP_OPS #ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page); void free_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key); DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
static inline bool page_is_devmap_managed(struct page *page)
{ {
if (!static_branch_unlikely(&devmap_managed_key)) if (!static_branch_unlikely(&devmap_managed_key))
return false; return false;
...@@ -958,7 +959,6 @@ static inline bool put_devmap_managed_page(struct page *page) ...@@ -958,7 +959,6 @@ static inline bool put_devmap_managed_page(struct page *page)
switch (page->pgmap->type) { switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_FS_DAX:
__put_devmap_managed_page(page);
return true; return true;
default: default:
break; break;
...@@ -966,11 +966,17 @@ static inline bool put_devmap_managed_page(struct page *page) ...@@ -966,11 +966,17 @@ static inline bool put_devmap_managed_page(struct page *page)
return false; return false;
} }
void put_devmap_managed_page(struct page *page);
#else /* CONFIG_DEV_PAGEMAP_OPS */ #else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool put_devmap_managed_page(struct page *page) static inline bool page_is_devmap_managed(struct page *page)
{ {
return false; return false;
} }
static inline void put_devmap_managed_page(struct page *page)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */ #endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page) static inline bool is_device_private_page(const struct page *page)
...@@ -1023,8 +1029,10 @@ static inline void put_page(struct page *page) ...@@ -1023,8 +1029,10 @@ static inline void put_page(struct page *page)
* need to inform the device driver through callback. See * need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details. * include/linux/memremap.h and HMM for details.
*/ */
if (put_devmap_managed_page(page)) if (page_is_devmap_managed(page)) {
put_devmap_managed_page(page);
return; return;
}
if (put_page_testzero(page)) if (put_page_testzero(page))
__put_page(page); __put_page(page);
......
...@@ -411,20 +411,8 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, ...@@ -411,20 +411,8 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
EXPORT_SYMBOL_GPL(get_dev_pagemap); EXPORT_SYMBOL_GPL(get_dev_pagemap);
#ifdef CONFIG_DEV_PAGEMAP_OPS #ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page) void free_devmap_managed_page(struct page *page)
{ {
int count = page_ref_dec_return(page);
/* still busy */
if (count > 1)
return;
/* only triggered by the dev_pagemap shutdown path */
if (count == 0) {
__put_page(page);
return;
}
/* notify page idle for dax */ /* notify page idle for dax */
if (!is_device_private_page(page)) { if (!is_device_private_page(page)) {
wake_up_var(&page->_refcount); wake_up_var(&page->_refcount);
...@@ -461,5 +449,4 @@ void __put_devmap_managed_page(struct page *page) ...@@ -461,5 +449,4 @@ void __put_devmap_managed_page(struct page *page)
page->mapping = NULL; page->mapping = NULL;
page->pgmap->ops->page_free(page); page->pgmap->ops->page_free(page);
} }
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */ #endif /* CONFIG_DEV_PAGEMAP_OPS */
...@@ -813,8 +813,10 @@ void release_pages(struct page **pages, int nr) ...@@ -813,8 +813,10 @@ void release_pages(struct page **pages, int nr)
* processing, and instead, expect a call to * processing, and instead, expect a call to
* put_page_testzero(). * put_page_testzero().
*/ */
if (put_devmap_managed_page(page)) if (page_is_devmap_managed(page)) {
put_devmap_managed_page(page);
continue; continue;
}
} }
page = compound_head(page); page = compound_head(page);
...@@ -1102,3 +1104,26 @@ void __init swap_setup(void) ...@@ -1102,3 +1104,26 @@ void __init swap_setup(void)
* _really_ don't want to cluster much more * _really_ don't want to cluster much more
*/ */
} }
#ifdef CONFIG_DEV_PAGEMAP_OPS
/*
 * Drop a reference on a ZONE_DEVICE (devmap-managed) page.
 *
 * Callers are expected to have checked page_is_devmap_managed() first
 * (see put_page() and release_pages()); the WARN_ON_ONCE below guards
 * against misuse on a non-devmap page.
 */
void put_devmap_managed_page(struct page *page)
{
int count;
/* Caller bug: page is not devmap-managed; refuse to touch refcount. */
if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
return;
count = page_ref_dec_return(page);
/*
 * devmap page refcounts are 1-based, rather than 0-based: if
 * refcount is 1, then the page is free and the refcount is
 * stable because nobody holds a reference on the page.
 */
if (count == 1)
free_devmap_managed_page(page);
else if (!count)
/* count == 0: only triggered by the dev_pagemap shutdown path. */
__put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment