Commit 94ad9338 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: introduce page_shift()

Replace PAGE_SHIFT + compound_order(page) with the new page_shift()
function.  Minor improvements in readability.
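To illustrate the identity the new helper captures, here is a minimal userspace model; struct page, compound_order(), PAGE_SHIFT, and PAGE_SIZE below are mocked stand-ins for illustration, not the kernel definitions:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12U                  /* assumed: 4 KiB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* toy stand-in for the kernel's struct page */
struct page {
	unsigned int order;
};

static unsigned int compound_order(const struct page *page)
{
	return page->order;
}

/* same arithmetic as the helper this commit adds */
static unsigned int page_shift(const struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

/* same arithmetic as page_size() from parent commit a50b854e */
static unsigned long page_size(const struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

int main(void)
{
	struct page huge = { .order = 9 };  /* e.g. a 2 MiB hugepage */

	assert(page_size(&huge) == 1UL << page_shift(&huge));
	printf("shift=%u size=%lu\n", page_shift(&huge), page_size(&huge));
	return 0;
}

With 4 KiB base pages, an order-9 compound page gives shift 21 and size 2 MiB, so page_size(page) == 1UL << page_shift(page), consistent with page_size() introduced in the parent commit.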

[akpm@linux-foundation.org: fix build in tce_page_is_contained()]
  Link: http://lkml.kernel.org/r/201907241853.yNQTrJWd%25lkp@intel.com
Link: http://lkml.kernel.org/r/20190721104612.19120-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a50b854e
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -129,11 +129,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		 * Allow to use larger than 64k IOMMU pages. Only do that
 		 * if we are backed by hugetlb.
 		 */
-		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
-			struct page *head = compound_head(page);
-
-			pageshift = compound_order(head) + PAGE_SHIFT;
-		}
+		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+			pageshift = page_shift(compound_head(page));
 		mem->pageshift = min(mem->pageshift, pageshift);
 		/*
 		 * We don't need struct page reference any more, switch
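A worked example with assumed numbers (not taken from the patch): on ppc64 with 64 KiB base pages, PAGE_SHIFT is 16; a 16 MiB hugetlb page has compound_order() == 8, so page_shift(compound_head(page)) returns 16 + 8 == 24, and the following min() clamps mem->pageshift to at most 24, the same value the removed open-coded form computed.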
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -176,13 +176,13 @@ static long tce_iommu_register_pages(struct tce_container *container,
 }
 
 static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
-		unsigned int page_shift)
+		unsigned int it_page_shift)
 {
 	struct page *page;
 	unsigned long size = 0;
 
-	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
-		return size == (1UL << page_shift);
+	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
+		return size == (1UL << it_page_shift);
 
 	page = pfn_to_page(hpa >> PAGE_SHIFT);
 	/*
@@ -190,7 +190,7 @@ static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
 	 * a page we just found. Otherwise the hardware can get access to
 	 * a bigger memory chunk that it should.
 	 */
-	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
+	return page_shift(compound_head(page)) >= it_page_shift;
 }
 
 static inline bool tce_groups_attached(struct tce_container *container)
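The final comparison works because both quantities are the log2 of power-of-two sizes, so comparing shifts is equivalent to comparing the sizes themselves. A small standalone check of that equivalence (plain C; backing_shift and the values are assumptions for illustration):

#include <assert.h>

int main(void)
{
	unsigned int backing_shift = 24;  /* stands in for page_shift(compound_head(page)) */
	unsigned int it_page_shift = 16;  /* e.g. a 64 KiB IOMMU page size */

	/* for powers of two, shift comparison and size comparison agree */
	assert((backing_shift >= it_page_shift) ==
	       ((1UL << backing_shift) >= (1UL << it_page_shift)));
	return 0;
}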
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -811,6 +811,12 @@ static inline unsigned long page_size(struct page *page)
 	return PAGE_SIZE << compound_order(page);
 }
 
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+	return PAGE_SHIFT + compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
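A usage note: compound_order(), and therefore page_shift(), reports the order stored on the head page, so callers that may hold a tail page pass it through compound_head() first, as both conversions above do.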