Commit 692a68c1 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Linus Torvalds

mm: remove the page size change check in tlb_remove_page

Now that we check for page size change early in the loop, we can
partially revert e9d55e15 ("mm: change the interface for
__tlb_remove_page").

This simplifies the code much, by removing the need to track the last
address with which we adjusted the range.  We also go back to the older
way of filling the mmu_gather array, ie, we add an entry and then check
whether the gather batch is full.

Link: http://lkml.kernel.org/r/20161026084839.27299-6-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 07e32661
...@@ -213,18 +213,17 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) ...@@ -213,18 +213,17 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{ {
tlb->pages[tlb->nr++] = page;
VM_WARN_ON(tlb->nr > tlb->max);
if (tlb->nr == tlb->max) if (tlb->nr == tlb->max)
return true; return true;
tlb->pages[tlb->nr++] = page;
return false; return false;
} }
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{ {
if (__tlb_remove_page(tlb, page)) { if (__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb); tlb_flush_mmu(tlb);
__tlb_remove_page(tlb, page);
}
} }
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
...@@ -233,12 +232,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, ...@@ -233,12 +232,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page); return __tlb_remove_page(tlb, page);
} }
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
struct page *page)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb, static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size) struct page *page, int page_size)
{ {
......
...@@ -207,15 +207,15 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) ...@@ -207,15 +207,15 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
*/ */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{ {
if (tlb->nr == tlb->max)
return true;
tlb->need_flush = 1; tlb->need_flush = 1;
if (!tlb->nr && tlb->pages == tlb->local) if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb); __tlb_alloc_page(tlb);
tlb->pages[tlb->nr++] = page; tlb->pages[tlb->nr++] = page;
VM_WARN_ON(tlb->nr > tlb->max);
if (tlb->nr == tlb->max)
return true;
return false; return false;
} }
...@@ -236,10 +236,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) ...@@ -236,10 +236,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{ {
if (__tlb_remove_page(tlb, page)) { if (__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb); tlb_flush_mmu(tlb);
__tlb_remove_page(tlb, page);
}
} }
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
...@@ -248,12 +246,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, ...@@ -248,12 +246,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page); return __tlb_remove_page(tlb, page);
} }
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
struct page *page)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb, static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size) struct page *page, int page_size)
{ {
......
...@@ -104,12 +104,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, ...@@ -104,12 +104,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page); return __tlb_remove_page(tlb, page);
} }
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
struct page *page)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb, static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size) struct page *page, int page_size)
{ {
......
...@@ -118,12 +118,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, ...@@ -118,12 +118,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page); return __tlb_remove_page(tlb, page);
} }
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
struct page *page)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb, static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size) struct page *page, int page_size)
{ {
......
...@@ -116,12 +116,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, ...@@ -116,12 +116,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page); return __tlb_remove_page(tlb, page);
} }
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
struct page *page)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb, static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size) struct page *page, int page_size)
{ {
......
...@@ -107,11 +107,6 @@ struct mmu_gather { ...@@ -107,11 +107,6 @@ struct mmu_gather {
struct mmu_gather_batch local; struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE]; struct page *__pages[MMU_GATHER_BUNDLE];
unsigned int batch_count; unsigned int batch_count;
/*
* __tlb_adjust_range will track the new addr here,
* that that we can adjust the range after the flush
*/
unsigned long addr;
int page_size; int page_size;
}; };
...@@ -130,12 +125,6 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb, ...@@ -130,12 +125,6 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
{ {
tlb->start = min(tlb->start, address); tlb->start = min(tlb->start, address);
tlb->end = max(tlb->end, address + range_size); tlb->end = max(tlb->end, address + range_size);
/*
* Track the last address with which we adjusted the range. This
* will be used later to adjust again after a mmu_flush due to
* failed __tlb_remove_page
*/
tlb->addr = address;
} }
static inline void __tlb_reset_range(struct mmu_gather *tlb) static inline void __tlb_reset_range(struct mmu_gather *tlb)
...@@ -151,15 +140,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) ...@@ -151,15 +140,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
static inline void tlb_remove_page_size(struct mmu_gather *tlb, static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size) struct page *page, int page_size)
{ {
if (__tlb_remove_page_size(tlb, page, page_size)) { if (__tlb_remove_page_size(tlb, page, page_size))
tlb_flush_mmu(tlb); tlb_flush_mmu(tlb);
tlb->page_size = page_size;
__tlb_adjust_range(tlb, tlb->addr, page_size);
__tlb_remove_page_size(tlb, page, page_size);
}
} }
static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{ {
return __tlb_remove_page_size(tlb, page, PAGE_SIZE); return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
} }
...@@ -173,15 +158,6 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) ...@@ -173,15 +158,6 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE); return tlb_remove_page_size(tlb, page, PAGE_SIZE);
} }
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
{
/* active->nr should be zero when we call this */
VM_BUG_ON_PAGE(tlb->active->nr, page);
tlb->page_size = PAGE_SIZE;
__tlb_adjust_range(tlb, tlb->addr, PAGE_SIZE);
return __tlb_remove_page(tlb, page);
}
#ifndef tlb_remove_check_page_size_change #ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
......
...@@ -300,15 +300,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ ...@@ -300,15 +300,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
struct mmu_gather_batch *batch; struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end); VM_BUG_ON(!tlb->end);
VM_WARN_ON(tlb->page_size != page_size);
if (!tlb->page_size)
tlb->page_size = page_size;
else {
if (page_size != tlb->page_size)
return true;
}
batch = tlb->active; batch = tlb->active;
/*
* Add the page and check if we are full. If so
* force a flush.
*/
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) { if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb)) if (!tlb_next_batch(tlb))
return true; return true;
...@@ -316,7 +315,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ ...@@ -316,7 +315,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
} }
VM_BUG_ON_PAGE(batch->nr > batch->max, page); VM_BUG_ON_PAGE(batch->nr > batch->max, page);
batch->pages[batch->nr++] = page;
return false; return false;
} }
...@@ -1122,7 +1120,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, ...@@ -1122,7 +1120,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_t *start_pte; pte_t *start_pte;
pte_t *pte; pte_t *pte;
swp_entry_t entry; swp_entry_t entry;
struct page *pending_page = NULL;
tlb_remove_check_page_size_change(tlb, PAGE_SIZE); tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
again: again:
...@@ -1177,7 +1174,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, ...@@ -1177,7 +1174,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
print_bad_pte(vma, addr, ptent, page); print_bad_pte(vma, addr, ptent, page);
if (unlikely(__tlb_remove_page(tlb, page))) { if (unlikely(__tlb_remove_page(tlb, page))) {
force_flush = 1; force_flush = 1;
pending_page = page;
addr += PAGE_SIZE; addr += PAGE_SIZE;
break; break;
} }
...@@ -1218,11 +1214,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, ...@@ -1218,11 +1214,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (force_flush) { if (force_flush) {
force_flush = 0; force_flush = 0;
tlb_flush_mmu_free(tlb); tlb_flush_mmu_free(tlb);
if (pending_page) {
/* remove the page with new size */
__tlb_remove_pte_page(tlb, pending_page);
pending_page = NULL;
}
if (addr != end) if (addr != end)
goto again; goto again;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment