Commit c30d6bc8 authored by David Hildenbrand, committed by Andrew Morton

mm/mmu_gather: pass "delay_rmap" instead of encoded page to __tlb_remove_page_size()

We have two bits available in the encoded page pointer to store additional
information.  Currently, we use one bit to request delay of the rmap
removal until after a TLB flush.

We want to make use of the remaining bit internally for batching of
multiple pages of the same folio, specifying that the next encoded page
pointer in an array is actually "nr_pages".  So pass the page and a
delay_rmap flag instead of an encoded page, and handle the encoding
internally.

Link: https://lkml.kernel.org/r/20240214204435.167852-6-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2b42a7e5
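The "two bits available in the encoded page pointer" mentioned in the message refer to pointer tagging: a struct page is at least word-aligned, so the low bits of its address are always zero and can carry per-entry flags such as "delay rmap removal". Below is a minimal, self-contained, userspace-style sketch of that technique; the sketch_* names and SKETCH_FLAG_BITS are hypothetical stand-ins for the encode_page()/encoded_page_ptr() helpers that appear in the diff, not the kernel's actual implementation.

/*
 * Sketch only: pointer tagging as used by the encoded page pointer.
 * Because a struct page is at least 4-byte aligned, bits 0-1 of its
 * address are always zero and can hold flags.
 */
#include <stdint.h>

struct page;			/* opaque for this sketch */
struct encoded_page;		/* tagged pointer; never dereferenced */

#define SKETCH_FLAG_BITS	3UL	/* the two low bits are usable */

static inline struct encoded_page *sketch_encode_page(struct page *page,
						      unsigned long flags)
{
	/* flags must fit into the alignment bits of the pointer */
	return (struct encoded_page *)((uintptr_t)page | (flags & SKETCH_FLAG_BITS));
}

static inline struct page *sketch_encoded_page_ptr(struct encoded_page *ep)
{
	/* mask the flag bits off to recover the real pointer */
	return (struct page *)((uintptr_t)ep & ~SKETCH_FLAG_BITS);
}

static inline unsigned long sketch_encoded_page_flags(struct encoded_page *ep)
{
	return (uintptr_t)ep & SKETCH_FLAG_BITS;
}

With one bit already taken by delay_rmap, moving the encoding inside mm/mmu_gather.c leaves the second bit free for the "next entry is nr_pages" batching described in the commit message.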
@@ -25,8 +25,7 @@
 void __tlb_remove_table(void *_table);
 static inline void tlb_flush(struct mmu_gather *tlb);
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-					  struct encoded_page *page,
-					  int page_size);
+					  struct page *page, bool delay_rmap, int page_size);

 #define tlb_flush tlb_flush
 #define pte_free_tlb pte_free_tlb
@@ -42,14 +41,14 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  *
- * s390 doesn't delay rmap removal, so there is nothing encoded in
- * the page pointer.
+ * s390 doesn't delay rmap removal.
  */
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-					  struct encoded_page *page,
-					  int page_size)
+					  struct page *page, bool delay_rmap, int page_size)
 {
-	free_page_and_swap_cache(encoded_page_ptr(page));
+	VM_WARN_ON_ONCE(delay_rmap);
+	free_page_and_swap_cache(page);
 	return false;
 }
...
@@ -260,9 +260,8 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

-extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
-				   struct encoded_page *page,
-				   int page_size);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   bool delay_rmap, int page_size);

 #ifdef CONFIG_SMP
 /*
@@ -462,13 +461,14 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
-	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
+	if (__tlb_remove_page_size(tlb, page, false, page_size))
 		tlb_flush_mmu(tlb);
 }

-static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags)
+static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
+		struct page *page, bool delay_rmap)
 {
-	return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
+	return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
 }

 /* tlb_remove_page
...
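To make the new calling convention concrete, here is a hypothetical caller of the reworked __tlb_remove_page(); the real callers live in the page-zapping code, which this excerpt does not show, and example_zap_one_page() is an invented name.

/* Hypothetical caller; not part of this patch. */
static void example_zap_one_page(struct mmu_gather *tlb, struct page *page,
				 bool delay_rmap)
{
	/*
	 * The caller passes a plain struct page plus a bool; folding the
	 * flag into the pointer now happens inside __tlb_remove_page_size().
	 * A true return value means the gather batches are full, so the
	 * pending pages must be flushed before more can be queued.
	 */
	if (__tlb_remove_page(tlb, page, delay_rmap))
		tlb_flush_mmu(tlb);
}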
@@ -116,7 +116,8 @@ static void tlb_batch_list_free(struct mmu_gather *tlb)
 	tlb->local.next = NULL;
 }

-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+		bool delay_rmap, int page_size)
 {
 	struct mmu_gather_batch *batch;

@@ -131,13 +132,13 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 	 * Add the page and check if we are full. If so
 	 * force a flush.
 	 */
-	batch->encoded_pages[batch->nr++] = page;
+	batch->encoded_pages[batch->nr++] = encode_page(page, delay_rmap);
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
 			return true;
 		batch = tlb->active;
 	}
-	VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));
+	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

 	return false;
 }
...
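For completeness, a hypothetical sketch of the consumer side, reusing the sketch_* helpers from above the diff: once the TLB has been flushed, a walk over the gathered entries can recover both the page pointer and the delay-rmap bit. This only illustrates why the flag can live in the pointer's low bits; it is not the kernel's actual flush or rmap code, and the bit assignment in SKETCH_FLAG_DELAY_RMAP is an assumption for illustration.

#define SKETCH_FLAG_DELAY_RMAP	1UL	/* assumed bit assignment */

/* Hypothetical post-flush walk over a gathered batch. */
static void sketch_process_batch_after_flush(struct encoded_page **entries,
					     unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct page *page = sketch_encoded_page_ptr(entries[i]);
		unsigned long flags = sketch_encoded_page_flags(entries[i]);

		if (flags & SKETCH_FLAG_DELAY_RMAP) {
			/* rmap removal was postponed until after the TLB
			 * flush; it would be performed on @page here */
		}
		/* ...and @page itself would be freed here */
		(void)page;	/* placeholder for the real rmap/free calls */
	}
}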