Commit 9547d01b authored by Peter Zijlstra, committed by Linus Torvalds

mm: uninline large generic tlb.h functions

Some of these functions have grown beyond inline sanity, move them
out-of-line.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Requested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 88c22088
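
For readers skimming the diff below: the change follows the usual C uninlining pattern. The large static inline bodies leave the shared header, which keeps only prototypes, and a single out-of-line definition lands in a .c file. A minimal stand-alone illustration of that pattern, using hypothetical widget_* names rather than anything from the kernel tree:

/* widget.h -- before: every includer compiles its own copy of the body */
static inline int widget_count_free(const int *slots, int n)
{
	int i, nfree = 0;

	for (i = 0; i < n; i++)		/* imagine many more lines here */
		if (!slots[i])
			nfree++;
	return nfree;
}

/* widget.h -- after: only the prototype stays visible to includers */
int widget_count_free(const int *slots, int n);

/* widget.c -- the one shared definition the linker resolves against */
int widget_count_free(const int *slots, int n)
{
	int i, nfree = 0;

	for (i = 0; i < n; i++)
		if (!slots[i])
			nfree++;
	return nfree;
}

The win is a single copy of the code and smaller text size; the cost is an ordinary function call, which is noise for bodies this large.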
include/asm-generic/tlb.h
@@ -96,134 +96,25 @@ struct mmu_gather {
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 };
 
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
-  #define tlb_fast_mode(tlb) (tlb->fast_mode)
-#else
-  #define tlb_fast_mode(tlb) 1
-#endif
+#define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_next_batch(struct mmu_gather *tlb)
+static inline int tlb_fast_mode(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	batch = tlb->active;
-	if (batch->next) {
-		tlb->active = batch->next;
-		return 1;
-	}
-
-	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-	if (!batch)
-		return 0;
-
-	batch->next = NULL;
-	batch->nr   = 0;
-	batch->max  = MAX_GATHER_BATCH;
-
-	tlb->active->next = batch;
-	tlb->active = batch;
-
+#ifdef CONFIG_SMP
+	return tlb->fast_mode;
+#else
+	/*
+	 * For UP we don't need to worry about TLB flush
+	 * and page free order so much..
+	 */
 	return 1;
+#endif
 }
 
-/* tlb_gather_mmu
- *	Called to initialize an (on-stack) mmu_gather structure for page-table
- *	tear-down from @mm. The @fullmm argument is used when @mm is without
- *	users and we're going to destroy the full address space (exit/execve).
- */
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-{
-	tlb->mm = mm;
-
-	tlb->fullmm = fullmm;
-	tlb->need_flush = 0;
-	tlb->fast_mode  = (num_possible_cpus() == 1);
-	tlb->local.next = NULL;
-	tlb->local.nr   = 0;
-	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-	tlb->active     = &tlb->local;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
-
-	if (tlb_fast_mode(tlb))
-		return;
-
-	for (batch = &tlb->local; batch; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
-}
-
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	struct mmu_gather_batch *batch, *next;
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	struct mmu_gather_batch *batch;
-
-	tlb->need_flush = 1;
-
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu() */
-	}
-
-	batch = tlb->active;
-	batch->pages[batch->nr++] = page;
-	VM_BUG_ON(batch->nr > batch->max);
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return 0;
-	}
-
-	return batch->max - batch->nr;
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 /* tlb_remove_page
  *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
...
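
The calling convention is unchanged by the move; only the linkage differs. The following sketch shows how a tear-down path drives the four calls declared above, following the comments that accompany them: gather, queue pages until __tlb_remove_page() reports no free slots, flush, then finish. The example_zap_range() helper and its pages[]/nr_pages parameters are illustrative stand-ins for the real pte walk, not kernel code (kernel context assumed, not compilable on its own):

/*
 * Schematic caller: tear down a range, batching the freed pages
 * through the on-stack mmu_gather declared in <asm/tlb.h>.
 */
static void example_zap_range(struct mm_struct *mm, struct page **pages,
			      int nr_pages, unsigned long start,
			      unsigned long end)
{
	struct mmu_gather tlb;
	int i;

	tlb_gather_mmu(&tlb, mm, false);	/* false: not a full-mm teardown */

	for (i = 0; i < nr_pages; i++) {
		/* page is queued; a 0 return means the batch is full, so flush now */
		if (!__tlb_remove_page(&tlb, pages[i]))
			tlb_flush_mmu(&tlb);
	}

	tlb_finish_mmu(&tlb, start, end);	/* final flush, free the batch pages */
}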
mm/memory.c
@@ -182,7 +182,7 @@ void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
 {
 	__sync_task_rss_stat(task, mm);
 }
-#else
+#else /* SPLIT_RSS_COUNTING */
 
 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
@@ -191,8 +191,128 @@ static void check_sync_rss_stat(struct task_struct *task)
 {
 }
 
-#endif
+#endif /* SPLIT_RSS_COUNTING */
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static int tlb_next_batch(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+	batch = tlb->active;
+	if (batch->next) {
+		tlb->active = batch->next;
+		return 1;
+	}
+
+	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+	if (!batch)
+		return 0;
+
+	batch->next = NULL;
+	batch->nr   = 0;
+	batch->max  = MAX_GATHER_BATCH;
+
+	tlb->active->next = batch;
+	tlb->active = batch;
+
+	return 1;
+}
+
+/* tlb_gather_mmu
+ *	Called to initialize an (on-stack) mmu_gather structure for page-table
+ *	tear-down from @mm. The @fullmm argument is used when @mm is without
+ *	users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+{
+	tlb->mm = mm;
+
+	tlb->fullmm = fullmm;
+	tlb->need_flush = 0;
+	tlb->fast_mode  = (num_possible_cpus() == 1);
+	tlb->local.next = NULL;
+	tlb->local.nr   = 0;
+	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+	tlb->active     = &tlb->local;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+	if (!tlb->need_flush)
+		return;
+	tlb->need_flush = 0;
+	tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
+
+	if (tlb_fast_mode(tlb))
+		return;
+
+	for (batch = &tlb->local; batch; batch = batch->next) {
+		free_pages_and_swap_cache(batch->pages, batch->nr);
+		batch->nr = 0;
+	}
+	tlb->active = &tlb->local;
+}
+
+/* tlb_finish_mmu
+ *	Called at the end of the shootdown operation to free up any resources
+ *	that were required.
+ */
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	struct mmu_gather_batch *batch, *next;
+
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	for (batch = tlb->local.next; batch; batch = next) {
+		next = batch->next;
+		free_pages((unsigned long)batch, 0);
+	}
+	tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page
+ *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ *	handling the additional races in SMP caused by other CPUs caching valid
+ *	mappings in their TLBs. Returns the number of free page slots left.
+ *	When out of page slots we must call tlb_flush_mmu().
+ */
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	struct mmu_gather_batch *batch;
+
+	tlb->need_flush = 1;
+
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+		return 1; /* avoid calling tlb_flush_mmu() */
+	}
+
+	batch = tlb->active;
+	batch->pages[batch->nr++] = page;
+	if (batch->nr == batch->max) {
+		if (!tlb_next_batch(tlb))
+			return 0;
+	}
+	VM_BUG_ON(batch->nr > batch->max);
+
+	return batch->max - batch->nr;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
+
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 
 /*
@@ -268,7 +388,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 	tlb_table_flush(tlb);
 }
 
-#endif
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
 /*
  * If a p?d_bad entry is found while walking page tables, report
...
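
Note that the new bodies in mm/memory.c are guarded by HAVE_GENERIC_MMU_GATHER, which the generic header now defines, so only architectures whose asm/tlb.h pulls in asm-generic/tlb.h pick up the shared implementation; architectures that keep a private mmu_gather are unaffected. A sketch of that wiring (the arch path is illustrative, not a specific file from the tree):

/* arch/<arch>/include/asm/tlb.h -- opts in to the generic gather */
#include <asm-generic/tlb.h>		/* defines HAVE_GENERIC_MMU_GATHER */

/* mm/memory.c -- builds the shared bodies only for such architectures */
#ifdef HAVE_GENERIC_MMU_GATHER
/* tlb_gather_mmu(), tlb_flush_mmu(), tlb_finish_mmu(), __tlb_remove_page() */
#endif /* HAVE_GENERIC_MMU_GATHER */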