Commit e1f56c89 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: convert mm->nr_ptes to atomic_long_t

With a split page table lock at the PMD level, we can no longer count on
holding mm->page_table_lock while updating nr_ptes.

Convert nr_ptes to atomic_long_t to avoid the resulting races.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 57c1ffce
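
Why the plain counter races once PMD page table locks are split: two threads can update nr_ptes while holding two different per-table locks, so nothing serializes the counter's read-modify-write. A minimal userspace sketch of that failure mode (hypothetical demo, not part of the patch; all names are illustrative) — the plain counter typically loses updates, the atomic one does not:

/* Hypothetical demo (not from this patch): several threads bump a plain
 * counter and a C11 atomic counter with no shared lock, modelling
 * concurrent nr_ptes updates under different split PMD locks. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4
#define NITERS   1000000L

static long plain_nr_ptes;		/* like the old "unsigned long nr_ptes" */
static atomic_long atomic_nr_ptes;	/* like the new "atomic_long_t nr_ptes" */

static void *worker(void *arg)
{
	(void)arg;
	for (long i = 0; i < NITERS; i++) {
		plain_nr_ptes++;			/* racy read-modify-write */
		atomic_fetch_add(&atomic_nr_ptes, 1);	/* single atomic RMW */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	/* Both should read NTHREADS * NITERS = 4000000; on most machines
	 * the plain counter comes up short. */
	printf("plain:  %ld\n", plain_nr_ptes);
	printf("atomic: %ld\n", atomic_load(&atomic_nr_ptes));
	return 0;
}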
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -62,7 +62,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 		total_rss << (PAGE_SHIFT-10),
 		data << (PAGE_SHIFT-10),
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
-		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+		(PTRS_PER_PTE * sizeof(pte_t) *
+				atomic_long_read(&mm->nr_ptes)) >> 10,
 		swap << (PAGE_SHIFT-10));
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -339,6 +339,7 @@ struct mm_struct {
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
+	atomic_long_t nr_ptes;			/* Page table pages */
 	int map_count;				/* number of VMAs */
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
@@ -360,7 +361,6 @@ struct mm_struct {
 	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
 	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
 	unsigned long def_flags;
-	unsigned long nr_ptes;		/* Page table pages */
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -532,7 +532,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	mm->flags = (current->mm) ?
 		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
 	mm->core_state = NULL;
-	mm->nr_ptes = 0;
+	atomic_long_set(&mm->nr_ptes, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
 	mm_init_aio(mm);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -738,7 +738,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		set_pmd_at(mm, haddr, pmd, entry);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
-		mm->nr_ptes++;
+		atomic_long_inc(&mm->nr_ptes);
 		spin_unlock(&mm->page_table_lock);
 	}
 
@@ -771,7 +771,7 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 	entry = pmd_mkhuge(entry);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
-	mm->nr_ptes++;
+	atomic_long_inc(&mm->nr_ptes);
 	return true;
 }
 
@@ -896,7 +896,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-	dst_mm->nr_ptes++;
+	atomic_long_inc(&dst_mm->nr_ptes);
 	ret = 0;
 
 out_unlock:
@@ -1392,7 +1392,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
 		if (is_huge_zero_pmd(orig_pmd)) {
-			tlb->mm->nr_ptes--;
+			atomic_long_dec(&tlb->mm->nr_ptes);
 			spin_unlock(&tlb->mm->page_table_lock);
 			put_huge_zero_page();
 		} else {
@@ -1401,7 +1401,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			VM_BUG_ON(page_mapcount(page) < 0);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 			VM_BUG_ON(!PageHead(page));
-			tlb->mm->nr_ptes--;
+			atomic_long_dec(&tlb->mm->nr_ptes);
 			spin_unlock(&tlb->mm->page_table_lock);
 			tlb_remove_page(tlb, page);
 		}
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -382,7 +382,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 	pgtable_t token = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
 	pte_free_tlb(tlb, token, addr);
-	tlb->mm->nr_ptes--;
+	atomic_long_dec(&tlb->mm->nr_ptes);
 }
 
 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -573,7 +573,7 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(&mm->page_table_lock);
 	wait_split_huge_page = 0;
 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
-		mm->nr_ptes++;
+		atomic_long_inc(&mm->nr_ptes);
 		pmd_populate(mm, pmd, new);
 		new = NULL;
 	} else if (unlikely(pmd_trans_splitting(*pmd)))
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2724,7 +2724,8 @@ void exit_mmap(struct mm_struct *mm)
 	}
 	vm_unacct_memory(nr_accounted);
 
-	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+	WARN_ON(atomic_long_read(&mm->nr_ptes) >
+			(FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
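
A note on the exit_mmap() check above: the right-hand side rounds FIRST_USER_ADDRESS up to whole PMD-sized slots, i.e. the number of PTE pages that may legitimately remain because they map addresses below the first user address. A runnable sketch of the arithmetic (macro values mirror x86-64; on architectures that define FIRST_USER_ADDRESS as PAGE_SIZE the result is 1 instead):

/* Illustrative arithmetic only; values as on x86-64. */
#include <stdio.h>

#define PMD_SHIFT		21
#define PMD_SIZE		(1UL << PMD_SHIFT)	/* 2 MiB */
#define FIRST_USER_ADDRESS	0UL			/* 0 on x86-64 */

int main(void)
{
	/* PTE pages tolerated at exit: the first user address rounded up
	 * to whole PMD slots. */
	unsigned long allowed =
		(FIRST_USER_ADDRESS + PMD_SIZE - 1) >> PMD_SHIFT;

	printf("allowed leftover page table pages: %lu\n", allowed);	/* 0 */
	return 0;
}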
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -161,7 +161,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 	 * The baseline for the badness score is the proportion of RAM that each
 	 * task's rss, pagetable and swap space use.
 	 */
-	points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+	points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
 		 get_mm_counter(p->mm, MM_SWAPENTS);
 	task_unlock(p);
 
@@ -364,10 +364,10 @@ static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 			continue;
 		}
 
-		pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu %5hd %s\n",
+		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu %5hd %s\n",
 			task->pid, from_kuid(&init_user_ns, task_uid(task)),
 			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-			task->mm->nr_ptes,
+			atomic_long_read(&task->mm->nr_ptes),
 			get_mm_counter(task->mm, MM_SWAPENTS),
 			task->signal->oom_score_adj, task->comm);
 		task_unlock(task);
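
One detail in the dump_tasks() hunk: the printk conversion for the page table column flips from %7lu to %7ld because atomic_long_read() returns a signed long, whereas the old field was an unsigned long. The read side needs no locking; a minimal sketch (reusing the task pointer from the hunk, illustrative only):

/* atomic_long_read() is an ordinary lockless load returning a signed
 * long, hence the switch from %lu to %ld in the format string. */
long nr_ptes = atomic_long_read(&task->mm->nr_ptes);
pr_info("nr_ptes: %7ld\n", nr_ptes);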