Commit e57d9f63 authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Update and clean up x86 fault handling, by Andy Lutomirski.

   - Drop usage of __flush_tlb_all() in kernel_physical_mapping_init()
     and related fallout, by Dan Williams.

   - CPA cleanups and reorganization by Peter Zijlstra: simplify the
     flow and remove a few warts.

   - Other misc cleanups"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  x86/mm/dump_pagetables: Use DEFINE_SHOW_ATTRIBUTE()
  x86/mm/cpa: Rename @addrinarray to @numpages
  x86/mm/cpa: Better use CLFLUSHOPT
  x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array() into a single cpa_flush() function
  x86/mm/cpa: Make cpa_data::numpages invariant
  x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation
  x86/mm/cpa: Simplify the code after making cpa->vaddr invariant
  x86/mm/cpa: Make cpa_data::vaddr invariant
  x86/mm/cpa: Add __cpa_addr() helper
  x86/mm/cpa: Add ARRAY and PAGES_ARRAY selftests
  x86/mm: Drop usage of __flush_tlb_all() in kernel_physical_mapping_init()
  x86/mm: Validate kernel_physical_mapping_init() PTE population
  generic/pgtable: Introduce set_pte_safe()
  generic/pgtable: Introduce {p4d,pgd}_same()
  generic/pgtable: Make {pmd, pud}_same() unconditionally available
  x86/fault: Clean up the page fault oops decoder a bit
  x86/fault: Decode page fault OOPSes better
  x86/vsyscall/64: Use X86_PF constants in the simulated #PF error code
  x86/oops: Show the correct CS value in show_regs()
  x86/fault: Don't try to recover from an implicit supervisor access
  ...
parents d6e867a6 6848ac7c
@@ -102,7 +102,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
 	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
 		struct thread_struct *thread = &current->thread;
-		thread->error_code	= 6; /* user fault, no page, write */
+		thread->error_code	= X86_PF_USER | X86_PF_WRITE;
 		thread->cr2		= ptr;
 		thread->trap_nr		= X86_TRAP_PF;
...
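Note: the X86_PF_* page-fault error-code bits used above are the hardware-defined bits of the #PF error code (declared in arch/x86/include/asm/traps.h at the time). A minimal sketch, quoted from memory and not part of this diff, showing that the old magic value 6 is exactly the user+write combination:

/* Page-fault error code bits (sketch): */
#define X86_PF_PROT	(1 << 0)	/* 0: no page found, 1: protection fault */
#define X86_PF_WRITE	(1 << 1)	/* write access */
#define X86_PF_USER	(1 << 2)	/* fault while in user mode */

/* X86_PF_USER | X86_PF_WRITE == 0x4 | 0x2 == 6, the value previously hard-coded. */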
@@ -16,6 +16,12 @@
 # define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))
 #endif
+#ifdef CONFIG_X86_SMAP
+# define DISABLE_SMAP	0
+#else
+# define DISABLE_SMAP	(1<<(X86_FEATURE_SMAP & 31))
+#endif
 #ifdef CONFIG_X86_INTEL_UMIP
 # define DISABLE_UMIP	0
 #else
@@ -68,7 +74,7 @@
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	(DISABLE_PTI)
 #define DISABLED_MASK8	0
-#define DISABLED_MASK9	(DISABLE_MPX)
+#define DISABLED_MASK9	(DISABLE_MPX|DISABLE_SMAP)
 #define DISABLED_MASK10	0
 #define DISABLED_MASK11	0
 #define DISABLED_MASK12	0
...
@@ -80,6 +80,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
 	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
 }
+static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
+				       pmd_t *pmd, pte_t *pte)
+{
+	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+}
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				struct page *pte)
 {
@@ -132,6 +139,12 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
 	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
 }
+static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
 #endif	/* CONFIG_X86_PAE */
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -141,6 +154,12 @@ static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
 	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
 }
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
+}
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	gfp_t gfp = GFP_KERNEL_ACCOUNT;
@@ -173,6 +192,14 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
 }
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+	if (!pgtable_l5_enabled())
+		return;
+	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
+	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
+}
 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	gfp_t gfp = GFP_KERNEL_ACCOUNT;
...
@@ -68,7 +68,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned long d0, d1, d2, d3, d6, d7;
 	unsigned int fsindex, gsindex;
-	unsigned int ds, cs, es;
+	unsigned int ds, es;
 	show_iret_regs(regs);
@@ -100,7 +100,6 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 	}
 	asm("movl %%ds,%0" : "=r" (ds));
-	asm("movl %%cs,%0" : "=r" (cs));
 	asm("movl %%es,%0" : "=r" (es));
 	asm("movl %%fs,%0" : "=r" (fsindex));
 	asm("movl %%gs,%0" : "=r" (gsindex));
@@ -116,7 +115,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
 	       fs, fsindex, gs, gsindex, shadowgs);
-	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+	printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
 			es, cr0);
 	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
 			cr4);
...
@@ -10,20 +10,9 @@ static int ptdump_show(struct seq_file *m, void *v)
 	return 0;
 }
-static int ptdump_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, ptdump_show, NULL);
-}
-static const struct file_operations ptdump_fops = {
-	.owner		= THIS_MODULE,
-	.open		= ptdump_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump);
-static int ptdump_show_curknl(struct seq_file *m, void *v)
+static int ptdump_curknl_show(struct seq_file *m, void *v)
 {
 	if (current->mm->pgd) {
 		down_read(&current->mm->mmap_sem);
@@ -33,23 +22,12 @@ static int ptdump_show_curknl(struct seq_file *m, void *v)
 	return 0;
 }
-static int ptdump_open_curknl(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, ptdump_show_curknl, NULL);
-}
-static const struct file_operations ptdump_curknl_fops = {
-	.owner		= THIS_MODULE,
-	.open		= ptdump_open_curknl,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump_curknl);
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 static struct dentry *pe_curusr;
-static int ptdump_show_curusr(struct seq_file *m, void *v)
+static int ptdump_curusr_show(struct seq_file *m, void *v)
 {
 	if (current->mm->pgd) {
 		down_read(&current->mm->mmap_sem);
@@ -59,42 +37,20 @@ static int ptdump_show_curusr(struct seq_file *m, void *v)
 	return 0;
 }
-static int ptdump_open_curusr(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, ptdump_show_curusr, NULL);
-}
-static const struct file_operations ptdump_curusr_fops = {
-	.owner		= THIS_MODULE,
-	.open		= ptdump_open_curusr,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump_curusr);
 #endif
 #if defined(CONFIG_EFI) && defined(CONFIG_X86_64)
 static struct dentry *pe_efi;
-static int ptdump_show_efi(struct seq_file *m, void *v)
+static int ptdump_efi_show(struct seq_file *m, void *v)
 {
 	if (efi_mm.pgd)
 		ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false);
 	return 0;
 }
-static int ptdump_open_efi(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, ptdump_show_efi, NULL);
-}
-static const struct file_operations ptdump_efi_fops = {
-	.owner		= THIS_MODULE,
-	.open		= ptdump_open_efi,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump_efi);
 #endif
 static struct dentry *dir, *pe_knl, *pe_curknl;
...
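Note: DEFINE_SHOW_ATTRIBUTE() (include/linux/seq_file.h) generates essentially the open helper and file_operations boilerplate removed above, which is why the show callbacks are renamed to the <name>_show form the macro expects. A sketch of the macro, quoted from memory and not part of this diff:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
};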
@@ -432,7 +432,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 					     E820_TYPE_RAM) &&
 			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
 					     E820_TYPE_RESERVED_KERN))
-				set_pte(pte, __pte(0));
+				set_pte_safe(pte, __pte(0));
 			continue;
 		}
@@ -452,7 +452,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 			pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
 				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 		pages++;
-		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+		set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
 		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
 	}
@@ -487,7 +487,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 					     E820_TYPE_RAM) &&
 			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
 					     E820_TYPE_RESERVED_KERN))
-				set_pmd(pmd, __pmd(0));
+				set_pmd_safe(pmd, __pmd(0));
 			continue;
 		}
@@ -524,7 +524,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
-			set_pte((pte_t *)pmd,
+			set_pte_safe((pte_t *)pmd,
 				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
@@ -536,7 +536,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 		spin_lock(&init_mm.page_table_lock);
-		pmd_populate_kernel(&init_mm, pmd, pte);
+		pmd_populate_kernel_safe(&init_mm, pmd, pte);
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
@@ -573,7 +573,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 					     E820_TYPE_RAM) &&
 			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
 					     E820_TYPE_RESERVED_KERN))
-				set_pud(pud, __pud(0));
+				set_pud_safe(pud, __pud(0));
 			continue;
 		}
@@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 							   paddr_end,
 							   page_size_mask,
 							   prot);
-				__flush_tlb_all();
 				continue;
 			}
 			/*
@@ -611,7 +610,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
-			set_pte((pte_t *)pud,
+			set_pte_safe((pte_t *)pud,
 				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
 					PAGE_KERNEL_LARGE));
 			spin_unlock(&init_mm.page_table_lock);
@@ -624,10 +623,9 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 					   page_size_mask, prot);
 		spin_lock(&init_mm.page_table_lock);
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate_safe(&init_mm, pud, pmd);
 		spin_unlock(&init_mm.page_table_lock);
 	}
-	__flush_tlb_all();
 	update_page_count(PG_LEVEL_1G, pages);
@@ -659,7 +657,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 					     E820_TYPE_RAM) &&
 			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
 					     E820_TYPE_RESERVED_KERN))
-				set_p4d(p4d, __p4d(0));
+				set_p4d_safe(p4d, __p4d(0));
 			continue;
 		}
@@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 			paddr_last = phys_pud_init(pud, paddr,
 					paddr_end,
 					page_size_mask);
-			__flush_tlb_all();
 			continue;
 		}
@@ -677,10 +674,9 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 				       page_size_mask);
 		spin_lock(&init_mm.page_table_lock);
-		p4d_populate(&init_mm, p4d, pud);
+		p4d_populate_safe(&init_mm, p4d, pud);
 		spin_unlock(&init_mm.page_table_lock);
 	}
-	__flush_tlb_all();
 	return paddr_last;
 }
@@ -723,9 +719,9 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 		spin_lock(&init_mm.page_table_lock);
 		if (pgtable_l5_enabled())
-			pgd_populate(&init_mm, pgd, p4d);
+			pgd_populate_safe(&init_mm, pgd, p4d);
 		else
-			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
+			p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
 		spin_unlock(&init_mm.page_table_lock);
 		pgd_changed = true;
 	}
@@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 	if (pgd_changed)
 		sync_global_pgds(vaddr_start, vaddr_end - 1);
-	__flush_tlb_all();
 	return paddr_last;
 }
...
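Note: the __flush_tlb_all() calls can be dropped here because these paths only ever populate entries that were previously not present, so no CPU can hold a stale translation through them; the new _safe helpers assert exactly that. A purely illustrative fragment (hypothetical, not taken from this diff) of the property being relied on:

	/*
	 * Illustration only: populating an empty PMD needs no TLB flush,
	 * since nothing can have cached a translation through a non-present
	 * entry.  Overwriting a *present* entry with a different value is
	 * the case that would need a flush, and pmd_populate_kernel_safe()
	 * (via set_pmd_safe()) WARN_ON_ONCE()s if that ever happens.
	 */
	if (pmd_none(*pmd))
		pmd_populate_kernel_safe(&init_mm, pmd, pte);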
@@ -19,4 +19,6 @@ extern int after_bootmem;
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
+extern unsigned long tlb_single_page_flush_ceiling;
 #endif	/* __X86_MM_INTERNAL_H */
@@ -23,7 +23,8 @@
 static __read_mostly int print = 1;
 enum {
-	NTEST			= 400,
+	NTEST			= 3 * 100,
+	NPAGES			= 100,
 #ifdef CONFIG_X86_64
 	LPS			= (1 << PMD_SHIFT),
 #elif defined(CONFIG_X86_PAE)
@@ -110,6 +111,9 @@ static int print_split(struct split_state *s)
 static unsigned long addr[NTEST];
 static unsigned int len[NTEST];
+static struct page *pages[NPAGES];
+static unsigned long addrs[NPAGES];
 /* Change the global bit on random pages in the direct mapping */
 static int pageattr_test(void)
 {
@@ -120,7 +124,6 @@ static int pageattr_test(void)
 	unsigned int level;
 	int i, k;
 	int err;
-	unsigned long test_addr;
 	if (print)
 		printk(KERN_INFO "CPA self-test:\n");
@@ -137,7 +140,7 @@ static int pageattr_test(void)
 		unsigned long pfn = prandom_u32() % max_pfn_mapped;
 		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
-		len[i] = prandom_u32() % 100;
+		len[i] = prandom_u32() % NPAGES;
 		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
 		if (len[i] == 0)
@@ -167,14 +170,29 @@ static int pageattr_test(void)
 				break;
 			}
 			__set_bit(pfn + k, bm);
+			addrs[k] = addr[i] + k*PAGE_SIZE;
+			pages[k] = pfn_to_page(pfn + k);
 		}
 		if (!addr[i] || !pte || !k) {
 			addr[i] = 0;
 			continue;
 		}
-		test_addr = addr[i];
-		err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
+		switch (i % 3) {
+		case 0:
+			err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
+			break;
+		case 1:
+			err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
+			break;
+		case 2:
+			err = cpa_set_pages_array(pages, len[i], PAGE_CPA_TEST);
+			break;
+		}
 		if (err < 0) {
 			printk(KERN_ERR "CPA %d failed %d\n", i, err);
 			failed++;
@@ -206,8 +224,7 @@ static int pageattr_test(void)
 			failed++;
 			continue;
 		}
-		test_addr = addr[i];
-		err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
+		err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
 		if (err < 0) {
 			printk(KERN_ERR "CPA reverting failed: %d\n", err);
 			failed++;
...
@@ -15,6 +15,8 @@
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
+#include "mm_internal.h"
 /*
  *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
@@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  *
  * This is in units of pages.
  */
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned int stride_shift,
...
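Note: making tlb_single_page_flush_ceiling visible through mm_internal.h lets the CPA flushing code reuse the same per-page-versus-full-flush heuristic as flush_tlb_mm_range(). A hedged sketch of the intended use; the actual cpa_flush() lives in a file whose diff is collapsed in this view, so the exact names here are assumptions based on the changelog:

	/* Sketch: flush page by page only while that is cheaper than a full flush. */
	if (cpa->numpages <= tlb_single_page_flush_ceiling)
		on_each_cpu(__cpa_flush_tlb, cpa, 1);	/* INVLPG each affected page */
	else
		flush_tlb_all();			/* beyond the ceiling, flush everything */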
@@ -26,6 +26,7 @@
 #define p4d_clear(p4d)			pgd_clear(p4d)
 #define p4d_val(p4d)			pgd_val(p4d)
 #define p4d_populate(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
+#define p4d_populate_safe(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
 #define p4d_page(p4d)			pgd_page(p4d)
 #define p4d_page_vaddr(p4d)		pgd_page_vaddr(p4d)
...
@@ -31,6 +31,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
 #define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))
 #define pgd_populate(mm, pgd, pud)		do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud)		do { } while (0)
 /*
  * (puds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
...
@@ -26,6 +26,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
 #define p4d_ERROR(p4d)				(pgd_ERROR((p4d).pgd))
 #define pgd_populate(mm, pgd, p4d)		do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d)		do { } while (0)
 /*
  * (p4ds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
...
@@ -35,6 +35,7 @@ static inline void p4d_clear(p4d_t *p4d) { }
 #define pud_ERROR(pud)				(p4d_ERROR((pud).p4d))
 #define p4d_populate(mm, p4d, pud)		do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud)		do { } while (0)
 /*
  * (puds are folded into p4ds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
...
@@ -375,7 +375,6 @@ static inline int pte_unused(pte_t pte)
 #endif
 #ifndef __HAVE_ARCH_PMD_SAME
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
 	return pmd_val(pmd_a) == pmd_val(pmd_b);
@@ -385,21 +384,60 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
 {
 	return pud_val(pud_a) == pud_val(pud_b);
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+#endif
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
 {
-	BUILD_BUG();
-	return 0;
+	return p4d_val(p4d_a) == p4d_val(p4d_b);
 }
+#endif
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
 {
-	BUILD_BUG();
-	return 0;
+	return pgd_val(pgd_a) == pgd_val(pgd_b);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+	set_pte(ptep, pte); \
+})
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+	set_pmd(pmdp, pmd); \
+})
+#define set_pud_safe(pudp, pud) \
+({ \
+	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+	set_pud(pudp, pud); \
+})
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+	set_p4d(p4dp, p4d); \
+})
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+	set_pgd(pgdp, pgd); \
+})
 #ifndef __HAVE_ARCH_DO_SWAP_PAGE
 /*
  * Some architectures support metadata associated with a page. When a
...
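Note: a small usage sketch of the new helpers with a hypothetical caller (not from this diff). The _safe variants behave exactly like the plain setters, but warn once if the call would have required a TLB flush:

	/* Installing a mapping into a non-present slot, or rewriting the
	 * same value, is fine: */
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
	set_pte_safe(ptep, pte);			/* no warning */

	/* Silently changing a live translation is not: */
	set_pte_safe(ptep, pte_wrprotect(pte));		/* triggers WARN_ON_ONCE() */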