Commit eabb6297 authored by Linus Torvalds

Merge tag 'x86-mm-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Ingo Molnar:

 - Fix W^X violation check false-positives in the CPA code
   when running as a Xen PV guest

 - Fix W^X violation warning false-positives in show_fault_oops()

* tag 'x86-mm-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/pat: Fix W^X violation false-positives when running as Xen PV guest
  x86/pat: Restructure _lookup_address_cpa()
  x86/mm: Use lookup_address_in_pgd_attr() in show_fault_oops()
  x86/pat: Introduce lookup_address_in_pgd_attr()
parents 963795f7 5bc8b0f5
...@@ -567,6 +567,8 @@ static inline void update_page_count(int level, unsigned long pages) { } ...@@ -567,6 +567,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
extern pte_t *lookup_address(unsigned long address, unsigned int *level); extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level); unsigned int *level);
pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
unsigned int *level, bool *nx, bool *rw);
extern pmd_t *lookup_pmd_address(unsigned long address); extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address); extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
......
...@@ -514,18 +514,19 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad ...@@ -514,18 +514,19 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
if (error_code & X86_PF_INSTR) { if (error_code & X86_PF_INSTR) {
unsigned int level; unsigned int level;
bool nx, rw;
pgd_t *pgd; pgd_t *pgd;
pte_t *pte; pte_t *pte;
pgd = __va(read_cr3_pa()); pgd = __va(read_cr3_pa());
pgd += pgd_index(address); pgd += pgd_index(address);
pte = lookup_address_in_pgd(pgd, address, &level); pte = lookup_address_in_pgd_attr(pgd, address, &level, &nx, &rw);
if (pte && pte_present(*pte) && !pte_exec(*pte)) if (pte && pte_present(*pte) && (!pte_exec(*pte) || nx))
pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
from_kuid(&init_user_ns, current_uid())); from_kuid(&init_user_ns, current_uid()));
if (pte && pte_present(*pte) && pte_exec(*pte) && if (pte && pte_present(*pte) && pte_exec(*pte) && !nx &&
(pgd_flags(*pgd) & _PAGE_USER) && (pgd_flags(*pgd) & _PAGE_USER) &&
(__read_cr4() & X86_CR4_SMEP)) (__read_cr4() & X86_CR4_SMEP))
pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n", pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
......
...@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, ...@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
* Validate strict W^X semantics. * Validate strict W^X semantics.
*/ */
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start, static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
unsigned long pfn, unsigned long npg) unsigned long pfn, unsigned long npg,
bool nx, bool rw)
{ {
unsigned long end; unsigned long end;
...@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star ...@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW) if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
return new; return new;
/* Non-leaf translation entries can disable writing or execution. */
if (!rw || nx)
return new;
end = start + npg * PAGE_SIZE - 1; end = start + npg * PAGE_SIZE - 1;
WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n", WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
(unsigned long long)pgprot_val(old), (unsigned long long)pgprot_val(old),
...@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star ...@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
/* /*
* Lookup the page table entry for a virtual address in a specific pgd. * Lookup the page table entry for a virtual address in a specific pgd.
* Return a pointer to the entry and the level of the mapping. * Return a pointer to the entry, the level of the mapping, and the effective
* NX and RW bits of all page table levels.
*/ */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
unsigned int *level) unsigned int *level, bool *nx, bool *rw)
{ {
p4d_t *p4d; p4d_t *p4d;
pud_t *pud; pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
*level = PG_LEVEL_NONE; *level = PG_LEVEL_NONE;
*nx = false;
*rw = true;
if (pgd_none(*pgd)) if (pgd_none(*pgd))
return NULL; return NULL;
*nx |= pgd_flags(*pgd) & _PAGE_NX;
*rw &= pgd_flags(*pgd) & _PAGE_RW;
p4d = p4d_offset(pgd, address); p4d = p4d_offset(pgd, address);
if (p4d_none(*p4d)) if (p4d_none(*p4d))
return NULL; return NULL;
...@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, ...@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (p4d_leaf(*p4d) || !p4d_present(*p4d)) if (p4d_leaf(*p4d) || !p4d_present(*p4d))
return (pte_t *)p4d; return (pte_t *)p4d;
*nx |= p4d_flags(*p4d) & _PAGE_NX;
*rw &= p4d_flags(*p4d) & _PAGE_RW;
pud = pud_offset(p4d, address); pud = pud_offset(p4d, address);
if (pud_none(*pud)) if (pud_none(*pud))
return NULL; return NULL;
...@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, ...@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pud_leaf(*pud) || !pud_present(*pud)) if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud; return (pte_t *)pud;
*nx |= pud_flags(*pud) & _PAGE_NX;
*rw &= pud_flags(*pud) & _PAGE_RW;
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
if (pmd_none(*pmd)) if (pmd_none(*pmd))
return NULL; return NULL;
...@@ -695,11 +712,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, ...@@ -695,11 +712,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pmd_leaf(*pmd) || !pmd_present(*pmd)) if (pmd_leaf(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd; return (pte_t *)pmd;
*nx |= pmd_flags(*pmd) & _PAGE_NX;
*rw &= pmd_flags(*pmd) & _PAGE_RW;
*level = PG_LEVEL_4K; *level = PG_LEVEL_4K;
return pte_offset_kernel(pmd, address); return pte_offset_kernel(pmd, address);
} }
/*
* Lookup the page table entry for a virtual address in a specific pgd.
* Return a pointer to the entry and the level of the mapping.
*/
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level)
{
bool nx, rw;
return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
}
/* /*
* Lookup the page table entry for a virtual address. Return a pointer * Lookup the page table entry for a virtual address. Return a pointer
* to the entry and the level of the mapping. * to the entry and the level of the mapping.
...@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level) ...@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
EXPORT_SYMBOL_GPL(lookup_address); EXPORT_SYMBOL_GPL(lookup_address);
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
unsigned int *level) unsigned int *level, bool *nx, bool *rw)
{ {
if (cpa->pgd) pgd_t *pgd;
return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
address, level); if (!cpa->pgd)
pgd = pgd_offset_k(address);
else
pgd = cpa->pgd + pgd_index(address);
return lookup_address(address, level); return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
} }
/* /*
...@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address, ...@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
pgprot_t old_prot, new_prot, req_prot, chk_prot; pgprot_t old_prot, new_prot, req_prot, chk_prot;
pte_t new_pte, *tmp; pte_t new_pte, *tmp;
enum pg_level level; enum pg_level level;
bool nx, rw;
/* /*
* Check for races, another CPU might have split this page * Check for races, another CPU might have split this page
* up already: * up already:
*/ */
tmp = _lookup_address_cpa(cpa, address, &level); tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (tmp != kpte) if (tmp != kpte)
return 1; return 1;
...@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address, ...@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
psize, CPA_DETECT); psize, CPA_DETECT);
new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages); new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
nx, rw);
/* /*
* If there is a conflict, split the large page. * If there is a conflict, split the large page.
...@@ -1046,6 +1083,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, ...@@ -1046,6 +1083,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
pte_t *pbase = (pte_t *)page_address(base); pte_t *pbase = (pte_t *)page_address(base);
unsigned int i, level; unsigned int i, level;
pgprot_t ref_prot; pgprot_t ref_prot;
bool nx, rw;
pte_t *tmp; pte_t *tmp;
spin_lock(&pgd_lock); spin_lock(&pgd_lock);
...@@ -1053,7 +1091,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, ...@@ -1053,7 +1091,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* Check for races, another CPU might have split this page * Check for races, another CPU might have split this page
* up for us already: * up for us already:
*/ */
tmp = _lookup_address_cpa(cpa, address, &level); tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (tmp != kpte) { if (tmp != kpte) {
spin_unlock(&pgd_lock); spin_unlock(&pgd_lock);
return 1; return 1;
...@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) ...@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
int do_split, err; int do_split, err;
unsigned int level; unsigned int level;
pte_t *kpte, old_pte; pte_t *kpte, old_pte;
bool nx, rw;
address = __cpa_addr(cpa, cpa->curpage); address = __cpa_addr(cpa, cpa->curpage);
repeat: repeat:
kpte = _lookup_address_cpa(cpa, address, &level); kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (!kpte) if (!kpte)
return __cpa_process_fault(cpa, address, primary); return __cpa_process_fault(cpa, address, primary);
...@@ -1619,7 +1658,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) ...@@ -1619,7 +1658,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
new_prot = static_protections(new_prot, address, pfn, 1, 0, new_prot = static_protections(new_prot, address, pfn, 1, 0,
CPA_PROTECT); CPA_PROTECT);
new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1); new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
nx, rw);
new_prot = pgprot_clear_protnone_bits(new_prot); new_prot = pgprot_clear_protnone_bits(new_prot);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment