Commit a7858747 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Rely on address instead of pte_user()

pte_user() may return 'false' when a user page is PAGE_NONE.

In that case it is still a user page and needs to be handled
as such. So use is_kernel_addr() instead.

And remove "user" text from ptdump as ptdump only dumps
kernel tables.

Note: no change done for book3s/64, which still has its 'privilege' bit.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/c778dad89fad07727c31717a9c62f45357c29ebc.1695659959.git.christophe.leroy@csgroup.eu
parent 69339071
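
An illustrative aside (not part of the commit): a minimal C sketch of the reasoning above. The helper name and the boundary constant are hypothetical stand-ins chosen for illustration; the kernel's real test is is_kernel_addr().

#include <stdbool.h>

/* Hypothetical stand-in for the platform's kernel/user address boundary. */
#define KERNEL_ADDR_BOUNDARY	0xc0000000UL

/*
 * Address-based test.  A user mapping created with PAGE_NONE has no
 * access permission bits set, so a permission-bit helper such as
 * pte_user() can report "not a user page" for it, even though it is one.
 * The virtual address, by contrast, always tells user space apart from
 * kernel space.
 */
static inline bool addr_is_user(unsigned long addr)
{
	return addr < KERNEL_ADDR_BOUNDARY;	/* roughly !is_kernel_addr(addr) */
}
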
@@ -58,7 +58,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 	*p = __pte(new);
-	if (IS_ENABLED(CONFIG_44x) && (old & _PAGE_USER) && (old & _PAGE_EXEC))
+	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
 		icache_44x_need_flush = 1;
 	/* huge pages use the old page table lock */
@@ -127,7 +127,7 @@ static void setibat(int index, unsigned long virt, phys_addr_t phys,
 	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
 	bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
 	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
-	if (flags & _PAGE_USER)
+	if (!is_kernel_addr(virt))
 		bat[0].batu |= 1;	/* Vp = 1 */
 }
@@ -280,7 +280,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
 	wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
 	bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
 	bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
-	if (flags & _PAGE_USER)
+	if (!is_kernel_addr(virt))
 		bat[1].batu |= 1;	/* Vp = 1 */
 	if (flags & _PAGE_GUARDED) {
 		/* G bit must be zero in IBATs */
@@ -122,7 +122,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 	TLBCAM[index].MAS7 = (u64)phys >> 32;
 	/* Below is unlikely -- only for large user pages or similar */
-	if (pte_user(__pte(flags))) {
+	if (!is_kernel_addr(virt)) {
 		TLBCAM[index].MAS3 |= MAS3_UR;
 		TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
 		TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
@@ -46,13 +46,13 @@ static inline int is_exec_fault(void)
  * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
  * on userspace PTEs
  */
-static inline int pte_looks_normal(pte_t pte)
+static inline int pte_looks_normal(pte_t pte, unsigned long addr)
 {
 	if (pte_present(pte) && !pte_special(pte)) {
 		if (pte_ci(pte))
 			return 0;
-		if (pte_user(pte))
+		if (!is_kernel_addr(addr))
 			return 1;
 	}
 	return 0;
@@ -79,11 +79,11 @@ static struct folio *maybe_pte_to_folio(pte_t pte)
  * support falls into the same category.
  */
-static pte_t set_pte_filter_hash(pte_t pte)
+static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
 {
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
-				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
+	if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
+					     cpu_has_feature(CPU_FTR_NOEXECUTE))) {
 		struct folio *folio = maybe_pte_to_folio(pte);
 		if (!folio)
 			return pte;
@@ -97,7 +97,7 @@ static pte_t set_pte_filter_hash(pte_t pte)
 #else /* CONFIG_PPC_BOOK3S */
-static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
+static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }
 #endif /* CONFIG_PPC_BOOK3S */
@@ -105,7 +105,7 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
  */
-static inline pte_t set_pte_filter(pte_t pte)
+static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	struct folio *folio;
@@ -113,10 +113,10 @@ static inline pte_t set_pte_filter(pte_t pte)
 		return pte;
 	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		return set_pte_filter_hash(pte);
+		return set_pte_filter_hash(pte, addr);
 	/* No exec permission in the first place, move on */
-	if (!pte_exec(pte) || !pte_looks_normal(pte))
+	if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
 		return pte;
 	/* If you set _PAGE_EXEC on weird pages you're on your own */
@@ -200,7 +200,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 * this context might not have been activated yet when this
	 * is called.
	 */
-	pte = set_pte_filter(pte);
+	pte = set_pte_filter(pte, addr);
 	/* Perform the setting of the PTE */
 	arch_enter_lazy_mmu_mode();
@@ -301,7 +301,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_
 	 */
 	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
-	pte = set_pte_filter(pte);
+	pte = set_pte_filter(pte, addr);
 	val = pte_val(pte);
@@ -20,11 +20,6 @@ static const struct flag_info flag_array[] = {
 #endif
 		.set	= "huge",
 		.clear	= " ",
-	}, {
-		.mask	= _PAGE_SH,
-		.val	= 0,
-		.set	= "user",
-		.clear	= " ",
 	}, {
 		.mask	= _PAGE_RO | _PAGE_NA,
 		.val	= 0,
@@ -11,11 +11,6 @@
 static const struct flag_info flag_array[] = {
 	{
-		.mask	= _PAGE_USER,
-		.val	= _PAGE_USER,
-		.set	= "user",
-		.clear	= " ",
-	}, {
 		.mask	= _PAGE_RW,
 		.val	= 0,
 		.set	= "r ",