Commit f5ea64dc authored by David Gibson, committed by Benjamin Herrenschmidt

powerpc: Get USE_STRICT_MM_TYPECHECKS working again

The typesafe version of the powerpc pagetable handling (with
USE_STRICT_MM_TYPECHECKS defined) has bitrotted again.  This patch
makes a bunch of small fixes to get it back to building status.

It's still not enabled by default as gcc still generates worse
code with it for some reason.
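
For context: with USE_STRICT_MM_TYPECHECKS the page-table types become one-member structs, so the accessor macros are the only legal way at the raw bits, and bare integer comparisons or assignments on a pte_t stop compiling. A simplified sketch of the usual pattern (illustrative only; the real powerpc definitions live in page.h and use pte_basic_t rather than a hard-coded unsigned long):

#ifdef USE_STRICT_MM_TYPECHECKS
/* Strict variant: pte_t is a distinct struct, so "pte != *ptep" is a type error */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#else
/* Loose variant: pte_t is a plain integer, so such mixups compile silently */
typedef unsigned long pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)
#endif

Hence the mechanical conversions in the hunks below: raw comparisons become pte_val(a) != pte_val(b), and raw stores are rebuilt with __pte()/__pgprot()/pgprot_val() as appropriate.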
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent cd301c7b
@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 {
-	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
@@ -431,7 +431,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
 			 _PAGE_USER | _PAGE_ACCESSED | \
 			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
@@ -570,9 +570,9 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline unsigned long pte_pgprot(pte_t pte)
+static inline pgprot_t pte_pgprot(pte_t pte)
 {
-	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
 }
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -688,7 +688,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 		: "r" (pte) : "memory");
 #else
-	*ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
 #endif
 }
@@ -117,7 +117,7 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
-#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
 			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
 			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
 			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
@@ -264,9 +264,9 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline unsigned long pte_pgprot(pte_t pte)
+static inline pgprot_t pte_pgprot(pte_t pte)
 {
-	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
 }
 /* Atomic PTE updates */
@@ -41,7 +41,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		page = pte_page(pte);
 		if (!page_cache_get_speculative(page))
 			return 0;
-		if (unlikely(pte != *ptep)) {
+		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 			put_page(page);
 			return 0;
 		}
@@ -92,7 +92,7 @@ static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
 		*nr -= refs;
 		return 0;
 	}
-	if (unlikely(pte != *ptep)) {
+	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 		/* Could be optimized better */
 		while (*nr) {
 			put_page(page);
@@ -237,7 +237,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		pgd_t pgd = *pgdp;
 		VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
-		pr_debug(" %016lx: normal pgd %p\n", addr, (void *)pgd);
+		pr_debug(" %016lx: normal pgd %p\n", addr,
+			 (void *)pgd_val(pgd));
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
@@ -541,7 +541,7 @@ static unsigned long __init htab_get_table_size(void)
 void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-				 PAGE_KERNEL, mmu_linear_psize,
+				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
 				 mmu_kernel_ssize));
 }
@@ -649,7 +649,7 @@ void __init htab_initialize(void)
 		mtspr(SPRN_SDR1, _SDR1);
 	}
-	prot = PAGE_KERNEL;
+	prot = pgprot_val(PAGE_KERNEL);
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -228,8 +228,8 @@ int __meminit vmemmap_populate(struct page *start_page,
 			start, p, __pa(p));
 		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
-					   PAGE_KERNEL, mmu_vmemmap_psize,
-					   mmu_kernel_ssize);
+					   pgprot_val(PAGE_KERNEL),
+					   mmu_vmemmap_psize, mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}