Commit c56208f6 authored by Anton Blanchard's avatar Anton Blanchard

ppc64: update for pte in highmem changes

parent 79b65813
...@@ -639,7 +639,7 @@ pte_t * find_linux_pte( pgd_t * pgdir, unsigned long ea ) ...@@ -639,7 +639,7 @@ pte_t * find_linux_pte( pgd_t * pgdir, unsigned long ea )
pm = pmd_offset( pg, ea ); pm = pmd_offset( pg, ea );
if ( ! pmd_none( *pm ) ) { if ( ! pmd_none( *pm ) ) {
pt = pte_offset( pm, ea ); pt = pte_offset_kernel( pm, ea );
pte = *pt; pte = *pt;
if ( ! pte_present( pte ) ) if ( ! pte_present( pte ) )
pt = NULL; pt = NULL;
......
...@@ -91,10 +91,8 @@ int idled(void) ...@@ -91,10 +91,8 @@ int idled(void)
paca = (struct Paca *)mfspr(SPRG3); paca = (struct Paca *)mfspr(SPRG3);
while(1) { while(1) {
if (need_resched()) { if (need_resched())
schedule(); schedule();
check_pgt_cache();
}
} }
for (;;) { for (;;) {
...@@ -122,10 +120,8 @@ int idled(void) ...@@ -122,10 +120,8 @@ int idled(void)
} }
} }
HMT_medium(); HMT_medium();
if (need_resched()) { if (need_resched())
schedule(); schedule();
check_pgt_cache();
}
} }
return 0; return 0;
} }
......
...@@ -113,23 +113,6 @@ unsigned long __max_memory; ...@@ -113,23 +113,6 @@ unsigned long __max_memory;
*/ */
mmu_gather_t mmu_gathers[NR_CPUS]; mmu_gather_t mmu_gathers[NR_CPUS];
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
if (pgtable_cache_size > high) {
do {
if (pgd_quicklist)
free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
if (pmd_quicklist)
free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
if (pte_quicklist)
free_page((unsigned long)pte_alloc_one_fast(0, 0)), ++freed;
} while (pgtable_cache_size > low);
}
return freed;
}
void show_mem(void) void show_mem(void)
{ {
int i,free = 0,total = 0,reserved = 0; int i,free = 0,total = 0,reserved = 0;
...@@ -155,7 +138,6 @@ void show_mem(void) ...@@ -155,7 +138,6 @@ void show_mem(void)
printk("%d reserved pages\n",reserved); printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared); printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached); printk("%d pages swap cached\n",cached);
printk("%d pages in page table cache\n",(int)pgtable_cache_size);
show_buffers(); show_buffers();
} }
...@@ -260,7 +242,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) ...@@ -260,7 +242,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
spin_lock(&ioremap_mm.page_table_lock); spin_lock(&ioremap_mm.page_table_lock);
pgdp = pgd_offset_i(ea); pgdp = pgd_offset_i(ea);
pmdp = pmd_alloc(&ioremap_mm, pgdp, ea); pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
ptep = pte_alloc(&ioremap_mm, pmdp, ea); ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
pa = absolute_to_phys(pa); pa = absolute_to_phys(pa);
set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags))); set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
...@@ -336,7 +318,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) ...@@ -336,7 +318,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
if (!pgd_none(*pgd)) { if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, vmaddr); pmd = pmd_offset(pgd, vmaddr);
if (!pmd_none(*pmd)) { if (!pmd_none(*pmd)) {
ptep = pte_offset(pmd, vmaddr); ptep = pte_offset_kernel(pmd, vmaddr);
/* Check if HPTE might exist and flush it if so */ /* Check if HPTE might exist and flush it if so */
pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0)); pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
if ( pte_val(pte) & _PAGE_HASHPTE ) { if ( pte_val(pte) & _PAGE_HASHPTE ) {
...@@ -391,7 +373,7 @@ local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long e ...@@ -391,7 +373,7 @@ local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long e
if ( pmd_end > end ) if ( pmd_end > end )
pmd_end = end; pmd_end = end;
if ( !pmd_none( *pmd ) ) { if ( !pmd_none( *pmd ) ) {
ptep = pte_offset( pmd, start ); ptep = pte_offset_kernel( pmd, start );
do { do {
if ( pte_val(*ptep) & _PAGE_HASHPTE ) { if ( pte_val(*ptep) & _PAGE_HASHPTE ) {
pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0)); pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
......
...@@ -92,10 +92,10 @@ struct Paca ...@@ -92,10 +92,10 @@ struct Paca
* CACHE_LINE_2 0x0080 - 0x00FF * CACHE_LINE_2 0x0080 - 0x00FF
*===================================================================================== *=====================================================================================
*/ */
u64 *pgd_cache; /* 0x00 */ u64 spare1; /* 0x00 */
u64 *pmd_cache; /* 0x08 */ u64 spare2; /* 0x08 */
u64 *pte_cache; /* 0x10 */ u64 spare3; /* 0x10 */
u64 pgtable_cache_sz; /* 0x18 */ u64 spare4; /* 0x18 */
u64 next_jiffy_update_tb; /* TB value for next jiffy update 0x20 */ u64 next_jiffy_update_tb; /* TB value for next jiffy update 0x20 */
u32 lpEvent_count; /* lpEvents processed 0x28 */ u32 lpEvent_count; /* lpEvents processed 0x28 */
u32 prof_multiplier; /* 0x2C */ u32 prof_multiplier; /* 0x2C */
......
...@@ -130,6 +130,13 @@ extern void xmon(struct pt_regs *excp); ...@@ -130,6 +130,13 @@ extern void xmon(struct pt_regs *excp);
#define PAGE_BUG(page) do { BUG(); } while (0) #define PAGE_BUG(page) do { BUG(); } while (0)
/*
* XXX A bug in the current ppc64 compiler prevents an optimisation
* where a divide is replaced by a multiply by shifted inverse. For
 * the moment use page->virtual
*/
#define WANT_PAGE_VIRTUAL 1
/* Pure 2^n version of get_order */ /* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size) extern __inline__ int get_order(unsigned long size)
{ {
......
...@@ -12,116 +12,82 @@ ...@@ -12,116 +12,82 @@
* as published by the Free Software Foundation; either version * as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#define quicklists get_paca()
#define pgd_quicklist (quicklists->pgd_cache) static inline pgd_t *
#define pmd_quicklist (quicklists->pmd_cache) pgd_alloc(struct mm_struct *mm)
#define pte_quicklist (quicklists->pte_cache)
#define pgtable_cache_size (quicklists->pgtable_cache_sz)
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
unsigned long *ret = pgd_quicklist;
if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
} else
ret = NULL;
return (pgd_t *) ret;
}
static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{ {
/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */ pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
pgd_t *pgd = pgd_alloc_one_fast(mm); if (pgd != NULL)
clear_page(pgd);
if (pgd == NULL) {
pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
if (pgd != NULL)
clear_page(pgd);
}
return pgd; return pgd;
} }
static inline void static inline void
pgd_free (pgd_t *pgd) pgd_free(pgd_t *pgd)
{ {
*(unsigned long *)pgd = (unsigned long) pgd_quicklist; free_page((unsigned long)pgd);
pgd_quicklist = (unsigned long *) pgd;
++pgtable_cache_size;
} }
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
static inline pmd_t* static inline pmd_t *
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr) pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{ {
unsigned long *ret = (unsigned long *)pmd_quicklist; int count = 0;
pmd_t *pmd;
if (ret != NULL) {
pmd_quicklist = (unsigned long *)(*ret); do {
ret[0] = 0; pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
--pgtable_cache_size; if (pmd)
} clear_page(pmd);
return (pmd_t *)ret; else {
} current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
static inline pmd_t* }
pmd_alloc_one (struct mm_struct *mm, unsigned long addr) } while (!pmd && (count++ < 10));
{
pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
if (pmd != NULL)
clear_page(pmd);
return pmd; return pmd;
} }
static inline void static inline void
pmd_free (pmd_t *pmd) pmd_free(pmd_t *pmd)
{ {
*(unsigned long *)pmd = (unsigned long) pmd_quicklist; free_page((unsigned long)pmd);
pmd_quicklist = (unsigned long *) pmd;
++pgtable_cache_size;
} }
#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE) #define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
static inline pte_t* static inline pte_t *
pte_alloc_one_fast (struct mm_struct *mm, unsigned long addr) pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{ {
unsigned long *ret = (unsigned long *)pte_quicklist; int count = 0;
pte_t *pte;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret); do {
ret[0] = 0; pte = (pte_t *)__get_free_page(GFP_KERNEL);
--pgtable_cache_size; if (pte)
} clear_page(pte);
return (pte_t *)ret; else {
} current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
static inline pte_t*
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte != NULL)
clear_page(pte);
return pte; return pte;
} }
#define pte_alloc_one_kernel(mm, address) pte_alloc_one((mm), (address))
static inline void static inline void
pte_free (pte_t *pte) pte_free(pte_t *pte)
{ {
*(unsigned long *)pte = (unsigned long) pte_quicklist; free_page((unsigned long)pte);
pte_quicklist = (unsigned long *) pte;
++pgtable_cache_size;
} }
extern int do_check_pgt_cache(int, int); #define pte_free_kernel(pte) pte_free(pte)
#endif /* _PPC64_PGALLOC_H */ #endif /* _PPC64_PGALLOC_H */
...@@ -202,6 +202,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; ...@@ -202,6 +202,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define pmd_present(pmd) ((pmd_val(pmd)) != 0) #define pmd_present(pmd) ((pmd_val(pmd)) != 0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
#define pmd_page(pmd) (__bpn_to_ba(pmd_val(pmd))) #define pmd_page(pmd) (__bpn_to_ba(pmd_val(pmd)))
#define pmd_page_kernel(pmd) pmd_page(pmd)
#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp))) #define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
#define pgd_none(pgd) (!pgd_val(pgd)) #define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) ((pgd_val(pgd)) == 0) #define pgd_bad(pgd) ((pgd_val(pgd)) == 0)
...@@ -222,9 +223,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; ...@@ -222,9 +223,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
/* Find an entry in the third-level page table.. */ /* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \ #define pte_offset_kernel(dir,addr) \
((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) ((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_unmap(pte) do { } while(0)
#define pte_unmap_nested(pte) do { } while(0)
/* to find an entry in a kernel page-table-directory */ /* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */ /* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address) #define pgd_offset_k(address) pgd_offset(&init_mm, address)
...@@ -232,12 +238,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; ...@@ -232,12 +238,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
/* to find an entry in the ioremap page-table-directory */ /* to find an entry in the ioremap page-table-directory */
#define pgd_offset_i(address) (ioremap_pgd + pgd_index(address)) #define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))
/*
* Given a pointer to an mem_map[] entry, return the kernel virtual
* address corresponding to that page.
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment