Commit 76af7e63 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] Make page allocator aware of requests for zeroed memory

This introduces __GFP_ZERO as an additional gfp_mask element to allow
requests for zeroed pages from the page allocator:

 - Modifies the page allocator so that it zeroes memory if __GFP_ZERO is
   set

 - Replaces zeroing of pages after allocation with allocations that pass
   __GFP_ZERO
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 904e737b
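Every call-site conversion in the diff below follows the same pattern: drop the explicit clear_page()/clear_highpage() after a successful allocation and pass __GFP_ZERO in the gfp mask instead, letting the allocator zero the page. A minimal before/after sketch of the pattern (pte_alloc_one_kernel is representative; exact flags vary per call site):

	/* Before: allocate, then zero by hand on success. */
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);

	/* After: __GFP_ZERO makes the page allocator return zeroed memory. */
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);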
@@ -42,10 +42,9 @@ pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret, *init;

-	ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 	init = pgd_offset(&init_mm, 0UL);
 	if (ret) {
-		clear_page(ret);
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
 		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
 			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
@@ -63,9 +62,7 @@ pgd_alloc(struct mm_struct *mm)
 pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pte;
 }
...
@@ -140,10 +140,7 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)

 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
-	return pte;
+	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 }

 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -151,12 +148,10 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *pte;

 #ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
 #else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
-	if (pte)
-		clear_highpage(pte);
 	return pte;
 }
...
@@ -85,8 +85,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret;

-	if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
-		clear_pages(ret, PGDIR_ORDER);
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
 	return ret;
 }
@@ -102,7 +101,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	extern void *early_get_page(void);

 	if (mem_init_done) {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 		if (pte) {
 			struct page *ptepage = virt_to_page(pte);
 			ptepage->mapping = (void *) mm;
@@ -110,8 +109,6 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 		}
 	} else
 		pte = (pte_t *)early_get_page();
-	if (pte)
-		clear_page(pte);
 	return pte;
 }
...
@@ -1687,13 +1687,12 @@ void __init mem_init(void)
 	 * Set up the zero page, mark it reserved, so that page count
 	 * is not manipulated when freeing the page from user ptes.
 	 */
-	mem_map_zero = alloc_pages(GFP_KERNEL, 0);
+	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
 	if (mem_map_zero == NULL) {
 		prom_printf("paging_init: Cannot alloc zero page.\n");
 		prom_halt();
 	}
 	SetPageReserved(mem_map_zero);
-	clear_page(page_address(mem_map_zero));

 	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
 	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
...
@@ -327,9 +327,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;

-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pte;
 }
@@ -337,9 +335,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (pte)
-		clear_highpage(pte);
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	return pte;
 }
...
@@ -135,12 +135,10 @@ static struct packet_data *pkt_alloc_packet_data(void)
 			goto no_bio;

 	for (i = 0; i < PAGES_PER_PACKET; i++) {
-		pkt->pages[i] = alloc_page(GFP_KERNEL);
+		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
 		if (!pkt->pages[i])
 			goto no_page;
 	}
-	for (i = 0; i < PAGES_PER_PACKET; i++)
-		clear_page(page_address(pkt->pages[i]));

 	spin_lock_init(&pkt->lock);
...
@@ -40,9 +40,7 @@ pgd_free(pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (ret)
-		clear_page(ret);
+	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return ret;
 }
...
@@ -50,9 +50,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *pte;

-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	if (pte) {
-		clear_page(pte);
 		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
 		pte += PTRS_PER_PTE;
 	}
@@ -65,10 +64,9 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	if (pte) {
 		void *page = page_address(pte);
-		clear_page(page);
 		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
 	}
...
@@ -24,18 +24,14 @@ extern inline void pgd_free (pgd_t *pgd)
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pte;
 }

 extern inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (pte)
-		clear_page(page_address(pte));
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	return pte;
 }
...
@@ -61,9 +61,7 @@ pgd_alloc (struct mm_struct *mm)
 	pgd_t *pgd = pgd_alloc_one_fast(mm);

 	if (unlikely(pgd == NULL)) {
-		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-		if (likely(pgd != NULL))
-			clear_page(pgd);
+		pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	}
 	return pgd;
 }
@@ -106,10 +104,8 @@ pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
 static inline pmd_t*
 pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (likely(pmd != NULL))
-		clear_page(pmd);
+	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pmd;
 }
@@ -140,20 +136,16 @@ pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
 static inline struct page *
 pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (likely(pte != NULL))
-		clear_page(page_address(pte));
+	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	return pte;
 }

 static inline pte_t *
 pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (likely(pte != NULL))
-		clear_page(pte);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pte;
 }
...
@@ -23,10 +23,7 @@ static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-	if (pgd)
-		clear_page(pgd);
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pgd;
 }
@@ -39,10 +36,7 @@ static __inline__ void pgd_free(pgd_t *pgd)
 static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
-	if (pte)
-		clear_page(pte);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }
@@ -50,10 +44,8 @@ static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
 	unsigned long address)
 {
-	struct page *pte = alloc_page(GFP_KERNEL);
-	if (pte)
-		clear_page(page_address(pte));
+	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }
...
@@ -12,9 +12,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
 	pte_t *pte;

-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	if (pte) {
-		clear_page(pte);
 		__flush_page_to_ram(pte);
 		flush_tlb_kernel_page(pte);
 		nocache_page(pte);
@@ -31,7 +30,7 @@ static inline void pte_free_kernel(pte_t *pte)
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	pte_t *pte;

 	if(!page)
@@ -39,7 +38,6 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 	pte = kmap(page);
 	if (pte) {
-		clear_page(pte);
 		__flush_page_to_ram(pte);
 		flush_tlb_kernel_page(pte);
 		nocache_page(pte);
...
@@ -56,9 +56,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PTE_ORDER);
-	if (pte)
-		clear_page(pte);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
 	return pte;
 }
...
@@ -120,18 +120,14 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline struct page *
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
-	if (likely(page != NULL))
-		clear_page(page_address(page));
+	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return page;
 }

 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (likely(pte != NULL))
-		clear_page(pte);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pte;
 }
...
@@ -44,9 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
+	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 	return pte;
 }
@@ -56,9 +54,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (pte)
-		clear_page(page_address(pte));
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	return pte;
 }
...
@@ -112,9 +112,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT|__GFP_ZERO);
 	return pte;
 }
@@ -123,9 +121,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (pte)
-		clear_page(page_address(pte));
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	return pte;
 }
@@ -150,9 +146,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;

-	pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pmd)
-		clear_page(pmd);
+	pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	return pmd;
 }
...
@@ -73,10 +73,9 @@ static __inline__ pgd_t *get_pgd_fast(void)
 		struct page *page;

 		preempt_enable();
-		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+		page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 		if (page) {
 			ret = (struct page *)page_address(page);
-			clear_page(ret);
 			page->lru.prev = (void *) 2UL;
 			preempt_disable();
...
@@ -37,6 +37,7 @@ struct vm_area_struct;
 #define __GFP_NORETRY	0x1000	/* Do not retry.  Might fail */
 #define __GFP_NO_GROW	0x2000	/* Slab internal usage */
 #define __GFP_COMP	0x4000	/* Add compound page metadata */
+#define __GFP_ZERO	0x8000	/* Return zeroed page on success */

 #define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -52,6 +53,7 @@ struct vm_area_struct;
 #define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
 #define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS)
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_HIGHZERO	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM | __GFP_ZERO)

 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
...
@@ -326,17 +326,15 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
 		node = cpu_to_node(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = alloc_pages_node(node, GFP_KERNEL, 0);
+			page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
 			if (!page)
 				return NOTIFY_BAD;
-			clear_highpage(page);
 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = alloc_pages_node(node, GFP_KERNEL, 0);
+			page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
 			if (!page)
 				goto out_free;
-			clear_highpage(page);
 			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
 		}
 		break;
@@ -510,16 +508,14 @@ static int __init create_hash_tables(void)
 		int node = cpu_to_node(cpu);
 		struct page *page;

-		page = alloc_pages_node(node, GFP_KERNEL, 0);
+		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
 		if (!page)
 			goto out_cleanup;
-		clear_highpage(page);
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
-		page = alloc_pages_node(node, GFP_KERNEL, 0);
+		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
 		if (!page)
 			goto out_cleanup;
-		clear_highpage(page);
 		per_cpu(cpu_profile_hits, cpu)[0]
 				= (struct profile_hit *)page_address(page);
 	}
...
@@ -1673,10 +1673,9 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,

 		if (unlikely(anon_vma_prepare(vma)))
 			goto no_mem;
-		page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+		page = alloc_page_vma(GFP_HIGHZERO, vma, addr);
 		if (!page)
 			goto no_mem;
-		clear_user_highpage(page, addr);

 		spin_lock(&mm->page_table_lock);
 		page_table = pte_offset_map(pmd, addr);
...
@@ -558,6 +558,13 @@ void fastcall free_cold_page(struct page *page)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
+static inline void prep_zero_page(struct page *page, int order)
+{
+	int i;
+
+	for(i = 0; i < (1 << order); i++)
+		clear_highpage(page + i);
+}

 static struct page *
 buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
@@ -593,6 +600,10 @@ buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
 		prep_new_page(page, order);
+
+		if (gfp_flags & __GFP_ZERO)
+			prep_zero_page(page, order);
+
 		if (order && (gfp_flags & __GFP_COMP))
 			prep_compound_page(page, order);
 	}
@@ -805,12 +816,9 @@ fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
 	 */
 	BUG_ON(gfp_mask & __GFP_HIGHMEM);

-	page = alloc_pages(gfp_mask, 0);
-	if (page) {
-		void *address = page_address(page);
-		clear_page(address);
-		return (unsigned long) address;
-	}
+	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
+	if (page)
+		return (unsigned long) page_address(page);
 	return 0;
 }
...
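With the __GFP_ZERO path in buffered_rmqueue() above, get_zeroed_page() reduces to a thin wrapper over the ordinary allocator, so the two calls in this sketch should yield equivalent zeroed pages (gfp_mask must not include __GFP_HIGHMEM, which get_zeroed_page() BUG()s on):

	/* Both return the kernel address of a freshly zeroed page, or 0/NULL on failure. */
	unsigned long a = get_zeroed_page(GFP_KERNEL);
	unsigned long b = __get_free_page(GFP_KERNEL | __GFP_ZERO);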
@@ -369,9 +369,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 		}

 		spin_unlock(&info->lock);
-		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
+		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
 		if (page) {
-			clear_highpage(page);
 			page->nr_swapped = 0;
 		}
 		spin_lock(&info->lock);
@@ -910,7 +909,7 @@ shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	pvma.vm_pgoff = idx;
 	pvma.vm_end = PAGE_SIZE;
-	page = alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
@@ -926,7 +925,7 @@ static inline struct page *
 shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info,
 		 unsigned long idx)
 {
-	return alloc_page(gfp);
+	return alloc_page(gfp | __GFP_ZERO);
 }
 #endif
@@ -1135,7 +1134,6 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 		info->alloced++;
 		spin_unlock(&info->lock);
-		clear_highpage(filepage);
 		flush_dcache_page(filepage);
 		SetPageUptodate(filepage);
 	}
...