Commit 68b5a30f authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] use __GFP_REPEAT in pte_alloc_one()

Remove all the open-coded retry loops in various architectures, use
__GFP_REPEAT.

It could be that at some time in the future we change __GFP_REPEAT to give up
after ten seconds or so, so all the checks for failed allocations are
retained.
parent 8db50e8b
...@@ -66,19 +66,9 @@ pgd_alloc(struct mm_struct *mm) ...@@ -66,19 +66,9 @@ pgd_alloc(struct mm_struct *mm)
pte_t * pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte; pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
long timeout = 10;
retry:
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
else if (--timeout >= 0) {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
goto retry;
}
return pte; return pte;
} }
......
...@@ -131,39 +131,23 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) ...@@ -131,39 +131,23 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
int count = 0; pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
pte_t *pte;
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte; return pte;
} }
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
int count = 0;
struct page *pte; struct page *pte;
do {
#if CONFIG_HIGHPTE #if CONFIG_HIGHPTE
pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0); pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else #else
pte = alloc_pages(GFP_KERNEL, 0); pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif #endif
if (pte) if (pte)
clear_highpage(pte); clear_highpage(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte; return pte;
} }
......
...@@ -76,15 +76,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) ...@@ -76,15 +76,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
extern void *early_get_page(void); extern void *early_get_page(void);
int timeout = 0; int timeout = 0;
if (mem_init_done) { if (mem_init_done)
while ((pte = (pte_t *) __get_free_page(GFP_KERNEL)) == NULL pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
&& ++timeout < 10) { else
set_current_state(TASK_UNINTERRUPTIBLE); pte = (pte_t *)early_get_page();
schedule_timeout(HZ); if (pte)
}
} else
pte = (pte_t *) early_get_page();
if (pte != NULL)
clear_page(pte); clear_page(pte);
return pte; return pte;
} }
...@@ -92,19 +88,15 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) ...@@ -92,19 +88,15 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
struct page *pte; struct page *pte;
int timeout = 0;
#ifdef CONFIG_HIGHPTE #ifdef CONFIG_HIGHPTE
int flags = GFP_KERNEL | __GFP_HIGHMEM; int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else #else
int flags = GFP_KERNEL; int flags = GFP_KERNEL | __GFP_REPEAT;
#endif #endif
while ((pte = alloc_pages(flags, 0)) == NULL) { pte = alloc_pages(flags, 0);
if (++timeout >= 10) if (pte)
return NULL;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
}
clear_highpage(pte); clear_highpage(pte);
return pte; return pte;
} }
......
...@@ -1901,7 +1901,7 @@ static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add ...@@ -1901,7 +1901,7 @@ static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL) if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
return pte; return pte;
pte = (pte_t *)__get_free_page(GFP_KERNEL); pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) if (pte)
memset(pte, 0, PAGE_SIZE); memset(pte, 0, PAGE_SIZE);
return pte; return pte;
......
...@@ -810,35 +810,21 @@ void pgd_free(pgd_t *pgd) ...@@ -810,35 +810,21 @@ void pgd_free(pgd_t *pgd)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
int count = 0;
pte_t *pte; pte_t *pte;
do { pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte; return pte;
} }
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
int count = 0;
struct page *pte; struct page *pte;
do { pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
pte = alloc_pages(GFP_KERNEL, 0);
if (pte) if (pte)
clear_highpage(pte); clear_highpage(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte; return pte;
} }
......
...@@ -27,17 +27,9 @@ ...@@ -27,17 +27,9 @@
static inline pte_t * static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{ {
int count = 0;
pte_t *pte; pte_t *pte;
do { pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (!pte) {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
if (pte) { if (pte) {
clear_page(pte); clear_page(pte);
clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
...@@ -51,16 +43,8 @@ static inline struct page * ...@@ -51,16 +43,8 @@ static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr) pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{ {
struct page *pte; struct page *pte;
int count = 0;
do {
pte = alloc_pages(GFP_KERNEL, 0);
if (!pte) {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (pte) { if (pte) {
void *page = page_address(pte); void *page = page_address(pte);
clear_page(page); clear_page(page);
......
...@@ -62,7 +62,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -62,7 +62,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte; pte_t *pte;
pte = (pte_t *) __get_free_page(GFP_KERNEL); pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
return pte; return pte;
......
...@@ -125,7 +125,7 @@ pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte) ...@@ -125,7 +125,7 @@ pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
static inline struct page * static inline struct page *
pte_alloc_one (struct mm_struct *mm, unsigned long addr) pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{ {
struct page *pte = alloc_pages(GFP_KERNEL, 0); struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (likely(pte != NULL)) if (likely(pte != NULL))
clear_page(page_address(pte)); clear_page(page_address(pte));
...@@ -135,7 +135,7 @@ pte_alloc_one (struct mm_struct *mm, unsigned long addr) ...@@ -135,7 +135,7 @@ pte_alloc_one (struct mm_struct *mm, unsigned long addr)
static inline pte_t * static inline pte_t *
pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
{ {
pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL); pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(pte != NULL)) if (likely(pte != NULL))
clear_page(pte); clear_page(pte);
......
...@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad ...@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
{ {
pte_t *pte; pte_t *pte;
pte = (pte_t *) __get_free_page(GFP_KERNEL); pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) { if (pte) {
clear_page(pte); clear_page(pte);
__flush_page_to_ram(pte); __flush_page_to_ram(pte);
...@@ -30,7 +30,7 @@ static inline void pte_free_kernel(pte_t *pte) ...@@ -30,7 +30,7 @@ static inline void pte_free_kernel(pte_t *pte)
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
struct page *page = alloc_pages(GFP_KERNEL, 0); struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
pte_t *pte; pte_t *pte;
if(!page) if(!page)
......
...@@ -39,7 +39,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page) ...@@ -39,7 +39,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
unsigned long page = __get_free_page(GFP_KERNEL); unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (!page) if (!page)
return NULL; return NULL;
...@@ -51,7 +51,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, ...@@ -51,7 +51,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
static inline struct page *pte_alloc_one(struct mm_struct *mm, static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
struct page *page = alloc_pages(GFP_KERNEL, 0); struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (page == NULL) if (page == NULL)
return NULL; return NULL;
......
...@@ -132,7 +132,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -132,7 +132,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte; pte_t *pte;
pte = (pte_t *) __get_free_page(GFP_KERNEL); pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
return pte; return pte;
......
...@@ -93,7 +93,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -93,7 +93,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte; pte_t *pte;
pte = (pte_t *) __get_free_page(GFP_KERNEL); pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
return pte; return pte;
......
...@@ -73,7 +73,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) ...@@ -73,7 +73,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline struct page * static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address) pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
struct page *page = alloc_page(GFP_KERNEL); struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(page != NULL)) if (likely(page != NULL))
clear_page(page_address(page)); clear_page(page_address(page));
return page; return page;
...@@ -82,7 +82,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -82,7 +82,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pte_t * static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{ {
pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL); pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(pte != NULL)) if (likely(pte != NULL))
clear_page(pte); clear_page(pte);
return pte; return pte;
......
...@@ -62,19 +62,11 @@ pmd_free(pmd_t *pmd) ...@@ -62,19 +62,11 @@ pmd_free(pmd_t *pmd)
static inline pte_t * static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{ {
int count = 0;
pte_t *pte; pte_t *pte;
do { pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte; return pte;
} }
......
...@@ -120,20 +120,13 @@ static inline pte_t * ...@@ -120,20 +120,13 @@ static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{ {
pte_t *pte; pte_t *pte;
int count;
int i; int i;
count = 0; pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte != NULL) { if (pte != NULL) {
for (i=0; i < PTRS_PER_PTE; i++) for (i=0; i < PTRS_PER_PTE; i++)
pte_clear(pte+i); pte_clear(pte+i);
} else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
} }
} while (!pte && (count++ < 10));
return pte; return pte;
} }
......
...@@ -35,7 +35,7 @@ static inline void pgd_free(pgd_t *pgd) ...@@ -35,7 +35,7 @@ static inline void pgd_free(pgd_t *pgd)
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address) static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL); pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) if (pte)
clear_page(pte); clear_page(pte);
return pte; return pte;
......
...@@ -48,12 +48,12 @@ static inline void pgd_free (pgd_t *pgd) ...@@ -48,12 +48,12 @@ static inline void pgd_free (pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
return (pte_t *) get_zeroed_page(GFP_KERNEL); return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
} }
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
void *p = (void *)get_zeroed_page(GFP_KERNEL); void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
if (!p) if (!p)
return NULL; return NULL;
return virt_to_page(p); return virt_to_page(p);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment