Commit 53f9fc93 authored by Al Viro, committed by Linus Torvalds

[PATCH] gfp_t: remaining bits of arch/*

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 1ef64e67
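
Every hunk below makes the same mechanical change: variables and parameters that carry page-allocator flags move from plain int to gfp_t, the kernel typedef that lets sparse type-check allocation flags. A minimal sketch of the resulting convention, assuming a hypothetical helper (example_alloc is illustrative and not part of this commit):

/*
 * Illustrative only: allocation flags are held in a gfp_t, never an int,
 * and passed through to the allocator unchanged.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

static void *example_alloc(size_t size, int atomic)
{
	gfp_t flags = atomic ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(size, flags);
}
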
@@ -397,7 +397,7 @@ pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 {
 	void *cpu_addr;
 	long order = get_order(size);
-	int gfp = GFP_ATOMIC;
+	gfp_t gfp = GFP_ATOMIC;
 
 try_again:
 	cpu_addr = (void *)__get_free_pages(gfp, order);
...
@@ -939,7 +939,7 @@ xpc_map_bte_errors(bte_result_t error)
 static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
 	/* see if kmalloc will give us cachline aligned memory by default */
 	*base = kmalloc(size, flags);
...
@@ -114,9 +114,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+	gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
 #else
-	int flags = GFP_KERNEL | __GFP_REPEAT;
+	gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
 #endif
 
 	ptepage = alloc_pages(flags, 0);
...
@@ -49,7 +49,7 @@ IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_RAW,
 #else
-extern void * mykmalloc(size_t s, int gfp);
+extern void * mykmalloc(size_t s, gfp_t gfp);
 extern void mykfree(void *);
 #endif
...
@@ -39,7 +39,7 @@ static char * page = NULL ;
 #else
-void * mykmalloc(size_t s, int gfp)
+void * mykmalloc(size_t s, gfp_t gfp)
 {
 	static char * page;
 	static size_t free;
...
@@ -252,7 +252,7 @@ void paging_init(void)
 #endif
 }
 
-struct page *arch_validate(struct page *page, int mask, int order)
+struct page *arch_validate(struct page *page, gfp_t mask, int order)
 {
 	unsigned long addr, zero = 0;
 	int i;
...
@@ -80,7 +80,7 @@ void free_stack(unsigned long stack, int order)
 unsigned long alloc_stack(int order, int atomic)
 {
 	unsigned long page;
-	int flags = GFP_KERNEL;
+	gfp_t flags = GFP_KERNEL;
 
 	if (atomic)
 		flags = GFP_ATOMIC;
...
@@ -115,7 +115,7 @@ extern unsigned long uml_physmem;
 #define pfn_valid(pfn) ((pfn) < max_mapnr)
 #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
 
-extern struct page *arch_validate(struct page *page, int mask, int order);
+extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
 #define HAVE_ARCH_VALIDATE
 
 extern void arch_free_page(struct page *page, int order);
...