Commit 3ac8c845 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] remove __GFP_HIGHIO

Patch From: Hugh Dickins <hugh@veritas.com>

I recently noticed that __GFP_HIGHIO has played no real part since bounce
buffering was converted to mempool in 2.5.12, so this patch (on top of
2.5.58-mm1) removes it, along with GFP_NOHIGHIO and SLAB_NOHIGHIO.
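
(For context, not part of the patch: since the mempool conversion, bounce
pages come from a dedicated reserve of low-memory pages, so no allocation
site needs a separate "may start highmem IO" bit any more. A rough sketch
of that setup, paraphrased from the mm/highmem.c of that era; names follow
the source, details approximate:

	static mempool_t *page_pool, *isa_page_pool;

	/* back the pool with ordinary page allocations */
	static void *page_pool_alloc(int gfp_mask, void *data)
	{
		return alloc_page(gfp_mask);
	}

	static void page_pool_free(void *page, void *data)
	{
		__free_page(page);
	}

	static __init int init_emergency_pool(void)
	{
		/* reserve POOL_SIZE pages up front; mempool_alloc()
		 * dips into this reserve when alloc_page() fails */
		page_pool = mempool_create(POOL_SIZE, page_pool_alloc,
					   page_pool_free, NULL);
		if (!page_pool)
			panic("highmem: cannot allocate page pool\n");
		return 0;
	}

Because the reserve guarantees forward progress, the gfp mask passed down
only governs the opportunistic first attempt, not whether a bounce page
can be had at all.)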

Also removes GFP_KSWAPD, which in 2.5 is the same as GFP_KERNEL; GFP_USER
is kept, since it can serve as a useful annotation, even though in 2.5 it
too is the same as GFP_KERNEL.

One anomaly needs comment: strictly, with __GFP_HIGHIO gone, GFP_NOHIGHIO
translates to GFP_NOFS; but GFP_NOFS looks wrong in the block layer, and if
you follow them down you find that GFP_NOFS and GFP_NOIO behave the same
way in mempool_alloc, so I've used the less surprising GFP_NOIO to replace
GFP_NOHIGHIO.
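
(To see why: a paraphrased sketch of the head of mempool_alloc() as of
2.5.5x, not the exact source. The first attempt strips __GFP_WAIT and
__GFP_IO from the caller's mask, so GFP_NOFS (__GFP_WAIT | __GFP_IO) and
GFP_NOIO (__GFP_WAIT) present the identical mask to the underlying
allocator, and both fall back on the pool's reserve in the same way:

	void *mempool_alloc(mempool_t *pool, int gfp_mask)
	{
		void *element;
		/* first try never waits and never starts IO, whichever
		 * of GFP_NOFS/GFP_NOIO the caller passed */
		int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		element = pool->alloc(gfp_nowait, pool->pool_data);
		if (element)
			return element;

		/* otherwise take an element from the pool's reserve, or
		 * sleep on pool->wait (if __GFP_WAIT allows) and retry */
		...
	}
)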
parent 99c88bc2
@@ -274,7 +274,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
 	} else
-		q->bounce_gfp = GFP_NOHIGHIO;
+		q->bounce_gfp = GFP_NOIO;
 
 	/*
 	 * keep this for debugging for now...
@@ -292,7 +292,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
 
 extern int init_emergency_isa_pool(void);
-inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
 
 #define rq_for_each_bio(bio, rq)	\
 	if ((rq->bio))			\
@@ -14,20 +14,17 @@
 /* Action modifiers - doesn't change the zoning */
 #define __GFP_WAIT	0x10	/* Can wait and reschedule? */
 #define __GFP_HIGH	0x20	/* Should access emergency pools? */
-#define __GFP_IO	0x40	/* Can start low memory physical IO? */
-#define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
-#define __GFP_FS	0x100	/* Can call down to low-level FS? */
-#define __GFP_COLD	0x200	/* Cache-cold page required */
-#define __GFP_NOWARN	0x400	/* Suppress page allocation failure warning */
-
-#define GFP_NOHIGHIO	( __GFP_WAIT | __GFP_IO)
-#define GFP_NOIO	( __GFP_WAIT)
-#define GFP_NOFS	( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define __GFP_IO	0x40	/* Can start physical IO? */
+#define __GFP_FS	0x80	/* Can call down to low-level FS? */
+#define __GFP_COLD	0x100	/* Cache-cold page required */
+#define __GFP_NOWARN	0x200	/* Suppress page allocation failure warning */
+
 #define GFP_ATOMIC	(__GFP_HIGH)
-#define GFP_USER	( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_HIGHUSER	( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL	( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_KSWAPD	( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_NOIO	(__GFP_WAIT)
+#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
 
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
@@ -17,13 +17,12 @@ typedef struct kmem_cache_s kmem_cache_t;
 /* flags for kmem_cache_alloc() */
 #define	SLAB_NOFS		GFP_NOFS
 #define	SLAB_NOIO		GFP_NOIO
-#define	SLAB_NOHIGHIO		GFP_NOHIGHIO
 #define	SLAB_ATOMIC		GFP_ATOMIC
 #define	SLAB_USER		GFP_USER
 #define	SLAB_KERNEL		GFP_KERNEL
 #define	SLAB_DMA		GFP_DMA
-#define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS|__GFP_COLD|__GFP_NOWARN)
+#define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|__GFP_COLD|__GFP_NOWARN)
 
 #define	SLAB_NO_GROW		0x00001000UL	/* don't grow a cache */
 
 /* flags to pass to kmem_cache_create().
@@ -366,7 +366,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
+static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 			mempool_t *pool)
 {
 	struct page *page;
@@ -387,7 +387,7 @@ void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
 		/*
 		 * irk, bounce it
 		 */
 		if (!bio)
-			bio = bio_alloc(bio_gfp, (*bio_orig)->bi_vcnt);
+			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
 
 		to = bio->bi_io_vec + i;
@@ -447,10 +447,9 @@ void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
 	*bio_orig = bio;
 }
 
-inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 {
 	mempool_t *pool;
-	int bio_gfp;
 
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
@@ -460,20 +459,16 @@ inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	if (!(q->bounce_gfp & GFP_DMA)) {
 		if (q->bounce_pfn >= blk_max_pfn)
 			return;
-		bio_gfp = GFP_NOHIGHIO;
 		pool = page_pool;
 	} else {
 		BUG_ON(!isa_page_pool);
-		bio_gfp = GFP_NOIO;
 		pool = isa_page_pool;
 	}
 
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
@@ -894,9 +894,9 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
 			max_scan = to_reclaim * 2;
 			if (max_scan < SWAP_CLUSTER_MAX)
 				max_scan = SWAP_CLUSTER_MAX;
-			to_free -= shrink_zone(zone, max_scan, GFP_KSWAPD,
+			to_free -= shrink_zone(zone, max_scan, GFP_KERNEL,
 					to_reclaim, &nr_mapped, ps, priority);
-			shrink_slab(max_scan + nr_mapped, GFP_KSWAPD);
+			shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
 			if (zone->all_unreclaimable)
 				continue;
 			if (zone->pages_scanned > zone->present_pages * 2)