Commit aa5c158e authored by Dave Chinner, committed by Ben Myers

xfs: kill XBF_DONTBLOCK

Just about all callers of xfs_buf_read() and xfs_buf_get() use XBF_DONT_BLOCK.
This is used to make memory allocation use GFP_NOFS rather than GFP_KERNEL to
avoid recursion through memory reclaim back into the filesystem.
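
For context, the two masks in kernels of this era differ only in the __GFP_FS bit; simplified from include/linux/gfp.h of the time (a sketch for illustration, not part of this patch):

        #define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
        #define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)

Clearing __GFP_FS tells the allocator that direct reclaim must not call back into filesystem code, which is what breaks the recursion described above.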

All the blocking get calls in growfs occur inside a transaction, even though
they are not part of the transaction, so all allocation will be GFP_NOFS due to
the task flag PF_FSTRANS being set. The blocking read calls occur during log
recovery, so they will probably be unaffected by converting to GFP_NOFS
allocations.
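
This works because XFS's allocation wrappers already strip __GFP_FS when the
task is inside a transaction. A simplified sketch of kmem_flags_convert() from
fs/xfs/kmem.h of this era (details may differ slightly):

        static inline gfp_t
        kmem_flags_convert(unsigned int __nocast flags)
        {
                gfp_t   lflags;

                if (flags & KM_NOSLEEP) {
                        lflags = GFP_ATOMIC | __GFP_NOWARN;
                } else {
                        lflags = GFP_KERNEL | __GFP_NOWARN;
                        /* in a transaction, or caller asked for KM_NOFS:
                         * forbid reclaim from recursing into the fs */
                        if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
                                lflags &= ~__GFP_FS;
                }
                return lflags;
        }

So any kmem allocation issued while PF_FSTRANS is set already behaves as
GFP_NOFS, whatever the buffer flags say.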

Hence make the XBF_DONT_BLOCK behaviour (GFP_NOFS allocation) the default for all buffers and kill the flag.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent 7ca790a5
@@ -2114,8 +2114,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
 		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
 		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-		bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
-				 XBF_DONT_BLOCK);
+		bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
 		if (!bp)
 			return ENOMEM;
@@ -56,11 +56,7 @@ static struct workqueue_struct *xfslogd_workqueue;
 #endif
 
 #define xb_to_gfp(flags) \
-	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
-	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-
-#define xb_to_km(flags) \
-	(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
 
 static inline int
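
With xb_to_km() gone, every buffer allocation is hard-coded to KM_NOFS, and
xb_to_gfp() collapses to two cases. Illustratively (hypothetical snippet, not
from the patch):

        gfp_t gfp = xb_to_gfp(0);               /* GFP_NOFS | __GFP_NOWARN */
        gfp_t ra  = xb_to_gfp(XBF_READ_AHEAD);  /* __GFP_NORETRY | __GFP_NOWARN */

Readahead allocations stay opportunistic: __GFP_NORETRY makes them fail fast
under memory pressure rather than retry, since readahead is best-effort anyway.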
@@ -178,14 +174,14 @@ xfs_buf_alloc(
 {
 	struct xfs_buf		*bp;
 
-	bp = kmem_zone_zalloc(xfs_buf_zone, xb_to_km(flags));
+	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 	if (unlikely(!bp))
 		return NULL;
 
 	/*
 	 * We don't want certain flags to appear in b_flags.
 	 */
-	flags &= ~(XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+	flags &= ~(XBF_MAPPED|XBF_READ_AHEAD);
 
 	atomic_set(&bp->b_hold, 1);
 	atomic_set(&bp->b_lru_ref, 1);
@@ -239,7 +235,7 @@ _xfs_buf_get_pages(
 		bp->b_pages = bp->b_page_array;
 	} else {
 		bp->b_pages = kmem_alloc(sizeof(struct page *) *
-					 page_count, xb_to_km(flags));
+					 page_count, KM_NOFS);
 		if (bp->b_pages == NULL)
 			return -ENOMEM;
 	}
@@ -316,7 +312,7 @@ xfs_buf_allocate_memory(
 	 */
 	size = BBTOB(bp->b_length);
 	if (size < PAGE_SIZE) {
-		bp->b_addr = kmem_alloc(size, xb_to_km(flags));
+		bp->b_addr = kmem_alloc(size, KM_NOFS);
 		if (!bp->b_addr) {
 			/* low memory - use alloc_page loop instead */
 			goto use_alloc_page;
@@ -659,7 +655,7 @@ xfs_buf_readahead(
 		return;
 
 	xfs_buf_read(target, blkno, numblks,
-		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
 }
 
 /*
@@ -750,7 +746,7 @@ xfs_buf_associate_memory(
 	bp->b_pages = NULL;
 	bp->b_addr = mem;
 
-	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
+	rval = _xfs_buf_get_pages(bp, page_count, 0);
 	if (rval)
 		return rval;
@@ -53,7 +53,6 @@ typedef enum {
 
 /* flags used only as arguments to access routines */
 #define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
-#define XBF_DONT_BLOCK	 (1 << 17)/* do not block in current thread */
 
 /* flags used only internally */
 #define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
@@ -74,7 +73,6 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_FUA,		"FUA" }, \
 	{ XBF_FLUSH,		"FLUSH" }, \
 	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
-	{ XBF_DONT_BLOCK,	"DONT_BLOCK" },	/* ditto */\
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }
@@ -148,8 +148,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL)
-		return xfs_buf_get(target_dev, blkno, len,
-				   flags | XBF_DONT_BLOCK);
+		return xfs_buf_get(target_dev, blkno, len, flags);
 
 	/*
 	 * If we find the buffer in the cache with this transaction
@@ -174,15 +173,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
 		return (bp);
 	}
 
-	/*
-	 * We always specify the XBF_DONT_BLOCK flag within a transaction
-	 * so that get_buf does not try to push out a delayed write buffer
-	 * which might cause another transaction to take place (if the
-	 * buffer was delayed alloc).  Such recursive transactions can
-	 * easily deadlock with our current transaction as well as cause
-	 * us to run out of stack space.
-	 */
-	bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
+	bp = xfs_buf_get(target_dev, blkno, len, flags);
 	if (bp == NULL) {
 		return NULL;
 	}
@@ -283,7 +274,7 @@ xfs_trans_read_buf(
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL) {
-		bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
+		bp = xfs_buf_read(target, blkno, len, flags);
 		if (!bp)
 			return (flags & XBF_TRYLOCK) ?
 					EAGAIN : XFS_ERROR(ENOMEM);
@@ -367,15 +358,7 @@ xfs_trans_read_buf(
 		return 0;
 	}
 
-	/*
-	 * We always specify the XBF_DONT_BLOCK flag within a transaction
-	 * so that get_buf does not try to push out a delayed write buffer
-	 * which might cause another transaction to take place (if the
-	 * buffer was delayed alloc).  Such recursive transactions can
-	 * easily deadlock with our current transaction as well as cause
-	 * us to run out of stack space.
-	 */
-	bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
+	bp = xfs_buf_read(target, blkno, len, flags);
 	if (bp == NULL) {
 		*bpp = NULL;
 		return (flags & XBF_TRYLOCK) ?
@@ -82,7 +82,7 @@ xfs_readlink_bmap(
 		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
 		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
-				  XBF_MAPPED | XBF_DONT_BLOCK);
+				  XBF_MAPPED);
 		if (!bp)
 			return XFS_ERROR(ENOMEM);
 		error = bp->b_error;
@@ -1966,7 +1966,7 @@ xfs_zero_remaining_bytes(
 	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
 					mp->m_rtdev_targp : mp->m_ddev_targp,
-				  BTOBB(mp->m_sb.sb_blocksize), XBF_DONT_BLOCK);
+				  BTOBB(mp->m_sb.sb_blocksize), 0);
 	if (!bp)
 		return XFS_ERROR(ENOMEM);