Commit e68ff9cd authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: replace xfs_buf_geterror() with bp->b_error
  xfs: Check the return value of xfs_buf_read() for NULL
  "xfs: fix error handling for synchronous writes" revisited
  xfs: set cursor in xfs_ail_splice() even when AIL was empty
  xfs: Remove the macro XFS_BUFTARG_NAME
  xfs: Remove the macro XFS_BUF_TARGET
  xfs: Remove the macro XFS_BUF_SET_TARGET
  Replace the macro XFS_BUF_ISPINNED with helper xfs_buf_ispinned
  xfs: Remove the macro XFS_BUF_SET_PTR
  xfs: Remove the macro XFS_BUF_PTR
  xfs: Remove macro XFS_BUF_SET_START
  xfs: Remove macro XFS_BUF_HOLD
  xfs: Remove macro XFS_BUF_BUSY and family
  xfs: Remove the macro XFS_BUF_ERROR and family
  xfs: Remove the macro XFS_BUF_BFLAGS
parents c44efbaa e5702805
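
The pattern common to this series is replacing field-hiding macros with either direct member access (bp->b_error, bp->b_addr, bp->b_target) or typed static inline helpers (xfs_buf_ispinned(), xfs_buf_geterror(), xfs_buf_target_name()). As a rough userspace sketch of that conversion, using a simplified stand-in struct and C11 atomics rather than the kernel's atomic_t and buffer definitions:

/*
 * Illustrative sketch only: the struct, the atomic type and the macro body
 * here are simplified stand-ins, not the kernel definitions.
 */
#include <stdatomic.h>
#include <stdio.h>

struct xfs_buf {
	atomic_int	b_pin_count;	/* stand-in for the kernel's atomic_t */
	int		b_error;	/* errno value, 0 if no error */
};

/* Old style: a macro that hides the field access. */
#define XFS_BUF_ISPINNED(bp)	atomic_load(&((bp)->b_pin_count))

/* New style: a typed static inline helper, as introduced by this series. */
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_load(&bp->b_pin_count);
}

int main(void)
{
	struct xfs_buf bp = { .b_pin_count = 1, .b_error = 0 };

	/* Both forms read the same field; the helper adds type checking. */
	printf("macro: %d, helper: %d, error: %d\n",
	       XFS_BUF_ISPINNED(&bp), xfs_buf_ispinned(&bp), bp.b_error);
	return 0;
}

The inline helper generates the same code as the macro but gives the compiler a real argument type to check, which is why the series keeps plain field access only for trivial reads and converts everything else to helpers.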
......@@ -596,7 +596,7 @@ _xfs_buf_read(
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
status = xfs_buf_iorequest(bp);
if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
if (status || bp->b_error || (flags & XBF_ASYNC))
return status;
return xfs_buf_iowait(bp);
}
......@@ -679,7 +679,6 @@ xfs_buf_read_uncached(
/* set up the buffer for a read IO */
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
XFS_BUF_BUSY(bp);
xfsbdstrat(mp, bp);
error = xfs_buf_iowait(bp);
......@@ -1069,7 +1068,7 @@ xfs_bioerror(
/*
* No need to wait until the buffer is unpinned, we aren't flushing it.
*/
XFS_BUF_ERROR(bp, EIO);
xfs_buf_ioerror(bp, EIO);
/*
* We're calling xfs_buf_ioend, so delete XBF_DONE flag.
......@@ -1094,7 +1093,7 @@ STATIC int
xfs_bioerror_relse(
struct xfs_buf *bp)
{
int64_t fl = XFS_BUF_BFLAGS(bp);
int64_t fl = bp->b_flags;
/*
* No need to wait until the buffer is unpinned.
* We aren't flushing it.
......@@ -1115,7 +1114,7 @@ xfs_bioerror_relse(
* There's no reason to mark error for
* ASYNC buffers.
*/
XFS_BUF_ERROR(bp, EIO);
xfs_buf_ioerror(bp, EIO);
XFS_BUF_FINISH_IOWAIT(bp);
} else {
xfs_buf_relse(bp);
......@@ -1324,7 +1323,7 @@ xfs_buf_offset(
struct page *page;
if (bp->b_flags & XBF_MAPPED)
return XFS_BUF_PTR(bp) + offset;
return bp->b_addr + offset;
offset += bp->b_offset;
page = bp->b_pages[offset >> PAGE_SHIFT];
......@@ -1484,7 +1483,7 @@ xfs_setsize_buftarg_flags(
if (set_blocksize(btp->bt_bdev, sectorsize)) {
xfs_warn(btp->bt_mount,
"Cannot set_blocksize to %u on device %s\n",
sectorsize, XFS_BUFTARG_NAME(btp));
sectorsize, xfs_buf_target_name(btp));
return EINVAL;
}
......@@ -1681,7 +1680,7 @@ xfs_buf_delwri_split(
list_for_each_entry_safe(bp, n, dwq, b_list) {
ASSERT(bp->b_flags & XBF_DELWRI);
if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) {
if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
......
......@@ -228,11 +228,15 @@ extern void xfs_buf_delwri_promote(xfs_buf_t *);
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
#define xfs_buf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
static inline const char *
xfs_buf_target_name(struct xfs_buftarg *target)
{
static char __b[BDEVNAME_SIZE];
return bdevname(target->bt_bdev, __b);
}
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
#define XFS_BUF_ZEROFLAGS(bp) \
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \
XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
......@@ -251,23 +255,14 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
#define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no)
#define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp)
#define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0)
#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
#define XFS_BUF_BUSY(bp) do { } while (0)
#define XFS_BUF_UNBUSY(bp) do { } while (0)
#define XFS_BUF_ISBUSY(bp) (1)
#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
......@@ -276,10 +271,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
#define XFS_BUF_SET_START(bp) do { } while (0)
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt)
#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
......@@ -299,14 +290,13 @@ xfs_buf_set_ref(
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref)
#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
return atomic_read(&bp->b_pin_count);
}
#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait);
#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
#define XFS_BUF_TARGET(bp) ((bp)->b_target)
#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
static inline void xfs_buf_relse(xfs_buf_t *bp)
{
xfs_buf_unlock(bp);
......
......@@ -332,7 +332,7 @@ xfs_sync_fsdata(
* between there and here.
*/
bp = xfs_getsb(mp, 0);
if (XFS_BUF_ISPINNED(bp))
if (xfs_buf_ispinned(bp))
xfs_log_force(mp, 0);
return xfs_bwrite(mp, bp);
......
......@@ -318,10 +318,9 @@ xfs_qm_init_dquot_blk(
int curid, i;
ASSERT(tp);
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(xfs_buf_islocked(bp));
d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);
d = bp->b_addr;
/*
* ID of the first dquot in the block - id's are zero based.
......@@ -403,7 +402,7 @@ xfs_qm_dqalloc(
dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen,
0);
if (!bp || (error = XFS_BUF_GETERROR(bp)))
if (!bp || (error = xfs_buf_geterror(bp)))
goto error1;
/*
* Make a chunk of dquots out of this buffer and log
......@@ -534,13 +533,12 @@ xfs_qm_dqtobp(
return XFS_ERROR(error);
}
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(xfs_buf_islocked(bp));
/*
* calculate the location of the dquot inside the buffer.
*/
ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
ddq = bp->b_addr + dqp->q_bufoffset;
/*
* A simple sanity check in case we got a corrupted dquot...
......@@ -553,7 +551,6 @@ xfs_qm_dqtobp(
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EIO);
}
XFS_BUF_BUSY(bp); /* We dirtied this */
}
*O_bpp = bp;
......@@ -622,7 +619,6 @@ xfs_qm_dqread(
* this particular dquot was repaired. We still aren't afraid to
* brelse it because we have the changes incore.
*/
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(xfs_buf_islocked(bp));
xfs_trans_brelse(tp, bp);
......@@ -1204,7 +1200,7 @@ xfs_qm_dqflush(
/*
* Calculate the location of the dquot inside the buffer.
*/
ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
ddqp = bp->b_addr + dqp->q_bufoffset;
/*
* A simple sanity check in case we got a corrupted dquot..
......@@ -1240,7 +1236,7 @@ xfs_qm_dqflush(
* If the buffer is pinned then push on the log so we won't
* get stuck waiting in the write for too long.
*/
if (XFS_BUF_ISPINNED(bp)) {
if (xfs_buf_ispinned(bp)) {
trace_xfs_dqflush_force(dqp);
xfs_log_force(mp, 0);
}
......@@ -1447,7 +1443,7 @@ xfs_qm_dqflock_pushbuf_wait(
goto out_lock;
if (XFS_BUF_ISDELAYWRITE(bp)) {
if (XFS_BUF_ISPINNED(bp))
if (xfs_buf_ispinned(bp))
xfs_log_force(mp, 0);
xfs_buf_delwri_promote(bp);
wake_up_process(bp->b_target->bt_task);
......
......@@ -1240,7 +1240,7 @@ xfs_qm_reset_dqcounts(
do_div(j, sizeof(xfs_dqblk_t));
ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
ddq = bp->b_addr;
for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
......
......@@ -103,7 +103,7 @@ typedef struct xfs_agf {
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
......@@ -156,7 +156,7 @@ typedef struct xfs_agi {
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, struct xfs_buf **bpp);
......@@ -168,7 +168,7 @@ extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
#define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t))
#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
typedef struct xfs_agfl {
__be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */
......
......@@ -451,8 +451,7 @@ xfs_alloc_read_agfl(
XFS_FSS_TO_BB(mp, 1), 0, &bp);
if (error)
return error;
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
*bpp = bp;
return 0;
......@@ -2116,7 +2115,7 @@ xfs_read_agf(
if (!*bpp)
return 0;
ASSERT(!XFS_BUF_GETERROR(*bpp));
ASSERT(!(*bpp)->b_error);
agf = XFS_BUF_TO_AGF(*bpp);
/*
......@@ -2168,7 +2167,7 @@ xfs_alloc_read_agf(
return error;
if (!*bpp)
return 0;
ASSERT(!XFS_BUF_GETERROR(*bpp));
ASSERT(!(*bpp)->b_error);
agf = XFS_BUF_TO_AGF(*bpp);
pag = xfs_perag_get(mp, agno);
......
......@@ -2121,8 +2121,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
XBF_LOCK | XBF_DONT_BLOCK);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
XFS_BUF_SIZE(bp);
......
......@@ -3383,8 +3383,7 @@ xfs_bmap_local_to_extents(
ASSERT(args.len == 1);
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
ifp->if_bytes);
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
......
......@@ -275,8 +275,7 @@ xfs_btree_dup_cursor(
return error;
}
new->bc_bufs[i] = bp;
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
} else
new->bc_bufs[i] = NULL;
}
......@@ -467,8 +466,7 @@ xfs_btree_get_bufl(
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
return bp;
}
......@@ -491,8 +489,7 @@ xfs_btree_get_bufs(
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
return bp;
}
......@@ -632,7 +629,7 @@ xfs_btree_read_bufl(
mp->m_bsize, lock, &bp))) {
return error;
}
ASSERT(!bp || !XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
if (bp)
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
*bpp = bp;
......@@ -973,8 +970,7 @@ xfs_btree_get_buf_block(
*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
mp->m_bsize, flags);
ASSERT(*bpp);
ASSERT(!XFS_BUF_GETERROR(*bpp));
ASSERT(!xfs_buf_geterror(*bpp));
*block = XFS_BUF_TO_BLOCK(*bpp);
return 0;
......@@ -1006,8 +1002,7 @@ xfs_btree_read_buf_block(
if (error)
return error;
ASSERT(*bpp != NULL);
ASSERT(!XFS_BUF_GETERROR(*bpp));
ASSERT(!xfs_buf_geterror(*bpp));
xfs_btree_set_refs(cur, *bpp);
*block = XFS_BUF_TO_BLOCK(*bpp);
......
......@@ -262,7 +262,7 @@ typedef struct xfs_btree_cur
/*
* Convert from buffer to btree block header.
*/
#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr))
/*
......
......@@ -124,9 +124,9 @@ xfs_buf_item_log_check(
bp = bip->bli_buf;
ASSERT(XFS_BUF_COUNT(bp) > 0);
ASSERT(XFS_BUF_PTR(bp) != NULL);
ASSERT(bp->b_addr != NULL);
orig = bip->bli_orig;
buffer = XFS_BUF_PTR(bp);
buffer = bp->b_addr;
for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
xfs_emerg(bp->b_mount,
......@@ -371,7 +371,6 @@ xfs_buf_item_pin(
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
ASSERT(XFS_BUF_ISBUSY(bip->bli_buf));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
......@@ -479,13 +478,13 @@ xfs_buf_item_trylock(
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
if (XFS_BUF_ISPINNED(bp))
if (xfs_buf_ispinned(bp))
return XFS_ITEM_PINNED;
if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
/* take a reference to the buffer. */
XFS_BUF_HOLD(bp);
xfs_buf_hold(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_trylock(bip);
......@@ -726,7 +725,7 @@ xfs_buf_item_init(
* to have logged.
*/
bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp));
bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif
......@@ -895,7 +894,6 @@ xfs_buf_attach_iodone(
{
xfs_log_item_t *head_lip;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(xfs_buf_islocked(bp));
lip->li_cb = cb;
......@@ -960,7 +958,7 @@ xfs_buf_iodone_callbacks(
static ulong lasttime;
static xfs_buftarg_t *lasttarg;
if (likely(!XFS_BUF_GETERROR(bp)))
if (likely(!xfs_buf_geterror(bp)))
goto do_callbacks;
/*
......@@ -973,14 +971,14 @@ xfs_buf_iodone_callbacks(
goto do_callbacks;
}
if (XFS_BUF_TARGET(bp) != lasttarg ||
if (bp->b_target != lasttarg ||
time_after(jiffies, (lasttime + 5*HZ))) {
lasttime = jiffies;
xfs_alert(mp, "Device %s: metadata write error block 0x%llx",
XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
xfs_buf_target_name(bp->b_target),
(__uint64_t)XFS_BUF_ADDR(bp));
}
lasttarg = XFS_BUF_TARGET(bp);
lasttarg = bp->b_target;
/*
* If the write was asynchronous then no one will be looking for the
......@@ -991,12 +989,11 @@ xfs_buf_iodone_callbacks(
* around.
*/
if (XFS_BUF_ISASYNC(bp)) {
XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */
xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
if (!XFS_BUF_ISSTALE(bp)) {
XFS_BUF_DELAYWRITE(bp);
XFS_BUF_DONE(bp);
XFS_BUF_SET_START(bp);
}
ASSERT(bp->b_iodone != NULL);
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
......@@ -1013,7 +1010,6 @@ xfs_buf_iodone_callbacks(
XFS_BUF_UNDELAYWRITE(bp);
trace_xfs_buf_error_relse(bp, _RET_IP_);
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
do_callbacks:
xfs_buf_do_callbacks(bp);
......
......@@ -2050,7 +2050,7 @@ xfs_da_do_buf(
case 0:
bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
mappedbno, nmapped, 0);
error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
error = bp ? bp->b_error : XFS_ERROR(EIO);
break;
case 1:
case 2:
......@@ -2268,7 +2268,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
dabuf->nbuf = 1;
bp = bps[0];
dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
dabuf->data = XFS_BUF_PTR(bp);
dabuf->data = bp->b_addr;
dabuf->bps[0] = bp;
} else {
dabuf->nbuf = nbuf;
......@@ -2279,7 +2279,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
bp = bps[i];
memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
memcpy((char *)dabuf->data + off, bp->b_addr,
XFS_BUF_COUNT(bp));
}
}
......@@ -2302,7 +2302,7 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf)
for (i = off = 0; i < dabuf->nbuf;
i++, off += XFS_BUF_COUNT(bp)) {
bp = dabuf->bps[i];
memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
memcpy(bp->b_addr, dabuf->data + off,
XFS_BUF_COUNT(bp));
}
}
......@@ -2340,7 +2340,7 @@ xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
if (dabuf->nbuf == 1) {
ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
return;
}
......
......@@ -148,7 +148,7 @@ typedef enum xfs_dinode_fmt {
be32_to_cpu((dip)->di_nextents) : \
be16_to_cpu((dip)->di_anextents))
#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)((bp)->b_addr))
/*
* For block and character special files the 32bit dev_t is stored at the
......
......@@ -202,8 +202,7 @@ xfs_ialloc_inode_init(
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
mp->m_bsize * blks_per_cluster,
XBF_LOCK);
ASSERT(fbuf);
ASSERT(!XFS_BUF_GETERROR(fbuf));
ASSERT(!xfs_buf_geterror(fbuf));
/*
* Initialize all inodes in this buffer and then log them.
......@@ -1486,7 +1485,7 @@ xfs_read_agi(
if (error)
return error;
ASSERT(*bpp && !XFS_BUF_GETERROR(*bpp));
ASSERT(!xfs_buf_geterror(*bpp));
agi = XFS_BUF_TO_AGI(*bpp);
/*
......
......@@ -2473,7 +2473,7 @@ xfs_iflush_cluster(
if (bp->b_iodone) {
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_ERROR(bp,EIO);
xfs_buf_ioerror(bp, EIO);
xfs_buf_ioend(bp, 0);
} else {
XFS_BUF_STALE(bp);
......@@ -2585,7 +2585,7 @@ xfs_iflush(
* If the buffer is pinned then push on the log now so we won't
* get stuck waiting in the write for too long.
*/
if (XFS_BUF_ISPINNED(bp))
if (xfs_buf_ispinned(bp))
xfs_log_force(mp, 0);
/*
......
......@@ -878,7 +878,7 @@ xlog_iodone(xfs_buf_t *bp)
/*
* Race to shutdown the filesystem if we see an error.
*/
if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp,
if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp));
XFS_BUF_STALE(bp);
......@@ -1051,7 +1051,6 @@ xlog_alloc_log(xfs_mount_t *mp,
if (!bp)
goto out_free_log;
bp->b_iodone = xlog_iodone;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(xfs_buf_islocked(bp));
log->l_xbuf = bp;
......@@ -1108,7 +1107,6 @@ xlog_alloc_log(xfs_mount_t *mp,
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
ASSERT(xfs_buf_islocked(iclog->ic_bp));
init_waitqueue_head(&iclog->ic_force_wait);
init_waitqueue_head(&iclog->ic_write_wait);
......@@ -1248,7 +1246,7 @@ xlog_bdstrat(
struct xlog_in_core *iclog = bp->b_fspriv;
if (iclog->ic_state & XLOG_STATE_IOERROR) {
XFS_BUF_ERROR(bp, EIO);
xfs_buf_ioerror(bp, EIO);
XFS_BUF_STALE(bp);
xfs_buf_ioend(bp, 0);
/*
......@@ -1355,7 +1353,6 @@ xlog_sync(xlog_t *log,
XFS_BUF_SET_COUNT(bp, count);
bp->b_fspriv = iclog;
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_SYNCIO;
......@@ -1398,16 +1395,15 @@ xlog_sync(xlog_t *log,
if (split) {
bp = iclog->ic_log->l_xbuf;
XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */
XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
(__psint_t)count), split);
xfs_buf_associate_memory(bp,
(char *)&iclog->ic_header + count, split);
bp->b_fspriv = iclog;
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_SYNCIO;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
bp->b_flags |= XBF_FUA;
dptr = XFS_BUF_PTR(bp);
dptr = bp->b_addr;
/*
* Bump the cycle numbers at the start of each block
* since this part of the buffer is at the start of
......
......@@ -147,7 +147,7 @@ xlog_align(
xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
return XFS_BUF_PTR(bp) + BBTOB(offset);
return bp->b_addr + BBTOB(offset);
}
......@@ -178,9 +178,7 @@ xlog_bread_noalign(
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_READ(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
xfsbdstrat(log->l_mp, bp);
error = xfs_buf_iowait(bp);
......@@ -220,18 +218,18 @@ xlog_bread_offset(
xfs_buf_t *bp,
xfs_caddr_t offset)
{
xfs_caddr_t orig_offset = XFS_BUF_PTR(bp);
xfs_caddr_t orig_offset = bp->b_addr;
int orig_len = bp->b_buffer_length;
int error, error2;
error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks));
error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
if (error)
return error;
error = xlog_bread_noalign(log, blk_no, nbblks, bp);
/* must reset buffer pointer even on error */
error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len);
error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
if (error)
return error;
return error2;
......@@ -266,11 +264,9 @@ xlog_bwrite(
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_HOLD(bp);
xfs_buf_hold(bp);
xfs_buf_lock(bp);
XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
if ((error = xfs_bwrite(log->l_mp, bp)))
xfs_ioerror_alert("xlog_bwrite", log->l_mp,
......@@ -360,7 +356,7 @@ STATIC void
xlog_recover_iodone(
struct xfs_buf *bp)
{
if (XFS_BUF_GETERROR(bp)) {
if (bp->b_error) {
/*
* We're not going to bother about retrying
* this during recovery. One strike!
......@@ -1262,7 +1258,7 @@ xlog_write_log_records(
*/
ealign = round_down(end_block, sectbb);
if (j == 0 && (start_block + endcount > ealign)) {
offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block);
offset = bp->b_addr + BBTOB(ealign - start_block);
error = xlog_bread_offset(log, ealign, sectbb,
bp, offset);
if (error)
......@@ -2135,15 +2131,16 @@ xlog_recover_buffer_pass2(
bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
buf_flags);
if (XFS_BUF_ISERROR(bp)) {
if (!bp)
return XFS_ERROR(ENOMEM);
error = bp->b_error;
if (error) {
xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
bp, buf_f->blf_blkno);
error = XFS_BUF_GETERROR(bp);
xfs_buf_relse(bp);
return error;
}
error = 0;
if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
} else if (buf_f->blf_flags &
......@@ -2227,14 +2224,17 @@ xlog_recover_inode_pass2(
bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
XBF_LOCK);
if (XFS_BUF_ISERROR(bp)) {
if (!bp) {
error = ENOMEM;
goto error;
}
error = bp->b_error;
if (error) {
xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
bp, in_f->ilf_blkno);
error = XFS_BUF_GETERROR(bp);
xfs_buf_relse(bp);
goto error;
}
error = 0;
ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
......@@ -3437,7 +3437,7 @@ xlog_do_recovery_pass(
/*
* Check for header wrapping around physical end-of-log
*/
offset = XFS_BUF_PTR(hbp);
offset = hbp->b_addr;
split_hblks = 0;
wrapped_hblks = 0;
if (blk_no + hblks <= log->l_logBBsize) {
......@@ -3497,7 +3497,7 @@ xlog_do_recovery_pass(
} else {
/* This log record is split across the
* physical end of log */
offset = XFS_BUF_PTR(dbp);
offset = dbp->b_addr;
split_bblks = 0;
if (blk_no != log->l_logBBsize) {
/* some data is before the physical
......
......@@ -1615,7 +1615,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
XFS_BUF_UNDELAYWRITE(sbp);
XFS_BUF_WRITE(sbp);
XFS_BUF_UNASYNC(sbp);
ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
ASSERT(sbp->b_target == mp->m_ddev_targp);
xfsbdstrat(mp, sbp);
error = xfs_buf_iowait(sbp);
if (error)
......@@ -1938,7 +1938,7 @@ xfs_getsb(
xfs_buf_lock(bp);
}
XFS_BUF_HOLD(bp);
xfs_buf_hold(bp);
ASSERT(XFS_BUF_ISDONE(bp));
return bp;
}
......
......@@ -168,7 +168,7 @@ xfs_growfs_rt_alloc(
xfs_trans_cancel(tp, cancelflags);
goto error;
}
memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize);
memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
/*
* Commit the transaction.
......@@ -883,7 +883,7 @@ xfs_rtbuf_get(
if (error) {
return error;
}
ASSERT(bp && !XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
*bpp = bp;
return 0;
}
......@@ -943,7 +943,7 @@ xfs_rtcheck_range(
if (error) {
return error;
}
bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
bufp = bp->b_addr;
/*
* Compute the starting word's address, and starting bit.
*/
......@@ -994,7 +994,7 @@ xfs_rtcheck_range(
if (error) {
return error;
}
b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
b = bufp = bp->b_addr;
word = 0;
} else {
/*
......@@ -1040,7 +1040,7 @@ xfs_rtcheck_range(
if (error) {
return error;
}
b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
b = bufp = bp->b_addr;
word = 0;
} else {
/*
......@@ -1158,7 +1158,7 @@ xfs_rtfind_back(
if (error) {
return error;
}
bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
bufp = bp->b_addr;
/*
* Get the first word's index & point to it.
*/
......@@ -1210,7 +1210,7 @@ xfs_rtfind_back(
if (error) {
return error;
}
bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
bufp = bp->b_addr;
word = XFS_BLOCKWMASK(mp);
b = &bufp[word];
} else {
......@@ -1256,7 +1256,7 @@ xfs_rtfind_back(
if (error) {
return error;
}
bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
bufp = bp->b_addr;
word = XFS_BLOCKWMASK(mp);
b = &bufp[word];
} else {
......@@ -1333,7 +1333,7 @@ xfs_rtfind_forw(
if (error) {
return error;
}
bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
bufp = bp->b_addr;
/*
* Get the first word's index & point to it.
*/
......@@ -1384,7 +1384,7 @@ xfs_rtfind_forw(
if (error) {
return error;
}
b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
b = bufp = bp->b_addr;
word = 0;
} else {
/*
......@@ -1429,7 +1429,7 @@ xfs_rtfind_forw(
if (error) {
return error;
}
b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
b = bufp = bp->b_addr;
word = 0;
} else {
/*
......@@ -1649,7 +1649,7 @@ xfs_rtmodify_range(
if (error) {
return error;
}
bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
bufp = bp->b_addr;
/*
* Compute the starting word's address, and starting bit.
*/
......@@ -1694,7 +1694,7 @@ xfs_rtmodify_range(
if (error) {
return error;
}
first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
first = b = bufp = bp->b_addr;
word = 0;
} else {
/*
......@@ -1734,7 +1734,7 @@ xfs_rtmodify_range(
if (error) {
return error;
}
first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
first = b = bufp = bp->b_addr;
word = 0;
} else {
/*
......@@ -1832,8 +1832,8 @@ xfs_rtmodify_summary(
*/
sp = XFS_SUMPTR(mp, bp, so);
*sp += delta;
xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)XFS_BUF_PTR(bp)),
(uint)((char *)sp - (char *)XFS_BUF_PTR(bp) + sizeof(*sp) - 1));
xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
(uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
return 0;
}
......
......@@ -47,7 +47,7 @@ struct xfs_trans;
#define XFS_SUMOFFSTOBLOCK(mp,s) \
(((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog)
#define XFS_SUMPTR(mp,bp,so) \
((xfs_suminfo_t *)((char *)XFS_BUF_PTR(bp) + \
((xfs_suminfo_t *)((bp)->b_addr + \
(((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp))))
#define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log)
......
......@@ -104,9 +104,9 @@ xfs_ioerror_alert(
xfs_alert(mp,
"I/O error occurred: meta-data dev %s block 0x%llx"
" (\"%s\") error %d buf count %zd",
XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
xfs_buf_target_name(bp->b_target),
(__uint64_t)blkno, func,
XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
bp->b_error, XFS_BUF_COUNT(bp));
}
/*
......@@ -137,8 +137,8 @@ xfs_read_buf(
bp = xfs_buf_read(target, blkno, len, flags);
if (!bp)
return XFS_ERROR(EIO);
error = XFS_BUF_GETERROR(bp);
if (bp && !error && !XFS_FORCED_SHUTDOWN(mp)) {
error = bp->b_error;
if (!error && !XFS_FORCED_SHUTDOWN(mp)) {
*bpp = bp;
} else {
*bpp = NULL;
......
......@@ -509,7 +509,7 @@ static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp)
#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)XFS_BUF_PTR(bp))
#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr))
#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
......
......@@ -299,7 +299,7 @@ xfs_trans_ail_cursor_last(
* Splice the log item list into the AIL at the given LSN. We splice to the
* tail of the given LSN to maintain insert order for push traversals. The
* cursor is optional, allowing repeated updates to the same LSN to avoid
* repeated traversals.
* repeated traversals. This should not be called with an empty list.
*/
static void
xfs_ail_splice(
......@@ -308,50 +308,39 @@ xfs_ail_splice(
struct list_head *list,
xfs_lsn_t lsn)
{
struct xfs_log_item *lip = cur ? cur->item : NULL;
struct xfs_log_item *next_lip;
struct xfs_log_item *lip;
ASSERT(!list_empty(list));
/*
* Get a new cursor if we don't have a placeholder or the existing one
* has been invalidated.
* Use the cursor to determine the insertion point if one is
* provided. If not, or if the one we got is not valid,
* find the place in the AIL where the items belong.
*/
if (!lip || (__psint_t)lip & 1) {
lip = cur ? cur->item : NULL;
if (!lip || (__psint_t) lip & 1)
lip = __xfs_trans_ail_cursor_last(ailp, lsn);
if (!lip) {
/* The list is empty, so just splice and return. */
/*
* If a cursor is provided, we know we're processing the AIL
* in lsn order, and future items to be spliced in will
* follow the last one being inserted now. Update the
* cursor to point to that last item, now while we have a
* reliable pointer to it.
*/
if (cur)
cur->item = NULL;
list_splice(list, &ailp->xa_ail);
return;
}
}
cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);
/*
* Our cursor points to the item we want to insert _after_, so we have
* to update the cursor to point to the end of the list we are splicing
* in so that it points to the correct location for the next splice.
* i.e. before the splice
*
* lsn -> lsn -> lsn + x -> lsn + x ...
* ^
* | cursor points here
*
* After the splice we have:
*
* lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
* ^ ^
* | cursor points here | needs to move here
*
* So we set the cursor to the last item in the list to be spliced
* before we execute the splice, resulting in the cursor pointing to
* the correct item after the splice occurs.
* Finally perform the splice. Unless the AIL was empty,
* lip points to the item in the AIL _after_ which the new
* items should go. If lip is null the AIL was empty, so
* the new items go at the head of the AIL.
*/
if (cur) {
next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
cur->item = next_lip;
}
if (lip)
list_splice(list, &lip->li_ail);
else
list_splice(list, &ailp->xa_ail);
}
/*
......@@ -682,6 +671,7 @@ xfs_trans_ail_update_bulk(
int i;
LIST_HEAD(tmp);
ASSERT(nr_items > 0); /* Not required, but true. */
mlip = xfs_ail_min(ailp);
for (i = 0; i < nr_items; i++) {
......@@ -701,6 +691,7 @@ xfs_trans_ail_update_bulk(
list_add(&lip->li_ail, &tmp);
}
if (!list_empty(&tmp))
xfs_ail_splice(ailp, cur, &tmp, lsn);
if (!mlip_changed) {
......
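
The xfs_ail_splice() rework above keeps a per-traversal cursor so that repeated splices at the same or increasing LSN skip the list walk, and the fix is that the cursor is now updated even when the AIL started out empty. A rough userspace sketch of that contract (single-item splice, simplified singly-linked list; apart from the obvious ones, none of these names or types are the kernel's):

#include <stdio.h>

struct log_item {
	long			lsn;
	struct log_item		*next;
};

struct ail {
	struct log_item		*head;	/* sorted by ascending lsn */
};

struct cursor {
	struct log_item		*item;	/* last item spliced in, or NULL */
};

/* Splice a single new item after the insertion point for @lsn. */
static void ail_splice(struct ail *ail, struct cursor *cur,
		       struct log_item *new, long lsn)
{
	struct log_item *lip = cur ? cur->item : NULL;

	/* No valid cursor: walk the list to find the last item with lsn <= @lsn. */
	if (!lip) {
		struct log_item *p;

		for (p = ail->head; p && p->lsn <= lsn; p = p->next)
			lip = p;
	}

	if (lip) {
		new->next = lip->next;
		lip->next = new;
	} else {
		/* List was empty (or every item has a larger lsn): insert at head. */
		new->next = ail->head;
		ail->head = new;
	}

	/*
	 * The fix: update the cursor unconditionally, so the next splice at
	 * the same or a higher lsn starts from here instead of from scratch.
	 */
	if (cur)
		cur->item = new;
}

int main(void)
{
	struct ail ail = { NULL };
	struct cursor cur = { NULL };
	struct log_item a = { 10, NULL }, b = { 10, NULL }, c = { 20, NULL };

	ail_splice(&ail, &cur, &a, 10);	/* AIL was empty: cursor still gets set */
	ail_splice(&ail, &cur, &b, 10);	/* reuses the cursor, no traversal */
	ail_splice(&ail, &cur, &c, 20);

	for (struct log_item *p = ail.head; p; p = p->next)
		printf("lsn %ld\n", p->lsn);
	return 0;
}

The kernel version splices a whole list and uses the circular list_head primitives, but the cursor rule is the same: point it at the last item inserted, regardless of whether an insertion point was found.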
......@@ -54,7 +54,7 @@ xfs_trans_buf_item_match(
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
blip = (struct xfs_buf_log_item *)lidp->lid_item;
if (blip->bli_item.li_type == XFS_LI_BUF &&
XFS_BUF_TARGET(blip->bli_buf) == target &&
blip->bli_buf->b_target == target &&
XFS_BUF_ADDR(blip->bli_buf) == blkno &&
XFS_BUF_COUNT(blip->bli_buf) == len)
return blip->bli_buf;
......@@ -80,7 +80,6 @@ _xfs_trans_bjoin(
{
struct xfs_buf_log_item *bip;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == NULL);
/*
......@@ -194,7 +193,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
return NULL;
}
ASSERT(!XFS_BUF_GETERROR(bp));
ASSERT(!bp->b_error);
_xfs_trans_bjoin(tp, bp, 1);
trace_xfs_trans_get_buf(bp->b_fspriv);
......@@ -293,10 +292,10 @@ xfs_trans_read_buf(
return (flags & XBF_TRYLOCK) ?
EAGAIN : XFS_ERROR(ENOMEM);
if (XFS_BUF_GETERROR(bp) != 0) {
if (bp->b_error) {
error = bp->b_error;
xfs_ioerror_alert("xfs_trans_read_buf", mp,
bp, blkno);
error = XFS_BUF_GETERROR(bp);
xfs_buf_relse(bp);
return error;
}
......@@ -330,7 +329,7 @@ xfs_trans_read_buf(
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bp->b_fspriv != NULL);
ASSERT((XFS_BUF_ISERROR(bp)) == 0);
ASSERT(!bp->b_error);
if (!(XFS_BUF_ISDONE(bp))) {
trace_xfs_trans_read_buf_io(bp, _RET_IP_);
ASSERT(!XFS_BUF_ISASYNC(bp));
......@@ -386,10 +385,9 @@ xfs_trans_read_buf(
return (flags & XBF_TRYLOCK) ?
0 : XFS_ERROR(ENOMEM);
}
if (XFS_BUF_GETERROR(bp) != 0) {
if (bp->b_error) {
error = bp->b_error;
XFS_BUF_SUPER_STALE(bp);
error = XFS_BUF_GETERROR(bp);
xfs_ioerror_alert("xfs_trans_read_buf", mp,
bp, blkno);
if (tp->t_flags & XFS_TRANS_DIRTY)
......@@ -430,7 +428,7 @@ xfs_trans_read_buf(
if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
xfs_notice(mp, "about to pop assert, bp == 0x%p", bp);
#endif
ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
ASSERT((bp->b_flags & (XBF_STALE|XBF_DELWRI)) !=
(XBF_STALE|XBF_DELWRI));
trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
......@@ -581,7 +579,6 @@ xfs_trans_bhold(xfs_trans_t *tp,
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
......@@ -602,7 +599,6 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
......@@ -631,7 +627,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
......@@ -702,7 +697,6 @@ xfs_trans_binval(
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
......@@ -774,7 +768,6 @@ xfs_trans_inode_buf(
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
......@@ -798,7 +791,6 @@ xfs_trans_stale_inode_buf(
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
......@@ -823,7 +815,6 @@ xfs_trans_inode_alloc_buf(
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
......@@ -851,7 +842,6 @@ xfs_trans_dquot_buf(
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(type == XFS_BLF_UDQUOT_BUF ||
......
......@@ -83,7 +83,9 @@ xfs_readlink_bmap(
bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
error = XFS_BUF_GETERROR(bp);
if (!bp)
return XFS_ERROR(ENOMEM);
error = bp->b_error;
if (error) {
xfs_ioerror_alert("xfs_readlink",
ip->i_mount, bp, XFS_BUF_ADDR(bp));
......@@ -94,7 +96,7 @@ xfs_readlink_bmap(
byte_cnt = pathlen;
pathlen -= byte_cnt;
memcpy(link, XFS_BUF_PTR(bp), byte_cnt);
memcpy(link, bp->b_addr, byte_cnt);
xfs_buf_relse(bp);
}
......@@ -1648,13 +1650,13 @@ xfs_symlink(
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
BTOBB(byte_cnt), 0);
ASSERT(bp && !XFS_BUF_GETERROR(bp));
ASSERT(!xfs_buf_geterror(bp));
if (pathlen < byte_cnt) {
byte_cnt = pathlen;
}
pathlen -= byte_cnt;
memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt);
memcpy(bp->b_addr, cur_chunk, byte_cnt);
cur_chunk += byte_cnt;
xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1);
......@@ -1999,7 +2001,7 @@ xfs_zero_remaining_bytes(
mp, bp, XFS_BUF_ADDR(bp));
break;
}
memset(XFS_BUF_PTR(bp) +
memset(bp->b_addr +
(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
0, lastoffset - offset + 1);
XFS_BUF_UNDONE(bp);
......