Commit a6ddeee3 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'xfs-for-linus-v3.13-rc5' of git://oss.sgi.com/xfs/xfs

Pull xfs bugfixes from Ben Myers:
 "This contains fixes for some asserts
   related to project quotas, a memory leak, a hang when disabling group or
   project quotas before disabling user quotas, Dave's email address, several
   fixes for the alignment of file allocation to stripe unit/width geometry, a
   fix for an assertion with xfs_zero_remaining_bytes, and the behavior of
   metadata writeback in the face of IO errors.

   Details:
   - fix memory leak in xfs_dir2_node_removename
   - fix quota assertion in xfs_setattr_size
   - fix quota assertions in xfs_qm_vop_create_dqattach
   - fix for hang when disabling group and project quotas before
     disabling user quotas
   - fix Dave Chinner's email address in MAINTAINERS
   - fix for file allocation alignment
   - fix for assertion in xfs_buf_stale by removing xfsbdstrat
   - fix for alignment with swalloc mount option
   - fix for "retry forever" semantics on IO errors"

* tag 'xfs-for-linus-v3.13-rc5' of git://oss.sgi.com/xfs/xfs:
  xfs: abort metadata writeback on permanent errors
  xfs: swalloc doesn't align allocations properly
  xfs: remove xfsbdstrat error
  xfs: align initial file allocations correctly
  MAINTAINERS: fix incorrect mail address of XFS maintainer
  xfs: fix infinite loop by detaching the group/project hints from user dquot
  xfs: fix assertion failure at xfs_setattr_nonsize
  xfs: fix false assertion at xfs_qm_vop_create_dqattach
  xfs: fix memory leak in xfs_dir2_node_removename
parents 40b64acd ac8809f9
...@@ -9588,7 +9588,7 @@ F: drivers/xen/*swiotlb* ...@@ -9588,7 +9588,7 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM XFS FILESYSTEM
P: Silicon Graphics Inc P: Silicon Graphics Inc
M: Dave Chinner <dchinner@fromorbit.com> M: Dave Chinner <david@fromorbit.com>
M: Ben Myers <bpm@sgi.com> M: Ben Myers <bpm@sgi.com>
M: xfs@oss.sgi.com M: xfs@oss.sgi.com
L: xfs@oss.sgi.com L: xfs@oss.sgi.com
......
...@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent( ...@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
* blocks at the end of the file which do not start at the previous data block, * blocks at the end of the file which do not start at the previous data block,
* we will try to align the new blocks at stripe unit boundaries. * we will try to align the new blocks at stripe unit boundaries.
* *
* Returns 0 in bma->aeof if the file (fork) is empty as any new write will be * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
* at, or past the EOF. * at, or past the EOF.
*/ */
STATIC int STATIC int
...@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof( ...@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
bma->aeof = 0; bma->aeof = 0;
error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
&is_empty); &is_empty);
if (error || is_empty) if (error)
return error; return error;
if (is_empty) {
bma->aeof = 1;
return 0;
}
/* /*
* Check if we are allocation or past the last extent, or at least into * Check if we are allocation or past the last extent, or at least into
* the last delayed allocated extent. * the last delayed allocated extent.
...@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc( ...@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
int isaligned; int isaligned;
int tryagain; int tryagain;
int error; int error;
int stripe_align;
ASSERT(ap->length); ASSERT(ap->length);
mp = ap->ip->i_mount; mp = ap->ip->i_mount;
/* stripe alignment for allocation is determined by mount parameters */
stripe_align = 0;
if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
stripe_align = mp->m_swidth;
else if (mp->m_dalign)
stripe_align = mp->m_dalign;
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) { if (unlikely(align)) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
...@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc( ...@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
ASSERT(!error); ASSERT(!error);
ASSERT(ap->length); ASSERT(ap->length);
} }
nullfb = *ap->firstblock == NULLFSBLOCK; nullfb = *ap->firstblock == NULLFSBLOCK;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
if (nullfb) { if (nullfb) {
...@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc( ...@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
*/ */
if (!ap->flist->xbf_low && ap->aeof) { if (!ap->flist->xbf_low && ap->aeof) {
if (!ap->offset) { if (!ap->offset) {
args.alignment = mp->m_dalign; args.alignment = stripe_align;
atype = args.type; atype = args.type;
isaligned = 1; isaligned = 1;
/* /*
...@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc( ...@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
* of minlen+alignment+slop doesn't go up * of minlen+alignment+slop doesn't go up
* between the calls. * between the calls.
*/ */
if (blen > mp->m_dalign && blen <= args.maxlen) if (blen > stripe_align && blen <= args.maxlen)
nextminlen = blen - mp->m_dalign; nextminlen = blen - stripe_align;
else else
nextminlen = args.minlen; nextminlen = args.minlen;
if (nextminlen + mp->m_dalign > args.minlen + 1) if (nextminlen + stripe_align > args.minlen + 1)
args.minalignslop = args.minalignslop =
nextminlen + mp->m_dalign - nextminlen + stripe_align -
args.minlen - 1; args.minlen - 1;
else else
args.minalignslop = 0; args.minalignslop = 0;
...@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc( ...@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
*/ */
args.type = atype; args.type = atype;
args.fsbno = ap->blkno; args.fsbno = ap->blkno;
args.alignment = mp->m_dalign; args.alignment = stripe_align;
args.minlen = nextminlen; args.minlen = nextminlen;
args.minalignslop = 0; args.minalignslop = 0;
isaligned = 1; isaligned = 1;
......
...@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes( ...@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNWRITE(bp); XFS_BUF_UNWRITE(bp);
XFS_BUF_READ(bp); XFS_BUF_READ(bp);
XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
xfsbdstrat(mp, bp);
if (XFS_FORCED_SHUTDOWN(mp)) {
error = XFS_ERROR(EIO);
break;
}
xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp); error = xfs_buf_iowait(bp);
if (error) { if (error) {
xfs_buf_ioerror_alert(bp, xfs_buf_ioerror_alert(bp,
...@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes( ...@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNDONE(bp); XFS_BUF_UNDONE(bp);
XFS_BUF_UNREAD(bp); XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp); XFS_BUF_WRITE(bp);
xfsbdstrat(mp, bp);
if (XFS_FORCED_SHUTDOWN(mp)) {
error = XFS_ERROR(EIO);
break;
}
xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp); error = xfs_buf_iowait(bp);
if (error) { if (error) {
xfs_buf_ioerror_alert(bp, xfs_buf_ioerror_alert(bp,
......
...@@ -698,7 +698,11 @@ xfs_buf_read_uncached( ...@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
bp->b_flags |= XBF_READ; bp->b_flags |= XBF_READ;
bp->b_ops = ops; bp->b_ops = ops;
xfsbdstrat(target->bt_mount, bp); if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
xfs_buf_relse(bp);
return NULL;
}
xfs_buf_iorequest(bp);
xfs_buf_iowait(bp); xfs_buf_iowait(bp);
return bp; return bp;
} }
...@@ -1089,7 +1093,7 @@ xfs_bioerror( ...@@ -1089,7 +1093,7 @@ xfs_bioerror(
* This is meant for userdata errors; metadata bufs come with * This is meant for userdata errors; metadata bufs come with
* iodone functions attached, so that we can track down errors. * iodone functions attached, so that we can track down errors.
*/ */
STATIC int int
xfs_bioerror_relse( xfs_bioerror_relse(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
...@@ -1152,7 +1156,7 @@ xfs_bwrite( ...@@ -1152,7 +1156,7 @@ xfs_bwrite(
ASSERT(xfs_buf_islocked(bp)); ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE; bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
xfs_bdstrat_cb(bp); xfs_bdstrat_cb(bp);
...@@ -1164,25 +1168,6 @@ xfs_bwrite( ...@@ -1164,25 +1168,6 @@ xfs_bwrite(
return error; return error;
} }
/*
* Wrapper around bdstrat so that we can stop data from going to disk in case
* we are shutting down the filesystem. Typically user data goes thru this
* path; one of the exceptions is the superblock.
*/
void
xfsbdstrat(
struct xfs_mount *mp,
struct xfs_buf *bp)
{
if (XFS_FORCED_SHUTDOWN(mp)) {
trace_xfs_bdstrat_shut(bp, _RET_IP_);
xfs_bioerror_relse(bp);
return;
}
xfs_buf_iorequest(bp);
}
STATIC void STATIC void
_xfs_buf_ioend( _xfs_buf_ioend(
xfs_buf_t *bp, xfs_buf_t *bp,
...@@ -1516,6 +1501,12 @@ xfs_wait_buftarg( ...@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
struct xfs_buf *bp; struct xfs_buf *bp;
bp = list_first_entry(&dispose, struct xfs_buf, b_lru); bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru); list_del_init(&bp->b_lru);
if (bp->b_flags & XBF_WRITE_FAIL) {
xfs_alert(btp->bt_mount,
"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
"Please run xfs_repair to determine the extent of the problem.",
(long long)bp->b_bn);
}
xfs_buf_rele(bp); xfs_buf_rele(bp);
} }
if (loop++ != 0) if (loop++ != 0)
...@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit( ...@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
blk_start_plug(&plug); blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, io_list, b_list) { list_for_each_entry_safe(bp, n, io_list, b_list) {
bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
bp->b_flags |= XBF_WRITE; bp->b_flags |= XBF_WRITE;
if (!wait) { if (!wait) {
......
...@@ -45,6 +45,7 @@ typedef enum { ...@@ -45,6 +45,7 @@ typedef enum {
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */
/* I/O hints for the BIO layer */ /* I/O hints for the BIO layer */
#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
...@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t; ...@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_ASYNC, "ASYNC" }, \ { XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \ { XBF_DONE, "DONE" }, \
{ XBF_STALE, "STALE" }, \ { XBF_STALE, "STALE" }, \
{ XBF_WRITE_FAIL, "WRITE_FAIL" }, \
{ XBF_SYNCIO, "SYNCIO" }, \ { XBF_SYNCIO, "SYNCIO" }, \
{ XBF_FUA, "FUA" }, \ { XBF_FUA, "FUA" }, \
{ XBF_FLUSH, "FLUSH" }, \ { XBF_FLUSH, "FLUSH" }, \
...@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t; ...@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \ { _XBF_DELWRI_Q, "DELWRI_Q" }, \
{ _XBF_COMPOUND, "COMPOUND" } { _XBF_COMPOUND, "COMPOUND" }
/* /*
* Internal state flags. * Internal state flags.
*/ */
...@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *); ...@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */ /* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp); extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
...@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, ...@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
#define xfs_buf_zero(bp, off, len) \ #define xfs_buf_zero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
extern int xfs_bioerror_relse(struct xfs_buf *);
static inline int xfs_buf_geterror(xfs_buf_t *bp) static inline int xfs_buf_geterror(xfs_buf_t *bp)
{ {
return bp ? bp->b_error : ENOMEM; return bp ? bp->b_error : ENOMEM;
...@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void); ...@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_ZEROFLAGS(bp) \ #define XFS_BUF_ZEROFLAGS(bp) \
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
XBF_WRITE_FAIL))
void xfs_buf_stale(struct xfs_buf *bp); void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
......
...@@ -496,6 +496,14 @@ xfs_buf_item_unpin( ...@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
} }
} }
/*
* Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
* seconds so as to not spam logs too much on repeated detection of the same
* buffer being bad..
*/
DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
STATIC uint STATIC uint
xfs_buf_item_push( xfs_buf_item_push(
struct xfs_log_item *lip, struct xfs_log_item *lip,
...@@ -524,6 +532,14 @@ xfs_buf_item_push( ...@@ -524,6 +532,14 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip); trace_xfs_buf_item_push(bip);
/* has a previous flush failed due to IO errors? */
if ((bp->b_flags & XBF_WRITE_FAIL) &&
___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
xfs_warn(bp->b_target->bt_mount,
"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
(long long)bp->b_bn);
}
if (!xfs_buf_delwri_queue(bp, buffer_list)) if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING; rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp); xfs_buf_unlock(bp);
...@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks( ...@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
if (!XFS_BUF_ISSTALE(bp)) { if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; bp->b_flags |= XBF_WRITE | XBF_ASYNC |
XBF_DONE | XBF_WRITE_FAIL;
xfs_buf_iorequest(bp); xfs_buf_iorequest(bp);
} else { } else {
xfs_buf_relse(bp); xfs_buf_relse(bp);
......
...@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup( ...@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
*/ */
int /* error */ int /* error */
xfs_dir2_node_removename( xfs_dir2_node_removename(
xfs_da_args_t *args) /* operation arguments */ struct xfs_da_args *args) /* operation arguments */
{ {
xfs_da_state_blk_t *blk; /* leaf block */ struct xfs_da_state_blk *blk; /* leaf block */
int error; /* error return value */ int error; /* error return value */
int rval; /* operation return value */ int rval; /* operation return value */
xfs_da_state_t *state; /* btree cursor */ struct xfs_da_state *state; /* btree cursor */
trace_xfs_dir2_node_removename(args); trace_xfs_dir2_node_removename(args);
...@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename( ...@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
state->mp = args->dp->i_mount; state->mp = args->dp->i_mount;
state->blocksize = state->mp->m_dirblksize; state->blocksize = state->mp->m_dirblksize;
state->node_ents = state->mp->m_dir_node_ents; state->node_ents = state->mp->m_dir_node_ents;
/*
* Look up the entry we're deleting, set up the cursor. /* Look up the entry we're deleting, set up the cursor. */
*/
error = xfs_da3_node_lookup_int(state, &rval); error = xfs_da3_node_lookup_int(state, &rval);
if (error) if (error)
rval = error; goto out_free;
/*
* Didn't find it, upper layer screwed up. /* Didn't find it, upper layer screwed up. */
*/
if (rval != EEXIST) { if (rval != EEXIST) {
xfs_da_state_free(state); error = rval;
return rval; goto out_free;
} }
blk = &state->path.blk[state->path.active - 1]; blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
ASSERT(state->extravalid); ASSERT(state->extravalid);
...@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename( ...@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
&state->extrablk, &rval); &state->extrablk, &rval);
if (error) if (error)
return error; goto out_free;
/* /*
* Fix the hash values up the btree. * Fix the hash values up the btree.
*/ */
...@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename( ...@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
*/ */
if (!error) if (!error)
error = xfs_dir2_node_to_leaf(state); error = xfs_dir2_node_to_leaf(state);
out_free:
xfs_da_state_free(state); xfs_da_state_free(state);
return error; return error;
} }
......
...@@ -618,7 +618,8 @@ xfs_setattr_nonsize( ...@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
} }
if (!gid_eq(igid, gid)) { if (!gid_eq(igid, gid)) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(!XFS_IS_PQUOTA_ON(mp)); ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
!XFS_IS_PQUOTA_ON(mp));
ASSERT(mask & ATTR_GID); ASSERT(mask & ATTR_GID);
ASSERT(gdqp); ASSERT(gdqp);
olddquot2 = xfs_qm_vop_chown(tp, ip, olddquot2 = xfs_qm_vop_chown(tp, ip,
......
...@@ -193,7 +193,10 @@ xlog_bread_noalign( ...@@ -193,7 +193,10 @@ xlog_bread_noalign(
bp->b_io_length = nbblks; bp->b_io_length = nbblks;
bp->b_error = 0; bp->b_error = 0;
xfsbdstrat(log->l_mp, bp); if (XFS_FORCED_SHUTDOWN(log->l_mp))
return XFS_ERROR(EIO);
xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp); error = xfs_buf_iowait(bp);
if (error) if (error)
xfs_buf_ioerror_alert(bp, __func__); xfs_buf_ioerror_alert(bp, __func__);
...@@ -4397,7 +4400,13 @@ xlog_do_recover( ...@@ -4397,7 +4400,13 @@ xlog_do_recover(
XFS_BUF_READ(bp); XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp); XFS_BUF_UNASYNC(bp);
bp->b_ops = &xfs_sb_buf_ops; bp->b_ops = &xfs_sb_buf_ops;
xfsbdstrat(log->l_mp, bp);
if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
xfs_buf_relse(bp);
return XFS_ERROR(EIO);
}
xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp); error = xfs_buf_iowait(bp);
if (error) { if (error) {
xfs_buf_ioerror_alert(bp, __func__); xfs_buf_ioerror_alert(bp, __func__);
......
...@@ -134,8 +134,6 @@ xfs_qm_dqpurge( ...@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
{ {
struct xfs_mount *mp = dqp->q_mount; struct xfs_mount *mp = dqp->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo; struct xfs_quotainfo *qi = mp->m_quotainfo;
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
xfs_dqlock(dqp); xfs_dqlock(dqp);
if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
...@@ -143,21 +141,6 @@ xfs_qm_dqpurge( ...@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
return EAGAIN; return EAGAIN;
} }
/*
* If this quota has a hint attached, prepare for releasing it now.
*/
gdqp = dqp->q_gdquot;
if (gdqp) {
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
pdqp = dqp->q_pdquot;
if (pdqp) {
xfs_dqlock(pdqp);
dqp->q_pdquot = NULL;
}
dqp->dq_flags |= XFS_DQ_FREEING; dqp->dq_flags |= XFS_DQ_FREEING;
xfs_dqflock(dqp); xfs_dqflock(dqp);
...@@ -206,11 +189,47 @@ xfs_qm_dqpurge( ...@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
XFS_STATS_DEC(xs_qm_dquot_unused); XFS_STATS_DEC(xs_qm_dquot_unused);
xfs_qm_dqdestroy(dqp); xfs_qm_dqdestroy(dqp);
return 0;
}
/*
* Release the group or project dquot pointers the user dquots maybe carrying
* around as a hint, and proceed to purge the user dquot cache if requested.
*/
STATIC int
xfs_qm_dqpurge_hints(
struct xfs_dquot *dqp,
void *data)
{
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
uint flags = *((uint *)data);
xfs_dqlock(dqp);
if (dqp->dq_flags & XFS_DQ_FREEING) {
xfs_dqunlock(dqp);
return EAGAIN;
}
/* If this quota has a hint attached, prepare for releasing it now */
gdqp = dqp->q_gdquot;
if (gdqp)
dqp->q_gdquot = NULL;
pdqp = dqp->q_pdquot;
if (pdqp)
dqp->q_pdquot = NULL;
xfs_dqunlock(dqp);
if (gdqp) if (gdqp)
xfs_qm_dqput(gdqp); xfs_qm_dqrele(gdqp);
if (pdqp) if (pdqp)
xfs_qm_dqput(pdqp); xfs_qm_dqrele(pdqp);
if (flags & XFS_QMOPT_UQUOTA)
return xfs_qm_dqpurge(dqp, NULL);
return 0; return 0;
} }
...@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all( ...@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
struct xfs_mount *mp, struct xfs_mount *mp,
uint flags) uint flags)
{ {
if (flags & XFS_QMOPT_UQUOTA) /*
xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); * We have to release group/project dquot hint(s) from the user dquot
* at first if they are there, otherwise we would run into an infinite
* loop while walking through radix tree to purge other type of dquots
* since their refcount is not zero if the user dquot refers to them
* as hint.
*
* Call the special xfs_qm_dqpurge_hints() will end up go through the
* general xfs_qm_dqpurge() against user dquot cache if requested.
*/
xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
if (flags & XFS_QMOPT_GQUOTA) if (flags & XFS_QMOPT_GQUOTA)
xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
if (flags & XFS_QMOPT_PQUOTA) if (flags & XFS_QMOPT_PQUOTA)
...@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach( ...@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(mp)); ASSERT(XFS_IS_QUOTA_RUNNING(mp));
if (udqp) { if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_udquot == NULL);
ASSERT(XFS_IS_UQUOTA_ON(mp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
ip->i_udquot = xfs_qm_dqhold(udqp); ip->i_udquot = xfs_qm_dqhold(udqp);
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
} }
if (gdqp) { if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL); ASSERT(ip->i_gdquot == NULL);
ASSERT(XFS_IS_GQUOTA_ON(mp));
ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
ip->i_gdquot = xfs_qm_dqhold(gdqp); ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
} }
if (pdqp) { if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL); ASSERT(ip->i_pdquot == NULL);
ASSERT(XFS_IS_PQUOTA_ON(mp));
ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
ip->i_pdquot = xfs_qm_dqhold(pdqp); ip->i_pdquot = xfs_qm_dqhold(pdqp);
......
...@@ -314,7 +314,18 @@ xfs_trans_read_buf_map( ...@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
ASSERT(bp->b_iodone == NULL); ASSERT(bp->b_iodone == NULL);
XFS_BUF_READ(bp); XFS_BUF_READ(bp);
bp->b_ops = ops; bp->b_ops = ops;
xfsbdstrat(tp->t_mountp, bp);
/*
* XXX(hch): clean up the error handling here to be less
* of a mess..
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
trace_xfs_bdstrat_shut(bp, _RET_IP_);
xfs_bioerror_relse(bp);
} else {
xfs_buf_iorequest(bp);
}
error = xfs_buf_iowait(bp); error = xfs_buf_iowait(bp);
if (error) { if (error) {
xfs_buf_ioerror_alert(bp, __func__); xfs_buf_ioerror_alert(bp, __func__);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment