Commit f30f656e authored by Christoph Hellwig, committed by Chandan Babu R

xfs: split xfs_mod_freecounter

xfs_mod_freecounter has two entirely separate code paths for adding or
subtracting from the free counters.  Only the subtract case looks at the
rsvd flag and can return an error.

Split xfs_mod_freecounter into separate helpers for subtracting or
adding the freecounter, and remove all the impossible to reach error
handling for the addition case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
parent 5e1e4d4f
...@@ -963,9 +963,7 @@ xfs_ag_shrink_space( ...@@ -963,9 +963,7 @@ xfs_ag_shrink_space(
* Disable perag reservations so it doesn't cause the allocation request * Disable perag reservations so it doesn't cause the allocation request
* to fail. We'll reestablish reservation before we return. * to fail. We'll reestablish reservation before we return.
*/ */
error = xfs_ag_resv_free(pag); xfs_ag_resv_free(pag);
if (error)
return error;
/* internal log shouldn't also show up in the free space btrees */ /* internal log shouldn't also show up in the free space btrees */
error = xfs_alloc_vextent_exact_bno(&args, error = xfs_alloc_vextent_exact_bno(&args,
......
...@@ -126,14 +126,13 @@ xfs_ag_resv_needed( ...@@ -126,14 +126,13 @@ xfs_ag_resv_needed(
} }
/* Clean out a reservation */ /* Clean out a reservation */
static int static void
__xfs_ag_resv_free( __xfs_ag_resv_free(
struct xfs_perag *pag, struct xfs_perag *pag,
enum xfs_ag_resv_type type) enum xfs_ag_resv_type type)
{ {
struct xfs_ag_resv *resv; struct xfs_ag_resv *resv;
xfs_extlen_t oldresv; xfs_extlen_t oldresv;
int error;
trace_xfs_ag_resv_free(pag, type, 0); trace_xfs_ag_resv_free(pag, type, 0);
...@@ -149,30 +148,19 @@ __xfs_ag_resv_free( ...@@ -149,30 +148,19 @@ __xfs_ag_resv_free(
oldresv = resv->ar_orig_reserved; oldresv = resv->ar_orig_reserved;
else else
oldresv = resv->ar_reserved; oldresv = resv->ar_reserved;
error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true); xfs_add_fdblocks(pag->pag_mount, oldresv);
resv->ar_reserved = 0; resv->ar_reserved = 0;
resv->ar_asked = 0; resv->ar_asked = 0;
resv->ar_orig_reserved = 0; resv->ar_orig_reserved = 0;
if (error)
trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
return error;
} }
/* Free a per-AG reservation. */ /* Free a per-AG reservation. */
int void
xfs_ag_resv_free( xfs_ag_resv_free(
struct xfs_perag *pag) struct xfs_perag *pag)
{ {
int error; __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
int err2; __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
error = __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
if (err2 && !error)
error = err2;
return error;
} }
static int static int
...@@ -216,7 +204,7 @@ __xfs_ag_resv_init( ...@@ -216,7 +204,7 @@ __xfs_ag_resv_init(
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL)) if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
error = -ENOSPC; error = -ENOSPC;
else else
error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true); error = xfs_dec_fdblocks(mp, hidden_space, true);
if (error) { if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_); error, _RET_IP_);
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#ifndef __XFS_AG_RESV_H__ #ifndef __XFS_AG_RESV_H__
#define __XFS_AG_RESV_H__ #define __XFS_AG_RESV_H__
int xfs_ag_resv_free(struct xfs_perag *pag); void xfs_ag_resv_free(struct xfs_perag *pag);
int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp); int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type); bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
......
...@@ -79,7 +79,7 @@ xfs_prealloc_blocks( ...@@ -79,7 +79,7 @@ xfs_prealloc_blocks(
} }
/* /*
* The number of blocks per AG that we withhold from xfs_mod_fdblocks to * The number of blocks per AG that we withhold from xfs_dec_fdblocks to
* guarantee that we can refill the AGFL prior to allocating space in a nearly * guarantee that we can refill the AGFL prior to allocating space in a nearly
* full AG. Although the space described by the free space btrees, the * full AG. Although the space described by the free space btrees, the
* blocks used by the freesp btrees themselves, and the blocks owned by the * blocks used by the freesp btrees themselves, and the blocks owned by the
...@@ -89,7 +89,7 @@ xfs_prealloc_blocks( ...@@ -89,7 +89,7 @@ xfs_prealloc_blocks(
* until the fs goes down, we subtract this many AG blocks from the incore * until the fs goes down, we subtract this many AG blocks from the incore
* fdblocks to ensure user allocation does not overcommit the space the * fdblocks to ensure user allocation does not overcommit the space the
* filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to
* withhold space from xfs_mod_fdblocks, so we do not account for that here. * withhold space from xfs_dec_fdblocks, so we do not account for that here.
*/ */
#define XFS_ALLOCBT_AGFL_RESERVE 4 #define XFS_ALLOCBT_AGFL_RESERVE 4
......
...@@ -1985,10 +1985,11 @@ xfs_bmap_add_extent_delay_real( ...@@ -1985,10 +1985,11 @@ xfs_bmap_add_extent_delay_real(
} }
/* adjust for changes in reserved delayed indirect blocks */ /* adjust for changes in reserved delayed indirect blocks */
if (da_new != da_old) { if (da_new < da_old) {
ASSERT(state == 0 || da_new < da_old); xfs_add_fdblocks(mp, da_old - da_new);
error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), } else if (da_new > da_old) {
false); ASSERT(state == 0);
error = xfs_dec_fdblocks(mp, da_new - da_old, false);
} }
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
...@@ -2690,8 +2691,8 @@ xfs_bmap_add_extent_hole_delay( ...@@ -2690,8 +2691,8 @@ xfs_bmap_add_extent_hole_delay(
} }
if (oldlen != newlen) { if (oldlen != newlen) {
ASSERT(oldlen > newlen); ASSERT(oldlen > newlen);
xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
false);
/* /*
* Nothing to do for disk quota accounting here. * Nothing to do for disk quota accounting here.
*/ */
...@@ -4110,11 +4111,11 @@ xfs_bmapi_reserve_delalloc( ...@@ -4110,11 +4111,11 @@ xfs_bmapi_reserve_delalloc(
indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0); ASSERT(indlen > 0);
error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); error = xfs_dec_fdblocks(mp, alen, false);
if (error) if (error)
goto out_unreserve_quota; goto out_unreserve_quota;
error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); error = xfs_dec_fdblocks(mp, indlen, false);
if (error) if (error)
goto out_unreserve_blocks; goto out_unreserve_blocks;
...@@ -4142,7 +4143,7 @@ xfs_bmapi_reserve_delalloc( ...@@ -4142,7 +4143,7 @@ xfs_bmapi_reserve_delalloc(
return 0; return 0;
out_unreserve_blocks: out_unreserve_blocks:
xfs_mod_fdblocks(mp, alen, false); xfs_add_fdblocks(mp, alen);
out_unreserve_quota: out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp)) if (XFS_IS_QUOTA_ON(mp))
xfs_quota_unreserve_blkres(ip, alen); xfs_quota_unreserve_blkres(ip, alen);
...@@ -4928,7 +4929,7 @@ xfs_bmap_del_extent_delay( ...@@ -4928,7 +4929,7 @@ xfs_bmap_del_extent_delay(
ASSERT(got_endoff >= del_endoff); ASSERT(got_endoff >= del_endoff);
if (isrt) if (isrt)
xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount)); xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
/* /*
* Update the inode delalloc counter now and wait to update the * Update the inode delalloc counter now and wait to update the
...@@ -5015,7 +5016,7 @@ xfs_bmap_del_extent_delay( ...@@ -5015,7 +5016,7 @@ xfs_bmap_del_extent_delay(
if (!isrt) if (!isrt)
da_diff += del->br_blockcount; da_diff += del->br_blockcount;
if (da_diff) { if (da_diff) {
xfs_mod_fdblocks(mp, da_diff, false); xfs_add_fdblocks(mp, da_diff);
xfs_mod_delalloc(mp, -da_diff); xfs_mod_delalloc(mp, -da_diff);
} }
return error; return error;
......
...@@ -517,7 +517,7 @@ xchk_fscounters( ...@@ -517,7 +517,7 @@ xchk_fscounters(
/* /*
* If the filesystem is not frozen, the counter summation calls above * If the filesystem is not frozen, the counter summation calls above
* can race with xfs_mod_freecounter, which subtracts a requested space * can race with xfs_dec_freecounter, which subtracts a requested space
* reservation from the counter and undoes the subtraction if that made * reservation from the counter and undoes the subtraction if that made
* the counter go negative. Therefore, it's possible to see negative * the counter go negative. Therefore, it's possible to see negative
* values here, and we should only flag that as a corruption if we * values here, and we should only flag that as a corruption if we
......
...@@ -968,9 +968,7 @@ xrep_reset_perag_resv( ...@@ -968,9 +968,7 @@ xrep_reset_perag_resv(
ASSERT(sc->tp); ASSERT(sc->tp);
sc->flags &= ~XREP_RESET_PERAG_RESV; sc->flags &= ~XREP_RESET_PERAG_RESV;
error = xfs_ag_resv_free(sc->sa.pag); xfs_ag_resv_free(sc->sa.pag);
if (error)
goto out;
error = xfs_ag_resv_init(sc->sa.pag, sc->tp); error = xfs_ag_resv_init(sc->sa.pag, sc->tp);
if (error == -ENOSPC) { if (error == -ENOSPC) {
xfs_err(sc->mp, xfs_err(sc->mp,
...@@ -979,7 +977,6 @@ xrep_reset_perag_resv( ...@@ -979,7 +977,6 @@ xrep_reset_perag_resv(
error = 0; error = 0;
} }
out:
return error; return error;
} }
......
...@@ -213,10 +213,8 @@ xfs_growfs_data_private( ...@@ -213,10 +213,8 @@ xfs_growfs_data_private(
struct xfs_perag *pag; struct xfs_perag *pag;
pag = xfs_perag_get(mp, id.agno); pag = xfs_perag_get(mp, id.agno);
error = xfs_ag_resv_free(pag); xfs_ag_resv_free(pag);
xfs_perag_put(pag); xfs_perag_put(pag);
if (error)
return error;
} }
/* /*
* Reserve AG metadata blocks. ENOSPC here does not mean there * Reserve AG metadata blocks. ENOSPC here does not mean there
...@@ -385,14 +383,14 @@ xfs_reserve_blocks( ...@@ -385,14 +383,14 @@ xfs_reserve_blocks(
*/ */
if (mp->m_resblks > request) { if (mp->m_resblks > request) {
lcounter = mp->m_resblks_avail - request; lcounter = mp->m_resblks_avail - request;
if (lcounter > 0) { /* release unused blocks */ if (lcounter > 0) { /* release unused blocks */
fdblks_delta = lcounter; fdblks_delta = lcounter;
mp->m_resblks_avail -= lcounter; mp->m_resblks_avail -= lcounter;
} }
mp->m_resblks = request; mp->m_resblks = request;
if (fdblks_delta) { if (fdblks_delta) {
spin_unlock(&mp->m_sb_lock); spin_unlock(&mp->m_sb_lock);
error = xfs_mod_fdblocks(mp, fdblks_delta, 0); xfs_add_fdblocks(mp, fdblks_delta);
spin_lock(&mp->m_sb_lock); spin_lock(&mp->m_sb_lock);
} }
...@@ -428,9 +426,9 @@ xfs_reserve_blocks( ...@@ -428,9 +426,9 @@ xfs_reserve_blocks(
*/ */
fdblks_delta = min(free, delta); fdblks_delta = min(free, delta);
spin_unlock(&mp->m_sb_lock); spin_unlock(&mp->m_sb_lock);
error = xfs_mod_fdblocks(mp, -fdblks_delta, 0); error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
if (!error) if (!error)
xfs_mod_fdblocks(mp, fdblks_delta, 0); xfs_add_fdblocks(mp, fdblks_delta);
spin_lock(&mp->m_sb_lock); spin_lock(&mp->m_sb_lock);
} }
out: out:
...@@ -556,24 +554,13 @@ xfs_fs_reserve_ag_blocks( ...@@ -556,24 +554,13 @@ xfs_fs_reserve_ag_blocks(
/* /*
* Free space reserved for per-AG metadata. * Free space reserved for per-AG metadata.
*/ */
int void
xfs_fs_unreserve_ag_blocks( xfs_fs_unreserve_ag_blocks(
struct xfs_mount *mp) struct xfs_mount *mp)
{ {
xfs_agnumber_t agno; xfs_agnumber_t agno;
struct xfs_perag *pag; struct xfs_perag *pag;
int error = 0;
int err2;
for_each_perag(mp, agno, pag) { for_each_perag(mp, agno, pag)
err2 = xfs_ag_resv_free(pag); xfs_ag_resv_free(pag);
if (err2 && !error)
error = err2;
}
if (error)
xfs_warn(mp,
"Error %d freeing per-AG metadata reserve pool.", error);
return error;
} }
...@@ -12,6 +12,6 @@ int xfs_reserve_blocks(struct xfs_mount *mp, uint64_t request); ...@@ -12,6 +12,6 @@ int xfs_reserve_blocks(struct xfs_mount *mp, uint64_t request);
int xfs_fs_goingdown(struct xfs_mount *mp, uint32_t inflags); int xfs_fs_goingdown(struct xfs_mount *mp, uint32_t inflags);
int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp); int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp);
int xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp); void xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
#endif /* __XFS_FSOPS_H__ */ #endif /* __XFS_FSOPS_H__ */
...@@ -1136,16 +1136,44 @@ xfs_fs_writable( ...@@ -1136,16 +1136,44 @@ xfs_fs_writable(
return true; return true;
} }
/* Adjust m_fdblocks or m_frextents. */ void
xfs_add_freecounter(
struct xfs_mount *mp,
struct percpu_counter *counter,
uint64_t delta)
{
bool has_resv_pool = (counter == &mp->m_fdblocks);
uint64_t res_used;
/*
* If the reserve pool is depleted, put blocks back into it first.
* Most of the time the pool is full.
*/
if (!has_resv_pool || mp->m_resblks == mp->m_resblks_avail) {
percpu_counter_add(counter, delta);
return;
}
spin_lock(&mp->m_sb_lock);
res_used = mp->m_resblks - mp->m_resblks_avail;
if (res_used > delta) {
mp->m_resblks_avail += delta;
} else {
delta -= res_used;
mp->m_resblks_avail = mp->m_resblks;
percpu_counter_add(counter, delta);
}
spin_unlock(&mp->m_sb_lock);
}
int int
xfs_mod_freecounter( xfs_dec_freecounter(
struct xfs_mount *mp, struct xfs_mount *mp,
struct percpu_counter *counter, struct percpu_counter *counter,
int64_t delta, uint64_t delta,
bool rsvd) bool rsvd)
{ {
int64_t lcounter; int64_t lcounter;
long long res_used;
uint64_t set_aside = 0; uint64_t set_aside = 0;
s32 batch; s32 batch;
bool has_resv_pool; bool has_resv_pool;
...@@ -1155,31 +1183,6 @@ xfs_mod_freecounter( ...@@ -1155,31 +1183,6 @@ xfs_mod_freecounter(
if (rsvd) if (rsvd)
ASSERT(has_resv_pool); ASSERT(has_resv_pool);
if (delta > 0) {
/*
* If the reserve pool is depleted, put blocks back into it
* first. Most of the time the pool is full.
*/
if (likely(!has_resv_pool ||
mp->m_resblks == mp->m_resblks_avail)) {
percpu_counter_add(counter, delta);
return 0;
}
spin_lock(&mp->m_sb_lock);
res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
if (res_used > delta) {
mp->m_resblks_avail += delta;
} else {
delta -= res_used;
mp->m_resblks_avail = mp->m_resblks;
percpu_counter_add(counter, delta);
}
spin_unlock(&mp->m_sb_lock);
return 0;
}
/* /*
* Taking blocks away, need to be more accurate the closer we * Taking blocks away, need to be more accurate the closer we
* are to zero. * are to zero.
...@@ -1207,7 +1210,7 @@ xfs_mod_freecounter( ...@@ -1207,7 +1210,7 @@ xfs_mod_freecounter(
*/ */
if (has_resv_pool) if (has_resv_pool)
set_aside = xfs_fdblocks_unavailable(mp); set_aside = xfs_fdblocks_unavailable(mp);
percpu_counter_add_batch(counter, delta, batch); percpu_counter_add_batch(counter, -((int64_t)delta), batch);
if (__percpu_counter_compare(counter, set_aside, if (__percpu_counter_compare(counter, set_aside,
XFS_FDBLOCKS_BATCH) >= 0) { XFS_FDBLOCKS_BATCH) >= 0) {
/* we had space! */ /* we had space! */
...@@ -1219,11 +1222,11 @@ xfs_mod_freecounter( ...@@ -1219,11 +1222,11 @@ xfs_mod_freecounter(
* that took us to ENOSPC. * that took us to ENOSPC.
*/ */
spin_lock(&mp->m_sb_lock); spin_lock(&mp->m_sb_lock);
percpu_counter_add(counter, -delta); percpu_counter_add(counter, delta);
if (!has_resv_pool || !rsvd) if (!has_resv_pool || !rsvd)
goto fdblocks_enospc; goto fdblocks_enospc;
lcounter = (long long)mp->m_resblks_avail + delta; lcounter = (long long)mp->m_resblks_avail - delta;
if (lcounter >= 0) { if (lcounter >= 0) {
mp->m_resblks_avail = lcounter; mp->m_resblks_avail = lcounter;
spin_unlock(&mp->m_sb_lock); spin_unlock(&mp->m_sb_lock);
......
...@@ -562,19 +562,30 @@ xfs_fdblocks_unavailable( ...@@ -562,19 +562,30 @@ xfs_fdblocks_unavailable(
return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks); return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
} }
int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter, int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
int64_t delta, bool rsvd); uint64_t delta, bool rsvd);
void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
uint64_t delta);
static inline int static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved) bool reserved)
{ {
return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved); return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved);
} }
static inline int static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
xfs_mod_frextents(struct xfs_mount *mp, int64_t delta)
{ {
return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false); xfs_add_freecounter(mp, &mp->m_fdblocks, delta);
}
static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
{
return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false);
}
static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
{
xfs_add_freecounter(mp, &mp->m_frextents, delta);
} }
extern int xfs_readsb(xfs_mount_t *, int); extern int xfs_readsb(xfs_mount_t *, int);
......
...@@ -1882,11 +1882,7 @@ xfs_remount_ro( ...@@ -1882,11 +1882,7 @@ xfs_remount_ro(
xfs_inodegc_stop(mp); xfs_inodegc_stop(mp);
/* Free the per-AG metadata reservation pool. */ /* Free the per-AG metadata reservation pool. */
error = xfs_fs_unreserve_ag_blocks(mp); xfs_fs_unreserve_ag_blocks(mp);
if (error) {
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return error;
}
/* /*
* Before we sync the metadata, we need to free up the reserve block * Before we sync the metadata, we need to free up the reserve block
......
...@@ -3069,7 +3069,6 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent); ...@@ -3069,7 +3069,6 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical); DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed); DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error);
DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error); DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
/* refcount tracepoint classes */ /* refcount tracepoint classes */
......
...@@ -163,7 +163,7 @@ xfs_trans_reserve( ...@@ -163,7 +163,7 @@ xfs_trans_reserve(
* fail if the count would go below zero. * fail if the count would go below zero.
*/ */
if (blocks > 0) { if (blocks > 0) {
error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd); error = xfs_dec_fdblocks(mp, blocks, rsvd);
if (error != 0) if (error != 0)
return -ENOSPC; return -ENOSPC;
tp->t_blk_res += blocks; tp->t_blk_res += blocks;
...@@ -210,7 +210,7 @@ xfs_trans_reserve( ...@@ -210,7 +210,7 @@ xfs_trans_reserve(
* fail if the count would go below zero. * fail if the count would go below zero.
*/ */
if (rtextents > 0) { if (rtextents > 0) {
error = xfs_mod_frextents(mp, -((int64_t)rtextents)); error = xfs_dec_frextents(mp, rtextents);
if (error) { if (error) {
error = -ENOSPC; error = -ENOSPC;
goto undo_log; goto undo_log;
...@@ -234,7 +234,7 @@ xfs_trans_reserve( ...@@ -234,7 +234,7 @@ xfs_trans_reserve(
undo_blocks: undo_blocks:
if (blocks > 0) { if (blocks > 0) {
xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd); xfs_add_fdblocks(mp, blocks);
tp->t_blk_res = 0; tp->t_blk_res = 0;
} }
return error; return error;
...@@ -593,12 +593,10 @@ xfs_trans_unreserve_and_mod_sb( ...@@ -593,12 +593,10 @@ xfs_trans_unreserve_and_mod_sb(
struct xfs_trans *tp) struct xfs_trans *tp)
{ {
struct xfs_mount *mp = tp->t_mountp; struct xfs_mount *mp = tp->t_mountp;
bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
int64_t blkdelta = tp->t_blk_res; int64_t blkdelta = tp->t_blk_res;
int64_t rtxdelta = tp->t_rtx_res; int64_t rtxdelta = tp->t_rtx_res;
int64_t idelta = 0; int64_t idelta = 0;
int64_t ifreedelta = 0; int64_t ifreedelta = 0;
int error;
/* /*
* Calculate the deltas. * Calculate the deltas.
...@@ -631,10 +629,8 @@ xfs_trans_unreserve_and_mod_sb( ...@@ -631,10 +629,8 @@ xfs_trans_unreserve_and_mod_sb(
} }
/* apply the per-cpu counters */ /* apply the per-cpu counters */
if (blkdelta) { if (blkdelta)
error = xfs_mod_fdblocks(mp, blkdelta, rsvd); xfs_add_fdblocks(mp, blkdelta);
ASSERT(!error);
}
if (idelta) if (idelta)
percpu_counter_add_batch(&mp->m_icount, idelta, percpu_counter_add_batch(&mp->m_icount, idelta,
...@@ -643,10 +639,8 @@ xfs_trans_unreserve_and_mod_sb( ...@@ -643,10 +639,8 @@ xfs_trans_unreserve_and_mod_sb(
if (ifreedelta) if (ifreedelta)
percpu_counter_add(&mp->m_ifree, ifreedelta); percpu_counter_add(&mp->m_ifree, ifreedelta);
if (rtxdelta) { if (rtxdelta)
error = xfs_mod_frextents(mp, rtxdelta); xfs_add_frextents(mp, rtxdelta);
ASSERT(!error);
}
if (!(tp->t_flags & XFS_TRANS_SB_DIRTY)) if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
return; return;
...@@ -682,7 +676,6 @@ xfs_trans_unreserve_and_mod_sb( ...@@ -682,7 +676,6 @@ xfs_trans_unreserve_and_mod_sb(
*/ */
ASSERT(mp->m_sb.sb_imax_pct >= 0); ASSERT(mp->m_sb.sb_imax_pct >= 0);
ASSERT(mp->m_sb.sb_rextslog >= 0); ASSERT(mp->m_sb.sb_rextslog >= 0);
return;
} }
/* Add the given log item to the transaction's list of log items. */ /* Add the given log item to the transaction's list of log items. */
...@@ -1301,9 +1294,9 @@ xfs_trans_reserve_more_inode( ...@@ -1301,9 +1294,9 @@ xfs_trans_reserve_more_inode(
return 0; return 0;
/* Quota failed, give back the new reservation. */ /* Quota failed, give back the new reservation. */
xfs_mod_fdblocks(mp, dblocks, tp->t_flags & XFS_TRANS_RESERVE); xfs_add_fdblocks(mp, dblocks);
tp->t_blk_res -= dblocks; tp->t_blk_res -= dblocks;
xfs_mod_frextents(mp, rtx); xfs_add_frextents(mp, rtx);
tp->t_rtx_res -= rtx; tp->t_rtx_res -= rtx;
return error; return error;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment