Commit 0c842ad4 authored by Christoph Hellwig

xfs: clean up buffer locking helpers

Rename xfs_buf_cond_lock and reverse its return value to match most other
trylock operations in the kernel and in XFS (with the exception of
down_trylock, after which xfs_buf_cond_lock was modelled), and replace
xfs_buf_lock_value with an xfs_buf_islocked helper for use in asserts, or
an open-coded variant in tracing.  Remove the XFS_BUF_* wrappers for all
the locking helpers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent bbb4197c
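For reference, a short sketch of how callers change under the new convention.
It is distilled from the hunks below and is illustrative only, not a standalone
compilable unit; xfs_buf_trylock(), xfs_buf_lock() and xfs_buf_islocked() are
the helpers introduced or kept by this patch.

/* Before: xfs_buf_cond_lock() followed down_trylock(), so 0 meant success. */
if (!xfs_buf_cond_lock(bp)) {
        /* lock acquired */
}

/* After: xfs_buf_trylock() returns nonzero on success, like other trylocks. */
if (xfs_buf_trylock(bp)) {
        /* lock acquired */
}

/* The common "try first, block only if allowed" pattern now reads: */
if (!xfs_buf_trylock(bp)) {
        if (flags & XBF_TRYLOCK)
                return NULL;
        xfs_buf_lock(bp);
}

/* Lock-state assertions use the new macro instead of XFS_BUF_VALUSEMA(): */
ASSERT(xfs_buf_islocked(bp));   /* expands to ((bp)->b_sema.count <= 0) */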
@@ -499,16 +499,14 @@ _xfs_buf_find(
         spin_unlock(&pag->pag_buf_lock);
         xfs_perag_put(pag);

-        if (xfs_buf_cond_lock(bp)) {
-                /* failed, so wait for the lock if requested. */
-                if (!(flags & XBF_TRYLOCK)) {
-                        xfs_buf_lock(bp);
-                        XFS_STATS_INC(xb_get_locked_waited);
-                } else {
+        if (!xfs_buf_trylock(bp)) {
+                if (flags & XBF_TRYLOCK) {
                         xfs_buf_rele(bp);
                         XFS_STATS_INC(xb_busy_locked);
                         return NULL;
                 }
+                xfs_buf_lock(bp);
+                XFS_STATS_INC(xb_get_locked_waited);
         }

         /*
@@ -896,8 +894,8 @@ xfs_buf_rele(
  * to push on stale inode buffers.
  */
 int
-xfs_buf_cond_lock(
-        xfs_buf_t               *bp)
+xfs_buf_trylock(
+        struct xfs_buf          *bp)
 {
         int                     locked;

@@ -907,15 +905,8 @@ xfs_buf_cond_lock(
         else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
                 xfs_log_force(bp->b_target->bt_mount, 0);

-        trace_xfs_buf_cond_lock(bp, _RET_IP_);
-        return locked ? 0 : -EBUSY;
-}
-
-int
-xfs_buf_lock_value(
-        xfs_buf_t               *bp)
-{
-        return bp->b_sema.count;
+        trace_xfs_buf_trylock(bp, _RET_IP_);
+        return locked;
 }

 /*
@@ -929,7 +920,7 @@ xfs_buf_lock_value(
  */
 void
 xfs_buf_lock(
-        xfs_buf_t               *bp)
+        struct xfs_buf          *bp)
 {
         trace_xfs_buf_lock(bp, _RET_IP_);

@@ -950,7 +941,7 @@ xfs_buf_lock(
  */
 void
 xfs_buf_unlock(
-        xfs_buf_t               *bp)
+        struct xfs_buf          *bp)
 {
         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
                 atomic_inc(&bp->b_hold);
@@ -1694,7 +1685,7 @@ xfs_buf_delwri_split(
         list_for_each_entry_safe(bp, n, dwq, b_list) {
                 ASSERT(bp->b_flags & XBF_DELWRI);

-                if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
+                if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) {
                         if (!force &&
                             time_before(jiffies, bp->b_queuetime + age)) {
                                 xfs_buf_unlock(bp);
......
@@ -187,10 +187,11 @@ extern void xfs_buf_free(xfs_buf_t *);
 extern void xfs_buf_rele(xfs_buf_t *);

 /*      Locking and Unlocking Buffers */
-extern int xfs_buf_cond_lock(xfs_buf_t *);
-extern int xfs_buf_lock_value(xfs_buf_t *);
+extern int xfs_buf_trylock(xfs_buf_t *);
 extern void xfs_buf_lock(xfs_buf_t *);
 extern void xfs_buf_unlock(xfs_buf_t *);
+#define xfs_buf_islocked(bp) \
+        ((bp)->b_sema.count <= 0)

 /*      Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
@@ -308,10 +309,6 @@ xfs_buf_set_ref(

 #define XFS_BUF_ISPINNED(bp)    atomic_read(&((bp)->b_pin_count))

-#define XFS_BUF_VALUSEMA(bp)    xfs_buf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp)      (xfs_buf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp)       xfs_buf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x)     xfs_buf_lock(bp)
 #define XFS_BUF_FINISH_IOWAIT(bp)       complete(&bp->b_iowait);

 #define XFS_BUF_SET_TARGET(bp, target)  ((bp)->b_target = (target))
......
@@ -293,7 +293,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
                 __entry->buffer_length = bp->b_buffer_length;
                 __entry->hold = atomic_read(&bp->b_hold);
                 __entry->pincount = atomic_read(&bp->b_pin_count);
-                __entry->lockval = xfs_buf_lock_value(bp);
+                __entry->lockval = bp->b_sema.count;
                 __entry->flags = bp->b_flags;
                 __entry->caller_ip = caller_ip;
         ),
@@ -323,7 +323,7 @@ DEFINE_BUF_EVENT(xfs_buf_bawrite);
 DEFINE_BUF_EVENT(xfs_buf_bdwrite);
 DEFINE_BUF_EVENT(xfs_buf_lock);
 DEFINE_BUF_EVENT(xfs_buf_lock_done);
-DEFINE_BUF_EVENT(xfs_buf_cond_lock);
+DEFINE_BUF_EVENT(xfs_buf_trylock);
 DEFINE_BUF_EVENT(xfs_buf_unlock);
 DEFINE_BUF_EVENT(xfs_buf_iowait);
 DEFINE_BUF_EVENT(xfs_buf_iowait_done);
@@ -366,7 +366,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
                 __entry->flags = flags;
                 __entry->hold = atomic_read(&bp->b_hold);
                 __entry->pincount = atomic_read(&bp->b_pin_count);
-                __entry->lockval = xfs_buf_lock_value(bp);
+                __entry->lockval = bp->b_sema.count;
                 __entry->caller_ip = caller_ip;
         ),
         TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
@@ -409,7 +409,7 @@ TRACE_EVENT(xfs_buf_ioerror,
                 __entry->buffer_length = bp->b_buffer_length;
                 __entry->hold = atomic_read(&bp->b_hold);
                 __entry->pincount = atomic_read(&bp->b_pin_count);
-                __entry->lockval = xfs_buf_lock_value(bp);
+                __entry->lockval = bp->b_sema.count;
                 __entry->error = error;
                 __entry->flags = bp->b_flags;
                 __entry->caller_ip = caller_ip;
@@ -454,7 +454,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
                 __entry->buf_flags = bip->bli_buf->b_flags;
                 __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
                 __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
-                __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
+                __entry->buf_lockval = bip->bli_buf->b_sema.count;
                 __entry->li_desc = bip->bli_item.li_desc;
                 __entry->li_flags = bip->bli_item.li_flags;
         ),
......
@@ -318,7 +318,7 @@ xfs_qm_init_dquot_blk(

         ASSERT(tp);
         ASSERT(XFS_BUF_ISBUSY(bp));
-        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+        ASSERT(xfs_buf_islocked(bp));

         d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);

@@ -534,7 +534,7 @@ xfs_qm_dqtobp(
         }

         ASSERT(XFS_BUF_ISBUSY(bp));
-        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+        ASSERT(xfs_buf_islocked(bp));

         /*
          * calculate the location of the dquot inside the buffer.
@@ -622,7 +622,7 @@ xfs_qm_dqread(
          * brelse it because we have the changes incore.
          */
         ASSERT(XFS_BUF_ISBUSY(bp));
-        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+        ASSERT(xfs_buf_islocked(bp));
         xfs_trans_brelse(tp, bp);

         return (error);
......
@@ -420,7 +420,7 @@ xfs_buf_item_unpin(

         if (freed && stale) {
                 ASSERT(bip->bli_flags & XFS_BLI_STALE);
-                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+                ASSERT(xfs_buf_islocked(bp));
                 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
                 ASSERT(XFS_BUF_ISSTALE(bp));
                 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
@@ -483,7 +483,7 @@ xfs_buf_item_trylock(

         if (XFS_BUF_ISPINNED(bp))
                 return XFS_ITEM_PINNED;
-        if (!XFS_BUF_CPSEMA(bp))
+        if (!xfs_buf_trylock(bp))
                 return XFS_ITEM_LOCKED;

         /* take a reference to the buffer. */
@@ -905,7 +905,7 @@ xfs_buf_attach_iodone(
         xfs_log_item_t  *head_lip;

         ASSERT(XFS_BUF_ISBUSY(bp));
-        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+        ASSERT(xfs_buf_islocked(bp));

         lip->li_cb = cb;
         if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
......
@@ -1059,7 +1059,7 @@ xlog_alloc_log(xfs_mount_t *mp,
         XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
         XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
         ASSERT(XFS_BUF_ISBUSY(bp));
-        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+        ASSERT(xfs_buf_islocked(bp));
         log->l_xbuf = bp;

         spin_lock_init(&log->l_icloglock);
@@ -1090,7 +1090,7 @@ xlog_alloc_log(xfs_mount_t *mp,
                                                 log->l_iclog_size, 0);
                 if (!bp)
                         goto out_free_iclog;
-                if (!XFS_BUF_CPSEMA(bp))
+                if (!xfs_buf_trylock(bp))
                         ASSERT(0);
                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
                 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
@@ -1118,7 +1118,7 @@ xlog_alloc_log(xfs_mount_t *mp,
                 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

                 ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
-                ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
+                ASSERT(xfs_buf_islocked(iclog->ic_bp));
                 init_waitqueue_head(&iclog->ic_force_wait);
                 init_waitqueue_head(&iclog->ic_write_wait);

......
@@ -264,7 +264,7 @@ xlog_bwrite(
         XFS_BUF_ZEROFLAGS(bp);
         XFS_BUF_BUSY(bp);
         XFS_BUF_HOLD(bp);
-        XFS_BUF_PSEMA(bp, PRIBIO);
+        xfs_buf_lock(bp);
         XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
         XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

......
@@ -1941,22 +1941,19 @@ xfs_mod_incore_sb_batch(
  * the superblock buffer if it can be locked without sleeping.
  * If it can't then we'll return NULL.
  */
-xfs_buf_t *
+struct xfs_buf *
 xfs_getsb(
-        xfs_mount_t     *mp,
-        int             flags)
+        struct xfs_mount        *mp,
+        int                     flags)
 {
-        xfs_buf_t       *bp;
+        struct xfs_buf          *bp = mp->m_sb_bp;

-        ASSERT(mp->m_sb_bp != NULL);
-        bp = mp->m_sb_bp;
-        if (flags & XBF_TRYLOCK) {
-                if (!XFS_BUF_CPSEMA(bp)) {
+        if (!xfs_buf_trylock(bp)) {
+                if (flags & XBF_TRYLOCK)
                         return NULL;
-                }
-        } else {
-                XFS_BUF_PSEMA(bp, PRIBIO);
+                xfs_buf_lock(bp);
         }
+
         XFS_BUF_HOLD(bp);
         ASSERT(XFS_BUF_ISDONE(bp));
         return bp;
......
@@ -160,7 +160,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
          */
         bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
         if (bp != NULL) {
-                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+                ASSERT(xfs_buf_islocked(bp));
                 if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
                         XFS_BUF_SUPER_STALE(bp);

@@ -327,7 +327,7 @@ xfs_trans_read_buf(
          */
         bp = xfs_trans_buf_item_match(tp, target, blkno, len);
         if (bp != NULL) {
-                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+                ASSERT(xfs_buf_islocked(bp));
                 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
                 ASSERT((XFS_BUF_ISERROR(bp)) == 0);
......