Commit ca5de404 authored by Nick Piggin, committed by Linus Torvalds

fs: rename buffer trylock

Like the page lock change, this also requires a name change, so convert the
raw test_and_set bitop to a trylock.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 529ae9aa
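
For context: trylock_buffer() inverts the old return convention. test_set_buffer_locked() returned nonzero when the buffer was already locked (i.e. the trylock failed), whereas trylock_buffer() returns nonzero on success, matching trylock_page() and spin_trylock(). A minimal illustrative sketch of the non-blocking pattern (not part of this diff):

	/* Before: nonzero return meant the buffer was already locked (failure). */
	if (test_set_buffer_locked(bh)) {
		/* could not take the lock without blocking */
	}

	/* After: nonzero return means the lock was acquired, like spin_trylock(). */
	if (!trylock_buffer(bh)) {
		/* could not take the lock without blocking */
	}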
fs/buffer.c
@@ -1720,7 +1720,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		 */
 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
 			lock_buffer(bh);
-		} else if (test_set_buffer_locked(bh)) {
+		} else if (!trylock_buffer(bh)) {
 			redirty_page_for_writepage(wbc, page);
 			continue;
 		}
@@ -3000,7 +3000,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 
 		if (rw == SWRITE || rw == SWRITE_SYNC)
 			lock_buffer(bh);
-		else if (test_set_buffer_locked(bh))
+		else if (!trylock_buffer(bh))
 			continue;
 
 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
fs/jbd/commit.c
@@ -221,7 +221,7 @@ static int journal_submit_data_buffers(journal_t *journal,
 		 * blocking lock_buffer().
 		 */
 		if (buffer_dirty(bh)) {
-			if (test_set_buffer_locked(bh)) {
+			if (!trylock_buffer(bh)) {
 				BUFFER_TRACE(bh, "needs blocking lock");
 				spin_unlock(&journal->j_list_lock);
 				/* Write out all data to prevent deadlocks */
fs/ntfs/aops.c
@@ -1194,7 +1194,7 @@ static int ntfs_write_mst_block(struct page *page,
 		tbh = bhs[i];
 		if (!tbh)
 			continue;
-		if (unlikely(test_set_buffer_locked(tbh)))
+		if (!trylock_buffer(tbh))
 			BUG();
 		/* The buffer dirty state is now irrelevant, just clean it. */
 		clear_buffer_dirty(tbh);
fs/ntfs/compress.c
@@ -665,7 +665,7 @@ int ntfs_read_compressed_block(struct page *page)
 	for (i = 0; i < nr_bhs; i++) {
 		struct buffer_head *tbh = bhs[i];
 
-		if (unlikely(test_set_buffer_locked(tbh)))
+		if (!trylock_buffer(tbh))
 			continue;
 		if (unlikely(buffer_uptodate(tbh))) {
 			unlock_buffer(tbh);
fs/ntfs/mft.c
@@ -586,7 +586,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
 		struct buffer_head *tbh = bhs[i_bhs];
 
-		if (unlikely(test_set_buffer_locked(tbh)))
+		if (!trylock_buffer(tbh))
 			BUG();
 		BUG_ON(!buffer_uptodate(tbh));
 		clear_buffer_dirty(tbh);
@@ -779,7 +779,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
 		struct buffer_head *tbh = bhs[i_bhs];
 
-		if (unlikely(test_set_buffer_locked(tbh)))
+		if (!trylock_buffer(tbh))
 			BUG();
 		BUG_ON(!buffer_uptodate(tbh));
 		clear_buffer_dirty(tbh);
fs/reiserfs/inode.c
@@ -2435,7 +2435,7 @@ static int reiserfs_write_full_page(struct page *page,
 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
 			lock_buffer(bh);
 		} else {
-			if (test_set_buffer_locked(bh)) {
+			if (!trylock_buffer(bh)) {
 				redirty_page_for_writepage(wbc, page);
 				continue;
 			}
fs/reiserfs/journal.c
@@ -855,7 +855,7 @@ static int write_ordered_buffers(spinlock_t * lock,
 		jh = JH_ENTRY(list->next);
 		bh = jh->bh;
 		get_bh(bh);
-		if (test_set_buffer_locked(bh)) {
+		if (!trylock_buffer(bh)) {
 			if (!buffer_dirty(bh)) {
 				list_move(&jh->list, &tmp);
 				goto loop_next;
@@ -3871,7 +3871,7 @@ int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
 {
 	PROC_INFO_INC(p_s_sb, journal.prepare);
 
-	if (test_set_buffer_locked(bh)) {
+	if (!trylock_buffer(bh)) {
 		if (!wait)
 			return 0;
 		lock_buffer(bh);
fs/xfs/linux-2.6/xfs_aops.c
@@ -1104,7 +1104,7 @@ xfs_page_state_convert(
 				 * that we are writing into for the first time.
 				 */
 				type = IOMAP_NEW;
-				if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+				if (trylock_buffer(bh)) {
 					ASSERT(buffer_mapped(bh));
 					if (iomap_valid)
 						all_bh = 1;
include/linux/buffer_head.h
@@ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate)
 BUFFER_FNS(Dirty, dirty)
 TAS_BUFFER_FNS(Dirty, dirty)
 BUFFER_FNS(Lock, locked)
-TAS_BUFFER_FNS(Lock, locked)
 BUFFER_FNS(Req, req)
 TAS_BUFFER_FNS(Req, req)
 BUFFER_FNS(Mapped, mapped)
@@ -321,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh)
 		__wait_on_buffer(bh);
 }
 
+static inline int trylock_buffer(struct buffer_head *bh)
+{
+	return likely(!test_and_set_bit(BH_Lock, &bh->b_state));
+}
+
 static inline void lock_buffer(struct buffer_head *bh)
 {
 	might_sleep();
-	if (test_set_buffer_locked(bh))
+	if (!trylock_buffer(bh))
 		__lock_buffer(bh);
 }
 