Commit 87e99511 authored by Christoph Hellwig, committed by Al Viro

kill BH_Ordered flag

Instead of abusing a buffer_head flag just add a variant of
sync_dirty_buffer which allows passing the exact type of write
flag required.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent dad5eb6d
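
For orientation only (not part of the commit): a minimal sketch of how a caller that previously marked a buffer with set_buffer_ordered() would switch to the new helper, following the barrier-fallback pattern the jbd and nilfs hunks below adopt. The function name example_sync_metadata() and the use_barrier parameter are hypothetical; WRITE_SYNC and WRITE_BARRIER are the request flags this patch passes.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical caller: write one dirty metadata buffer synchronously,
 * requesting a barrier when enabled, and fall back to a plain
 * synchronous write if the device reports -EOPNOTSUPP.
 */
static int example_sync_metadata(struct buffer_head *bh, int use_barrier)
{
        int ret;

        set_buffer_dirty(bh);
        if (use_barrier) {
                ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
                if (ret == -EOPNOTSUPP) {
                        /* Barrier not supported: retry without it. */
                        set_buffer_dirty(bh);
                        ret = sync_dirty_buffer(bh);
                }
        } else {
                ret = sync_dirty_buffer(bh);
        }
        return ret;
}
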
@@ -2911,13 +2911,6 @@ int submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(buffer_delay(bh));
 	BUG_ON(buffer_unwritten(bh));
 
-	/*
-	 * Mask in barrier bit for a write (could be either a WRITE or a
-	 * WRITE_SYNC
-	 */
-	if (buffer_ordered(bh) && (rw & WRITE))
-		rw |= WRITE_BARRIER;
-
 	/*
 	 * Only clear out a write error when rewriting
 	 */
@@ -3021,7 +3014,7 @@ EXPORT_SYMBOL(ll_rw_block);
  * and then start new I/O and then wait upon it. The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
 	int ret = 0;
@@ -3030,7 +3023,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(rw, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3036,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	}
 	return ret;
 }
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+	return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
......
@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal,
 	struct buffer_head *bh;
 	journal_header_t *header;
 	int ret;
-	int barrier_done = 0;
 
 	if (is_journal_aborted(journal))
 		return 0;
@@ -137,19 +136,17 @@ static int journal_write_commit_record(journal_t *journal,
 	JBUFFER_TRACE(descriptor, "write commit block");
 	set_buffer_dirty(bh);
 	if (journal->j_flags & JFS_BARRIER) {
-		set_buffer_ordered(bh);
-		barrier_done = 1;
-	}
-	ret = sync_dirty_buffer(bh);
-	if (barrier_done)
-		clear_buffer_ordered(bh);
+		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
 
-	/* is it possible for another commit to fail at roughly
+		/*
+		 * Is it possible for another commit to fail at roughly
 		 * the same time as this one? If so, we don't want to
 		 * trust the barrier flag in the super, but instead want
 		 * to remember if we sent a barrier request
 		 */
-	if (ret == -EOPNOTSUPP && barrier_done) {
+		if (ret == -EOPNOTSUPP) {
 			char b[BDEVNAME_SIZE];
 
 			printk(KERN_WARNING
@@ -165,6 +162,10 @@ static int journal_write_commit_record(journal_t *journal,
 			set_buffer_dirty(bh);
 			ret = sync_dirty_buffer(bh);
 		}
+	} else {
+		ret = sync_dirty_buffer(bh);
+	}
 
 	put_bh(bh);		/* One for getblk() */
 	journal_put_journal_head(descriptor);
......
@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal,
 	struct commit_header *tmp;
 	struct buffer_head *bh;
 	int ret;
-	int barrier_done = 0;
 	struct timespec now = current_kernel_time();
 
 	if (is_journal_aborted(journal))
@@ -136,19 +135,8 @@ static int journal_submit_commit_record(journal_t *journal,
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
-		set_buffer_ordered(bh);
-		barrier_done = 1;
-	}
-	ret = submit_bh(WRITE_SYNC_PLUG, bh);
-	if (barrier_done)
-		clear_buffer_ordered(bh);
-
-	/* is it possible for another commit to fail at roughly
-	 * the same time as this one? If so, we don't want to
-	 * trust the barrier flag in the super, but instead want
-	 * to remember if we sent a barrier request
-	 */
-	if (ret == -EOPNOTSUPP && barrier_done) {
+		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
+		if (ret == -EOPNOTSUPP) {
 			printk(KERN_WARNING
 			       "JBD2: Disabling barriers on %s, "
 			       "not supported by device\n", journal->j_devname);
@@ -162,6 +150,9 @@ static int journal_submit_commit_record(journal_t *journal,
 			clear_buffer_dirty(bh);
 			ret = submit_bh(WRITE_SYNC_PLUG, bh);
 		}
+	} else {
+		ret = submit_bh(WRITE_SYNC_PLUG, bh);
+	}
 	*cbh = bh;
 	return ret;
 }
......
@@ -175,24 +175,24 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int err;
-	int barrier_done = 0;
 
-	if (nilfs_test_opt(sbi, BARRIER)) {
-		set_buffer_ordered(nilfs->ns_sbh[0]);
-		barrier_done = 1;
-	}
 retry:
 	set_buffer_dirty(nilfs->ns_sbh[0]);
-	err = sync_dirty_buffer(nilfs->ns_sbh[0]);
-	if (err == -EOPNOTSUPP && barrier_done) {
+
+	if (nilfs_test_opt(sbi, BARRIER)) {
+		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
+					  WRITE_SYNC | WRITE_BARRIER);
+		if (err == -EOPNOTSUPP) {
 			nilfs_warning(sbi->s_super, __func__,
 				      "barrier-based sync failed. "
 				      "disabling barriers\n");
 			nilfs_clear_opt(sbi, BARRIER);
-		barrier_done = 0;
-		clear_buffer_ordered(nilfs->ns_sbh[0]);
 			goto retry;
 		}
+	} else {
+		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
+	}
 
 	if (unlikely(err)) {
 		printk(KERN_ERR
 		       "NILFS: unable to write superblock (err=%d)\n", err);
......
@@ -32,7 +32,6 @@ enum bh_state_bits {
 	BH_Delay,	/* Buffer is not yet allocated on disk */
 	BH_Boundary,	/* Block is followed by a discontiguity */
 	BH_Write_EIO,	/* I/O error on write */
-	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
 	BH_Quiet,	/* Buffer Error Prinks to be quiet */
@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write)
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)
@@ -183,6 +181,7 @@ void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);
......