Commit 0628003f authored by Jan Kara, committed by Greg Kroah-Hartman

reiserfs: Make cancel_old_flush() reliable


[ Upstream commit 71b0576b ]

Currently, canceling the delayed work that flushes old data via
cancel_old_flush() does not prevent the work from being requeued. Thus,
in theory, new work can be queued after cancel_old_flush() called from
reiserfs_freeze() has run. This will become a larger problem once
flush_old_commits() can requeue the work itself.

Fix the problem by recording in sbi->work_queued that the flushing work
has been canceled and must not be requeued.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5f73ddae
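For context on the mechanism the diff below implements: sbi->work_queued, protected by old_work_lock, becomes a small state flag. It is 0 when no flush is queued, 1 when a flush is queued, and 2 once reiserfs_cancel_old_flush() has run, so the flag stays non-zero and nothing requeues the flush until reiserfs_unfreeze() resets it to 0. The following is only a rough userspace sketch of that pattern; it is not reiserfs code, the names (schedule_flush, cancel_flush, work_state) are invented, and plain pthreads stand in for the kernel delayed-work API (the sketch joins an in-flight worker instead of canceling a pending one), with all scheduling and canceling done from a single thread.

/*
 * cancel_state_sketch.c - hypothetical illustration, not reiserfs code.
 * Build with: gcc -pthread cancel_state_sketch.c
 *
 * State values mirror the patch: 0 = no flush queued, 1 = flush queued,
 * 2 = canceled; nothing may requeue a flush until the state is reset to 0.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int work_state;          /* 0, 1 or 2, see above */
static pthread_t worker;
static int worker_running;

static void *flush_worker(void *arg)
{
        (void)arg;
        sleep(1);               /* stands in for the flush delay */
        puts("flushed old data");
        pthread_mutex_lock(&lock);
        if (work_state == 1)    /* avoid clobbering the cancel state */
                work_state = 0;
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void schedule_flush(void)
{
        pthread_mutex_lock(&lock);
        if (work_state == 0) {  /* neither queued nor canceled */
                work_state = 1;
                pthread_create(&worker, NULL, flush_worker, NULL);
                worker_running = 1;
        }
        pthread_mutex_unlock(&lock);
}

static void cancel_flush(void)
{
        pthread_mutex_lock(&lock);
        work_state = 2;         /* make sure no new flush gets queued */
        pthread_mutex_unlock(&lock);
        if (worker_running) {   /* wait for a flush already in flight */
                pthread_join(worker, NULL);
                worker_running = 0;
        }
}

int main(void)
{
        schedule_flush();
        cancel_flush();         /* from now on schedule_flush() is a no-op */
        schedule_flush();       /* ignored, work_state is still 2 */

        pthread_mutex_lock(&lock);
        work_state = 0;         /* "unfreeze": allow flushing again */
        pthread_mutex_unlock(&lock);

        schedule_flush();       /* queues a worker again */
        cancel_flush();
        return 0;
}

The point of using a distinct value 2, rather than simply clearing the flag, is that a worker finishing while the cancel is in progress checks the flag before clearing it, so it cannot accidentally re-enable scheduling; this matches the "Avoid clobbering the cancel state" check added to flush_old_commits() in the patch.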
...@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, ...@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
* will be requeued because superblock is being shutdown and doesn't * will be requeued because superblock is being shutdown and doesn't
* have MS_ACTIVE set. * have MS_ACTIVE set.
*/ */
cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); reiserfs_cancel_old_flush(sb);
/* wait for all commits to finish */ /* wait for all commits to finish */
cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
......
...@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s, ...@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
struct reiserfs_list_bitmap *, unsigned int); struct reiserfs_list_bitmap *, unsigned int);
void reiserfs_schedule_old_flush(struct super_block *s); void reiserfs_schedule_old_flush(struct super_block *s);
void reiserfs_cancel_old_flush(struct super_block *s);
void add_save_link(struct reiserfs_transaction_handle *th, void add_save_link(struct reiserfs_transaction_handle *th,
struct inode *inode, int truncate); struct inode *inode, int truncate);
int remove_save_link(struct inode *inode, int truncate); int remove_save_link(struct inode *inode, int truncate);
......
...@@ -90,6 +90,8 @@ static void flush_old_commits(struct work_struct *work) ...@@ -90,6 +90,8 @@ static void flush_old_commits(struct work_struct *work)
s = sbi->s_journal->j_work_sb; s = sbi->s_journal->j_work_sb;
spin_lock(&sbi->old_work_lock); spin_lock(&sbi->old_work_lock);
/* Avoid clobbering the cancel state... */
if (sbi->work_queued == 1)
sbi->work_queued = 0; sbi->work_queued = 0;
spin_unlock(&sbi->old_work_lock); spin_unlock(&sbi->old_work_lock);
...@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s) ...@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
spin_unlock(&sbi->old_work_lock); spin_unlock(&sbi->old_work_lock);
} }
static void cancel_old_flush(struct super_block *s) void reiserfs_cancel_old_flush(struct super_block *s)
{ {
struct reiserfs_sb_info *sbi = REISERFS_SB(s); struct reiserfs_sb_info *sbi = REISERFS_SB(s);
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
spin_lock(&sbi->old_work_lock); spin_lock(&sbi->old_work_lock);
sbi->work_queued = 0; /* Make sure no new flushes will be queued */
sbi->work_queued = 2;
spin_unlock(&sbi->old_work_lock); spin_unlock(&sbi->old_work_lock);
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
} }
static int reiserfs_freeze(struct super_block *s) static int reiserfs_freeze(struct super_block *s)
{ {
struct reiserfs_transaction_handle th; struct reiserfs_transaction_handle th;
cancel_old_flush(s); reiserfs_cancel_old_flush(s);
reiserfs_write_lock(s); reiserfs_write_lock(s);
if (!(s->s_flags & MS_RDONLY)) { if (!(s->s_flags & MS_RDONLY)) {
...@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s) ...@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
static int reiserfs_unfreeze(struct super_block *s) static int reiserfs_unfreeze(struct super_block *s)
{ {
struct reiserfs_sb_info *sbi = REISERFS_SB(s);
reiserfs_allow_writes(s); reiserfs_allow_writes(s);
spin_lock(&sbi->old_work_lock);
/* Allow old_work to run again */
sbi->work_queued = 0;
spin_unlock(&sbi->old_work_lock);
return 0; return 0;
} }
...@@ -2187,7 +2196,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) ...@@ -2187,7 +2196,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (sbi->commit_wq) if (sbi->commit_wq)
destroy_workqueue(sbi->commit_wq); destroy_workqueue(sbi->commit_wq);
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); reiserfs_cancel_old_flush(s);
reiserfs_free_bitmap_cache(s); reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s)) if (SB_BUFFER_WITH_SB(s))
......