Commit a3eb51ec authored by Linus Torvalds

Merge branch 'writeback' of git://git.kernel.dk/linux-2.6-block

* 'writeback' of git://git.kernel.dk/linux-2.6-block:
  writeback: fix possible bdi writeback refcounting problem
  writeback: Fix bdi use after free in wb_work_complete()
  writeback: improve scalability of bdi writeback work queues
  writeback: remove smp_mb(), it's not needed with list_add_tail_rcu()
  writeback: use schedule_timeout_interruptible()
  writeback: add comments to bdi_work structure
  writeback: splice dirty inode entries to default bdi on bdi_destroy()
  writeback: separate starting of sync vs opportunistic writeback
  writeback: inline allocation failure handling in bdi_alloc_queue_work()
  writeback: use RCU to protect bdi_list
  writeback: only use bdi_writeback_all() for WB_SYNC_NONE writeout
  fs: Assign bdi in super_block
  writeback: make wb_writeback() take an argument structure
  writeback: merely wakeup flusher thread if work allocation fails for WB_SYNC_NONE
  writeback: get rid of wbc->for_writepages
  fs: remove bdev->bd_inode_backing_dev_info
parents fdaa45e9 1ef7d9aa
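Several patches in this series ("use RCU to protect bdi_list", the smp_mb() removal, the _bh lock conversions below) turn bdi_list into an RCU-protected list: writers still take bdi_lock, but readers only need an RCU read-side critical section. A minimal reader-side sketch under those assumptions; the real walker is bdi_writeback_all() in fs/fs-writeback.c, whose diff is collapsed in this view, so details may differ:

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/backing-dev.h>

/*
 * Illustrative only: walk all registered bdis without taking bdi_lock.
 * Assumes the bdi_list and bdi_has_dirty_io() declarations the patched
 * backing-dev.h exports.
 */
static void example_walk_bdi_list(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		/* ... queue WB_SYNC_NONE writeback work for this bdi ... */
	}
	rcu_read_unlock();
}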
@@ -712,7 +712,6 @@ int afs_writeback_all(struct afs_vnode *vnode)
 		.bdi = mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = LONG_MAX,
-		.for_writepages = 1,
 		.range_cyclic = 1,
 	};
 	int ret;
...
@@ -420,7 +420,6 @@ static void bdev_destroy_inode(struct inode *inode)
 {
 	struct bdev_inode *bdi = BDEV_I(inode);
 
-	bdi->bdev.bd_inode_backing_dev_info = NULL;
 	kmem_cache_free(bdev_cachep, bdi);
 }
...
@@ -1600,6 +1600,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	sb->s_blocksize = 4096;
 	sb->s_blocksize_bits = blksize_bits(4096);
+	sb->s_bdi = &fs_info->bdi;
 
 	/*
 	 * we set the i_size on the btree inode to the max possible int.
...
@@ -740,7 +740,6 @@ int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
 		.nr_to_write = mapping->nrpages * 2,
 		.range_start = start,
 		.range_end = end,
-		.for_writepages = 1,
 	};
 	return btrfs_writepages(mapping, &wbc);
 }
...
[This diff is collapsed in the original view.]
@@ -894,6 +894,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	if (err)
 		goto err_put_conn;
 
+	sb->s_bdi = &fc->bdi;
+
 	/* Handle umasking inside the fuse code */
 	if (sb->s_flags & MS_POSIXACL)
 		fc->dont_mask = 1;
...
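The pattern in the btrfs and fuse hunks above (and the nfs and ubifs hunks below) is the same: a filesystem that owns a private backing_dev_info publishes it through the new sb->s_bdi field, so the writeback code no longer has to derive the backing device from an inode mapping. A sketch for a hypothetical filesystem; the examplefs_* names are invented for illustration:

#include <linux/fs.h>
#include <linux/backing-dev.h>

/*
 * Hypothetical fs-private info; the real counterparts in this commit
 * are fuse_conn, btrfs_fs_info, nfs_server and ubifs_info.
 */
struct examplefs_info {
	struct backing_dev_info bdi;	/* set up via bdi_init()/bdi_register() */
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct examplefs_info *fsi = data;

	/*
	 * Publish our private bdi; this overrides the queue-backed
	 * default that set_bdev_super() installs (see the fs/super.c
	 * hunk below).
	 */
	sb->s_bdi = &fsi->bdi;
	return 0;
}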
@@ -182,9 +182,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	if (sb->s_bdev) {
 		struct backing_dev_info *bdi;
 
-		bdi = sb->s_bdev->bd_inode_backing_dev_info;
-		if (!bdi)
-			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
 		mapping->backing_dev_info = bdi;
 	}
 	inode->i_private = NULL;
...
@@ -220,7 +220,6 @@ static int journal_submit_inode_data_buffers(struct address_space *mapping)
 		.nr_to_write = mapping->nrpages * 2,
 		.range_start = 0,
 		.range_end = i_size_read(mapping->host),
-		.for_writepages = 1,
 	};
 	ret = generic_writepages(mapping, &wbc);
...
@@ -1918,6 +1918,8 @@ static inline void nfs_initialise_sb(struct super_block *sb)
 	if (server->flags & NFS_MOUNT_NOAC)
 		sb->s_flags |= MS_SYNCHRONOUS;
 
+	sb->s_bdi = &server->backing_dev_info;
+
 	nfs_super_set_maxbytes(sb, server->maxfilesize);
 }
...
@@ -1490,7 +1490,6 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
 		.nr_to_write = LONG_MAX,
 		.range_start = 0,
 		.range_end = LLONG_MAX,
-		.for_writepages = 1,
 	};
 	return __nfs_write_mapping(mapping, &wbc, how);
...
@@ -591,9 +591,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
 	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);
 
-	bdi = nilfs->ns_bdev->bd_inode_backing_dev_info;
-	if (!bdi)
-		bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
+	bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
 	nilfs->ns_bdi = bdi ? : &default_backing_dev_info;
 
 	/* Finding last segment */
...
@@ -707,6 +707,12 @@ static int set_bdev_super(struct super_block *s, void *data)
 {
 	s->s_bdev = data;
 	s->s_dev = s->s_bdev->bd_dev;
+
+	/*
+	 * We set the bdi here to the queue backing, file systems can
+	 * overwrite this in ->fill_super()
+	 */
+	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }
...
@@ -27,6 +27,13 @@
  */
 static int __sync_filesystem(struct super_block *sb, int wait)
 {
+	/*
+	 * This should be safe, as we require bdi backing to actually
+	 * write out data in the first place
+	 */
+	if (!sb->s_bdi)
+		return 0;
+
 	/* Avoid doing twice syncing and cache pruning for quota sync */
 	if (!wait) {
 		writeout_quota_sb(sb, -1);
@@ -101,7 +108,7 @@ static void sync_filesystems(int wait)
 		spin_unlock(&sb_lock);
 
 		down_read(&sb->s_umount);
-		if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
+		if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
 			__sync_filesystem(sb, wait);
 		up_read(&sb->s_umount);
...
@@ -54,29 +54,15 @@
  * @nr_to_write: how many dirty pages to write-back
  *
  * This function shrinks UBIFS liability by means of writing back some amount
- * of dirty inodes and their pages. Returns the amount of pages which were
- * written back. The returned value does not include dirty inodes which were
- * synchronized.
+ * of dirty inodes and their pages.
  *
  * Note, this function synchronizes even VFS inodes which are locked
 * (@i_mutex) by the caller of the budgeting function, because write-back does
 * not touch @i_mutex.
 */
-static int shrink_liability(struct ubifs_info *c, int nr_to_write)
+static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
-	int nr_written;
-
-	nr_written = writeback_inodes_sb(c->vfs_sb);
-	if (!nr_written) {
-		/*
-		 * Re-try again but wait on pages/inodes which are being
-		 * written-back concurrently (e.g., by pdflush).
-		 */
-		nr_written = sync_inodes_sb(c->vfs_sb);
-	}
-	dbg_budg("%d pages were written back", nr_written);
-	return nr_written;
+	writeback_inodes_sb(c->vfs_sb);
 }
 
 /**
...
@@ -1980,6 +1980,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	if (err)
 		goto out_bdi;
 
+	sb->s_bdi = &c->bdi;
 	sb->s_fs_info = c;
 	sb->s_magic = UBIFS_SUPER_MAGIC;
 	sb->s_blocksize = UBIFS_BLOCK_SIZE;
...
@@ -59,6 +59,7 @@ struct bdi_writeback {
 struct backing_dev_info {
 	struct list_head bdi_list;
+	struct rcu_head rcu_head;
 	unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
 	unsigned long state;    /* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
@@ -100,7 +101,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
-void bdi_start_writeback(struct writeback_control *wbc);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
 int bdi_writeback_task(struct bdi_writeback *wb);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
...
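The bdi_start_writeback() change in this header means callers no longer build a writeback_control themselves: they name the device and a page count, and the writeback code sets up the WB_SYNC_NONE work internally (merely waking the flusher thread if work allocation fails, per the commit list). An illustrative caller after this series, with an invented function name; compare the balance_dirty_pages() hunk at the end of this commit, which used to open-code the writeback_control:

#include <linux/writeback.h>
#include <linux/backing-dev.h>

/* Sketch of a post-series caller: kick background writeout on one bdi */
static void example_kick_background_writeback(struct backing_dev_info *bdi,
					      long nr_pages)
{
	/* Work allocation and WB_SYNC_NONE setup now happen internally */
	bdi_start_writeback(bdi, nr_pages);
}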
@@ -655,7 +655,6 @@ struct block_device {
 	int bd_invalidated;
 	struct gendisk *bd_disk;
 	struct list_head bd_list;
-	struct backing_dev_info *bd_inode_backing_dev_info;
 	/*
 	 * Private data. You must have bd_claim'ed the block_device
 	 * to use this. NOTE: bd_claim allows an owner to claim
@@ -1343,6 +1342,7 @@ struct super_block {
 	int s_nr_dentry_unused;	/* # of dentry on lru */
 	struct block_device *s_bdev;
+	struct backing_dev_info *s_bdi;
 	struct mtd_info *s_mtd;
 	struct list_head s_instances;
 	struct quota_info s_dquot;	/* Diskquota specific options */
...
@@ -50,7 +50,6 @@ struct writeback_control {
 	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
-	unsigned for_writepages:1;	/* This is a writepages() call */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
 	unsigned more_io:1;		/* more io to be dispatched */
 	/*
@@ -69,8 +68,8 @@ struct writeback_control {
  */
 struct bdi_writeback;
 int inode_wait(void *);
-long writeback_inodes_sb(struct super_block *);
-long sync_inodes_sb(struct super_block *);
+void writeback_inodes_sb(struct super_block *);
+void sync_inodes_sb(struct super_block *);
 void writeback_inodes_wbc(struct writeback_control *wbc);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages);
...
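writeback_inodes_sb() and sync_inodes_sb() now return void, matching the "separate starting of sync vs opportunistic writeback" patch: the former starts WB_SYNC_NONE writeout and reports no progress, the latter is the data-integrity WB_SYNC_ALL variant that waits. A small sketch of a caller; the helper name is hypothetical, while the UBIFS shrink_liability() hunk above is a real in-tree adaptation:

#include <linux/fs.h>
#include <linux/writeback.h>

/* Hypothetical helper showing the sync/opportunistic split */
static void examplefs_writeback(struct super_block *sb, int wait)
{
	if (wait)
		sync_inodes_sb(sb);		/* WB_SYNC_ALL, waits on I/O */
	else
		writeback_inodes_sb(sb);	/* WB_SYNC_NONE, just starts it */
}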
@@ -227,7 +227,6 @@ TRACE_EVENT(ext4_da_writepages,
 		__field( char, nonblocking )
 		__field( char, for_kupdate )
 		__field( char, for_reclaim )
-		__field( char, for_writepages )
 		__field( char, range_cyclic )
 	),
@@ -241,16 +240,15 @@ TRACE_EVENT(ext4_da_writepages,
 		__entry->nonblocking = wbc->nonblocking;
 		__entry->for_kupdate = wbc->for_kupdate;
 		__entry->for_reclaim = wbc->for_reclaim;
-		__entry->for_writepages = wbc->for_writepages;
 		__entry->range_cyclic = wbc->range_cyclic;
 	),
 
-	TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d for_writepages %d range_cyclic %d",
+	TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
 		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write,
 		  __entry->pages_skipped, __entry->range_start,
 		  __entry->range_end, __entry->nonblocking,
 		  __entry->for_kupdate, __entry->for_reclaim,
-		  __entry->for_writepages, __entry->range_cyclic)
+		  __entry->range_cyclic)
 );
 
 TRACE_EVENT(ext4_da_writepages_result,
...
@@ -26,6 +26,12 @@ struct backing_dev_info default_backing_dev_info = {
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
+
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
 DEFINE_SPINLOCK(bdi_lock);
 LIST_HEAD(bdi_list);
 LIST_HEAD(bdi_pending_list);
@@ -284,9 +290,9 @@ static int bdi_start_fn(void *ptr)
 	/*
 	 * Add us to the active bdi_list
 	 */
-	spin_lock(&bdi_lock);
-	list_add(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi_task_init(bdi, wb);
@@ -389,7 +395,7 @@ static int bdi_forker_task(void *ptr)
 		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
 			wb_do_writeback(me, 0);
 
-		spin_lock(&bdi_lock);
+		spin_lock_bh(&bdi_lock);
 
 		/*
 		 * Check if any existing bdi's have dirty data without
@@ -410,7 +416,7 @@ static int bdi_forker_task(void *ptr)
 		if (list_empty(&bdi_pending_list)) {
 			unsigned long wait;
 
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
 			schedule_timeout(wait);
 			try_to_freeze();
@@ -426,7 +432,7 @@ static int bdi_forker_task(void *ptr)
 		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
 				 bdi_list);
 		list_del_init(&bdi->bdi_list);
-		spin_unlock(&bdi_lock);
+		spin_unlock_bh(&bdi_lock);
 
 		wb = &bdi->wb;
 		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
@@ -445,9 +451,9 @@ static int bdi_forker_task(void *ptr)
 			 * a chance to flush other bdi's to free
 			 * memory.
 			 */
-			spin_lock(&bdi_lock);
+			spin_lock_bh(&bdi_lock);
 			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 
 			bdi_flush_io(bdi);
 		}
@@ -456,6 +462,24 @@ static int bdi_forker_task(void *ptr)
 	return 0;
 }
 
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = container_of(head, struct backing_dev_info, rcu_head);
+	INIT_LIST_HEAD(&bdi->bdi_list);
+
+	spin_lock(&bdi_lock);
+	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+	spin_unlock(&bdi_lock);
+
+	/*
+	 * We are now on the pending list, wake up bdi_forker_task()
+	 * to finish the job and add us back to the active bdi_list
+	 */
+	wake_up_process(default_backing_dev_info.wb.task);
+}
+
 /*
  * Add the default flusher task that gets created for any bdi
  * that has dirty data pending writeout
@@ -478,16 +502,29 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
 	 * waiting for previous additions to finish.
 	 */
 	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+		list_del_rcu(&bdi->bdi_list);
 
 		/*
-		 * We are now on the pending list, wake up bdi_forker_task()
-		 * to finish the job and add us back to the active bdi_list
+		 * We must wait for the current RCU period to end before
+		 * moving to the pending list. So schedule that operation
+		 * from an RCU callback.
 		 */
-		wake_up_process(default_backing_dev_info.wb.task);
+		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
 	}
 }
 
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi_lock);
+	list_del_rcu(&bdi->bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	synchronize_rcu();
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
@@ -506,9 +543,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		goto exit;
 	}
 
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi->dev = dev;
@@ -526,9 +563,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 			wb->task = NULL;
 			ret = -ENOMEM;
 
-			spin_lock(&bdi_lock);
-			list_del(&bdi->bdi_list);
-			spin_unlock(&bdi_lock);
+			bdi_remove_from_list(bdi);
 			goto exit;
 		}
 	}
@@ -565,9 +600,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	/*
 	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	spin_lock(&bdi_lock);
-	list_del(&bdi->bdi_list);
-	spin_unlock(&bdi_lock);
+	bdi_remove_from_list(bdi);
 
 	/*
 	 * Finally, kill the kernel threads. We don't need to be RCU
@@ -599,6 +632,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
+	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
@@ -634,7 +668,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
 {
 	int i;
 
-	WARN_ON(bdi_has_dirty_io(bdi));
+	/*
+	 * Splice our entries to the default_backing_dev_info, if this
+	 * bdi disappears
+	 */
+	if (bdi_has_dirty_io(bdi)) {
+		struct bdi_writeback *dst = &default_backing_dev_info.wb;
+
+		spin_lock(&inode_lock);
+		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
+		list_splice(&bdi->wb.b_io, &dst->b_io);
+		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
+		spin_unlock(&inode_lock);
+	}
 
 	bdi_unregister(bdi);
...
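The two delisting paths in the hunks above differ only in how they wait out the RCU grace period: bdi_remove_from_list() runs in a context that may block, so it calls synchronize_rcu() directly, while bdi_add_default_flusher_task() defers the re-insertion to the bdi_add_to_pending() callback via call_rcu(). The blocking shape, reduced to its essentials as a sketch rather than a drop-in replacement:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/backing-dev.h>

/*
 * Sketch: unlink under bdi_lock with the RCU list primitive, then wait
 * for the grace period so no reader can still see the entry.
 */
static void example_remove_bdi(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();	/* all pre-existing RCU readers are done */
}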
@@ -315,7 +315,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
 	int ret = 0;
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 	if (min_ratio > bdi->max_ratio) {
 		ret = -EINVAL;
 	} else {
@@ -327,7 +327,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 			ret = -EINVAL;
 		}
 	}
-	spin_unlock(&bdi_lock);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
@@ -339,14 +339,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 	if (max_ratio > 100)
 		return -EINVAL;
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 	if (bdi->min_ratio > max_ratio) {
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
 		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 	}
-	spin_unlock(&bdi_lock);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
@@ -582,16 +582,8 @@ static void balance_dirty_pages(struct address_space *mapping)
 	if ((laptop_mode && pages_written) ||
 	    (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
 			      + global_page_state(NR_UNSTABLE_NFS))
-			      > background_thresh))) {
-		struct writeback_control wbc = {
-			.bdi = bdi,
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = nr_writeback,
-		};
-
-		bdi_start_writeback(&wbc);
-	}
+			      > background_thresh)))
+		bdi_start_writeback(bdi, nr_writeback);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -1020,12 +1012,10 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	if (wbc->nr_to_write <= 0)
 		return 0;
-	wbc->for_writepages = 1;
 	if (mapping->a_ops->writepages)
 		ret = mapping->a_ops->writepages(mapping, wbc);
 	else
 		ret = generic_writepages(mapping, wbc);
-	wbc->for_writepages = 0;
 	return ret;
 }
...