Commit 551de6f3 authored by Al Viro

Leave superblocks on s_list until the end

We used to remove from s_list and s_instances at the same
time.  So let's *not* do the former and skip superblocks
that have empty s_instances in the loops over s_list.

The next step, of course, will be to get rid of the rescan logic
in those loops.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 1712ac8f
...@@ -568,6 +568,8 @@ static void do_thaw_all(struct work_struct *work) ...@@ -568,6 +568,8 @@ static void do_thaw_all(struct work_struct *work)
spin_lock(&sb_lock); spin_lock(&sb_lock);
restart: restart:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
sb->s_count++; sb->s_count++;
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
down_read(&sb->s_umount); down_read(&sb->s_umount);
......
...@@ -552,6 +552,8 @@ static void prune_dcache(int count) ...@@ -552,6 +552,8 @@ static void prune_dcache(int count)
prune_ratio = unused / count; prune_ratio = unused / count;
spin_lock(&sb_lock); spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_nr_dentry_unused == 0) if (sb->s_nr_dentry_unused == 0)
continue; continue;
sb->s_count++; sb->s_count++;
......
...@@ -40,6 +40,8 @@ static void drop_pagecache(void) ...@@ -40,6 +40,8 @@ static void drop_pagecache(void)
spin_lock(&sb_lock); spin_lock(&sb_lock);
restart: restart:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
sb->s_count++; sb->s_count++;
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
down_read(&sb->s_umount); down_read(&sb->s_umount);
......
...@@ -59,6 +59,8 @@ static int quota_sync_all(int type) ...@@ -59,6 +59,8 @@ static int quota_sync_all(int type)
spin_lock(&sb_lock); spin_lock(&sb_lock);
restart: restart:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (!sb->s_qcop || !sb->s_qcop->quota_sync) if (!sb->s_qcop || !sb->s_qcop->quota_sync)
continue; continue;
......
...@@ -135,6 +135,7 @@ static int __put_super(struct super_block *sb) ...@@ -135,6 +135,7 @@ static int __put_super(struct super_block *sb)
int ret = 0; int ret = 0;
if (!--sb->s_count) { if (!--sb->s_count) {
list_del_init(&sb->s_list);
destroy_super(sb); destroy_super(sb);
ret = 1; ret = 1;
} }
...@@ -151,7 +152,7 @@ static int __put_super(struct super_block *sb) ...@@ -151,7 +152,7 @@ static int __put_super(struct super_block *sb)
int __put_super_and_need_restart(struct super_block *sb) int __put_super_and_need_restart(struct super_block *sb)
{ {
/* check for race with generic_shutdown_super() */ /* check for race with generic_shutdown_super() */
if (list_empty(&sb->s_list)) { if (list_empty(&sb->s_instances)) {
/* super block is removed, need to restart... */ /* super block is removed, need to restart... */
__put_super(sb); __put_super(sb);
return 1; return 1;
...@@ -308,8 +309,7 @@ void generic_shutdown_super(struct super_block *sb) ...@@ -308,8 +309,7 @@ void generic_shutdown_super(struct super_block *sb)
} }
spin_lock(&sb_lock); spin_lock(&sb_lock);
/* should be initialized for __put_super_and_need_restart() */ /* should be initialized for __put_super_and_need_restart() */
list_del_init(&sb->s_list); list_del_init(&sb->s_instances);
list_del(&sb->s_instances);
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
up_write(&sb->s_umount); up_write(&sb->s_umount);
} }
...@@ -400,6 +400,8 @@ void sync_supers(void) ...@@ -400,6 +400,8 @@ void sync_supers(void)
spin_lock(&sb_lock); spin_lock(&sb_lock);
restart: restart:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_op->write_super && sb->s_dirt) { if (sb->s_op->write_super && sb->s_dirt) {
sb->s_count++; sb->s_count++;
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
...@@ -435,6 +437,8 @@ struct super_block * get_super(struct block_device *bdev) ...@@ -435,6 +437,8 @@ struct super_block * get_super(struct block_device *bdev)
spin_lock(&sb_lock); spin_lock(&sb_lock);
rescan: rescan:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_bdev == bdev) { if (sb->s_bdev == bdev) {
sb->s_count++; sb->s_count++;
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
...@@ -471,6 +475,8 @@ struct super_block *get_active_super(struct block_device *bdev) ...@@ -471,6 +475,8 @@ struct super_block *get_active_super(struct block_device *bdev)
spin_lock(&sb_lock); spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_bdev != bdev) if (sb->s_bdev != bdev)
continue; continue;
...@@ -490,6 +496,8 @@ struct super_block * user_get_super(dev_t dev) ...@@ -490,6 +496,8 @@ struct super_block * user_get_super(dev_t dev)
spin_lock(&sb_lock); spin_lock(&sb_lock);
rescan: rescan:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_dev == dev) { if (sb->s_dev == dev) {
sb->s_count++; sb->s_count++;
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
...@@ -600,6 +608,8 @@ static void do_emergency_remount(struct work_struct *work) ...@@ -600,6 +608,8 @@ static void do_emergency_remount(struct work_struct *work)
spin_lock(&sb_lock); spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
sb->s_count++; sb->s_count++;
spin_unlock(&sb_lock); spin_unlock(&sb_lock);
down_write(&sb->s_umount); down_write(&sb->s_umount);
......
...@@ -99,10 +99,13 @@ static void sync_filesystems(int wait) ...@@ -99,10 +99,13 @@ static void sync_filesystems(int wait)
mutex_lock(&mutex); /* Could be down_interruptible */ mutex_lock(&mutex); /* Could be down_interruptible */
spin_lock(&sb_lock); spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) list_for_each_entry(sb, &super_blocks, s_list)
sb->s_need_sync = 1; if (!list_empty(&sb->s_instances))
sb->s_need_sync = 1;
restart: restart:
list_for_each_entry(sb, &super_blocks, s_list) { list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (!sb->s_need_sync) if (!sb->s_need_sync)
continue; continue;
sb->s_need_sync = 0; sb->s_need_sync = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment