Commit f24075bd authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sem2mutex: iprune

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a11f3a05
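
For reviewers unfamiliar with the sem2mutex series: the mechanical pattern the conversion script applies is that a semaphore used purely as a binary lock (declared with DECLARE_MUTEX(), acquired with down() and released with up()) becomes a struct mutex declared with DEFINE_MUTEX() and acquired with mutex_lock()/mutex_unlock(). The sketch below only illustrates that pattern in isolation; the lock and function names are illustrative and not part of this patch.

	#include <asm/semaphore.h>	/* old API: counting semaphore used as a mutex */
	#include <linux/mutex.h>	/* new API: struct mutex */

	/* Before: a semaphore initialised to 1, used only for mutual exclusion. */
	static DECLARE_MUTEX(example_sem);

	static void example_before(void)
	{
		down(&example_sem);		/* may sleep */
		/* ... critical section ... */
		up(&example_sem);
	}

	/* After: the same exclusion with the mutex primitive, which enforces
	 * owner-only unlock and gains debug checks under CONFIG_DEBUG_MUTEXES. */
	static DEFINE_MUTEX(example_mutex);

	static void example_after(void)
	{
		mutex_lock(&example_mutex);	/* may sleep */
		/* ... critical section ... */
		mutex_unlock(&example_mutex);
	}
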
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path. Without this exclusion,
  * by the time prune_icache calls iput for the inode whose pages it has
  * been invalidating, or by the time it calls clear_inode & destroy_inode
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-DECLARE_MUTEX(iprune_sem);
+DEFINE_MUTEX(iprune_mutex);
 
 /*
  * Statistics gathering..
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		/*
 		 * We can reschedule here without worrying about the list's
 		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
+		 * change during umount anymore, and because iprune_mutex keeps
 		 * shrink_icache_memory() away.
 		 */
 		cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
 	int busy;
 	LIST_HEAD(throw_away);
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	inotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&throw_away);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	return busy;
 }
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
 	if (sb) {
 		/*
 		 * no need to lock the super, get_super holds the
-		 * read semaphore so the filesystem cannot go away
+		 * read mutex so the filesystem cannot go away
 		 * under us (->put_super runs with the write lock
 		 * hold).
 		 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&freeable);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	if (current_is_kswapd())
 		mod_page_state(kswapd_inodesteal, reap);
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -54,7 +54,7 @@ int inotify_max_queued_events;
  * Lock ordering:
  *
  * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_sem (synchronize shrink_icache_memory())
+ * iprune_mutex (synchronize shrink_icache_memory())
  * inode_lock (protects the super_block->s_inodes list)
  * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
  * inotify_dev->mutex (protects inotify_device and watches->d_list)
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
  * @list: list of inodes being unmounted (sb->s_inodes)
  *
  * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
  * We temporarily drop inode_lock, however, and CAN block.
  */
 void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list)
 		 * We can safely drop inode_lock here because we hold
 		 * references on both inode and next_i. Also no new inodes
 		 * will be added since the umount has begun. Finally,
-		 * iprune_sem keeps shrink_icache_memory() away.
+		 * iprune_mutex keeps shrink_icache_memory() away.
 		 */
 		spin_unlock(&inode_lock);
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1534,7 +1534,7 @@ extern void destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int remove_suid(struct dentry *);
 extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-extern struct semaphore iprune_sem;
+extern struct mutex iprune_mutex;
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 extern void remove_inode_hash(struct inode *);
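
For context, what the new iprune_mutex serializes is the inode disposal work itself: with it held, dispose_list() (and therefore clear_inode()/destroy_inode() through inode->i_sb->s_op) cannot run in the memory-shrinking path while umount is tearing down the same superblock. A compressed, non-literal sketch of the two call paths as they look after this patch:

	/* Shrinker path (prune_icache), heavily simplified: */
	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	/* ... move unused inodes onto a private "freeable" list ... */
	spin_unlock(&inode_lock);
	dispose_list(&freeable);	/* may call clear_inode()/destroy_inode() */
	mutex_unlock(&iprune_mutex);

	/* Umount path (invalidate_inodes), heavily simplified: */
	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);
	dispose_list(&throw_away);
	mutex_unlock(&iprune_mutex);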