Commit 335e92e8 authored by Jan Kara, committed by Linus Torvalds

vfs: fix possible deadlock in ext2, ext3, ext4 when using xattrs

mb_cache_entry_alloc() was allocating cache entries with GFP_KERNEL.  But
filesystems call this function while holding xattr_sem, so a possible recursion
into the filesystem during reclaim would violate the locking order of xattr_sem
versus transaction start / i_mutex for ext2-4.  Change mb_cache_entry_alloc() so
that filesystems can specify the desired gfp mask, and use GFP_NOFS in all of them.
Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Dave Jones <davej@redhat.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 423bec43
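For context, here is a minimal sketch (not part of the commit) of the calling pattern the new interface supports.  The function example_xattr_cache_insert() is hypothetical; it mirrors the ext2/ext3/ext4 xattr cache-insert paths changed below.  The caller already holds xattr_sem, so the entry must be allocated with GFP_NOFS to keep direct reclaim from re-entering the filesystem.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/buffer_head.h>
#include <linux/mbcache.h>

/*
 * Sketch only: example_xattr_cache_insert() is hypothetical.  The caller
 * holds xattr_sem, so the allocation uses GFP_NOFS -- with GFP_KERNEL,
 * direct reclaim could recurse into the filesystem and take transaction
 * start / i_mutex in the wrong order relative to xattr_sem.
 */
static int example_xattr_cache_insert(struct mb_cache *cache,
                                      struct buffer_head *bh,
                                      unsigned int hash)
{
        struct mb_cache_entry *ce;
        int error;

        ce = mb_cache_entry_alloc(cache, GFP_NOFS);     /* new gfp_t argument */
        if (!ce)
                return -ENOMEM;
        error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
        if (error)
                mb_cache_entry_free(ce);
        else
                mb_cache_entry_release(ce);
        return error;
}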
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -835,7 +835,7 @@ ext2_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext2_xattr_cache);
+	ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
 	if (!ce)
 		return -ENOMEM;
 	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -1126,7 +1126,7 @@ ext3_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext3_xattr_cache);
+	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1386,7 +1386,7 @@ ext4_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext4_xattr_cache);
+	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -399,11 +399,11 @@ mb_cache_destroy(struct mb_cache *cache)
  * if no more memory was available.
  */
 struct mb_cache_entry *
-mb_cache_entry_alloc(struct mb_cache *cache)
+mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
 	struct mb_cache_entry *ce;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
+	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
 	if (ce) {
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -34,7 +34,7 @@ void mb_cache_destroy(struct mb_cache *);
 
 /* Functions on cache entries */
 
-struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *);
+struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
 int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
 			  sector_t, unsigned int[]);
 void mb_cache_entry_release(struct mb_cache_entry *);