Commit fcad2b42 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] async write errors: use flags in address space

From: Oliver Xymoron <oxymoron@waste.org>

This patch just saves a few bytes in the inode by turning mapping->gfp_mask
into an unsigned long mapping->flags.

The mapping's gfp mask is placed in the low 16 bits of mapping->flags, and two of
the remaining bits are used for tracking EIO and ENOSPC errors on async writes.

This leaves 14 bits in the mapping for future use.  They should be accessed
with the atomic bitops.
parent fe7e689f
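
For illustration only (this sketch is not part of the patch): a minimal user-space rendering of the layout the message describes, with the gfp mask in the low __GFP_BITS_SHIFT bits of the flags word and the AS_EIO/AS_ENOSPC error bits just above it. The constants and helpers below are plain-C stand-ins for the kernel's mapping_gfp_mask()/mapping_set_gfp_mask() and the atomic set_bit()/test_and_clear_bit() bitops; the gfp value used in main() is made up.

/* Stand-alone sketch of the mapping->flags packing; not kernel code. */
#include <stdio.h>

#define GFP_BITS_SHIFT	16			/* low 16 bits hold the gfp mask */
#define GFP_BITS_MASK	((1UL << GFP_BITS_SHIFT) - 1)
#define AS_EIO		(GFP_BITS_SHIFT + 0)	/* bit 16: async write hit -EIO */
#define AS_ENOSPC	(GFP_BITS_SHIFT + 1)	/* bit 17: async write hit -ENOSPC */

/* Non-atomic stand-ins for mapping_gfp_mask()/mapping_set_gfp_mask(). */
static unsigned long gfp_mask_of(unsigned long flags)
{
	return flags & GFP_BITS_MASK;
}

static void set_gfp_mask(unsigned long *flags, unsigned long mask)
{
	*flags = (*flags & ~GFP_BITS_MASK) | (mask & GFP_BITS_MASK);
}

int main(void)
{
	unsigned long flags = 0;

	set_gfp_mask(&flags, 0x01f0);		/* arbitrary example gfp mask */
	flags |= 1UL << AS_EIO;			/* a writeback path records an I/O error */

	printf("gfp mask: %#lx\n", gfp_mask_of(flags));

	/* test-and-clear, the way filp_close()/filemap_fdatawait() report and reset errors */
	if (flags & (1UL << AS_EIO)) {
		flags &= ~(1UL << AS_EIO);
		printf("reporting -EIO to the caller\n");
	}
	return 0;
}

The point of the split is that recording an error only needs set_bit()/test_and_clear_bit(), so writeback paths can flag EIO/ENOSPC without any locking, while the gfp mask is written non-atomically once when the mapping is set up.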
@@ -728,8 +728,9 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
 		fput(file);
 		goto out_putf;
 	}
-	lo->old_gfp_mask = inode->i_mapping->gfp_mask;
-	inode->i_mapping->gfp_mask &= ~(__GFP_IO|__GFP_FS);
+	lo->old_gfp_mask = mapping_gfp_mask(inode->i_mapping);
+	mapping_set_gfp_mask(inode->i_mapping,
+			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 	set_blocksize(bdev, lo_blocksize);
@@ -845,7 +846,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
 	invalidate_bdev(bdev, 0);
 	set_capacity(disks[lo->lo_number], 0);
-	filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
+	mapping_set_gfp_mask(filp->f_dentry->d_inode->i_mapping, gfp);
 	lo->lo_state = Lo_unbound;
 	fput(filp);
 	/* This is safe: open() is still holding a reference. */
...
@@ -320,7 +320,7 @@ struct block_device *bdget(dev_t dev)
 		inode->i_rdev = kdev;
 		inode->i_bdev = new_bdev;
 		inode->i_data.a_ops = &def_blk_aops;
-		inode->i_data.gfp_mask = GFP_USER;
+		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
 		inode->i_data.backing_dev_info = &default_backing_dev_info;
 		spin_lock(&bdev_lock);
 		bdev = bdfind(dev, head);
...
...@@ -579,7 +579,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) ...@@ -579,7 +579,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
buffer_io_error(bh); buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to I/O error on %s\n", printk(KERN_WARNING "lost page write due to I/O error on %s\n",
bdevname(bh->b_bdev, b)); bdevname(bh->b_bdev, b));
page->mapping->error = -EIO; set_bit(AS_EIO, &page->mapping->flags);
clear_buffer_uptodate(bh); clear_buffer_uptodate(bh);
SetPageError(page); SetPageError(page);
} }
...@@ -2815,7 +2815,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free) ...@@ -2815,7 +2815,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
do { do {
check_ttfb_buffer(page, bh); check_ttfb_buffer(page, bh);
if (buffer_write_io_error(bh)) if (buffer_write_io_error(bh))
page->mapping->error = -EIO; set_bit(AS_EIO, &page->mapping->flags);
if (buffer_busy(bh)) if (buffer_busy(bh))
goto failed; goto failed;
if (!buffer_uptodate(bh) && !buffer_req(bh)) if (!buffer_uptodate(bh) && !buffer_req(bh))
......
@@ -18,6 +18,7 @@
 #include <linux/hash.h>
 #include <linux/swap.h>
 #include <linux/security.h>
+#include <linux/pagemap.h>
 #include <linux/cdev.h>
 /*
@@ -141,11 +142,11 @@ static struct inode *alloc_inode(struct super_block *sb)
 		mapping->a_ops = &empty_aops;
 		mapping->host = inode;
-		mapping->gfp_mask = GFP_HIGHUSER;
+		mapping->flags = 0;
+		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
 		mapping->dirtied_when = 0;
 		mapping->assoc_mapping = NULL;
 		mapping->backing_dev_info = &default_backing_dev_info;
-		mapping->error = 0;
 		if (sb->s_bdev)
 			mapping->backing_dev_info = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
 		memset(&inode->u, 0, sizeof(inode->u));
...
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/mpage.h>
 #include <linux/buffer_head.h>
+#include <linux/pagemap.h>
 #include "jfs_incore.h"
 #include "jfs_filsys.h"
 #include "jfs_imap.h"
@@ -51,7 +52,7 @@ void jfs_read_inode(struct inode *inode)
 		inode->i_op = &jfs_dir_inode_operations;
 		inode->i_fop = &jfs_dir_operations;
 		inode->i_mapping->a_ops = &jfs_aops;
-		inode->i_mapping->gfp_mask = GFP_NOFS;
+		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 	} else if (S_ISLNK(inode->i_mode)) {
 		if (inode->i_size >= IDATASIZE) {
 			inode->i_op = &page_symlink_inode_operations;
...
@@ -43,6 +43,7 @@
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/pagemap.h>
 #include "jfs_incore.h"
 #include "jfs_filsys.h"
@@ -504,7 +505,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
 	}
 	ip->i_mapping->a_ops = &jfs_aops;
-	ip->i_mapping->gfp_mask = GFP_NOFS;
+	mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
 	if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
 		sbi->gengen = le32_to_cpu(dp->di_gengen);
...
@@ -258,7 +258,7 @@ int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
 		ip->i_op = &jfs_dir_inode_operations;
 		ip->i_fop = &jfs_dir_operations;
 		ip->i_mapping->a_ops = &jfs_aops;
-		ip->i_mapping->gfp_mask = GFP_NOFS;
+		mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
 		insert_inode_hash(ip);
 		mark_inode_dirty(ip);
...
@@ -566,8 +566,12 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 	/*
 	 * The caller has a ref on the inode, so *mapping is stable
 	 */
-	if (*ret < 0)
-		mapping->error = *ret;
+	if (*ret) {
+		if (*ret == -ENOSPC)
+			set_bit(AS_ENOSPC, &mapping->flags);
+		else
+			set_bit(AS_EIO, &mapping->flags);
+	}
 out:
 	return bio;
 }
@@ -669,8 +673,14 @@ mpage_writepages(struct address_space *mapping,
 				test_clear_page_dirty(page)) {
 			if (writepage) {
 				ret = (*writepage)(page, wbc);
-				if (ret < 0)
-					mapping->error = ret;
+				if (ret) {
+					if (ret == -ENOSPC)
+						set_bit(AS_ENOSPC,
+							&mapping->flags);
+					else
+						set_bit(AS_EIO,
+							&mapping->flags);
+				}
 			} else {
 				bio = mpage_writepage(bio, page, get_block,
 						&last_block_in_bio, &ret, wbc);
...
@@ -21,6 +21,7 @@
 #include <linux/vfs.h>
 #include <asm/uaccess.h>
 #include <linux/fs.h>
+#include <linux/pagemap.h>
 int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
 {
@@ -954,10 +955,10 @@ int filp_close(struct file *filp, fl_owner_t id)
 			retval = err;
 	}
-	err = mapping->error;
-	if (!retval)
-		retval = err;
-	mapping->error = 0;
+	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+		retval = -ENOSPC;
+	if (test_and_clear_bit(AS_EIO, &mapping->flags))
+		retval = -EIO;
 	if (!file_count(filp)) {
 		printk(KERN_ERR "VFS: Close: file count is 0\n");
...
@@ -327,12 +327,11 @@ struct address_space {
 	struct semaphore	i_shared_sem;	/* protect both above lists */
 	atomic_t		truncate_count;	/* Cover race condition with truncate */
 	unsigned long		dirtied_when;	/* jiffies of first page dirtying */
-	int			gfp_mask;	/* how to allocate the pages */
+	unsigned long		flags;		/* error bits/gfp mask */
 	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	struct address_space	*assoc_mapping;	/* ditto */
-	int			error;		/* write error for fsync */
 };
 struct block_device {
...
@@ -33,6 +33,9 @@
 #define __GFP_NORETRY	0x1000	/* Do not retry.  Might fail */
 #define __GFP_NO_GROW	0x2000	/* Slab internal usage */
+#define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
+#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
 #define GFP_ATOMIC	(__GFP_HIGH)
 #define GFP_NOIO	(__GFP_WAIT)
 #define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
...
@@ -8,7 +8,30 @@
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
+#include <linux/pagemap.h>
 #include <asm/uaccess.h>
+#include <linux/gfp.h>
+
+/*
+ * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
+ * allocation mode flags.
+ */
+#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
+#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
+
+static inline int mapping_gfp_mask(struct address_space * mapping)
+{
+	return mapping->flags & __GFP_BITS_MASK;
+}
+
+/*
+ * This is non-atomic.  Only to be used before the mapping is activated.
+ * Probably needs a barrier...
+ */
+static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
+{
+	m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
+}
 /*
  * The page cache can done in larger chunks than
@@ -29,12 +52,12 @@ void release_pages(struct page **pages, int nr, int cold);
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(x->gfp_mask, 0);
+	return alloc_pages(mapping_gfp_mask(x), 0);
 }
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(x->gfp_mask|__GFP_COLD, 0);
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
 typedef int filler_t(void *, struct page *);
@@ -56,7 +79,7 @@ extern unsigned int find_get_pages(struct address_space *mapping,
  */
 static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
 {
-	return find_or_create_page(mapping, index, mapping->gfp_mask);
+	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 }
 extern struct page * grab_cache_page_nowait(struct address_space *mapping,
...
@@ -205,11 +205,10 @@ int filemap_fdatawait(struct address_space * mapping)
 	spin_unlock(&mapping->page_lock);
 	/* Check for outstanding write errors */
-	if (mapping->error) {
-		if (!ret)
-			ret = mapping->error;
-		mapping->error = 0;
-	}
+	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+		ret = -ENOSPC;
+	if (test_and_clear_bit(AS_EIO, &mapping->flags))
+		ret = -EIO;
 	return ret;
 }
@@ -532,7 +531,7 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 		page_cache_release(page);
 		return NULL;
 	}
-	gfp_mask = mapping->gfp_mask & ~__GFP_FS;
+	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
 	page = alloc_pages(gfp_mask, 0);
 	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
 		page_cache_release(page);
...
@@ -320,7 +320,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 		spin_unlock(&sbinfo->stat_lock);
 		spin_unlock(&info->lock);
-		page = shmem_dir_alloc(inode->i_mapping->gfp_mask);
+		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
 		if (page) {
 			clear_highpage(page);
 			page->nr_swapped = 0;
...
@@ -251,8 +251,12 @@ static void handle_write_error(struct address_space *mapping,
 				struct page *page, int error)
 {
 	lock_page(page);
-	if (page->mapping == mapping)
-		mapping->error = error;
+	if (page->mapping == mapping) {
+		if (error == -ENOSPC)
+			set_bit(AS_ENOSPC, &mapping->flags);
+		else
+			set_bit(AS_EIO, &mapping->flags);
+	}
 	unlock_page(page);
 }