Commit 86ee4c5d authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Use kmap_atomic() for generic_file_write()

This patch uses the atomic copy_from_user() facility in
generic_file_write().
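
In outline, the copy is first attempted under an atomic kmap; if the
source faults there, the remainder is reported and the copy is retried
under a sleeping kmap().  This mirrors the filemap_copy_from_user()
helper added at the end of the diff (comments here are mine):

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user(kaddr + offset, buf, bytes); /* may not complete */
	kunmap_atomic(kaddr, KM_USER0);
	if (left != 0) {
		/* slow way: kmap() can sleep, so the fault gets serviced */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}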

This required a change in the prepare_write/commit_write API
definition.  It is no longer the case that these functions will kmap
the page for you.

If any part of the kernel wants to get at the page in the write path,
it now has to kmap it for itself.  The best way to do this is with
kmap_atomic(KM_USER0).
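
In practice the conversions below all follow one pattern: map briefly,
touch the data, flush, unmap.  A minimal sketch (`offset' and `length'
stand for whatever range the caller is zeroing or copying):

	void *kaddr;

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);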

This patch updates all callers.  It also converts several places which
were unnecessarily using kmap() over to using kmap_atomic().

The reiserfs changes here are Oleg Drokin's revised version.

The patch has been tested with loop, ext2, ext3, reiserfs, jfs,
minixfs, vfat, iso9660, nfs and the ramdisk driver.

I haven't fixed the racy deadlock avoidance thing in
generic_file_write() - the case where we take a fault when the source
and dest of the copy are both the same pagecache page.
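
The avoidance in question is to pre-fault the source buffer before the
destination page is locked; this patch factors it into
fault_in_pages_readable() (see the final hunks), but it stays racy
because the source can be evicted again before the copy runs:

	/* touch the first and last byte of the user buffer so the
	 * atomic copy under the page lock normally won't fault */
	fault_in_pages_readable(buf, bytes);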

There is a printk in there now which will trigger if the page was
unexpectedly not present.  And guess what?  I get 50-100 of them when
running `dbench 64' on mem=48m.   This deadlock can happen.
parent 88a3b490
@@ -210,8 +210,7 @@ do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)
                 goto fail;
         if (aops->prepare_write(file, page, offset, offset+size))
                 goto unlock;
-        kaddr = page_address(page);
-        flush_dcache_page(page);
+        kaddr = kmap(page);
         transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
         if (transfer_result) {
                 /*
@@ -221,6 +220,8 @@ do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)
                 printk(KERN_ERR "loop: transfer error block %ld\n", index);
                 memset(kaddr + offset, 0, size);
         }
+        flush_dcache_page(page);
+        kunmap(page);
         if (aops->commit_write(file, page, offset, offset+size))
                 goto unlock;
         if (transfer_result)
...
@@ -109,9 +109,11 @@ int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */
 static int ramdisk_readpage(struct file *file, struct page * page)
 {
         if (!PageUptodate(page)) {
-                memset(kmap(page), 0, PAGE_CACHE_SIZE);
-                kunmap(page);
+                void *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr, 0, PAGE_CACHE_SIZE);
                 flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 SetPageUptodate(page);
         }
         unlock_page(page);
@@ -121,9 +123,11 @@ static int ramdisk_readpage(struct file *file, struct page * page)
 static int ramdisk_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
         if (!PageUptodate(page)) {
-                void *addr = page_address(page);
-                memset(addr, 0, PAGE_CACHE_SIZE);
+                void *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr, 0, PAGE_CACHE_SIZE);
                 flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 SetPageUptodate(page);
         }
         SetPageDirty(page);
@@ -178,8 +182,11 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec,
                 err = 0;
                 if (!PageUptodate(page)) {
-                        memset(kmap(page), 0, PAGE_CACHE_SIZE);
-                        kunmap(page);
+                        void *kaddr = kmap_atomic(page, KM_USER0);
+
+                        memset(kaddr, 0, PAGE_CACHE_SIZE);
+                        flush_dcache_page(page);
+                        kunmap_atomic(kaddr, KM_USER0);
                         SetPageUptodate(page);
                 }
...
@@ -27,6 +27,7 @@
 #include <linux/fs.h>
 #include <linux/amigaffs.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -518,6 +519,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
         pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
         if (from > to || to > PAGE_CACHE_SIZE)
                 BUG();
+        kmap(page);
         data = page_address(page);
         bsize = AFFS_SB(sb)->s_data_blksize;
         tmp = (page->index << PAGE_CACHE_SHIFT) + from;
@@ -537,6 +539,8 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
                 from += tmp;
                 boff = 0;
         }
+        flush_dcache_page(page);
+        kunmap(page);
         return 0;
 }
@@ -656,7 +660,11 @@ static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned
                 return err;
         }
         if (to < PAGE_CACHE_SIZE) {
-                memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+                char *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+                flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 if (size > offset + to) {
                         if (size < offset + PAGE_CACHE_SIZE)
                                 tmp = size & ~PAGE_CACHE_MASK;
...
@@ -1833,7 +1833,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
         int err = 0;
         unsigned blocksize, bbits;
         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
-        char *kaddr = kmap(page);
 
         BUG_ON(!PageLocked(page));
         BUG_ON(from > PAGE_CACHE_SIZE);
@@ -1874,13 +1873,19 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                                 set_buffer_uptodate(bh);
                                 continue;
                         }
-                        if (block_end > to)
-                                memset(kaddr+to, 0, block_end-to);
-                        if (block_start < from)
-                                memset(kaddr+block_start,
-                                        0, from-block_start);
-                        if (block_end > to || block_start < from)
-                                flush_dcache_page(page);
+                        if (block_end > to || block_start < from) {
+                                void *kaddr;
+
+                                kaddr = kmap_atomic(page, KM_USER0);
+                                if (block_end > to)
+                                        memset(kaddr+to, 0,
+                                                block_end-to);
+                                if (block_start < from)
+                                        memset(kaddr+block_start,
+                                                0, from-block_start);
+                                flush_dcache_page(page);
+                                kunmap_atomic(kaddr, KM_USER0);
+                        }
                         continue;
                 }
         }
@@ -1919,10 +1924,14 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                 if (block_start >= to)
                         break;
                 if (buffer_new(bh)) {
+                        void *kaddr;
                         clear_buffer_new(bh);
                         if (buffer_uptodate(bh))
                                 buffer_error();
+                        kaddr = kmap_atomic(page, KM_USER0);
                         memset(kaddr+block_start, 0, bh->b_size);
+                        kunmap_atomic(kaddr, KM_USER0);
                         set_buffer_uptodate(bh);
                         mark_buffer_dirty(bh);
                 }
@@ -2008,9 +2017,10 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                         SetPageError(page);
                 }
                 if (!buffer_mapped(bh)) {
-                        memset(kmap(page) + i*blocksize, 0, blocksize);
+                        void *kaddr = kmap_atomic(page, KM_USER0);
+                        memset(kaddr + i * blocksize, 0, blocksize);
                         flush_dcache_page(page);
-                        kunmap(page);
+                        kunmap_atomic(kaddr, KM_USER0);
                         set_buffer_uptodate(bh);
                         continue;
                 }
@@ -2118,7 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
         long status;
         unsigned zerofrom;
         unsigned blocksize = 1 << inode->i_blkbits;
-        char *kaddr;
+        void *kaddr;
 
         while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
                 status = -ENOMEM;
@@ -2140,12 +2150,12 @@ int cont_prepare_write(struct page *page, unsigned offset,
                                         PAGE_CACHE_SIZE, get_block);
                 if (status)
                         goto out_unmap;
-                kaddr = page_address(new_page);
+                kaddr = kmap_atomic(new_page, KM_USER0);
                 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
                 flush_dcache_page(new_page);
+                kunmap_atomic(kaddr, KM_USER0);
                 __block_commit_write(inode, new_page,
                                 zerofrom, PAGE_CACHE_SIZE);
-                kunmap(new_page);
                 unlock_page(new_page);
                 page_cache_release(new_page);
         }
@@ -2170,21 +2180,20 @@ int cont_prepare_write(struct page *page, unsigned offset,
         status = __block_prepare_write(inode, page, zerofrom, to, get_block);
         if (status)
                 goto out1;
-        kaddr = page_address(page);
         if (zerofrom < offset) {
+                kaddr = kmap_atomic(page, KM_USER0);
                 memset(kaddr+zerofrom, 0, offset-zerofrom);
                 flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 __block_commit_write(inode, page, zerofrom, offset);
         }
         return 0;
 out1:
         ClearPageUptodate(page);
-        kunmap(page);
         return status;
 
 out_unmap:
         ClearPageUptodate(new_page);
-        kunmap(new_page);
         unlock_page(new_page);
         page_cache_release(new_page);
 out:
@@ -2196,10 +2205,8 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 {
         struct inode *inode = page->mapping->host;
         int err = __block_prepare_write(inode, page, from, to, get_block);
-        if (err) {
+        if (err)
                 ClearPageUptodate(page);
-                kunmap(page);
-        }
         return err;
 }
@@ -2207,7 +2214,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
         struct inode *inode = page->mapping->host;
         __block_commit_write(inode,page,from,to);
-        kunmap(page);
         return 0;
 }
@@ -2217,7 +2223,6 @@ int generic_commit_write(struct file *file, struct page *page,
         struct inode *inode = page->mapping->host;
         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
         __block_commit_write(inode,page,from,to);
-        kunmap(page);
         if (pos > inode->i_size) {
                 inode->i_size = pos;
                 mark_inode_dirty(inode);
@@ -2234,6 +2239,7 @@ int block_truncate_page(struct address_space *mapping,
         struct inode *inode = mapping->host;
         struct page *page;
         struct buffer_head *bh;
+        void *kaddr;
         int err;
 
         blocksize = 1 << inode->i_blkbits;
@@ -2286,9 +2292,10 @@ int block_truncate_page(struct address_space *mapping,
                         goto unlock;
         }
 
-        memset(kmap(page) + offset, 0, length);
+        kaddr = kmap_atomic(page, KM_USER0);
+        memset(kaddr + offset, 0, length);
         flush_dcache_page(page);
-        kunmap(page);
+        kunmap_atomic(kaddr, KM_USER0);
         mark_buffer_dirty(bh);
         err = 0;
@@ -2308,7 +2315,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block)
         struct inode * const inode = page->mapping->host;
         const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
         unsigned offset;
-        char *kaddr;
+        void *kaddr;
 
         /* Is the page fully inside i_size? */
         if (page->index < end_index)
@@ -2328,10 +2335,10 @@ int block_write_full_page(struct page *page, get_block_t *get_block)
          * the page size, the remaining memory is zeroed when mapped, and
          * writes to that region are not written out to the file."
          */
-        kaddr = kmap(page);
+        kaddr = kmap_atomic(page, KM_USER0);
         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
         flush_dcache_page(page);
-        kunmap(page);
+        kunmap_atomic(kaddr, KM_USER0);
         return __block_write_full_page(inode, page, get_block);
 }
...
@@ -59,9 +59,11 @@ static int mount_count = 0;
 static int driverfs_readpage(struct file *file, struct page * page)
 {
         if (!PageUptodate(page)) {
-                memset(kmap(page), 0, PAGE_CACHE_SIZE);
-                kunmap(page);
+                void *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr, 0, PAGE_CACHE_SIZE);
                 flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 SetPageUptodate(page);
         }
         unlock_page(page);
@@ -70,10 +72,12 @@ static int driverfs_readpage(struct file *file, struct page * page)
 static int driverfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
-        void *addr = kmap(page);
         if (!PageUptodate(page)) {
-                memset(addr, 0, PAGE_CACHE_SIZE);
+                void *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr, 0, PAGE_CACHE_SIZE);
                 flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 SetPageUptodate(page);
         }
         return 0;
@@ -85,7 +89,6 @@ static int driverfs_commit_write(struct file *file, struct page *page, unsigned
         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
         set_page_dirty(page);
-        kunmap(page);
         if (pos > inode->i_size)
                 inode->i_size = pos;
         return 0;
...
@@ -571,8 +571,8 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
         struct page *page = grab_cache_page(mapping, 0);
         unsigned chunk_size = ext2_chunk_size(inode);
         struct ext2_dir_entry_2 * de;
-        char *base;
         int err;
+        void *kaddr;
 
         if (!page)
                 return -ENOMEM;
@@ -581,22 +581,21 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
                 unlock_page(page);
                 goto fail;
         }
-        base = page_address(page);
-        de = (struct ext2_dir_entry_2 *) base;
+        kaddr = kmap_atomic(page, KM_USER0);
+        de = (struct ext2_dir_entry_2 *)kaddr;
         de->name_len = 1;
         de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
         memcpy (de->name, ".\0\0", 4);
         de->inode = cpu_to_le32(inode->i_ino);
         ext2_set_de_type (de, inode);
 
-        de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
+        de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
         de->name_len = 2;
         de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
         de->inode = cpu_to_le32(parent->i_ino);
         memcpy (de->name, "..\0", 4);
         ext2_set_de_type (de, inode);
+        kunmap_atomic(kaddr, KM_USER0);
         err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
         page_cache_release(page);
...
@@ -1082,16 +1082,6 @@ static int ext3_prepare_write(struct file *file, struct page *page,
         if (ext3_should_journal_data(inode)) {
                 ret = walk_page_buffers(handle, page_buffers(page),
                         from, to, NULL, do_journal_get_write_access);
-                if (ret) {
-                        /*
-                         * We're going to fail this prepare_write(),
-                         * so commit_write() will not be called.
-                         * We need to undo block_prepare_write()'s kmap().
-                         * AKPM: Do we need to clear PageUptodate? I don't
-                         * think so.
-                         */
-                        kunmap(page);
-                }
         }
 prepare_write_failed:
         if (ret)
@@ -1151,7 +1141,6 @@ static int ext3_commit_write(struct file *file, struct page *page,
                         from, to, &partial, commit_write_fn);
                 if (!partial)
                         SetPageUptodate(page);
-                kunmap(page);
                 if (pos > inode->i_size)
                         inode->i_size = pos;
                 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
@@ -1162,17 +1151,8 @@ static int ext3_commit_write(struct file *file, struct page *page,
                 }
                 /* Be careful here if generic_commit_write becomes a
                  * required invocation after block_prepare_write. */
-                if (ret == 0) {
+                if (ret == 0)
                         ret = generic_commit_write(file, page, from, to);
-                } else {
-                        /*
-                         * block_prepare_write() was called, but we're not
-                         * going to call generic_commit_write(). So we
-                         * need to perform generic_commit_write()'s kunmap
-                         * by hand.
-                         */
-                        kunmap(page);
-                }
         }
         if (inode->i_size > EXT3_I(inode)->i_disksize) {
                 EXT3_I(inode)->i_disksize = inode->i_size;
@@ -1535,6 +1515,7 @@ static int ext3_block_truncate_page(handle_t *handle,
         struct page *page;
         struct buffer_head *bh;
         int err;
+        void *kaddr;
 
         blocksize = inode->i_sb->s_blocksize;
         length = offset & (blocksize - 1);
@@ -1590,10 +1571,11 @@ static int ext3_block_truncate_page(handle_t *handle,
                 if (err)
                         goto unlock;
         }
 
-        memset(kmap(page) + offset, 0, length);
+        kaddr = kmap_atomic(page, KM_USER0);
+        memset(kaddr + offset, 0, length);
         flush_dcache_page(page);
-        kunmap(page);
+        kunmap_atomic(kaddr, KM_USER0);
         BUFFER_TRACE(bh, "zeroed end of block");
...
@@ -982,11 +982,24 @@ static int fat_readpage(struct file *file, struct page *page)
 {
         return block_read_full_page(page,fat_get_block);
 }
 
-static int fat_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+static int
+fat_prepare_write(struct file *file, struct page *page,
+                        unsigned from, unsigned to)
 {
+        kmap(page);
         return cont_prepare_write(page,from,to,fat_get_block,
                 &MSDOS_I(page->mapping->host)->mmu_private);
 }
+
+static int
+fat_commit_write(struct file *file, struct page *page,
+                        unsigned from, unsigned to)
+{
+        kunmap(page);
+        return generic_commit_write(file, page, from, to);
+}
 
 static int _fat_bmap(struct address_space *mapping, long block)
 {
         return generic_block_bmap(mapping,block,fat_get_block);
@@ -996,7 +1009,7 @@ static struct address_space_operations fat_aops = {
         writepage: fat_writepage,
         sync_page: block_sync_page,
         prepare_write: fat_prepare_write,
-        commit_write: generic_commit_write,
+        commit_write: fat_commit_write,
         bmap: _fat_bmap
 };
...
@@ -47,6 +47,7 @@
 #include <linux/stat.h>
 #include <linux/blkdev.h>
 #include <linux/quotaops.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>
@@ -751,7 +752,6 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
         get_page(page);
         /* Don't SetPageLocked(page), should be locked already */
-        buf = page_address(page);
         ClearPageUptodate(page);
         ClearPageError(page);
@@ -760,8 +760,10 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
         read_len = 0;
         result = 0;
         offset = page->index << PAGE_CACHE_SHIFT;
+        kmap(page);
+        buf = page_address(page);
         if (offset < inode->i_size) {
                 read_len = min_t(long, inode->i_size - offset, PAGE_SIZE);
                 r = jffs_read_data(f, buf, offset, read_len);
@@ -779,6 +781,8 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
         /* This handles the case of partial or no read in above */
         if(read_len < PAGE_SIZE)
                 memset(buf + read_len, 0, PAGE_SIZE - read_len);
+        flush_dcache_page(page);
+        kunmap(page);
         D3(printk (KERN_NOTICE "readpage(): up biglock\n"));
         up(&c->fmc->biglock);
@@ -788,9 +792,8 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
         }else {
                 SetPageUptodate(page);
         }
-        flush_dcache_page(page);
 
-        put_page(page);
+        page_cache_release(page);
         D3(printk("jffs_readpage(): Leaving...\n"));
...
@@ -17,6 +17,7 @@
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/crc32.h>
 #include <linux/jffs2.h>
 #include "nodelist.h"
@@ -381,9 +382,10 @@ int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsi
         ri->isize = (uint32_t)inode->i_size;
         ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
 
-        /* We rely on the fact that generic_file_write() currently kmaps the page for us. */
+        kmap(pg);
         ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + start,
                         (pg->index << PAGE_CACHE_SHIFT) + start, end - start, &writtenlen);
+        kunmap(pg);
 
         if (ret) {
                 /* There was an error writing. */
...
@@ -403,7 +403,6 @@ static void __write_metapage(metapage_t * mp)
         if (rc) {
                 jERROR(1, ("prepare_write return %d!\n", rc));
                 ClearPageUptodate(mp->page);
-                kunmap(mp->page);
                 unlock_page(mp->page);
                 clear_bit(META_dirty, &mp->flag);
                 return;
...
@@ -7,6 +7,7 @@
  */
 
 #include "minix.h"
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 
 typedef struct minix_dir_entry minix_dirent;
@@ -261,7 +262,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
 {
         struct address_space *mapping = page->mapping;
         struct inode *inode = (struct inode*)mapping->host;
-        char *kaddr = (char*)page_address(page);
+        char *kaddr = page_address(page);
         unsigned from = (char*)de - kaddr;
         unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
         int err;
@@ -286,7 +287,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
         struct page *page = grab_cache_page(mapping, 0);
         struct minix_sb_info * sbi = minix_sb(inode->i_sb);
         struct minix_dir_entry * de;
-        char *base;
+        char *kaddr;
         int err;
 
         if (!page)
@@ -297,15 +298,16 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
                 goto fail;
         }
-        base = (char*)page_address(page);
-        memset(base, 0, PAGE_CACHE_SIZE);
-        de = (struct minix_dir_entry *) base;
+        kaddr = kmap_atomic(page, KM_USER0);
+        memset(kaddr, 0, PAGE_CACHE_SIZE);
+        de = (struct minix_dir_entry *)kaddr;
         de->inode = inode->i_ino;
         strcpy(de->name,".");
         de = minix_next_entry(de, sbi);
         de->inode = dir->i_ino;
         strcpy(de->name,"..");
+        kunmap_atomic(kaddr, KM_USER0);
         err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
...
@@ -2200,8 +2200,9 @@ int page_symlink(struct inode *inode, const char *symname, int len)
         err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
         if (err)
                 goto fail_map;
-        kaddr = page_address(page);
+        kaddr = kmap_atomic(page, KM_USER0);
         memcpy(kaddr, symname, len-1);
+        kunmap_atomic(kaddr, KM_USER0);
         mapping->a_ops->commit_write(NULL, page, 0, len-1);
         /*
          * Notice that we are _not_ going to block here - end of page is
...
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/smp_lock.h>
@@ -47,8 +48,10 @@ static struct inode_operations ramfs_dir_inode_operations;
 static int ramfs_readpage(struct file *file, struct page * page)
 {
         if (!PageUptodate(page)) {
-                memset(kmap(page), 0, PAGE_CACHE_SIZE);
-                kunmap(page);
+                char *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr, 0, PAGE_CACHE_SIZE);
+                kunmap_atomic(kaddr, KM_USER0);
                 flush_dcache_page(page);
                 SetPageUptodate(page);
         }
@@ -58,10 +61,12 @@ static int ramfs_readpage(struct file *file, struct page * page)
 static int ramfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
-        void *addr = kmap(page);
         if (!PageUptodate(page)) {
-                memset(addr, 0, PAGE_CACHE_SIZE);
+                char *kaddr = kmap_atomic(page, KM_USER0);
+
+                memset(kaddr, 0, PAGE_CACHE_SIZE);
                 flush_dcache_page(page);
+                kunmap_atomic(kaddr, KM_USER0);
                 SetPageUptodate(page);
         }
         SetPageDirty(page);
@@ -73,7 +78,6 @@ static int ramfs_commit_write(struct file *file, struct page *page, unsigned off
         struct inode *inode = page->mapping->host;
         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
-        kunmap(page);
         if (pos > inode->i_size)
                 inode->i_size = pos;
         return 0;
...
@@ -7,6 +7,7 @@
 #include <linux/reiserfs_fs.h>
 #include <linux/smp_lock.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/buffer_head.h>
@@ -1692,8 +1693,6 @@ static int grab_tail_page(struct inode *p_s_inode,
     if (error)
         goto unlock ;
 
-    kunmap(page) ; /* mapped by block_prepare_write */
-
     head = page_buffers(page) ;
     bh = head;
     do {
@@ -1788,10 +1787,13 @@ void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     length = offset & (blocksize - 1) ;
     /* if we are not on a block boundary */
     if (length) {
+        char *kaddr;
+
         length = blocksize - length ;
-        memset((char *)kmap(page) + offset, 0, length) ;
+        kaddr = kmap_atomic(page, KM_USER0) ;
+        memset(kaddr + offset, 0, length) ;
         flush_dcache_page(page) ;
-        kunmap(page) ;
+        kunmap_atomic(kaddr, KM_USER0) ;
         if (buffer_mapped(bh) && bh->b_blocknr != 0) {
             mark_buffer_dirty(bh) ;
         }
@@ -1941,23 +1943,25 @@ static int reiserfs_write_full_page(struct page *page) {
     struct buffer_head *arr[PAGE_CACHE_SIZE/512] ;
     int nr = 0 ;
 
-    if (!page_has_buffers(page)) {
+    if (!page_has_buffers(page))
         block_prepare_write(page, 0, 0, NULL) ;
-        kunmap(page) ;
-    }
 
     /* last page in the file, zero out any contents past the
     ** last byte in the file
     */
     if (page->index >= end_index) {
+        char *kaddr;
         last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1) ;
         /* no file contents in this page */
         if (page->index >= end_index + 1 || !last_offset) {
             error = -EIO ;
             goto fail ;
         }
-        memset((char *)kmap(page)+last_offset, 0, PAGE_CACHE_SIZE-last_offset) ;
+        kaddr = kmap_atomic(page, KM_USER0);
+        memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE-last_offset) ;
         flush_dcache_page(page) ;
-        kunmap(page) ;
+        kunmap_atomic(kaddr, KM_USER0) ;
     }
     head = page_buffers(page) ;
     bh = head ;
...
@@ -1284,15 +1284,15 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
     **
     ** p_s_un_bh is from the page cache (all unformatted nodes are
     ** from the page cache) and might be a highmem page. So, we
-    ** can't use p_s_un_bh->b_data. But, the page has already been
-    ** kmapped, so we can use page_address()
+    ** can't use p_s_un_bh->b_data.
     ** -clm
     */
-    data = page_address(p_s_un_bh->b_page) ;
+    data = kmap_atomic(p_s_un_bh->b_page, KM_USER0);
     off = ((le_ih_k_offset (&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
     memcpy(data + off,
            B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), n_ret_value);
+    kunmap_atomic(data, KM_USER0);
 }
 /* Perform balancing after all resources have been collected at once. */
...
@@ -122,11 +122,12 @@ int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inod
     }
 
     /* if we've copied bytes from disk into the page, we need to zero
     ** out the unused part of the block (it was not up to date before)
-    ** the page is still kmapped (by whoever called reiserfs_get_block)
     */
     if (up_to_date_bh) {
         unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
-        memset(page_address(unbh->b_page) + pgoff, 0, n_blk_size - total_tail) ;
+        char *kaddr=kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+        memset(kaddr + pgoff, 0, n_blk_size - total_tail) ;
+        kunmap_atomic(kaddr, KM_USER0);
     }
 
     REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
...
@@ -14,6 +14,7 @@
  */
 
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include "sysv.h"
@@ -273,6 +274,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
         if (!page)
                 return -ENOMEM;
+        kmap(page);
         err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * SYSV_DIRSIZE);
         if (err) {
                 unlock_page(page);
@@ -291,6 +293,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
         err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
 fail:
+        kunmap(page);
         page_cache_release(page);
         return err;
 }
...
@@ -1065,6 +1065,21 @@ static inline int fault_in_pages_writeable(char *uaddr, int size)
         return ret;
 }
 
+static inline void fault_in_pages_readable(const char *uaddr, int size)
+{
+        volatile char c;
+        int ret;
+
+        ret = __get_user(c, (char *)uaddr);
+        if (ret == 0) {
+                const char *end = uaddr + size - 1;
+
+                if (((unsigned long)uaddr & PAGE_MASK) !=
+                                ((unsigned long)end & PAGE_MASK))
+                        __get_user(c, (char *)end);
+        }
+}
+
 int file_read_actor(read_descriptor_t *desc, struct page *page,
                         unsigned long offset, unsigned long size)
 {
@@ -1882,6 +1897,26 @@ inline void remove_suid(struct dentry *dentry)
         }
 }
 
+static inline int
+filemap_copy_from_user(struct page *page, unsigned long offset,
+                        const char *buf, unsigned bytes)
+{
+        char *kaddr;
+        int left;
+
+        kaddr = kmap_atomic(page, KM_USER0);
+        left = __copy_from_user(kaddr + offset, buf, bytes);
+        kunmap_atomic(kaddr, KM_USER0);
+
+        if (left != 0) {
+                /* Do it the slow way */
+                kaddr = kmap(page);
+                left = __copy_from_user(kaddr + offset, buf, bytes);
+                kunmap(page);
+        }
+        return left;
+}
+
 /*
  * Write to a file through the page cache.
  *
@@ -2034,7 +2069,6 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
                 unsigned long index;
                 unsigned long offset;
                 long page_fault;
-                char *kaddr;
 
                 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                 index = pos >> PAGE_CACHE_SHIFT;
@@ -2048,10 +2082,7 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
                  * same page as we're writing to, without it being marked
                  * up-to-date.
                  */
-                { volatile unsigned char dummy;
-                        __get_user(dummy, buf);
-                        __get_user(dummy, buf+bytes-1);
-                }
+                fault_in_pages_readable(buf, bytes);
 
                 page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
                 if (!page) {
@@ -2059,22 +2090,19 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
                         break;
                 }
 
-                kaddr = kmap(page);
                 status = a_ops->prepare_write(file, page, offset, offset+bytes);
                 if (unlikely(status)) {
                         /*
                          * prepare_write() may have instantiated a few blocks
                          * outside i_size. Trim these off again.
                          */
-                        kunmap(page);
                         unlock_page(page);
                         page_cache_release(page);
                         if (pos + bytes > inode->i_size)
                                 vmtruncate(inode, inode->i_size);
                         break;
                 }
-                page_fault = __copy_from_user(kaddr + offset, buf, bytes);
-                flush_dcache_page(page);
+                page_fault = filemap_copy_from_user(page, offset, buf, bytes);
                 status = a_ops->commit_write(file, page, offset, offset+bytes);
                 if (unlikely(page_fault)) {
                         status = -EFAULT;
@@ -2089,7 +2117,6 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
                                 buf += status;
                         }
                 }
-                kunmap(page);
                 if (!PageReferenced(page))
                         SetPageReferenced(page);
                 unlock_page(page);
...