Commit 62fa9ec7 authored by Chao Yu, committed by Stefan Bader

f2fs: fix to avoid reading out encrypted data in page cache

BugLink: https://bugs.launchpad.net/bugs/1818797

commit 78682f79 upstream.

For an encrypted inode, if the user overwrites the inode's data, f2fs reads the
encrypted data into the page cache and then decrypts it.

However, a reader can race with the overwriter and see encrypted data that the
overwriter has not yet decrypted. Fix this by moving the decryption work to the
background and keeping the page non-uptodate until the data is decrypted.

Thread A				Thread B
- f2fs_file_write_iter
 - __generic_file_write_iter
  - generic_perform_write
   - f2fs_write_begin
    - f2fs_submit_page_bio
					- generic_file_read_iter
					 - do_generic_file_read
					  - lock_page_killable
					  - unlock_page
					  - copy_page_to_iter
					  hit the encrypted data in updated page
    - lock_page
    - fscrypt_decrypt_page
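
[Editor's illustration, not part of the patch] The read bio built by the new
f2fs_grab_bio() below carries the f2fs_crypto_ctx in bio->bi_private, so the
submitted page stays locked and non-uptodate until the completion path has
decrypted it. The following is a minimal sketch of that completion-side
ordering, assuming the pre-fscrypt f2fs_crypto helpers of the 4.4 tree; it is
not the literal f2fs_read_end_io(), and f2fs_queue_decrypt_work() is a
hypothetical stand-in for the actual decryption workqueue hand-off.

/*
 * Sketch of the read completion ordering the fix relies on (illustrative,
 * not the literal f2fs_read_end_io() from the 4.4 tree).
 */
static void f2fs_read_end_io_sketch(struct bio *bio)
{
        struct f2fs_crypto_ctx *ctx = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        if (ctx && !bio->bi_error) {
                /*
                 * Encrypted read: hand the pages to the decryption worker.
                 * They remain locked and !PageUptodate here, so a racing
                 * do_generic_file_read() keeps waiting in lock_page_killable()
                 * and never copies ciphertext to userspace.
                 */
                f2fs_queue_decrypt_work(ctx, bio);      /* hypothetical helper */
                return;
        }
        if (ctx)
                f2fs_release_crypto_ctx(ctx);

        /* Plaintext read (or I/O error): publish and unlock the pages now. */
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (!bio->bi_error) {
                        SetPageUptodate(page);  /* only now may readers copy */
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);      /* wakes lock_page_killable() waiters */
        }
        bio_put(bio);
}

Only after the decryption work has finished does the equivalent of the
plaintext branch run for encrypted pages, which is what prevents Thread B from
ever seeing ciphertext.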
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
[bwh: Backported to 4.4:
 - Keep using f2fs_crypto functions instead of generic fscrypt API
 - Use PAGE_CACHE_SIZE instead of PAGE_SIZE
 - Use submit_bio() instead of __submit_bio()
 - In f2fs_write_begin(), use dn.data_blkaddr instead of blkaddr
 - Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent 63e35989
fs/f2fs/data.c
@@ -866,6 +866,37 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	return ret;
 }
 
+struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+							unsigned nr_pages)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_crypto_ctx *ctx = NULL;
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct bio *bio;
+
+	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+		ctx = f2fs_get_crypto_ctx(inode);
+		if (IS_ERR(ctx))
+			return ERR_CAST(ctx);
+
+		/* wait the page to be moved by cleaning */
+		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+	}
+
+	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+	if (!bio) {
+		if (ctx)
+			f2fs_release_crypto_ctx(ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
+	bio->bi_end_io = f2fs_read_end_io;
+	bio->bi_private = ctx;
+
+	return bio;
+}
+
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
@@ -884,7 +915,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 	sector_t last_block;
 	sector_t last_block_in_file;
 	sector_t block_nr;
-	struct block_device *bdev = inode->i_sb->s_bdev;
 	struct f2fs_map_blocks map;
 
 	map.m_pblk = 0;
@@ -958,31 +988,9 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 			bio = NULL;
 		}
 		if (bio == NULL) {
-			struct f2fs_crypto_ctx *ctx = NULL;
-
-			if (f2fs_encrypted_inode(inode) &&
-					S_ISREG(inode->i_mode)) {
-
-				ctx = f2fs_get_crypto_ctx(inode);
-				if (IS_ERR(ctx))
-					goto set_error_page;
-
-				/* wait the page to be moved by cleaning */
-				f2fs_wait_on_encrypted_page_writeback(
-						F2FS_I_SB(inode), block_nr);
-			}
-
-			bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, BIO_MAX_PAGES));
-			if (!bio) {
-				if (ctx)
-					f2fs_release_crypto_ctx(ctx);
+			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+			if (IS_ERR(bio))
 				goto set_error_page;
-			}
-			bio->bi_bdev = bdev;
-			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
-			bio->bi_end_io = f2fs_read_end_io;
-			bio->bi_private = ctx;
 		}
 
 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1482,17 +1490,21 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 	} else {
-		struct f2fs_io_info fio = {
-			.sbi = sbi,
-			.type = DATA,
-			.rw = READ_SYNC,
-			.blk_addr = dn.data_blkaddr,
-			.page = page,
-			.encrypted_page = NULL,
-		};
-		err = f2fs_submit_page_bio(&fio);
-		if (err)
+		struct bio *bio;
+
+		bio = f2fs_grab_bio(inode, dn.data_blkaddr, 1);
+		if (IS_ERR(bio)) {
+			err = PTR_ERR(bio);
 			goto fail;
+		}
+
+		if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+			bio_put(bio);
+			err = -EFAULT;
+			goto fail;
+		}
+
+		submit_bio(READ_SYNC, bio);
 
 		lock_page(page);
 		if (unlikely(!PageUptodate(page))) {
@@ -1503,13 +1515,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 			f2fs_put_page(page, 1);
 			goto repeat;
 		}
-
-		/* avoid symlink page */
-		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
-			err = f2fs_decrypt_one(inode, page);
-			if (err)
-				goto fail;
-		}
 	}
 out_update:
 	SetPageUptodate(page);
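
For reference, a caller drives a synchronous, decryption-aware read with the
new helper exactly as in the f2fs_write_begin() hunk above; the pattern is
condensed here for readability (it restates the patch and adds nothing new):

	struct bio *bio;

	bio = f2fs_grab_bio(inode, dn.data_blkaddr, 1);	/* attaches crypto ctx as bi_private */
	if (IS_ERR(bio)) {
		err = PTR_ERR(bio);
		goto fail;
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		err = -EFAULT;
		goto fail;
	}
	submit_bio(READ_SYNC, bio);

	lock_page(page);	/* blocks until f2fs_read_end_io (and decryption) completes */
	/* PageUptodate() becomes true only once the page holds plaintext */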