Commit 04d4ba4c authored by Qu Wenruo's avatar Qu Wenruo Committed by David Sterba

btrfs: make check_compressed_csum() to be subpage compatible

Currently check_compressed_csum() completely relies on sectorsize ==
PAGE_SIZE to do checksum verification for compressed extents.

To make it subpage compatible, this patch will:
- Do extra calculation for the csum range
  Since we have multiple sectors inside a page, we need to only hash
  the range we want, not the full page anymore.

- Do sector-by-sector hash inside the page

With this patch and previous conversion on
btrfs_submit_compressed_read(), now we can read subpage compressed
extents properly, and do proper csum verification.
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent be6a1361
@@ -141,6 +141,7 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 	const u32 csum_size = fs_info->csum_size;
+	const u32 sectorsize = fs_info->sectorsize;
 	struct page *page;
 	unsigned long i;
 	char *kaddr;
@@ -154,22 +155,34 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 	shash->tfm = fs_info->csum_shash;
 
 	for (i = 0; i < cb->nr_pages; i++) {
+		u32 pg_offset;
+		u32 bytes_left = PAGE_SIZE;
 		page = cb->compressed_pages[i];
-		kaddr = kmap_atomic(page);
-		crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
-		kunmap_atomic(kaddr);
 
-		if (memcmp(&csum, cb_sum, csum_size)) {
-			btrfs_print_data_csum_error(inode, disk_start,
-					csum, cb_sum, cb->mirror_num);
-			if (btrfs_io_bio(bio)->device)
-				btrfs_dev_stat_inc_and_print(
-					btrfs_io_bio(bio)->device,
-					BTRFS_DEV_STAT_CORRUPTION_ERRS);
-			return -EIO;
+		/* Determine the remaining bytes inside the page first */
+		if (i == cb->nr_pages - 1)
+			bytes_left = cb->compressed_len - i * PAGE_SIZE;
+
+		/* Hash through the page sector by sector */
+		for (pg_offset = 0; pg_offset < bytes_left;
+		     pg_offset += sectorsize) {
+			kaddr = kmap_atomic(page);
+			crypto_shash_digest(shash, kaddr + pg_offset,
+					    sectorsize, csum);
+			kunmap_atomic(kaddr);
+
+			if (memcmp(&csum, cb_sum, csum_size) != 0) {
+				btrfs_print_data_csum_error(inode, disk_start,
+						csum, cb_sum, cb->mirror_num);
+				if (btrfs_io_bio(bio)->device)
+					btrfs_dev_stat_inc_and_print(
+						btrfs_io_bio(bio)->device,
+						BTRFS_DEV_STAT_CORRUPTION_ERRS);
+				return -EIO;
+			}
+			cb_sum += csum_size;
+			disk_start += sectorsize;
 		}
-		cb_sum += csum_size;
 	}
 	return 0;
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment