Commit 147d4a09 authored by Ritesh Harjani (IBM), committed by Theodore Ts'o

jbd2: Remove page size assumptions

jbd2_alloc() allocates a buffer from slab when the block size is smaller
than PAGE_SIZE, and slab may be using a compound page.  Before commit
8147c4c4, we set b_page to the precise page containing the buffer
and this code worked well.  Now we set b_page to the head page of the
allocation, so we can no longer use offset_in_page().  While we could
do a 1:1 replacement with offset_in_folio(), use the more idiomatic
bh_offset() and the folio APIs to map the buffer.

This isn't enough to support a b_size larger than PAGE_SIZE on HIGHMEM
machines, but this is good enough to fix the actual bug we're seeing.
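
For illustration only (not part of the patch): a minimal userspace model of the offset arithmetic described above, assuming 4 KiB pages and a 1 KiB buffer that slab handed out from the third constituent page of an order-2 (16 KiB) compound page. The addresses and macro names below are made up for the sketch.

    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define COMPOUND_SIZE  (4 * PAGE_SIZE)   /* order-2 compound slab page */

    int main(void)
    {
            /* Hypothetical: a naturally aligned compound page (head) and a
             * buffer whose b_data sits 0x2400 bytes into the allocation. */
            unsigned long head   = 0x100000UL;
            unsigned long b_data = head + 0x2400UL;

            /* offset_in_page()-style mask: PAGE_SIZE - 1 drops the page index
             * within the compound allocation, so head + 0x400 is the wrong spot. */
            unsigned long off_page  = b_data & (PAGE_SIZE - 1);

            /* bh_offset()/offset_in_folio()-style mask: use the size of the whole
             * allocation, which kmap_local_folio() can then resolve to the right
             * constituent page. */
            unsigned long off_folio = b_data & (COMPOUND_SIZE - 1);

            printf("offset_in_page-style  = 0x%lx\n", off_page);   /* 0x400  */
            printf("offset_in_folio-style = 0x%lx\n", off_folio);  /* 0x2400 */
            return 0;
    }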

Fixes: 8147c4c4 ("jbd2: use a folio in jbd2_journal_write_metadata_buffer()")
Reported-by: Zorro Lang <zlang@kernel.org>
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
[converted to be more folio]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent f94cf220
@@ -298,14 +298,12 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
 
 static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 {
-        struct page *page = bh->b_page;
         char *addr;
         __u32 checksum;
 
-        addr = kmap_atomic(page);
-        checksum = crc32_be(crc32_sum,
-                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
-        kunmap_atomic(addr);
+        addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
+        checksum = crc32_be(crc32_sum, addr, bh->b_size);
+        kunmap_local(addr);
 
         return checksum;
 }
@@ -322,7 +320,6 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
                                     struct buffer_head *bh, __u32 sequence)
 {
         journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
-        struct page *page = bh->b_page;
         __u8 *addr;
         __u32 csum32;
         __be32 seq;
@@ -331,11 +328,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
                 return;
 
         seq = cpu_to_be32(sequence);
-        addr = kmap_atomic(page);
+        addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
         csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
-        csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
-                             bh->b_size);
-        kunmap_atomic(addr);
+        csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
+        kunmap_local(addr);
 
         if (jbd2_has_feature_csum3(j))
                 tag3->t_checksum = cpu_to_be32(csum32);
@@ -935,19 +935,15 @@ static void warn_dirty_buffer(struct buffer_head *bh)
 /* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
 static void jbd2_freeze_jh_data(struct journal_head *jh)
 {
-        struct page *page;
-        int offset;
         char *source;
         struct buffer_head *bh = jh2bh(jh);
 
         J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
-        page = bh->b_page;
-        offset = offset_in_page(bh->b_data);
-        source = kmap_atomic(page);
+        source = kmap_local_folio(bh->b_folio, bh_offset(bh));
         /* Fire data frozen trigger just before we copy the data */
-        jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
-        memcpy(jh->b_frozen_data, source + offset, bh->b_size);
-        kunmap_atomic(source);
+        jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers);
+        memcpy(jh->b_frozen_data, source, bh->b_size);
+        kunmap_local(source);
 
         /*
          * Now that the frozen data is saved off, we need to store any matching