Commit ffecfd1a authored by Darrick J. Wong's avatar Darrick J. Wong Committed by Linus Torvalds

block: optionally snapshot page contents to provide stable pages during write

This provides a band-aid to provide stable page writes on jbd without
needing to backport the fixed locking and page writeback bit handling
schemes of jbd2.  The band-aid works by using bounce buffers to snapshot
page contents instead of waiting.

For those wondering about the ext3 bandage -- fixing the jbd locking
(which was done as part of ext4dev years ago) is a lot of surgery, and
setting PG_writeback on data pages when we actually hold the page lock
dropped ext3 performance by nearly an order of magnitude.  If we're
going to migrate iscsi and raid to use stable page writes, the
complaints about high latency will likely return.  We might as well
centralize their page snapshotting thing to one place.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Artem Bityutskiy <dedekind1@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Eric Van Hensbergen <ericvh@gmail.com>
Cc: Ron Minnich <rminnich@sandia.gov>
Cc: Latchesar Ionkov <lucho@ionkov.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 13575ca1
...@@ -412,12 +412,6 @@ config TILE_USB ...@@ -412,12 +412,6 @@ config TILE_USB
Provides USB host adapter support for the built-in EHCI and OHCI Provides USB host adapter support for the built-in EHCI and OHCI
interfaces on TILE-Gx chips. interfaces on TILE-Gx chips.
# USB OHCI needs the bounce pool since tilegx will often have more
# than 4GB of memory, but we don't currently use the IOTLB to present
# a 32-bit address to OHCI. So we need to use a bounce pool instead.
config NEED_BOUNCE_POOL
def_bool USB_OHCI_HCD
source "drivers/pci/hotplug/Kconfig" source "drivers/pci/hotplug/Kconfig"
endmenu endmenu
......
...@@ -1474,6 +1474,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) ...@@ -1474,6 +1474,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
*/ */
blk_queue_bounce(q, &bio); blk_queue_bounce(q, &bio);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_endio(bio, -EIO);
return;
}
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH; where = ELEVATOR_INSERT_FLUSH;
...@@ -1714,9 +1719,6 @@ generic_make_request_checks(struct bio *bio) ...@@ -1714,9 +1719,6 @@ generic_make_request_checks(struct bio *bio)
*/ */
blk_partition_remap(bio); blk_partition_remap(bio);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
goto end_io;
if (bio_check_eod(bio, nr_sectors)) if (bio_check_eod(bio, nr_sectors))
goto end_io; goto end_io;
......
...@@ -2065,6 +2065,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ...@@ -2065,6 +2065,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback"); "writeback");
sb->s_flags |= MS_SNAP_STABLE;
return 0; return 0;
......
...@@ -86,6 +86,9 @@ struct inodes_stat_t { ...@@ -86,6 +86,9 @@ struct inodes_stat_t {
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */ #define MS_I_VERSION (1<<23) /* Update inode I_version field */
#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ #define MS_STRICTATIME (1<<24) /* Always perform atime updates */
/* These sb flags are internal to the kernel */
#define MS_SNAP_STABLE (1<<27) /* Snapshot pages during writeback, if needed */
#define MS_NOSEC (1<<28) #define MS_NOSEC (1<<28)
#define MS_BORN (1<<29) #define MS_BORN (1<<29)
#define MS_ACTIVE (1<<30) #define MS_ACTIVE (1<<30)
......
...@@ -258,6 +258,19 @@ config BOUNCE ...@@ -258,6 +258,19 @@ config BOUNCE
def_bool y def_bool y
depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM) depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
# On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
# have more than 4GB of memory, but we don't currently use the IOTLB to present
# a 32-bit address to OHCI. So we need to use a bounce pool instead.
#
# We also use the bounce pool to provide stable page writes for jbd. jbd
# initiates buffer writeback without locking the page or setting PG_writeback,
# and fixing that behavior (a second time; jbd2 doesn't have this problem) is
# a major rework effort. Instead, use the bounce buffer to snapshot pages
# (until jbd goes away). The only jbd user is ext3.
config NEED_BOUNCE_POOL
bool
default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD)
config NR_QUICK config NR_QUICK
int int
depends on QUICKLIST depends on QUICKLIST
......
...@@ -178,8 +178,45 @@ static void bounce_end_io_read_isa(struct bio *bio, int err) ...@@ -178,8 +178,45 @@ static void bounce_end_io_read_isa(struct bio *bio, int err)
__bounce_end_io_read(bio, isa_page_pool, err); __bounce_end_io_read(bio, isa_page_pool, err);
} }
#ifdef CONFIG_NEED_BOUNCE_POOL
/*
 * must_snapshot_stable_pages - decide whether a bio's pages must be bounced
 * @q:   the request queue the bio is destined for
 * @bio: the bio being submitted
 *
 * Returns nonzero when WRITE bio pages must be copied into bounce buffers
 * to provide stable page content: the backing device requires stable pages
 * and the filesystem that owns the pages has requested snapshot-style
 * stability (MS_SNAP_STABLE, i.e. ext3/jbd, which writes data buffers
 * without locking the page or setting PG_writeback).  Returns 0 otherwise.
 *
 * Fix vs. original: the local 'bdi' was declared and assigned from
 * mapping->backing_dev_info but never read; the unused variable and its
 * dead store are removed.
 */
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	struct page *page;
	struct address_space *mapping;
	struct bio_vec *from;
	int i;

	/* Reads cannot dirty the pages under the device; only writes matter. */
	if (bio_data_dir(bio) != WRITE)
		return 0;

	/* Device tolerates pages changing in flight -- no snapshot needed. */
	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	/*
	 * Based on the first page that has a valid mapping, decide whether or
	 * not we have to employ bounce buffering to guarantee stable pages.
	 */
	bio_for_each_segment(from, bio, i) {
		page = from->bv_page;
		mapping = page_mapping(page);
		if (!mapping)
			continue;
		return mapping->host->i_sb->s_flags & MS_SNAP_STABLE;
	}

	return 0;
}
#else
/*
 * Stub for kernels built without CONFIG_NEED_BOUNCE_POOL: no bounce pool is
 * available, so never request page snapshotting.
 */
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool) mempool_t *pool, int force)
{ {
struct page *page; struct page *page;
struct bio *bio = NULL; struct bio *bio = NULL;
...@@ -192,7 +229,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, ...@@ -192,7 +229,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
/* /*
* is destination page below bounce pfn? * is destination page below bounce pfn?
*/ */
if (page_to_pfn(page) <= queue_bounce_pfn(q)) if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
continue; continue;
/* /*
...@@ -270,6 +307,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, ...@@ -270,6 +307,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{ {
int must_bounce;
mempool_t *pool; mempool_t *pool;
/* /*
...@@ -278,13 +316,15 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) ...@@ -278,13 +316,15 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
if (!bio_has_data(*bio_orig)) if (!bio_has_data(*bio_orig))
return; return;
must_bounce = must_snapshot_stable_pages(q, *bio_orig);
/* /*
* for non-isa bounce case, just check if the bounce pfn is equal * for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case, * to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments * don't waste time iterating over bio segments
*/ */
if (!(q->bounce_gfp & GFP_DMA)) { if (!(q->bounce_gfp & GFP_DMA)) {
if (queue_bounce_pfn(q) >= blk_max_pfn) if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
return; return;
pool = page_pool; pool = page_pool;
} else { } else {
...@@ -295,7 +335,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) ...@@ -295,7 +335,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
/* /*
* slow path * slow path
*/ */
__blk_queue_bounce(q, bio_orig, pool); __blk_queue_bounce(q, bio_orig, pool, must_bounce);
} }
EXPORT_SYMBOL(blk_queue_bounce); EXPORT_SYMBOL(blk_queue_bounce);
...@@ -2306,6 +2306,10 @@ void wait_for_stable_page(struct page *page) ...@@ -2306,6 +2306,10 @@ void wait_for_stable_page(struct page *page)
if (!bdi_cap_stable_pages_required(bdi)) if (!bdi_cap_stable_pages_required(bdi))
return; return;
#ifdef CONFIG_NEED_BOUNCE_POOL
if (mapping->host->i_sb->s_flags & MS_SNAP_STABLE)
return;
#endif /* CONFIG_NEED_BOUNCE_POOL */
wait_on_page_writeback(page); wait_on_page_writeback(page);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment