Commit 9b4e30bd authored by Christoph Hellwig, committed by Andrew Morton

mm: use an on-stack bio for synchronous swapin

Optimize the synchronous swap in case by using an on-stack bio instead of
allocating one using bio_alloc.

Link: https://lkml.kernel.org/r/20230125133436.447864-5-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Keith Busch <kbusch@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 14bd75f5
...@@ -51,10 +51,9 @@ static void end_swap_bio_write(struct bio *bio) ...@@ -51,10 +51,9 @@ static void end_swap_bio_write(struct bio *bio)
bio_put(bio); bio_put(bio);
} }
static void end_swap_bio_read(struct bio *bio) static void __end_swap_bio_read(struct bio *bio)
{ {
struct page *page = bio_first_page_all(bio); struct page *page = bio_first_page_all(bio);
struct task_struct *waiter = bio->bi_private;
if (bio->bi_status) { if (bio->bi_status) {
SetPageError(page); SetPageError(page);
...@@ -62,18 +61,16 @@ static void end_swap_bio_read(struct bio *bio) ...@@ -62,18 +61,16 @@ static void end_swap_bio_read(struct bio *bio)
pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n", pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
goto out; } else {
SetPageUptodate(page);
} }
SetPageUptodate(page);
out:
unlock_page(page); unlock_page(page);
WRITE_ONCE(bio->bi_private, NULL); }
static void end_swap_bio_read(struct bio *bio)
{
__end_swap_bio_read(bio);
bio_put(bio); bio_put(bio);
if (waiter) {
blk_wake_io_task(waiter);
put_task_struct(waiter);
}
} }
int generic_swapfile_activate(struct swap_info_struct *sis, int generic_swapfile_activate(struct swap_info_struct *sis,
...@@ -444,10 +441,11 @@ static void swap_readpage_fs(struct page *page, ...@@ -444,10 +441,11 @@ static void swap_readpage_fs(struct page *page,
*plug = sio; *plug = sio;
} }
static void swap_readpage_bdev(struct page *page, bool synchronous, static void swap_readpage_bdev_sync(struct page *page,
struct swap_info_struct *sis) struct swap_info_struct *sis)
{ {
struct bio *bio; struct bio_vec bv;
struct bio bio;
if ((sis->flags & SWP_SYNCHRONOUS_IO) && if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
!bdev_read_page(sis->bdev, swap_page_sector(page), page)) { !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
...@@ -455,30 +453,37 @@ static void swap_readpage_bdev(struct page *page, bool synchronous, ...@@ -455,30 +453,37 @@ static void swap_readpage_bdev(struct page *page, bool synchronous,
return; return;
} }
bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL); bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
bio->bi_iter.bi_sector = swap_page_sector(page); bio.bi_iter.bi_sector = swap_page_sector(page);
bio->bi_end_io = end_swap_bio_read; bio_add_page(&bio, page, thp_size(page), 0);
bio_add_page(bio, page, thp_size(page), 0);
/* /*
* Keep this task valid during swap readpage because the oom killer may * Keep this task valid during swap readpage because the oom killer may
* attempt to access it in the page fault retry time check. * attempt to access it in the page fault retry time check.
*/ */
if (synchronous) { get_task_struct(current);
get_task_struct(current);
bio->bi_private = current;
}
count_vm_event(PSWPIN); count_vm_event(PSWPIN);
bio_get(bio); submit_bio_wait(&bio);
submit_bio(bio); __end_swap_bio_read(&bio);
while (synchronous) { put_task_struct(current);
set_current_state(TASK_UNINTERRUPTIBLE); }
if (!READ_ONCE(bio->bi_private))
break; static void swap_readpage_bdev_async(struct page *page,
struct swap_info_struct *sis)
{
struct bio *bio;
blk_io_schedule(); if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
!bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
count_vm_event(PSWPIN);
return;
} }
__set_current_state(TASK_RUNNING);
bio_put(bio); bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
bio->bi_iter.bi_sector = swap_page_sector(page);
bio->bi_end_io = end_swap_bio_read;
bio_add_page(bio, page, thp_size(page), 0);
count_vm_event(PSWPIN);
submit_bio(bio);
} }
void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug) void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
...@@ -508,8 +513,10 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug) ...@@ -508,8 +513,10 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
unlock_page(page); unlock_page(page);
} else if (data_race(sis->flags & SWP_FS_OPS)) { } else if (data_race(sis->flags & SWP_FS_OPS)) {
swap_readpage_fs(page, plug); swap_readpage_fs(page, plug);
} else if (synchronous) {
swap_readpage_bdev_sync(page, sis);
} else { } else {
swap_readpage_bdev(page, synchronous, sis); swap_readpage_bdev_async(page, sis);
} }
if (workingset) { if (workingset) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment