Commit fd45af53 authored by Christoph Hellwig, committed by Andrew Morton

zram: pass a page to read_from_bdev

read_from_bdev always reads a whole page, so pass a page to it instead of
the bvec and remove the now pointless zram_bvec_read_from_bdev wrapper.

Link: https://lkml.kernel.org/r/20230411171459.567614-15-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a0b81ae7
@@ -588,7 +588,7 @@ static void zram_page_end_io(struct bio *bio)
 /*
  * Returns 1 if the submission is successful.
  */
-static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev_async(struct zram *zram, struct page *page,
 		unsigned long entry, struct bio *parent)
 {
 	struct bio *bio;
@@ -599,7 +599,7 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
 		return -ENOMEM;
 
 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
 		bio_put(bio);
 		return -EIO;
 	}
@@ -795,7 +795,7 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
-	struct bio_vec bvec;
+	struct page *page;
 };
 
 static void zram_sync_read(struct work_struct *work)
@@ -805,7 +805,7 @@ static void zram_sync_read(struct work_struct *work)
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &zw->bvec, entry, bio);
+	read_from_bdev_async(zram, zw->page, entry, bio);
 }
 
 /*
@@ -813,12 +813,12 @@ static void zram_sync_read(struct work_struct *work)
  * chained IO with parent IO in same context, it's a deadlock. To avoid that,
  * use a worker thread context.
  */
-static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev_sync(struct zram *zram, struct page *page,
 		unsigned long entry, struct bio *bio)
 {
 	struct zram_work work;
 
-	work.bvec = *bvec;
+	work.page = page;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
@@ -831,20 +831,20 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 	return 1;
 }
 
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev(struct zram *zram, struct page *page,
 		unsigned long entry, struct bio *parent, bool sync)
 {
 	atomic64_inc(&zram->stats.bd_reads);
 	if (sync) {
 		if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
 			return -EIO;
-		return read_from_bdev_sync(zram, bvec, entry, parent);
+		return read_from_bdev_sync(zram, page, entry, parent);
 	}
-	return read_from_bdev_async(zram, bvec, entry, parent);
+	return read_from_bdev_async(zram, page, entry, parent);
 }
 #else
 static inline void reset_bdev(struct zram *zram) {};
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev(struct zram *zram, struct page *page,
 		unsigned long entry, struct bio *parent, bool sync)
 {
 	return -EIO;
@@ -1328,20 +1328,6 @@ static void zram_free_page(struct zram *zram, size_t index)
 			~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
-/*
- * Reads a page from the writeback devices. Corresponding ZRAM slot
- * should be unlocked.
- */
-static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page,
-		u32 index, struct bio *bio, bool partial_io)
-{
-	struct bio_vec bvec;
-
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio,
-			partial_io);
-}
-
 /*
  * Reads (decompresses if needed) a page from zspool (zsmalloc).
  * Corresponding ZRAM slot should be locked.
@@ -1402,11 +1388,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
 		ret = zram_read_from_zspool(zram, page, index);
 		zram_slot_unlock(zram, index);
 	} else {
-		/* Slot should be unlocked before the function call */
+		/*
+		 * The slot should be unlocked before reading from the backing
+		 * device.
+		 */
 		zram_slot_unlock(zram, index);
-		ret = zram_bvec_read_from_bdev(zram, page, index, bio,
-				partial_io);
+		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+				bio, partial_io);
 	}
 
 	/* Should NEVER happen. Return bio error if it does. */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment