Commit 4ce4c73f authored by Bart Van Assche's avatar Bart Van Assche Committed by Jens Axboe

md/core: Combine two sync_page_io() arguments

Improve uniformity in the kernel of handling of request operation and
flags by passing these as a single argument.

Cc: Song Liu <song@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-32-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 13a1f650
...@@ -2036,7 +2036,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload) ...@@ -2036,7 +2036,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
rdev->sb_loaded = 0; rdev->sb_loaded = 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) { if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
DMERR("Failed to read superblock of device at position %d", DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk); rdev->raid_disk);
md_error(rdev->mddev, rdev); md_error(rdev->mddev, rdev);
......
...@@ -165,7 +165,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset, ...@@ -165,7 +165,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
if (sync_page_io(rdev, target, if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)), roundup(size, bdev_logical_block_size(rdev->bdev)),
page, REQ_OP_READ, 0, true)) { page, REQ_OP_READ, true)) {
page->index = index; page->index = index;
return 0; return 0;
} }
......
...@@ -993,15 +993,15 @@ int md_super_wait(struct mddev *mddev) ...@@ -993,15 +993,15 @@ int md_super_wait(struct mddev *mddev)
} }
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, bool metadata_op) struct page *page, blk_opf_t opf, bool metadata_op)
{ {
struct bio bio; struct bio bio;
struct bio_vec bvec; struct bio_vec bvec;
if (metadata_op && rdev->meta_bdev) if (metadata_op && rdev->meta_bdev)
bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags); bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
else else
bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags); bio_init(&bio, rdev->bdev, &bvec, 1, opf);
if (metadata_op) if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start; bio.bi_iter.bi_sector = sector + rdev->sb_start;
...@@ -1024,7 +1024,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) ...@@ -1024,7 +1024,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded) if (rdev->sb_loaded)
return 0; return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
goto fail; goto fail;
rdev->sb_loaded = 1; rdev->sb_loaded = 1;
return 0; return 0;
...@@ -1722,7 +1722,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ ...@@ -1722,7 +1722,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL; return -EINVAL;
bb_sector = (long long)offset; bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9, if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, REQ_OP_READ, 0, true)) rdev->bb_page, REQ_OP_READ, true))
return -EIO; return -EIO;
bbp = (__le64 *)page_address(rdev->bb_page); bbp = (__le64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift; rdev->badblocks.shift = sb->bblog_shift;
......
...@@ -738,8 +738,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, ...@@ -738,8 +738,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page); sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev); extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, struct page *page, blk_opf_t opf, bool metadata_op);
bool metadata_op);
extern void md_do_sync(struct md_thread *thread); extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void); extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev); extern void md_allow_write(struct mddev *mddev);
......
...@@ -1988,9 +1988,9 @@ static void end_sync_write(struct bio *bio) ...@@ -1988,9 +1988,9 @@ static void end_sync_write(struct bio *bio)
} }
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
int sectors, struct page *page, int rw) int sectors, struct page *page, int rw)
{ {
if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */ /* success */
return 1; return 1;
if (rw == WRITE) { if (rw == WRITE) {
...@@ -2057,7 +2057,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) ...@@ -2057,7 +2057,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev; rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev, sect, s<<9, if (sync_page_io(rdev, sect, s<<9,
pages[idx], pages[idx],
REQ_OP_READ, 0, false)) { REQ_OP_READ, false)) {
success = 1; success = 1;
break; break;
} }
...@@ -2305,7 +2305,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, ...@@ -2305,7 +2305,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
rcu_read_unlock(); rcu_read_unlock();
if (sync_page_io(rdev, sect, s<<9, if (sync_page_io(rdev, sect, s<<9,
conf->tmppage, REQ_OP_READ, 0, false)) conf->tmppage, REQ_OP_READ, false))
success = 1; success = 1;
rdev_dec_pending(rdev, mddev); rdev_dec_pending(rdev, mddev);
if (success) if (success)
......
...@@ -2512,7 +2512,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) ...@@ -2512,7 +2512,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr, addr,
s << 9, s << 9,
pages[idx], pages[idx],
REQ_OP_READ, 0, false); REQ_OP_READ, false);
if (ok) { if (ok) {
rdev = conf->mirrors[dw].rdev; rdev = conf->mirrors[dw].rdev;
addr = r10_bio->devs[1].addr + sect; addr = r10_bio->devs[1].addr + sect;
...@@ -2520,7 +2520,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) ...@@ -2520,7 +2520,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr, addr,
s << 9, s << 9,
pages[idx], pages[idx],
REQ_OP_WRITE, 0, false); REQ_OP_WRITE, false);
if (!ok) { if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags); set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, if (!test_and_set_bit(WantReplacement,
...@@ -2644,7 +2644,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, ...@@ -2644,7 +2644,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
&& (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1; return -1;
if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */ /* success */
return 1; return 1;
if (rw == WRITE) { if (rw == WRITE) {
...@@ -2726,7 +2726,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 ...@@ -2726,7 +2726,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect, sect,
s<<9, s<<9,
conf->tmppage, conf->tmppage,
REQ_OP_READ, 0, false); REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev); rdev_dec_pending(rdev, mddev);
rcu_read_lock(); rcu_read_lock();
if (success) if (success)
...@@ -5107,7 +5107,7 @@ static int handle_reshape_read_error(struct mddev *mddev, ...@@ -5107,7 +5107,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
addr, addr,
s << 9, s << 9,
pages[idx], pages[idx],
REQ_OP_READ, 0, false); REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev); rdev_dec_pending(rdev, mddev);
rcu_read_lock(); rcu_read_lock();
if (success) if (success)
......
...@@ -1788,7 +1788,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, ...@@ -1788,7 +1788,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
mb = page_address(page); mb = page_address(page);
mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
mb, PAGE_SIZE)); mb, PAGE_SIZE));
if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
REQ_SYNC | REQ_FUA, false)) { REQ_SYNC | REQ_FUA, false)) {
__free_page(page); __free_page(page);
return -EIO; return -EIO;
...@@ -1898,7 +1898,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf, ...@@ -1898,7 +1898,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
rcu_read_unlock(); rcu_read_unlock();
sync_page_io(rdev, sh->sector, PAGE_SIZE, sync_page_io(rdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0, sh->dev[disk_index].page, REQ_OP_WRITE,
false); false);
rdev_dec_pending(rdev, rdev->mddev); rdev_dec_pending(rdev, rdev->mddev);
rcu_read_lock(); rcu_read_lock();
...@@ -1908,7 +1908,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf, ...@@ -1908,7 +1908,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
atomic_inc(&rrdev->nr_pending); atomic_inc(&rrdev->nr_pending);
rcu_read_unlock(); rcu_read_unlock();
sync_page_io(rrdev, sh->sector, PAGE_SIZE, sync_page_io(rrdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0, sh->dev[disk_index].page, REQ_OP_WRITE,
false); false);
rdev_dec_pending(rrdev, rrdev->mddev); rdev_dec_pending(rrdev, rrdev->mddev);
rcu_read_lock(); rcu_read_lock();
...@@ -2394,7 +2394,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, ...@@ -2394,7 +2394,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
PAGE_SIZE)); PAGE_SIZE));
kunmap_atomic(addr); kunmap_atomic(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE, sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, 0, false); dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos, write_pos = r5l_ring_add(log, write_pos,
BLOCK_SECTORS); BLOCK_SECTORS);
offset += sizeof(__le32) + offset += sizeof(__le32) +
...@@ -2406,7 +2406,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, ...@@ -2406,7 +2406,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
mb, PAGE_SIZE)); mb, PAGE_SIZE));
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
sh->log_start = ctx->pos; sh->log_start = ctx->pos;
list_add_tail(&sh->r5c, &log->stripe_in_journal_list); list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
atomic_inc(&log->stripe_in_journal_count); atomic_inc(&log->stripe_in_journal_count);
...@@ -2971,7 +2971,7 @@ static int r5l_load_log(struct r5l_log *log) ...@@ -2971,7 +2971,7 @@ static int r5l_load_log(struct r5l_log *log)
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
ret = -EIO; ret = -EIO;
goto ioerr; goto ioerr;
} }
......
...@@ -897,7 +897,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, ...@@ -897,7 +897,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
__func__, indent, "", rdev->bdev, __func__, indent, "", rdev->bdev,
(unsigned long long)sector); (unsigned long long)sector);
if (!sync_page_io(rdev, sector, block_size, page2, if (!sync_page_io(rdev, sector, block_size, page2,
REQ_OP_READ, 0, false)) { REQ_OP_READ, false)) {
md_error(mddev, rdev); md_error(mddev, rdev);
pr_debug("%s:%*s read failed!\n", __func__, pr_debug("%s:%*s read failed!\n", __func__,
indent, ""); indent, "");
...@@ -919,7 +919,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, ...@@ -919,7 +919,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)(ppl_sector + i)); (unsigned long long)(ppl_sector + i));
if (!sync_page_io(log->rdev, if (!sync_page_io(log->rdev,
ppl_sector - log->rdev->data_offset + i, ppl_sector - log->rdev->data_offset + i,
block_size, page2, REQ_OP_READ, 0, block_size, page2, REQ_OP_READ,
false)) { false)) {
pr_debug("%s:%*s read failed!\n", __func__, pr_debug("%s:%*s read failed!\n", __func__,
indent, ""); indent, "");
...@@ -946,7 +946,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, ...@@ -946,7 +946,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)parity_sector, (unsigned long long)parity_sector,
parity_rdev->bdev); parity_rdev->bdev);
if (!sync_page_io(parity_rdev, parity_sector, block_size, if (!sync_page_io(parity_rdev, parity_sector, block_size,
page1, REQ_OP_WRITE, 0, false)) { page1, REQ_OP_WRITE, false)) {
pr_debug("%s:%*s parity write error!\n", __func__, pr_debug("%s:%*s parity write error!\n", __func__,
indent, ""); indent, "");
md_error(mddev, parity_rdev); md_error(mddev, parity_rdev);
...@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr, ...@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size; int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
if (!sync_page_io(rdev, sector - rdev->data_offset, if (!sync_page_io(rdev, sector - rdev->data_offset,
s, page, REQ_OP_READ, 0, false)) { s, page, REQ_OP_READ, false)) {
md_error(mddev, rdev); md_error(mddev, rdev);
ret = -EIO; ret = -EIO;
goto out; goto out;
...@@ -1062,7 +1062,7 @@ static int ppl_write_empty_header(struct ppl_log *log) ...@@ -1062,7 +1062,7 @@ static int ppl_write_empty_header(struct ppl_log *log)
if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
REQ_FUA, 0, false)) { REQ_FUA, false)) {
md_error(rdev->mddev, rdev); md_error(rdev->mddev, rdev);
ret = -EIO; ret = -EIO;
} }
...@@ -1100,7 +1100,7 @@ static int ppl_load_distributed(struct ppl_log *log) ...@@ -1100,7 +1100,7 @@ static int ppl_load_distributed(struct ppl_log *log)
if (!sync_page_io(rdev, if (!sync_page_io(rdev,
rdev->ppl.sector - rdev->data_offset + rdev->ppl.sector - rdev->data_offset +
pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ, pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
0, false)) { false)) {
md_error(mddev, rdev); md_error(mddev, rdev);
ret = -EIO; ret = -EIO;
/* if not able to read - don't recover any PPL */ /* if not able to read - don't recover any PPL */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment