// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

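/*
 * Pages whose writeback must complete before the next checkpoint can
 * finish: meta/node pages, directory data, data of atomic or quota
 * regular files, and pages marked as cold data.
 */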
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			is_cold_data(page))
		return true;
	return false;
}

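/* Pick the in-flight read counter (meta/node/data) that tracks this page. */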
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

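/*
 * Finish a read bio: mark each page uptodate on success (or drop the
 * uptodate/error bits for a later re-read on failure), decrement the
 * in-flight read counter, unlock the page, and free any post-read context.
 */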
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	int i;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, i, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

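/*
 * Read-bio completion handler: after optional fault injection, either run
 * the post-read steps (e.g. decryption) or finish the pages directly.
 */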
static void f2fs_read_end_io(struct bio *bio)
{
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
						FAULT_READ_IO)) {
		f2fs_show_injection_info(FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}

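/*
 * Write-bio completion handler: release dummy alignment pages, propagate
 * I/O errors (stopping checkpoint for F2FS_WB_CP_DATA pages), drop the
 * writeback counters, and wake up checkpoint waiters when appropriate.
 */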
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, i, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Return the block device where @blk_addr resides; when @bio is given,
 * redirect the bio to that device as well.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}

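/*
 * Hand a bio to the block layer; in LFS mode, DATA/NODE write bios are
 * first padded with dummy pages so their size is F2FS_IO_SIZE aligned.
 */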
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (test_opt(sbi, LFS) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			zero_user_segment(page, 0, PAGE_SIZE);
			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

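/*
 * Check whether the pending merged bio already holds a page belonging to
 * @inode, equal to @page, or whose node ino matches @ino.
 */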
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;
	struct bvec_iter_all iter_all;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i, iter_all) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force)	{
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR :
			(__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)))
		return -EFAULT;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);

	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page): WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_block_addr(fio, fio->old_blkaddr);
	verify_block_addr(fio, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
					unsigned nr_pages, unsigned op_flag)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
		return ERR_PTR(-EFAULT);

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (f2fs_encrypted_file(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

	return bio;
}

/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

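/* Record dn->data_blkaddr in the raw node page at dn->ofs_in_node. */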
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

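/*
 * Allocate a new data block out-of-place for @dn: charge the block count,
 * pick a segment of @seg_type, and store the new address in the dnode.
 */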
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
	f2fs_set_data_blkaddr(dn);

	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
	return 0;
}

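/*
 * Preallocate blocks for a buffered or direct write described by @iocb
 * and @from, converting inline data beforehand when needed.
 */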
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O */
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (direct_io && allow_outplace_dio(inode, iocb, from))
		return 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;

		/* for hardware encryption, but to avoid potential issue in future */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
		err = -EFAULT;
		goto sync_out;
	}

	if (is_valid_data_blkaddr(sbi, blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (!err) {
				blkaddr = dn.data_blkaddr;
				set_inode_flag(inode, FI_APPEND_WRITE);
			}
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	/* for hardware encryption, but to avoid potential issue in future */
	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type, bool may_write)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;
	map.m_may_create = may_write;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE, create);
}

static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				true);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				false);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE, create);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (IS_ENCRYPTED(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

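/*
 * Read a single page: map it, zero it beyond EOF, or add it to *bio_ret,
 * submitting and reallocating the bio whenever the next block is not
 * contiguous with the previous one.
 */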
static int f2fs_read_single_page(struct inode *inode, struct page *page,
					unsigned nr_pages,
					struct f2fs_map_blocks *map,
					struct bio **bio_ret,
					sector_t *last_block_in_bio,
					bool is_readahead)
{
	struct bio *bio = *bio_ret;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	int ret = 0;

	block_in_file = (sector_t)page->index;
	last_block = block_in_file + nr_pages;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
							blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;

	/* just zeroing out page which is beyond EOF */
	if (block_in_file >= last_block)
		goto zero_out;
	/*
	 * Map blocks using the previous result first.
	 */
	if ((map->m_flags & F2FS_MAP_MAPPED) &&
			block_in_file > map->m_lblk &&
			block_in_file < (map->m_lblk + map->m_len))
		goto got_it;

	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
	map->m_lblk = block_in_file;
	map->m_len = last_block - block_in_file;

	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (ret)
		goto out;
got_it:
	if ((map->m_flags & F2FS_MAP_MAPPED)) {
		block_nr = map->m_pblk + block_in_file - map->m_lblk;
		SetPageMappedToDisk(page);

		if (!PageUptodate(page) && !cleancache_get_page(page)) {
			SetPageUptodate(page);
			goto confused;
		}

		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
							DATA_GENERIC)) {
			ret = -EFAULT;
			goto out;
		}
	} else {
zero_out:
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	/*
	 * This page will go to BIO.  Do we need to send this
	 * BIO off first?
	 */
	if (bio && (*last_block_in_bio != block_nr - 1 ||
		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (bio == NULL) {
		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
				is_readahead ? REQ_RAHEAD : 0);
		if (IS_ERR(bio)) {
			ret = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
	}

	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
	f2fs_wait_on_block_writeback(inode, block_nr);

	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	ClearPageError(page);
	*last_block_in_bio = block_nr;
	goto out;
confused:
	if (bio) {
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	unlock_page(page);
out:
	*bio_ret = bio;
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 *
 * Note that the aops->readpages() function is ONLY used for read-ahead. If
 * this function ever deviates from doing just read-ahead, it should either
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	struct f2fs_map_blocks map;
	int ret = 0;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
					&last_block_in_bio, is_readahead);
		if (ret) {
			SetPageError(page);
			zero_user_segment(page, 0, PAGE_SIZE);
			unlock_page(page);
		}
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return pages ? 0 : ret;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}

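/*
 * Encrypt the page into fio->encrypted_page before writing it out,
 * retrying with __GFP_NOFAIL after flushing writes on transient ENOMEM.
 */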
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}

static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!IS_ENCRYPTED(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

	return false;
}

bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
1787 1788
	if (f2fs_is_pinned_file(inode))
		return true;
Chao Yu's avatar
Chao Yu committed
1789 1790 1791 1792 1793 1794 1795 1796

	/* if this is cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

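/*
 * Writes that must go out of place (OPU): LFS mode, directories, quota
 * files, atomic files, cold data, atomic-written pages, and blocks that
 * are still checkpointed while checkpointing is disabled.
 */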
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (IS_NOQUOTA(inode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
	}
	return false;
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (f2fs_should_update_outplace(inode, fio))
		return false;

	return f2fs_should_update_inplace(inode, fio);
}

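/*
 * Write one data page either in place (IPU) or out of place (OPU).
 * The IPU fast path takes the old block address from the extent cache;
 * otherwise the dnode is looked up and the LFS path allocates a new
 * block via f2fs_outplace_write_data().
 */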
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC))
			return -EFAULT;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* avoid deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_cold_data(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC)) {
		err = -EFAULT;
		goto out_writepage;
	}
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (f2fs_encrypted_file(inode))
				fscrypt_pullback_bio_page(&fio->encrypted_page,
									true);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

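/*
 * Write back a single data page. Returns 0 on success, a negative errno,
 * or AOP_WRITEPAGE_ACTIVATE when the page was redirtied; *submitted
 * reports whether a bio was actually issued for the page.
 */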
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page, which holds the journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_cold_data(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
					!F2FS_I(inode)->cp_task)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from that of warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);

	return ret;
}

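/*
 * Writeback of regular, non-quota files is serialized under
 * sbi->writepages for background (WB_SYNC_NONE) writeback, or when the
 * inode has enough dirty pages to benefit from sequential allocation.
 */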
static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}

static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee that
	 * mapping->host still reflects the pending bios, so remove the
	 * dirty inode explicitly.
	 */
	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		if (!IS_NOQUOTA(inode))
			f2fs_truncate_blocks(inode, i_size, true);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}

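/*
 * Resolve the block address for the page being written: the fast path
 * when all blocks are preallocated, inline-data handling (read back or
 * convert), an extent-cache lookup, and finally a dnode lookup with
 * block allocation for the hole case.
 */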
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, flag, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, flag, false);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	err = f2fs_is_checkpoint_ready(sbi);
	if (err)
		goto fail;

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait on it below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to
	 * be PAGE_SIZE as well. Otherwise, we treat it as zero copied and
	 * let generic_perform_write() try to copy the data again with
	 * copied = 0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

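/*
 * Check DIO alignment: returns 0 when the request is aligned to the
 * filesystem block size, 1 when it is only aligned to the device's
 * logical block size (the caller then falls back to buffered IO), and
 * -EINVAL when it is not aligned at all.
 */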
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}

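/*
 * Private DIO bookkeeping: wrap each bio's completion so in-flight DIO
 * pages can be counted (F2FS_DIO_READ/F2FS_DIO_WRITE), restoring the
 * original end_io and private data once the bio completes.
 */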
static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kvfree(dio);

	bio_endio(bio);
}

static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

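/*
 * Direct IO entry point: falls back to buffered IO (by returning 0) when
 * alignment or inode state requires it, takes i_gc_rwsem against
 * concurrent GC, and maps blocks in place or out of place depending on
 * allow_outplace_dio().
 */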
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			DIO_LOCKING | DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			if (!do_opu)
				set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_cold_data(page);

	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return f2fs_drop_inmem_page(inode, page);

	f2fs_clear_page_private(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	clear_cold_data(page);
	f2fs_clear_page_private(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has been registered previously, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page)) {
		f2fs_set_page_private(newpage, page_private(page));
		f2fs_clear_page_private(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};
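/*
 * A minimal sketch of how these ops are expected to be wired up when a
 * regular-file inode is set up (see fs/f2fs/inode.c):
 *
 *	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 */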

void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

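/*
 * Set up the slab cache and mempool that back bio_post_read_ctx
 * allocations; NUM_PREALLOC_POST_READ_CTXS contexts are preallocated so
 * read bios can still get a context under memory pressure.
 */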
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}