// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

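/*
 * Return true if writeback of this page is guaranteed by checkpoint:
 * meta/node inode pages, directory pages, atomic-file data and cold data.
 */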
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			is_inode_flag_set(inode, FI_ATOMIC_FILE)) ||
			is_cold_data(page))
		return true;
	return false;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

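/*
 * Completion handler for read bios: inject a read error when requested,
 * then either kick off post-read processing (e.g. decryption) or finish
 * the pages directly via __read_end_io().
 */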
static void f2fs_read_end_io(struct bio *bio)
{
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
						FAULT_READ_IO)) {
		f2fs_show_injection_info(FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}

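/*
 * Completion handler for write bios: release dummy pages, propagate write
 * errors (stopping checkpoint for CP-guaranteed data), drop writeback
 * counters and wake up checkpoint waiters.
 */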
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Find the target block device for @blk_addr.  If @bio is given, also set
 * its bdev and starting sector accordingly.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}

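/*
 * Submit a bio to the block layer.  Data/node write bios are padded with
 * dummy pages up to the F2FS_IO_SIZE boundary when needed.
 */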
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (test_opt(sbi, LFS) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain, so we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, page, ino);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, page, ino, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located at the given block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFAULT;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}

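/*
 * Add one page to the in-flight write bio of its page type and temperature,
 * merging with the current bio when the block address and op flags allow it
 * and submitting the previous bio otherwise.
 */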
void f2fs_submit_page_write(struct f2fs_io_info *fio)
473
{
474
	struct f2fs_sb_info *sbi = fio->sbi;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
475
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
476
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
477
	struct page *bio_page;
478

479
	f2fs_bug_on(sbi, is_read_io(fio->op));
480

481 482 483 484 485 486
	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
487
			goto out;
488 489 490 491 492 493
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}
494

495
	if (__is_valid_data_blkaddr(fio->old_blkaddr))
496 497
		verify_block_addr(fio, fio->old_blkaddr);
	verify_block_addr(fio, fio->new_blkaddr);
498

499 500
	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

501 502
	/* set submitted = true as a return value */
	fio->submitted = true;
503

504
	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
505

506
	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
Jaegeuk Kim's avatar
Jaegeuk Kim committed
507 508
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
Jaegeuk Kim's avatar
Jaegeuk Kim committed
509
		__submit_merged_bio(io);
510 511
alloc_new:
	if (io->bio == NULL) {
512 513
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
514
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
515 516
			fio->retry = true;
			goto skip;
517
		}
518
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
519 520
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
521
		io->fio = *fio;
522 523
	}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
524
	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
525
		__submit_merged_bio(io);
526 527 528
		goto alloc_new;
	}

529 530 531
	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

532
	io->last_block_in_bio = fio->new_blkaddr;
533
	f2fs_trace_ios(fio, 0);
534 535

	trace_f2fs_submit_page_write(fio->page, fio);
536
skip:
537 538
	if (fio->in_list)
		goto next;
539
out:
Daniel Rosenberg's avatar
Daniel Rosenberg committed
540 541
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_is_checkpoint_ready(sbi))
Jaegeuk Kim's avatar
Jaegeuk Kim committed
542
		__submit_merged_bio(io);
543
	up_write(&io->io_rwsem);
544 545
}

546
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
547
					unsigned nr_pages, unsigned op_flag)
548 549 550
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
551 552
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;
553

554 555 556
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
		return ERR_PTR(-EFAULT);

557
	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
558
	if (!bio)
559 560 561
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
562
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
563

564 565 566 567 568 569 570 571 572 573 574 575 576
	if (f2fs_encrypted_file(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

577 578 579 580 581 582 583
	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
584
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
585 586 587 588

	if (IS_ERR(bio))
		return PTR_ERR(bio);

589 590 591
	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

592 593 594 595
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
596
	ClearPageError(page);
597 598 599 600
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

601 602 603 604
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
605 606 607 608
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);
609 610 611

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
612
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
613 614
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
615
/*
616 617 618 619 620
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
Chao Yu's avatar
Chao Yu committed
621
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
622
{
623 624 625
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
626
		dn->node_changed = true;
627 628
}

629 630 631
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
Chao Yu's avatar
Chao Yu committed
632
	f2fs_set_data_blkaddr(dn);
633 634 635
	f2fs_update_extent_cache(dn);
}

636
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
Chao Yu's avatar
Chao Yu committed
637
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
638
{
639
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
640
	int err;
641

642 643 644
	if (!count)
		return 0;

645
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
646
		return -EPERM;
647 648
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;
649

650 651 652 653 654 655
	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
656 657
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
658 659 660 661 662 663 664 665 666
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
667 668 669
	return 0;
}

670
/* Should keep dn->ofs_in_node unchanged */
Chao Yu's avatar
Chao Yu committed
671
int f2fs_reserve_new_block(struct dnode_of_data *dn)
672 673 674 675
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

Chao Yu's avatar
Chao Yu committed
676
	ret = f2fs_reserve_new_blocks(dn, 1);
677 678 679 680
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

681 682 683 684 685
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

Chao Yu's avatar
Chao Yu committed
686
	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
687 688
	if (err)
		return err;
689

690
	if (dn->data_blkaddr == NULL_ADDR)
Chao Yu's avatar
Chao Yu committed
691
		err = f2fs_reserve_new_block(dn);
692
	if (err || need_put)
693 694 695 696
		f2fs_put_dnode(dn);
	return err;
}

697
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
698
{
699
	struct extent_info ei  = {0,0,0};
700
	struct inode *inode = dn->inode;
701

702 703 704
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
705
	}
706

707
	return f2fs_reserve_block(dn, index);
708 709
}

Chao Yu's avatar
Chao Yu committed
710
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
Mike Christie's avatar
Mike Christie committed
711
						int op_flags, bool for_write)
712 713 714 715
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
716
	struct extent_info ei = {0,0,0};
717
	int err;
718

719
	page = f2fs_grab_cache_page(mapping, index, for_write);
720 721 722
	if (!page)
		return ERR_PTR(-ENOMEM);

Chao Yu's avatar
Chao Yu committed
723 724 725 726 727
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

728
	set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu's avatar
Chao Yu committed
729
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
730 731
	if (err)
		goto put_err;
732 733
	f2fs_put_dnode(&dn);

734
	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
735 736
		err = -ENOENT;
		goto put_err;
737
	}
Chao Yu's avatar
Chao Yu committed
738
got_it:
739 740
	if (PageUptodate(page)) {
		unlock_page(page);
741
		return page;
742
	}
743

Jaegeuk Kim's avatar
Jaegeuk Kim committed
744 745 746 747
	/*
	 * A new dentry page is allocated but cannot be written, because its
	 * new inode page could not be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
752
		zero_user_segment(page, 0, PAGE_SIZE);
753 754
		if (!PageUptodate(page))
			SetPageUptodate(page);
755
		unlock_page(page);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
756 757
		return page;
	}
758

759
	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
760
	if (err)
761
		goto put_err;
762
	return page;
763 764 765 766

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
767 768
}

Chao Yu's avatar
Chao Yu committed
769
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
770 771 772 773 774 775 776 777 778
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

Chao Yu's avatar
Chao Yu committed
779
	page = f2fs_get_read_data_page(inode, index, 0, false);
780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error so that the callers
 * (functions in dir.c and GC) can tell whether this page exists or not.
 */
Chao Yu's avatar
Chao Yu committed
799
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
800
							bool for_write)
801 802 803 804
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
Chao Yu's avatar
Chao Yu committed
805
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
806 807
	if (IS_ERR(page))
		return page;
808

809
	/* wait for read completion */
810
	lock_page(page);
811
	if (unlikely(page->mapping != mapping)) {
812 813
		f2fs_put_page(page, 1);
		goto repeat;
814
	}
815 816 817 818
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
819 820 821
	return page;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
822
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
Chao Yu's avatar
Chao Yu committed
831
struct page *f2fs_get_new_data_page(struct inode *inode,
832
		struct page *ipage, pgoff_t index, bool new_i_size)
833 834 835 836 837
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
838

839
	page = f2fs_grab_cache_page(mapping, index, true);
840 841 842 843 844 845
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
846
		return ERR_PTR(-ENOMEM);
847
	}
848

849
	set_new_dnode(&dn, inode, ipage, NULL, 0);
850
	err = f2fs_reserve_block(&dn, index);
851 852
	if (err) {
		f2fs_put_page(page, 1);
853
		return ERR_PTR(err);
854
	}
855 856
	if (!ipage)
		f2fs_put_dnode(&dn);
857 858

	if (PageUptodate(page))
859
		goto got_it;
860 861

	if (dn.data_blkaddr == NEW_ADDR) {
862
		zero_user_segment(page, 0, PAGE_SIZE);
863 864
		if (!PageUptodate(page))
			SetPageUptodate(page);
865
	} else {
866
		f2fs_put_page(page, 1);
867

868 869
		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
Chao Yu's avatar
Chao Yu committed
870
		page = f2fs_get_lock_data_page(inode, index, true);
871
		if (IS_ERR(page))
872
			return page;
873
	}
874
got_it:
875
	if (new_i_size && i_size_read(inode) <
876
				((loff_t)(index + 1) << PAGE_SHIFT))
877
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
878 879 880
	return page;
}

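/* Allocate a new data block for dn->ofs_in_node and record its address. */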
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
882
{
883
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
884 885
	struct f2fs_summary sum;
	struct node_info ni;
886
	block_t old_blkaddr;
887
	blkcnt_t count = 1;
888
	int err;
889

890
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
891
		return -EPERM;
892

893 894 895 896
	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

897 898
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
899
	if (dn->data_blkaddr != NULL_ADDR)
900 901
		goto alloc;

902 903
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;
904

905
alloc:
906
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
907 908
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
909
					&sum, seg_type, NULL, false);
910 911 912
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
Chao Yu's avatar
Chao Yu committed
913
	f2fs_set_data_blkaddr(dn);
914

915 916 917 918
	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
919 920 921
	return 0;
}

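/*
 * Preallocate blocks for a buffered or direct write described by @iocb and
 * @from, converting inline data first when necessary.
 */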
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
923
{
924
	struct inode *inode = file_inode(iocb->ki_filp);
925
	struct f2fs_map_blocks map;
926
	int flag;
927
	int err = 0;
928
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
929

930
	/* convert inline data for Direct I/O */
931
	if (direct_io) {
932 933 934 935 936
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

937 938 939
	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

940
	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
941 942 943 944 945 946
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

947
	map.m_next_pgofs = NULL;
948
	map.m_next_extent = NULL;
949
	map.m_seg_type = NO_CHECK_TYPE;
950

951
	if (direct_io) {
Chao Yu's avatar
Chao Yu committed
952
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
953
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
954 955 956
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
957
	}
958
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
959 960 961
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
962
	}
963
	if (f2fs_has_inline_data(inode))
964
		return err;
965 966 967 968 969 970 971 972 973

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
974
	}
975
	return err;
976 977
}

978
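/*
 * Take or release the lock protecting block mapping: node_change for
 * AIO preallocation, f2fs_lock_op()/f2fs_unlock_op() otherwise.
 */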
void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
979 980 981 982 983 984 985 986 987 988 989 990 991 992
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
993
/*
Jaegeuk Kim's avatar
Jaegeuk Kim committed
994 995
 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
Chao Yu's avatar
Chao Yu committed
996 997 998 999 1000
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
1001
 */
Chao Yu's avatar
Chao Yu committed
1002
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1003
						int create, int flag)
1004
{
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1005
	unsigned int maxblocks = map->m_len;
1006
	struct dnode_of_data dn;
1007
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1008
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
1009
	pgoff_t pgofs, end_offset, end;
1010
	int err = 0, ofs = 1;
1011 1012
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
1013
	struct extent_info ei = {0,0,0};
1014
	block_t blkaddr;
1015
	unsigned int start_pgofs;
1016

1017 1018 1019
	if (!maxblocks)
		return 0;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1020 1021 1022 1023 1024
	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
1025
	end = pgofs + maxblocks;
1026

1027
	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1028 1029 1030
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
1031 1032
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
1033
		goto out;
1034
	}
1035

Chao Yu's avatar
Chao Yu committed
1036
next_dnode:
1037
	if (create)
1038
		__do_map_lock(sbi, flag, true);
1039 1040 1041

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu's avatar
Chao Yu committed
1042
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1043
	if (err) {
1044 1045
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
1046
		if (err == -ENOENT) {
1047
			err = 0;
1048 1049
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
Chao Yu's avatar
Chao Yu committed
1050
					f2fs_get_next_page_offset(&dn, pgofs);
1051 1052
			if (map->m_next_extent)
				*map->m_next_extent =
Chao Yu's avatar
Chao Yu committed
1053
					f2fs_get_next_page_offset(&dn, pgofs);
1054
		}
1055
		goto unlock_out;
1056
	}
Chao Yu's avatar
Chao Yu committed
1057

1058
	start_pgofs = pgofs;
1059
	prealloc = 0;
1060
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1061
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
Chao Yu's avatar
Chao Yu committed
1062 1063

next_block:
1064
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
Chao Yu's avatar
Chao Yu committed
1065

1066 1067 1068 1069 1070 1071
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
		err = -EFAULT;
		goto sync_out;
	}

1072 1073 1074 1075 1076 1077 1078 1079 1080
	if (is_valid_data_blkaddr(sbi, blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (test_opt(sbi, LFS) && create &&
				flag == F2FS_GET_BLOCK_DIO) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (!err)
				set_inode_flag(inode, FI_APPEND_WRITE);
		}
	} else {
Chao Yu's avatar
Chao Yu committed
1081
		if (create) {
1082 1083
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
Chao Yu's avatar
Chao Yu committed
1084
				goto sync_out;
1085
			}
1086
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1087 1088 1089 1090
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
1091
			} else {
1092 1093
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
1094 1095
				err = __allocate_data_block(&dn,
							map->m_seg_type);
1096
				if (!err)
1097
					set_inode_flag(inode, FI_APPEND_WRITE);
1098
			}
Chao Yu's avatar
Chao Yu committed
1099
			if (err)
Chao Yu's avatar
Chao Yu committed
1100
				goto sync_out;
1101
			map->m_flags |= F2FS_MAP_NEW;
Chao Yu's avatar
Chao Yu committed
1102
			blkaddr = dn.data_blkaddr;
Chao Yu's avatar
Chao Yu committed
1103
		} else {
1104 1105 1106 1107
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
1108 1109
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
1110 1111 1112 1113
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
Chao Yu's avatar
Chao Yu committed
1114
				goto sync_out;
1115
			}
1116 1117 1118 1119
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
Chao Yu's avatar
Chao Yu committed
1120
				goto sync_out;
1121
			}
1122 1123
		}
	}
1124

1125 1126 1127
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

Chao Yu's avatar
Chao Yu committed
1128 1129 1130 1131 1132 1133 1134 1135 1136 1137
	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
1138
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1139
			flag == F2FS_GET_BLOCK_PRE_DIO) {
Chao Yu's avatar
Chao Yu committed
1140 1141 1142 1143 1144
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}
1145

1146
skip:
1147 1148 1149
	dn.ofs_in_node++;
	pgofs++;

1150 1151 1152
	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {
1153

1154
		dn.ofs_in_node = ofs_in_node;
Chao Yu's avatar
Chao Yu committed
1155
		err = f2fs_reserve_new_blocks(&dn, prealloc);
1156 1157
		if (err)
			goto sync_out;
1158

1159 1160 1161 1162
		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
1163
		}
1164 1165 1166 1167 1168 1169 1170 1171
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

1182 1183 1184
	f2fs_put_dnode(&dn);

	if (create) {
1185
		__do_map_lock(sbi, flag, false);
1186
		f2fs_balance_fs(sbi, dn.node_changed);
1187
	}
1188
	goto next_dnode;
1189

1190
sync_out:
1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
1202
	f2fs_put_dnode(&dn);
1203
unlock_out:
1204
	if (create) {
1205
		__do_map_lock(sbi, flag, false);
1206
		f2fs_balance_fs(sbi, dn.node_changed);
1207
	}
1208
out:
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1209
	trace_f2fs_map_blocks(inode, map, err);
1210
	return err;
1211 1212
}

Hyunchul Lee's avatar
Hyunchul Lee committed
1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1238
static int __get_data_block(struct inode *inode, sector_t iblock,
1239
			struct buffer_head *bh, int create, int flag,
1240
			pgoff_t *next_pgofs, int seg_type)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1241 1242
{
	struct f2fs_map_blocks map;
1243
	int err;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1244 1245 1246

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
1247
	map.m_next_pgofs = next_pgofs;
1248
	map.m_next_extent = NULL;
1249
	map.m_seg_type = seg_type;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1250

1251 1252
	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1253 1254
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1255
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1256
	}
1257
	return err;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1258 1259
}

1260
static int get_data_block(struct inode *inode, sector_t iblock,
1261 1262
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
1263
{
1264
	return __get_data_block(inode, iblock, bh_result, create,
1265 1266
							flag, next_pgofs,
							NO_CHECK_TYPE);
1267 1268 1269
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
1270 1271
			struct buffer_head *bh_result, int create)
{
1272
	return __get_data_block(inode, iblock, bh_result, create,
1273
						F2FS_GET_BLOCK_DIO, NULL,
Chao Yu's avatar
Chao Yu committed
1274
						f2fs_rw_hint_to_seg_type(
1275
							inode->i_write_hint));
1276 1277
}

1278
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1279 1280
			struct buffer_head *bh_result, int create)
{
1281
	/* Block number less than F2FS MAX BLOCKS */
1282
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1283 1284
		return -EFBIG;

1285
	return __get_data_block(inode, iblock, bh_result, create,
1286 1287
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
1288 1289
}

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

Chao Yu's avatar
Chao Yu committed
1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

1319 1320 1321 1322 1323
		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}
Chao Yu's avatar
Chao Yu committed
1324 1325 1326 1327

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1328
					get_inline_xattr_addrs(inode));
Chao Yu's avatar
Chao Yu committed
1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

1350 1351 1352 1353 1354
		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}
Chao Yu's avatar
Chao Yu committed
1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1370 1371 1372
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
1373 1374
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
1375
	pgoff_t next_pgofs;
1376 1377 1378 1379
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

1380 1381 1382 1383 1384 1385
	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

Chao Yu's avatar
Chao Yu committed
1386
	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
1387 1388 1389
	if (ret)
		return ret;

1390 1391
	inode_lock(inode);

Chao Yu's avatar
Chao Yu committed
1392 1393 1394 1395 1396
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

1397 1398 1399
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
1400
			goto out;
1401 1402
	}

1403 1404 1405 1406 1407
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
1408

1409 1410 1411 1412
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

1413
	ret = get_data_block(inode, start_blk, &map_bh, 0,
1414
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1415 1416 1417 1418 1419
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
1420
		start_blk = next_pgofs;
1421 1422 1423

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
1424
			goto prep_next;
1425

1426 1427
		flags |= FIEMAP_EXTENT_LAST;
	}
1428

1429 1430 1431 1432
	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

1433 1434
		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
1435
	}
1436

1437 1438
	if (start_blk > last_blk || ret)
		goto out;
1439

1440 1441 1442 1443 1444 1445
	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;
1446

1447
	start_blk += logical_to_blk(inode, size);
1448

1449
prep_next:
1450 1451 1452 1453 1454 1455 1456 1457 1458
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

Al Viro's avatar
Al Viro committed
1459
	inode_unlock(inode);
1460
	return ret;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1461 1462
}

1463 1464 1465
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
1466 1467 1468 1469 1470
 *
 * Note that the aops->readpages() function is ONLY used for read-ahead. If
 * this function ever deviates from doing just read-ahead, it should either
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
1471 1472 1473
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
1474
			unsigned nr_pages, bool is_readahead)
1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
1491
	map.m_next_pgofs = NULL;
1492
	map.m_next_extent = NULL;
1493
	map.m_seg_type = NO_CHECK_TYPE;
1494

LiFan's avatar
LiFan committed
1495
	for (; nr_pages; nr_pages--) {
1496
		if (pages) {
1497
			page = list_last_entry(pages, struct page, lru);
1498 1499

			prefetchw(&page->flags);
1500 1501
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
1502 1503
						  page->index,
						  readahead_gfp_mask(mapping)))
1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

1532
			if (f2fs_map_blocks(inode, &map, 0,
1533
						F2FS_GET_BLOCK_DEFAULT))
1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
1545 1546 1547 1548

			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
								DATA_GENERIC))
				goto set_error_page;
1549
		} else {
1550
			zero_user_segment(page, 0, PAGE_SIZE);
1551 1552
			if (!PageUptodate(page))
				SetPageUptodate(page);
1553 1554 1555 1556 1557 1558 1559 1560
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1561 1562
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
1563
submit_and_realloc:
1564
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
1565 1566 1567
			bio = NULL;
		}
		if (bio == NULL) {
1568 1569
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0);
1570 1571
			if (IS_ERR(bio)) {
				bio = NULL;
1572
				goto set_error_page;
1573
			}
1574 1575
		}

1576 1577 1578 1579 1580 1581
		/*
		 * If the page is under writeback, we need to wait for
		 * its completion to see the correct decrypted data.
		 */
		f2fs_wait_on_block_writeback(inode, block_nr);

1582 1583 1584
		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

1585
		ClearPageError(page);
1586 1587 1588 1589
		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
1590
		zero_user_segment(page, 0, PAGE_SIZE);
1591 1592 1593 1594
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
1595
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
1596 1597 1598 1599 1600
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
1601
			put_page(page);
1602 1603 1604
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
1605
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
1606 1607 1608
	return 0;
}

1609 1610
static int f2fs_read_data_page(struct file *file, struct page *page)
{
1611
	struct inode *inode = page->mapping->host;
1612
	int ret = -EAGAIN;
1613

1614 1615
	trace_f2fs_readpage(page, DATA);

arter97's avatar
arter97 committed
1616
	/* If the file has inline data, try to read it directly */
1617 1618
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
1619
	if (ret == -EAGAIN)
1620
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
1621
	return ret;
1622 1623 1624 1625 1626 1627
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
1628
	struct inode *inode = mapping->host;
1629
	struct page *page = list_last_entry(pages, struct page, lru);
1630 1631

	trace_f2fs_readpages(inode, page, nr_pages);
1632 1633 1634 1635 1636

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

1637
	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
1638 1639
}

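/*
 * Encrypt one data page for writeback, retrying with __GFP_NOFAIL on ENOMEM,
 * and refresh any cached copy in META_MAPPING with the encrypted data.
 */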
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
1643
	struct page *mpage;
1644 1645
	gfp_t gfp_flags = GFP_NOFS;

1646
	if (!f2fs_encrypted_file(inode))
1647 1648
		return 0;

1649
	/* wait for GCed page writeback via META_MAPPING */
1650
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
1651 1652 1653 1654

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
1655 1656 1657 1658 1659 1660 1661 1662 1663 1664
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}
1665

1666 1667 1668 1669 1670 1671
	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
1672
	}
1673
	return 0;
1674 1675
}

Chao Yu's avatar
Chao Yu committed
1676 1677
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
1678
{
Chao Yu's avatar
Chao Yu committed
1679 1680
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;
1681

Chao Yu's avatar
Chao Yu committed
1682 1683
	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
Chao Yu's avatar
Chao Yu committed
1684
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
Chao Yu's avatar
Chao Yu committed
1685 1686 1687 1688
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
Chao Yu's avatar
Chao Yu committed
1689
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
Chao Yu's avatar
Chao Yu committed
1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

Daniel Rosenberg's avatar
Daniel Rosenberg committed
1707 1708 1709 1710
	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

Chao Yu's avatar
Chao Yu committed
1711 1712 1713
	return false;
}

Chao Yu's avatar
Chao Yu committed
1714
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yu's avatar
Chao Yu committed
1715
{
1716 1717
	if (f2fs_is_pinned_file(inode))
		return true;
Chao Yu's avatar
Chao Yu committed
1718 1719 1720 1721 1722 1723 1724 1725

	/* if this is cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

Chao Yu's avatar
Chao Yu committed
1726
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yu's avatar
Chao Yu committed
1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1741 1742 1743
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
Chao Yu's avatar
Chao Yu committed
1744 1745 1746 1747
	}
	return false;
}

1748 1749 1750 1751
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

Chao Yu's avatar
Chao Yu committed
1752
	if (f2fs_should_update_outplace(inode, fio))
1753 1754
		return false;

Chao Yu's avatar
Chao Yu committed
1755
	return f2fs_should_update_inplace(inode, fio);
1756 1757
}

int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC))
			return -EFAULT;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Avoid deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC)) {
		err = -EFAULT;
		goto out_writepage;
	}
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

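/*
 * Per-page ->writepage worker shared by f2fs_write_data_page() and
 * f2fs_write_cache_pages().  Returns 0 when the page was handled (written,
 * skipped or dropped), AOP_WRITEPAGE_ACTIVATE when the page was redirtied,
 * or a negative errno.  When @submitted is non-NULL it reports whether a
 * bio was actually issued for this page.
 */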
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages, to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page, which holds the journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see the EIO error, which is
	 * critical for the return value of fsync() to report an atomic_write
	 * failure to userspace.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * that of warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);

	return ret;
}

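/*
 * Serialize writeback under sbi->writepages (see __f2fs_write_data_pages())
 * so that concurrent writers do not interleave their block allocations:
 * only regular files are serialized, and only for non-WB_SYNC_ALL writeback
 * or when the inode has at least min_seq_blocks dirty pages.
 */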
static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	if (!S_ISREG(inode->i_mode))
		return false;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}

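/*
 * ->writepages backend.  Writeback is skipped entirely during power-on
 * recovery (SBI_POR_DOING), during file defragmentation, and for small
 * directory updates while dirty dentries still fit in memory; otherwise the
 * block layer is plugged and dirty pages are walked via
 * f2fs_write_cache_pages().  wb_sync_req[DATA] makes WB_SYNC_NONE writeback
 * back off while a WB_SYNC_ALL pass is in flight.
 */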
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot rely on mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		f2fs_truncate_blocks(inode, i_size, true);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}

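/*
 * Helper for f2fs_write_begin(): look up (or, while holding the PRE_AIO map
 * lock, allocate) the block that backs @page and report it through
 * @blk_addr, along with whether the lookup dirtied node pages
 * (@node_changed).  Inline-data inodes are read or converted here as well.
 */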
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	err = f2fs_is_checkpoint_ready(sbi);
	if (err)
		goto fail;

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will do that wait below under our own I/O
	 * control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, so we expect copied to be
	 * PAGE_SIZE. Otherwise, we treat it as zero copied and let
	 * generic_perform_write() try to copy the data again via copied = 0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

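/*
 * Return 0 when the request is aligned for direct I/O, a negative errno
 * when it is not even aligned to the device's logical block size, and 1
 * when it only misses the filesystem block size; f2fs_direct_IO() maps
 * that last case to a 0-byte return so the generic path can fall back to
 * buffered I/O.
 */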
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}

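/*
 * Direct I/O entry point.  GC is excluded via fi->i_gc_rwsem for the
 * duration of the transfer (both the READ and WRITE sides when the write
 * may go out of place), and IOCB_NOWAIT callers get -EAGAIN instead of
 * blocking on those locks.  Writes that update blocks in place set
 * FI_UPDATE_WRITE.
 */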
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			if (!do_opu)
				set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	/* This is an atomic-written page; keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return f2fs_drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

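/*
 * ->releasepage: only pages that are neither dirty nor part of an atomic
 * write may give up their private state; returning 0 keeps the page.
 */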
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic-written page; keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

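/*
 * ->set_page_dirty: pages of an inode in atomic-write mode are not put on
 * the normal dirty list but registered as in-memory (inmem) pages, so they
 * are written back only when the atomic write is committed.
 */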
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	/* don't retain the PG_checked flag which was set during GC */
	if (is_cold_data(page))
		clear_cold_data(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has been registered previously, so just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic-written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving the
	 * mapping; however, F2FS breaks this rule to maintain dirty page
	 * counts while truncating pages. Adjusting 'extra_count' here makes
	 * it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};

void f2fs_clear_radix_tree_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	radix_tree_tag_clear(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}