/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

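/*
 * Pages whose writeback completion is guaranteed by checkpoint: meta and
 * node inodes, directories, atomic-file data and cold data. The result
 * selects the writeback counter type (see WB_DATA_TYPE()).
 */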
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			is_inode_flag_set(inode, FI_ATOMIC_FILE)) ||
			is_cold_data(page))
		return true;
	return false;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

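/*
 * Post-read steps run as a small state machine: f2fs_read_end_io() resets
 * ctx->cur_step to STEP_INITIAL and bio_post_read_processing() advances it,
 * dispatching each enabled step to a workqueue. Once no steps remain, the
 * final __read_end_io() updates and unlocks the pages.
 */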
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

static void f2fs_read_end_io(struct bio *bio)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}

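/*
 * Write completion: dummy pages added for IO-size alignment are simply
 * returned to the mempool; real pages drop their writeback count, and an
 * error on checkpoint data stops checkpointing. Waiters on cp_wait are
 * woken once the last F2FS_WB_CP_DATA page completes.
 */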
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Return the block device holding @blk_addr and, if @bio is given, remap
 * the bio's device and start sector onto that target device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}

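/*
 * For write bios on DATA/NODE, pad the tail with zeroed dummy pages so the
 * bio size stays aligned to F2FS_IO_SIZE(sbi); on zoned block devices an
 * in-flight plug is flushed first. Padding a NODE bio breaks the
 * next-block-address chain, so a checkpoint is forced via SBI_NEED_CP.
 */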
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

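/*
 * Check whether the bio currently being merged in @io holds a page of the
 * given inode (or node ino) at index @idx; encrypted pages are matched
 * through their control page.
 */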
static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	verify_block_addr(fio, fio->new_blkaddr);
	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}

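/*
 * Merge-and-submit path for writes: a page joins the open per-(type, temp)
 * bio only if it is physically contiguous with the last block and shares
 * the same op/op_flags and block device; otherwise the pending bio is
 * submitted first. With fio->in_list, queued f2fs_io_info entries are
 * drained from io->io_list under io_lock.
 */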
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (is_valid_blkaddr(fio->old_blkaddr))
		verify_block_addr(fio, fio->old_blkaddr);
	verify_block_addr(fio, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}

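/*
 * Allocate a read bio aimed at @blkaddr. If any post-read step is needed
 * (currently only decryption), attach a bio_post_read_ctx so completion is
 * deferred to the post-read pipeline above.
 */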
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	if (f2fs_encrypted_file(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;

		/* wait for the page to be moved by cleaning (GC) */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

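/*
 * Reserve @count blocks in a dnode page: quota/space is charged up front
 * via inc_valid_block_count(), then each NULL_ADDR slot from dn->ofs_in_node
 * onward is marked NEW_ADDR until the reservation is used up.
 */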
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei  = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

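/*
 * Grab the page cache page at @index and start a read for it if needed.
 * The extent cache is consulted first to avoid a dnode lookup; a NEW_ADDR
 * block is served as a zeroed page. Unless already uptodate, the page is
 * returned locked with read IO in flight.
 */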
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to be able to tell whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

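/*
 * Allocate one block for dn->ofs_in_node through the segment allocator
 * (out-of-place), record the node summary, and push i_size forward when the
 * new block extends the file.
 */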
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

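/*
 * Preallocate the whole write range in one f2fs_map_blocks() pass: direct
 * IO picks F2FS_GET_BLOCK_PRE_DIO (real blocks) unless buffered IO is
 * forced, while buffered writes use PRE_AIO reservations. On -ENOSPC the
 * buffered path falls back to per-page allocation via FI_NO_PREALLOC.
 */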
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O */
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	if (direct_io) {
		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, WRITE) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

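/*
 * PRE_AIO preallocation only reserves NEW_ADDR slots, so it can run under
 * the lighter node_change read lock; all other mapping flags that may
 * allocate blocks must take the full f2fs_lock_op() rwsem.
 */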
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (!is_valid_blkaddr(blkaddr)) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

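/*
 * Return true only when [pos, pos + len) lies within i_size and every block
 * in the range is already mapped, checked with lookup-only (create == 0)
 * f2fs_map_blocks() calls.
 */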
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DEFAULT, NULL,
						rw_hint_to_seg_type(
							inode->i_write_hint));
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

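/*
 * Report xattr storage for FIEMAP_FLAG_XATTR: the inline xattr area inside
 * the inode block (flagged DATA_INLINE | NOT_ALIGNED) and, if present, the
 * separate xattr node block.
 */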
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, inode->i_ino, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, xnid, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}

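/*
 * fiemap walks get_data_block(F2FS_GET_BLOCK_FIEMAP) across the requested
 * range, emitting one extent per contiguous mapped run; holes jump ahead
 * via next_pgofs instead of probing block by block.
 */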
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that it relies on block_size == page_size, which
 * f2fs uses by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
1543
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

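/*
 * For encrypted files, writeback goes through a bounce page holding the
 * ciphertext. On -ENOMEM the pending merged writes are flushed and the
 * allocation is retried with __GFP_NOFAIL.
 */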
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

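/*
 * In-place-update policy, driven by the bits in SM_I(sbi)->ipu_policy:
 * force IPU, IPU when SSR is needed, utilization thresholds, async rewrite
 * of unencrypted data, or the fdatasync-only FI_NEED_IPU case.
 */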
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	return false;
}

bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is a cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
	}
	return false;
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (should_update_outplace(inode, fio))
		return false;

	return should_update_inplace(inode, fio);
}

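/*
 * Write one data page either in place (rewrite_data_page() at the existing
 * block when the IPU policy allows) or out of place through the LFS path,
 * which allocates a new block. f2fs_lock_op() is only ever trylocked here
 * (see fio->need_lock), since blocking on it while holding the page lock
 * could deadlock; contention is reported as -EAGAIN for the caller to retry.
 */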
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (is_valid_blkaddr(fio->old_blkaddr)) {
			ipu_force = true;
			fio->need_lock = LOCK_DONE;
			goto got_it;
		}
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	/*
	 * If the current allocation needs SSR,
	 * in-place writes are preferred for updated data.
	 */
	if (ipu_force || (is_valid_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

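/*
 * Writepage worker shared by data and inline paths. Returns 0 with the page
 * unlocked on success, or AOP_WRITEPAGE_ACTIVATE after redirtying; when
 * @submitted is given it reports whether a bio was actually issued.
 */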
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages so that kworker jobs can proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
1853
		err = f2fs_write_inline_data(inode, page);
1854 1855 1856
		if (!err)
			goto out;
	}
1857

1858
	if (err == -EAGAIN) {
1859
		err = do_write_data_page(&fio);
1860 1861 1862 1863 1864
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see the EIO error, which is
	 * critical for fsync() to report an atomic_write failure to the user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that it writes cold data pages separately from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give priority to WB_SYNC threads */
			if (atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}

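/*
 * Write back dirty data pages of this mapping, with several early-out
 * checks (checkpoint recovery, small dirty sets, defragmentation) to
 * avoid unnecessary IO; WB_SYNC_ALL writers take wb_sync_req so that
 * concurrent WB_SYNC_NONE writeback backs off.
 */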
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot rely on mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

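/*
 * A buffered or direct write failed: truncate the page cache and any
 * blocks that were instantiated beyond the old i_size for the failed
 * write.
 */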
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}

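/*
 * Look up (or, on the locked paths, allocate) the block backing the
 * page being written, converting inline data when the write no longer
 * fits inline. On success *blk_addr holds the block address and
 * *node_changed tells the caller whether a node page was dirtied.
 */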
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

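/*
 * ->write_begin(): pin and prepare the target page, returned locked in
 * *pagep. For partial writes the page contents are read in (or zeroed
 * for a new block) so that ->write_end() sees an up-to-date page.
 */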
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	if (f2fs_is_atomic_file(inode) &&
			!available_free_memory(sbi, INMEM_PAGES)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait for that below with our IO
	 * control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		drop_inmem_pages_all(sbi, false);
	return err;
}

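/*
 * ->write_end(): mark the copied data dirty, extend i_size if the write
 * went past it, and return the number of bytes accepted (returning 0
 * makes generic_perform_write() retry the copy).
 */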
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and let
	 * generic_perform_write() try to copy the data again with copied = 0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

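/* direct IO must be block-size aligned, both in file offset and memory */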
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

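/*
 * ->direct_IO(): hand the request to blockdev_direct_IO() under
 * i_gc_rwsem so that GC cannot move the blocks underneath us; fall back
 * to buffered IO (by returning 0) when forced, and honor IOCB_NOWAIT by
 * returning -EAGAIN instead of blocking on the rwsem.
 */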
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (!down_read_trylock(&F2FS_I(inode)->i_gc_rwsem[rw])) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		down_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
	}

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

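/*
 * ->invalidatepage(): a page is leaving the cache, so fix up the dirty
 * page accounting and detach its private data; atomic-written pages are
 * dropped from the inmem list instead.
 */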
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic-written page, so keep its Private flag */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

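/*
 * ->releasepage(): allow the page to be freed only if it is neither
 * dirty nor an atomic-written page; otherwise its private state must
 * survive.
 */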
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic-written page, so keep its Private flag */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

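/*
 * ->set_page_dirty(): pages of a not-yet-committed atomic file are
 * registered in the inmem list instead of being dirtied the usual way;
 * returns 1 if the page state changed.
 */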
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just return
		 * here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

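/*
 * ->bmap(): inline data has no block address to report, and dirty pages
 * must be flushed first so that every block is actually allocated
 * before its block number is looked up.
 */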
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure the blocks are all allocated */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

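/*
 * Migrate a cached page to a new page frame, transferring its private
 * state; an atomic-written page additionally has its inmem list entry
 * repointed to the new page under inmem_lock.
 */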
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving a mapping,
	 * however F2FS breaks this rule to maintain dirty page counts when
	 * truncating pages. So here adjusting 'extra_count' makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};

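/*
 * Clear PAGECACHE_TAG_DIRTY for this page in the mapping's radix tree,
 * under the i_pages lock.
 */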
void clear_radix_tree_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	radix_tree_tag_clear(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

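/*
 * Set up the slab cache and mempool used to allocate bio_post_read_ctx
 * structures; the mempool preallocation lets read bios make forward
 * progress under memory pressure.
 */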
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}