/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

bool is_merged_page(struct f2fs_sb_info *sbi, struct page *page,
							enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	struct bio_vec *bvec;
	struct page *target;
	int i;

	down_read(&io->io_rwsem);
	if (!io->bio) {
		up_read(&io->io_rwsem);
		return false;
	}

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping) {
			target = bvec->bv_page;
		} else {
			struct f2fs_crypto_ctx *ctx;

			/* encrypted page */
			ctx = (struct f2fs_crypto_ctx *)page_private(
								bvec->bv_page);
			target = ctx->w.control_page;
		}

		if (page == target) {
			up_read(&io->io_rwsem);
			return true;
		}
	}

	up_read(&io->io_rwsem);
	return false;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located at the block address.
 * The page is unlocked by the bio's completion handler.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}
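
/*
 * Minimal usage sketch for f2fs_submit_page_bio(): a synchronous read of a
 * single data block into a locked page, mirroring the fio setup used by
 * get_read_data_page() below. The helper name is hypothetical and only
 * illustrates the calling convention; it is not used elsewhere.
 */
static inline int f2fs_sketch_read_one_block(struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),	/* superblock info of the owner */
		.type = DATA,			/* a data page, not node/meta */
		.rw = READ_SYNC,		/* synchronous read request */
		.blk_addr = blkaddr,		/* on-disk block to read from */
		.page = page,			/* locked page to be filled */
		.encrypted_page = NULL,		/* no crypto bounce page */
	};

	/* builds a one-page bio and submits it; -EFAULT if the add fails */
	return f2fs_submit_page_bio(&fio);
}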

void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	if (set_page_dirty(node_page))
		dn->node_changed = true;
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage is released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}
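
/*
 * Minimal sketch of the locking contract documented above: the caller holds
 * f2fs_lock_op() across get_new_data_page() and releases it afterwards. The
 * wrapper name is hypothetical; real callers sit in dir.c (e.g. the
 * make_empty_dir() path, which passes a pinned ipage instead of NULL).
 */
static inline struct page *f2fs_sketch_new_data_page(struct inode *inode,
							pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	f2fs_lock_op(sbi);			/* caller grabs the op rwsem */
	/* ipage == NULL: the dnode is looked up and put internally */
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);			/* release after setup */

	return page;				/* page or ERR_PTR on failure */
}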

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}

static int __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;
	int err = 0;

	while (len) {
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, start, ALLOC_NODE);
		if (err)
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				err = __allocate_data_block(&dn);
				if (err)
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);

		f2fs_balance_fs(sbi, dn.node_changed);
	}
	return err;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	f2fs_balance_fs(sbi, dn.node_changed);
	return err;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}

	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto put_out;
			}
			err = __allocate_data_block(&dn);
			if (err)
				goto put_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
		} else {
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						dn.data_blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto put_out;
			}

			/*
			 * preallocated unwritten block should be mapped
			 * for fiemap.
			 */
			if (dn.data_blkaddr == NEW_ADDR)
				map->m_flags = F2FS_MAP_UNWRITTEN;
		}
	}

	map->m_flags |= F2FS_MAP_MAPPED;
	map->m_pblk = dn.data_blkaddr;
	map->m_len = 1;

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (map->m_len >= maxblocks)
		goto sync_out;

	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		if (create) {
			f2fs_unlock_op(sbi);
			f2fs_balance_fs(sbi, dn.node_changed);
			f2fs_lock_op(sbi);
		}

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			/*
			 * we only merge preallocated unwritten blocks
			 * for fiemap.
			 */
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
					blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	/* Give more consecutive addresses for the readahead */
	if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR &&
			blkaddr == NEW_ADDR)) {
		ofs++;
		dn.ofs_in_node++;
		pgofs++;
		map->m_len++;
		goto get_next;
	}

sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
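
/*
 * Minimal sketch of how a caller drives f2fs_map_blocks() for a read, in the
 * style of __get_data_block() below. The function name is hypothetical;
 * create == 0 means "look up only, allocate nothing".
 */
static inline int f2fs_sketch_map_for_read(struct inode *inode, pgoff_t lblk,
						unsigned int len)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = lblk;	/* first logical block to resolve */
	map.m_len = len;	/* maximum number of blocks wanted */

	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
	if (err)
		return err;

	/*
	 * On success, F2FS_MAP_MAPPED in map.m_flags means the blocks
	 * [lblk, lblk + map.m_len) are contiguous on disk starting at
	 * map.m_pblk; otherwise the range begins with a hole (this sketch
	 * chooses to report that as -ENOENT).
	 */
	return (map.m_flags & F2FS_MAP_MAPPED) ? 0 : -ENOENT;
}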

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag)
{
	return __get_data_block(inode, iblock, bh_result, create, flag);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
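
/*
 * Worked example (with the default 4KB f2fs block, i_blkbits == 12):
 * logical_to_blk(inode, 12345) == 12345 >> 12 == 3, and
 * blk_to_logical(inode, 3) == 3 << 12 == 12288, the byte offset of block 3.
 */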

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		/* Go through holes until past the EOF */
		if (blk_to_logical(inode, start_blk++) < isize)
			goto prep_next;
		/* A hole found beyond isize means there are no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
							F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->blk_addr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, in-place writes are
	 * preferred for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);
	if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		remove_dirty_inode(inode);
	}
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page, DATA);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			bool restart = false;

			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || (!err && dn.data_blkaddr == NULL_ADDR))
				restart = true;
			if (restart) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
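
/*
 * Worked example: with a 4KB block size, blocksize_mask is 0xFFF, so a
 * direct I/O at offset 8192 with 4KB-aligned user buffers passes, while an
 * offset of 512 (or a misaligned iovec) fails with -EINVAL.
 */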

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE) {
		err = __allocate_data_blocks(inode, offset, count);
		if (err)
			goto out;
	}

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
out:
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};