file.c 64 KB
Newer Older
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1
/*
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2 3 4 5 6 7 8 9 10 11 12 13 14 15
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
16
#include <linux/blkdev.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
17 18
#include <linux/falloc.h>
#include <linux/types.h>
19
#include <linux/compat.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
20 21
#include <linux/uaccess.h>
#include <linux/mount.h>
22
#include <linux/pagevec.h>
23
#include <linux/uio.h>
24
#include <linux/uuid.h>
25
#include <linux/file.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
26 27 28 29 30 31

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
32
#include "gc.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
33
#include "trace.h"
34
#include <trace/events/f2fs.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
35

36 37 38 39 40 41 42 43 44 45 46 47
static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}

48
/*
 * Write-fault (mkwrite) handler: reserve the on-disk block backing the
 * faulting page, then re-validate the page under lock before dirtying it.
 * Returns a VM_FAULT_* code via block_page_mkwrite_return().
 */
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	/* inline-data inodes must have been converted before mmap writes */
	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	/* page may have been truncated/reclaimed while we allocated */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		/* zero the tail beyond i_size so stale data never leaks */
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}

/*
 * mmap operations: fault and mkwrite go through f2fs wrappers (which take
 * i_mmap_sem / reserve blocks); map_pages uses the generic helper directly.
 */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

126 127 128 129 130 131 132 133 134 135
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

136 137
	*pino = parent_ino(dentry);
	dput(dentry);
138 139 140
	return 1;
}

141 142
static inline bool need_do_checkpoint(struct inode *inode)
{
143
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
144 145 146 147
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
148
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
149
		need_cp = true;
150 151 152 153 154 155
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
156 157
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
158 159
	else if (sbi->active_logs == 2)
		need_cp = true;
160 161 162 163

	return need_cp;
}

164 165 166 167 168 169 170 171 172 173 174
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

175 176 177 178 179 180 181 182
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
183
		f2fs_i_pino_write(inode, pino);
184 185
		file_got_pino(inode);
	}
186
	up_write(&fi->i_sem);
187 188
}

189 190
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
191 192
{
	struct inode *inode = file->f_mapping->host;
193
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
194
	nid_t ino = inode->i_ino;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
195 196 197
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
198
		.sync_mode = WB_SYNC_ALL,
Jaegeuk Kim's avatar
Jaegeuk Kim committed
199 200 201 202
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

203
	if (unlikely(f2fs_readonly(inode->i_sb)))
204 205
		return 0;

206
	trace_f2fs_sync_file_enter(inode);
207 208

	/* if fdatasync is triggered, let's do in-place-update */
Jaegeuk Kim's avatar
Jaegeuk Kim committed
209
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
210
		set_inode_flag(inode, FI_NEED_IPU);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
211
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
212
	clear_inode_flag(inode, FI_NEED_IPU);
213

214 215
	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
216
		return ret;
217
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
218

219
	/* if the inode is dirty, let's recover all the time */
Chao Yu's avatar
Chao Yu committed
220
	if (!f2fs_skip_inode_update(inode, datasync)) {
221
		f2fs_write_inode(inode, NULL);
222 223 224
		goto go_write;
	}

225 226 227
	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
228
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
229
			!exist_written_data(sbi, ino, APPEND_INO)) {
230

231 232
		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
233 234
			goto go_write;

235
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
236
				exist_written_data(sbi, ino, UPDATE_INO))
237 238 239
			goto flush_out;
		goto out;
	}
240
go_write:
241 242 243 244
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
245
	down_read(&F2FS_I(inode)->i_sem);
246
	need_cp = need_do_checkpoint(inode);
247
	up_read(&F2FS_I(inode)->i_sem);
248

Jaegeuk Kim's avatar
Jaegeuk Kim committed
249 250 251
	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
252

253 254 255 256 257
		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
258 259
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
260 261
		goto out;
	}
262
sync_nodes:
263
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
264 265
	if (ret)
		goto out;
266

267
	/* if cp_error was enabled, we should avoid infinite loop */
268 269
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
270
		goto out;
271
	}
272

273
	if (need_inode_block_update(sbi, ino)) {
274
		f2fs_mark_inode_dirty_sync(inode, true);
275 276
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
277
	}
278

279 280 281 282 283 284 285 286 287 288 289 290 291
	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}
292 293

	/* once recovery info is written, don't need to tack this */
294
	remove_ino_entry(sbi, ino, APPEND_INO);
295
	clear_inode_flag(inode, FI_APPEND_WRITE);
296
flush_out:
297
	remove_ino_entry(sbi, ino, UPDATE_INO);
298
	clear_inode_flag(inode, FI_UPDATE_WRITE);
299 300
	if (!atomic)
		ret = f2fs_issue_flush(sbi);
301
	f2fs_update_time(sbi, REQ_TIME);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
302
out:
303
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
304
	f2fs_trace_ios(NULL, 1);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
305 306 307
	return ret;
}

308 309 310 311 312
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

313 314 315 316 317 318 319 320 321 322 323
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
324 325
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
326
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
	pagevec_release(&pvec);
	return pgofs;
}

/*
 * Does this block address terminate the SEEK_DATA / SEEK_HOLE scan?
 * SEEK_DATA matches an allocated block, or a reserved (NEW_ADDR) block
 * whose page is the first dirty one; SEEK_HOLE matches an unallocated slot.
 */
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	if (whence == SEEK_DATA)
		return (blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR);

	if (whence == SEEK_HOLE)
		return blkaddr == NULL_ADDR;

	return false;
}

348 349 350 351 352
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
353 354 355
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
356 357
	int err = 0;

Al Viro's avatar
Al Viro committed
358
	inode_lock(inode);
359 360 361 362 363 364

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
Chao Yu's avatar
Chao Yu committed
365
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
366 367 368 369 370
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

371
	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
372

373 374
	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

375
	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
376
		set_new_dnode(&dn, inode, NULL, NULL, 0);
377
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
378 379 380
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
arter97's avatar
arter97 committed
381
			/* direct node does not exists */
382
			if (whence == SEEK_DATA) {
383
				pgofs = get_next_page_offset(&dn, pgofs);
384 385 386 387 388 389
				continue;
			} else {
				goto found;
			}
		}

390
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
391 392 393 394

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
395
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
396
			block_t blkaddr;
397 398
			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
399

400
			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
401 402 403 404 405 406 407 408 409 410
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
411 412
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
Al Viro's avatar
Al Viro committed
413
	inode_unlock(inode);
414 415
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
Al Viro's avatar
Al Viro committed
416
	inode_unlock(inode);
417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432
	return -ENXIO;
}

/*
 * llseek: SEEK_SET/CUR/END go through the generic helper; SEEK_DATA and
 * SEEK_HOLE require a block-map walk via f2fs_seek_block().
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	default:
		return -EINVAL;
	}
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
441 442
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
443
	struct inode *inode = file_inode(file);
444
	int err;
445 446

	/* we don't need to use inline_data strictly */
447 448 449
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;
450

Jaegeuk Kim's avatar
Jaegeuk Kim committed
451 452 453 454 455
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

456 457
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
458
	struct dentry *dir;
459

460 461
	if (f2fs_encrypted_inode(inode)) {
		int ret = fscrypt_get_encryption_info(inode);
462
		if (ret)
463
			return -EACCES;
464
		if (!fscrypt_has_encryption_key(inode))
465
			return -ENOKEY;
466
	}
467 468 469 470
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
471
		return -EPERM;
472 473
	}
	dput(dir);
474
	return dquot_file_open(inode, filp);
475 476
}

477
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
478
{
479
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
480
	struct f2fs_node *raw_node;
Chao Yu's avatar
Chao Yu committed
481
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
482
	__le32 *addr;
483 484 485 486
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
487

488
	raw_node = F2FS_NODE(dn->node_page);
489
	addr = blkaddr_in_node(raw_node) + base + ofs;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
490

Chris Fries's avatar
Chris Fries committed
491
	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
492 493 494 495
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

496
		dn->data_blkaddr = NULL_ADDR;
497
		set_data_blkaddr(dn);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
498
		invalidate_blocks(sbi, blkaddr);
499
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
500
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
501 502
		nr_free++;
	}
Chao Yu's avatar
Chao Yu committed
503

Jaegeuk Kim's avatar
Jaegeuk Kim committed
504
	if (nr_free) {
Chao Yu's avatar
Chao Yu committed
505 506 507 508 509 510
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
511
							dn->inode) + ofs;
Chao Yu's avatar
Chao Yu committed
512
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
513
		dec_valid_block_count(sbi, dn->inode, nr_free);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
514 515
	}
	dn->ofs_in_node = ofs;
516

517
	f2fs_update_time(sbi, REQ_TIME);
518 519
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
520 521 522 523 524 525 526 527
	return nr_free;
}

/* Invalidate every data block address slot in the current node block. */
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

528
static int truncate_partial_data_page(struct inode *inode, u64 from,
529
								bool cache_only)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
530
{
531 532
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
533
	struct address_space *mapping = inode->i_mapping;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
534 535
	struct page *page;

536
	if (!offset && !cache_only)
537
		return 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
538

539
	if (cache_only) {
540
		page = find_lock_page(mapping, index);
541 542 543
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
544
		return 0;
545
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
546

547
	page = get_lock_data_page(inode, index, true);
548
	if (IS_ERR(page))
549
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
550
truncate_out:
551
	f2fs_wait_on_page_writeback(page, DATA, true);
552
	zero_user(page, offset, PAGE_SIZE - offset);
553 554 555 556

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
557
		set_page_dirty(page);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
558
	f2fs_put_page(page, 1);
559
	return 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
560 561
}

562
/*
 * Free all blocks past byte offset @from: the partial slots in the dnode
 * containing @from, every indirect node beyond it, and finally the tail of
 * the partial EOF page. @lock controls whether we take f2fs_lock_op().
 */
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	/* first block index fully beyond @from (round up) */
	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	/* nothing addressable past max_file_blocks; only fix the EOF page */
	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		/* tell truncate_partial_data_page to use cache_only mode */
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	/* free the tail of the dnode that contains free_from */
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

628
int f2fs_truncate(struct inode *inode)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
629
{
630 631
	int err;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
632 633
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
634
		return 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
635

636 637
	trace_f2fs_truncate(inode);

638 639 640 641 642 643
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
644
	/* we should check inline_data size */
645
	if (!f2fs_may_inline_data(inode)) {
646 647 648
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
649 650
	}

651
	err = truncate_blocks(inode, i_size_read(inode), true);
652 653 654
	if (err)
		return err;

655
	inode->i_mtime = inode->i_ctime = current_time(inode);
656
	f2fs_mark_inode_dirty_sync(inode, false);
657
	return 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
658 659
}

660
int f2fs_getattr(const struct path *path, struct kstat *stat,
Chao Yu's avatar
Chao Yu committed
661
		 u32 request_mask, unsigned int query_flags)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
662
{
663
	struct inode *inode = d_inode(path->dentry);
Chao Yu's avatar
Chao Yu committed
664 665 666
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;

667
	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
Chao Yu's avatar
Chao Yu committed
668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

Jaegeuk Kim's avatar
Jaegeuk Kim committed
685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711
	generic_fillattr(inode, stat);
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
712
		set_acl_inode(inode, mode);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
713 714 715 716 717 718 719 720
	}
}
#else
#define __setattr_copy setattr_copy
#endif

/*
 * setattr: validate the change, handle quota transfer for uid/gid changes,
 * perform size changes (shrink truncates blocks; grow only extends i_size),
 * copy the remaining attributes, and re-apply ACL-derived mode bits.
 */
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	/* move quota charges when ownership actually changes */
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		/* truncating an encrypted file requires its key */
		if (f2fs_encrypted_inode(inode)) {
			err = fscrypt_get_encryption_info(inode);
			if (err)
				return err;
			if (!fscrypt_has_encryption_key(inode))
				return -ENOKEY;
		}

		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			/* commit the mode deferred by __setattr_copy() */
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

/* inode_operations for regular f2fs files. */
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

Chao Yu's avatar
Chao Yu committed
810
/*
 * Zero @len bytes starting at @start within page @index, allocating the
 * page (and its block reservation) if needed, and mark it dirty.
 */
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *zpage;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	zpage = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(zpage))
		return PTR_ERR(zpage);

	f2fs_wait_on_page_writeback(zpage, DATA, true);
	zero_user(zpage, start, len);
	set_page_dirty(zpage);
	f2fs_put_page(zpage, 1);

	return 0;
}

/*
 * Invalidate all block addresses in page range [pg_start, pg_end),
 * walking dnode by dnode; ranges with no dnode are already holes.
 */
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				/* no dnode here — already a hole, skip on */
				pg_start++;
				continue;
			}
			return err;
		}

		/* free at most to the end of this dnode or the request */
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

866
/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range
 * and deallocate every full page in between. Byte range [offset, offset+len).
 */
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	/* inline data can't represent holes — convert first */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* whole range lives inside one page: just zero it */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		/* zero the partial head page */
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		/* zero the partial tail page */
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			/* block page faults while pages and blocks go away */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}

923 924
/*
 * Read out the block addresses for @len blocks of @inode starting at page
 * offset @off into @blkaddr[], walking one dnode page at a time.  For each
 * non-checkpointed address, detach it from the dnode (without invalidating
 * the on-disk block) and set the matching do_replace[] slot so the caller
 * can later re-attach or roll back.  Returns 0 or a negative errno.
 */
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		/* hole: skip the whole (missing) direct node's worth */
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			/* moving un-checkpointed data is unsafe in LFS mode */
			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
969

970 971 972 973 974 975
/*
 * Undo __read_out_blkaddrs(): re-attach each saved block address in
 * @blkaddr[] whose do_replace[] slot is set back into @inode's dnode at
 * page offset @off + i.  If the dnode can no longer be looked up, release
 * the block count and invalidate the orphaned block instead.
 * Always returns 0.
 */
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			/* dnode gone: drop the reserved block instead */
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

/*
 * Move/copy @len blocks from @src_inode page offset @src to @dst_inode
 * page offset @dst.  Addresses previously detached by __read_out_blkaddrs
 * (do_replace[i] set) are re-linked into the destination dnode via
 * f2fs_replace_block(); checkpointed blocks are copied page-by-page
 * through the page cache instead.  When @full, holes (NULL_ADDR) are
 * materialized in the destination too.  Returns 0 or a negative errno.
 */
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			/* batch as many slots as fit in this dnode page */
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					/* transfer block accounting src -> dst */
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				/* NOTE(review): (dst + i) << PAGE_SHIFT is done in
				 * pgoff_t width before the size_t assignment —
				 * presumably fine on 64-bit; verify for 32-bit. */
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			/* checkpointed data: copy through the page cache */
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
1071

1072 1073
/*
 * Exchange/move up to @len blocks from @src_inode:@src to @dst_inode:@dst,
 * processing in batches of at most 4 * ADDRS_PER_BLOCK entries.  Each batch
 * reads out the source block addresses, clones them into the destination,
 * and rolls back the current batch on failure.  Returns 0 or -errno.
 *
 * Fix: the roll_back path used to pass @len (the *total* remaining length)
 * to __roll_back_blkaddrs() although src_blkaddr[]/do_replace[] only hold
 * @olen entries for the current batch — an out-of-bounds read of both
 * arrays whenever len > olen.  Roll back only the current batch (@olen).
 */
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		/* process at most one batch of saved addresses at a time */
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	/* arrays only cover this batch: roll back olen, not len */
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
1119

1120 1121 1122 1123
/*
 * Core of FALLOC_FL_COLLAPSE_RANGE: shift every block from page @end to
 * EOF down to page @start by exchanging blocks within the same inode.
 * Drops the extent tree first since all block addresses move.
 * Runs under f2fs_lock_op() to exclude checkpoint.
 */
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}

/*
 * FALLOC_FL_COLLAPSE_RANGE: remove [offset, offset+len) and shift the
 * rest of the file down, shrinking i_size by @len.  The range must be
 * block-aligned and must not reach EOF.  Takes i_mmap_sem for write to
 * keep page faults out while pages are moved and the tail is truncated.
 */
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);

out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}

1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
/*
 * Zero the block slots [start, end) within a single dnode page: count the
 * holes, reserve that many new blocks in one shot, then invalidate any
 * previously allocated block and stamp every slot NEW_ADDR so subsequent
 * reads see zeroes.  Finally refresh the extent cache for the range.
 * Returns 0, or -ENOSPC if the reservation only partially succeeded.
 */
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

Chao Yu's avatar
Chao Yu committed
1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241
/*
 * FALLOC_FL_ZERO_RANGE: make reads of [offset, offset+len) return zeroes.
 * Partial head/tail pages are zeroed in place via fill_zero(); whole pages
 * in between have their blocks invalidated and re-stamped NEW_ADDR one
 * dnode page at a time (f2fs_do_zero_range).  Grows i_size unless
 * FALLOC_FL_KEEP_SIZE was given.  Holds i_mmap_sem for write throughout.
 */
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* range lies inside a single page */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		/* zero whole pages, one dnode page worth per iteration */
		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

1326 1327 1328
/*
 * FALLOC_FL_INSERT_RANGE: open a block-aligned hole of @len bytes at
 * @offset by shifting everything from @offset upward, growing i_size.
 * Blocks are moved from the end of file toward @offset in chunks of at
 * most @delta pages so source and destination ranges never overlap
 * within one __exchange_data_block() call.
 */
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* drop any preallocation beyond i_size before shifting */
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* move blocks from the tail downwards, delta pages at a time */
	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1393 1394 1395
/*
 * Default fallocate mode: preallocate blocks for [offset, offset+len)
 * via a single f2fs_map_blocks(F2FS_GET_BLOCK_PRE_AIO) call.  On partial
 * failure, i_size is still advanced to the last successfully mapped
 * position.  i_size grows only without FALLOC_FL_KEEP_SIZE.
 * Returns 0 or the f2fs_map_blocks() error.
 */
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		/* nothing mapped at all: report the error as-is */
		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len:
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return err;
}

/*
 * ->fallocate entry point: validate the mode bits and dispatch to the
 * matching helper (punch/collapse/zero/insert/expand) under inode_lock.
 * COLLAPSE/INSERT are rejected for encrypted inodes.  On success, mtime
 * and ctime are updated and FALLOC_FL_KEEP_SIZE is recorded on the inode.
 */
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		/* punching entirely beyond EOF is a no-op */
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

1494 1495
/*
 * ->release: on the final writer's close, discard leftover atomic pages
 * and flush/clear volatile-file state.
 */
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remained atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		/* FI_DROP_CACHE only covers this writeback pass */
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}

1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532
/*
 * ->flush: roll back an in-flight atomic transaction owned by the
 * current task (e.g. when the process crashed mid-transaction).
 */
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other reader/write can see corrupted database
	 * until all the writers close its file. Since this should be done
	 * before dropping file lock, it needs to do in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}

1533
/* FS_IOC_GETFLAGS: copy the user-visible inode flags to userspace. */
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1541

1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
/*
 * Apply new user flags to @inode: only user-modifiable bits (plus
 * FS_PROJINHERIT_FL) are taken from @flags, the rest are preserved.
 * Toggling APPEND/IMMUTABLE requires CAP_LINUX_IMMUTABLE.
 * Caller must hold inode_lock.  Returns 0 or -EPERM.
 */
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

1574 1575 1576
/*
 * FS_IOC_SETFLAGS: read the new flag word from userspace, take write
 * access and inode_lock, then delegate to __f2fs_ioc_setflags().
 */
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
1598

1599 1600 1601 1602 1603 1604 1605
/* FS_IOC_GETVERSION: return the inode generation number to userspace. */
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1606 1607 1608
/*
 * F2FS_IOC_START_ATOMIC_WRITE: mark the file as an atomic-write target.
 * Converts inline data first; if dirty pages already exist (unexpected),
 * flushes them before entering atomic mode.  Records the owning task so
 * ->flush can roll back on crash.  Idempotent for files already atomic.
 */
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

/*
 * F2FS_IOC_COMMIT_ATOMIC_WRITE: commit the in-memory pages of an atomic
 * file and fsync it; clears atomic state on success.  A non-atomic file
 * just gets an fsync.  Rejected for volatile files.
 */
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

1692 1693 1694
/*
 * F2FS_IOC_START_VOLATILE_WRITE: mark a regular file volatile (its data
 * may be dropped instead of written back).  Converts inline data first.
 * Idempotent for files already volatile.
 */
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

1727 1728 1729
/*
 * F2FS_IOC_RELEASE_VOLATILE_WRITE: discard a volatile file's data.
 * If the first block was never written, only the partial page is
 * truncated; otherwise the whole first block is punched out.
 */
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

/*
 * F2FS_IOC_ABORT_VOLATILE_WRITE: drop pending atomic pages and, for a
 * volatile file, clear volatile state and fsync what remains.
 */
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1785 1786 1787 1788 1789 1790
/*
 * F2FS_IOC_SHUTDOWN: take the filesystem down with varying degrees of
 * data safety chosen by userspace:
 *   FULLSYNC  - freeze the block device, stop checkpointing, thaw
 *   METASYNC  - one last checkpoint, then stop checkpointing
 *   NOSYNC    - stop checkpointing immediately
 *   METAFLUSH - flush dirty meta pages, then stop checkpointing
 * Requires CAP_SYS_ADMIN.
 */
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}

1833 1834 1835 1836 1837 1838 1839
/*
 * FITRIM: discard free space in the given range.  Clamps the minimum
 * trim length to the device's discard granularity and copies the
 * (possibly updated) range back to userspace.  Requires CAP_SYS_ADMIN
 * and a device that supports discard.
 */
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882
/* Return true if any byte of the 16-byte uuid @u is non-zero. */
static bool uuid_is_nonzero(__u8 u[16])
{
	int idx = 0;

	while (idx < 16) {
		if (u[idx] != 0)
			return true;
		idx++;
	}
	return false;
}

/* FS_IOC_SET_ENCRYPTION_POLICY: delegate to the fscrypt helper. */
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

/* FS_IOC_GET_ENCRYPTION_POLICY: delegate to the fscrypt helper. */
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

/*
 * FS_IOC_GET_ENCRYPTION_PWSALT: return the 16-byte password salt stored
 * in the superblock, generating and persisting one first if it is still
 * all-zero.  On a failed commit the freshly generated salt is wiped so a
 * retry regenerates it.
 */
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}

1927 1928 1929 1930
/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one GC pass.  With sync != 0 the
 * caller waits for the gc_mutex; otherwise a busy mutex returns -EBUSY.
 * Requires CAP_SYS_ADMIN and a writable filesystem.
 */
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	/* f2fs_gc() releases gc_mutex internally */
	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005
/*
 * F2FS_IOC_GARBAGE_COLLECT_RANGE: run GC segment by segment over the
 * block range described by struct f2fs_gc_range.
 */
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	/*
	 * Fix: validate the range BEFORE taking mount write access.  The
	 * previous code returned -EINVAL here after mnt_want_write_file()
	 * had succeeded, without a matching mnt_drop_write_file(), leaking
	 * the mount's write count.
	 */
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

2006 2007 2008 2009
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2010
	int ret;
2011 2012 2013 2014 2015 2016 2017

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

2018 2019 2020 2021 2022 2023 2024 2025
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
2026 2027
}

Chao Yu's avatar
Chao Yu committed
2028 2029 2030 2031 2032
/*
 * Defragment the byte range [range->start, range->start + range->len) of
 * a regular file: detect physical discontiguity, then redirty the mapped
 * blocks one segment's worth at a time so writeback reallocates them
 * contiguously.  On success, range->len is rewritten to the number of
 * bytes actually redirtied.  Returns 0 or a negative errno.
 */
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0,0,0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update_policy(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		/* hole: advance one block and keep scanning */
		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		/* a gap between consecutive mapped extents => fragmented */
		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	/* number of sections the range spans, rounded up */
	sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	/* redirty mapped blocks, at most one segment (blk_per_seg) per pass */
	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		/* tell the write path these dirtyings are for defrag */
		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		/* extent exhausted before filling a segment: map more */
		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		/* flush this segment's worth before dirtying the next */
		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

/*
 * F2FS_IOC_DEFRAGMENT: validate the user-supplied range, run
 * f2fs_defragment_range(), and copy the (possibly clamped) result
 * back to userspace.  The check order below is user-visible via the
 * returned errno, so it must not be rearranged.
 */
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* only regular, non-atomic files can be defragmented */
	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	/* range must not extend past the maximum supported file size */
	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	/* report back how many bytes were actually redirtied */
	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230
/*
 * Move @len bytes of data from @file_in at @pos_in to @file_out at
 * @pos_out by exchanging the underlying blocks.  Both files must be
 * regular, unencrypted f2fs files on the same mount; all offsets and
 * the length must end up block aligned.  Returns 0 or a negative errno.
 */
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		/*
		 * NOTE(review): this only rejects overlap when pos_out lies
		 * inside [pos_in, pos_in + len); the pos_out < pos_in overlap
		 * case is not rejected — confirm whether that is intended.
		 */
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		/* trylock avoids deadlock against a concurrent reverse move */
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	/* source range must lie within i_size and not wrap */
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	/* len == 0 means "to end of file" */
	if (len == 0)
		olen = len = src->i_size - pos_in;
	/* a range touching EOF is extended to the block boundary */
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	/* inline data cannot be exchanged block-wise; convert it first */
	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		/* grow dst if we wrote past its EOF, else restore its size */
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

/*
 * F2FS_IOC_MOVE_RANGE: resolve the destination fd from userspace,
 * check access modes, and forward to f2fs_move_file_range().
 */
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	/* source must be opened for both read and write */
	if (!(filp->f_mode & FMODE_READ) || !(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	err = -EBADF;
	if (!(dst.file->f_mode & FMODE_WRITE))
		goto err_out;

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);
	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417
/*
 * F2FS_IOC_FLUSH_DEVICE: on a multi-device filesystem, migrate up to
 * range.segments segments of data off device range.dev_num by driving
 * GC across that device's segment range, resuming from the position
 * recorded in last_victim[FLUSH_DEVICE].
 */
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* needs multiple devices, a valid dev_num, and 1 segment/section */
	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* device 0 implicitly starts at the main area (segno 0) */
	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	/* resume from the previous flush position if still in range */
	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		/* push other victim-selection policies past this window */
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		/*
		 * NOTE(review): gc_mutex is re-acquired every iteration, which
		 * implies f2fs_gc() releases it internally — confirm.
		 */
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

2418 2419 2420 2421 2422 2423 2424 2425 2426 2427
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
2428

2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622
#ifdef CONFIG_QUOTA
/*
 * Change the project ID of @filp's inode for project-quota accounting.
 * Transfers the inode's usage to the new project's dquot, then updates
 * i_projid.  Returns 0 on success or a negative errno.
 */
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		/* feature absent: only the default project id is accepted */
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	/* no-op if the project id is unchanged */
	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	/* the on-disk inode must have room for the i_projid field */
	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	/*
	 * NOTE(review): if dqget() fails, the transfer is skipped but
	 * i_projid is still updated below and err remains -EPERM from
	 * above — confirm this mirrors the intended (ext4-like) behavior.
	 */
	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
/* Quota support not built in: only the default project id is accepted. */
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal FS_*_FL flags to their FS_XFLAG_* equivalents. */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	static const struct {
		unsigned long iflag;
		__u32 xflag;
	} xlat[] = {
		{ FS_SYNC_FL,		FS_XFLAG_SYNC },
		{ FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
		{ FS_APPEND_FL,		FS_XFLAG_APPEND },
		{ FS_NODUMP_FL,		FS_XFLAG_NODUMP },
		{ FS_NOATIME_FL,	FS_XFLAG_NOATIME },
		{ FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
	};
	__u32 xflags = 0;
	unsigned int i;

	for (i = 0; i < sizeof(xlat) / sizeof(xlat[0]); i++)
		if (iflags & xlat[i].iflag)
			xflags |= xlat[i].xflag;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer FS_XFLAG_* flags back to their internal FS_*_FL form. */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	static const struct {
		__u32 xflag;
		unsigned long iflag;
	} xlat[] = {
		{ FS_XFLAG_SYNC,	FS_SYNC_FL },
		{ FS_XFLAG_IMMUTABLE,	FS_IMMUTABLE_FL },
		{ FS_XFLAG_APPEND,	FS_APPEND_FL },
		{ FS_XFLAG_NODUMP,	FS_NODUMP_FL },
		{ FS_XFLAG_NOATIME,	FS_NOATIME_FL },
		{ FS_XFLAG_PROJINHERIT,	FS_PROJINHERIT_FL },
	};
	unsigned long iflags = 0;
	unsigned int i;

	for (i = 0; i < sizeof(xlat) / sizeof(xlat[0]); i++)
		if (xflags & xlat[i].xflag)
			iflags |= xlat[i].iflag;

	return iflags;
}

/*
 * FS_IOC_FSGETXATTR handler: report the user-visible inode flags
 * (translated to FS_XFLAG_*) and, when project quota is enabled,
 * the inode's project id.
 */
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(fa));

	/* expose only the user-visible subset, translated to xflags */
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

/*
 * FS_IOC_FSSETXATTR handler: update the supported xflags on the inode
 * and then apply the requested project id via f2fs_ioc_setproject().
 */
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	/* replace only the xflag-visible portion of the inode flags */
	iflags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(iflags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, iflags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	return f2fs_ioc_setproject(filp, fa.fsx_projid);
}

2623 2624 2625 2626 2627 2628 2629
/*
 * Dispatcher for f2fs-private ioctls (plus FITRIM and the fsxattr
 * interface); each case forwards to a handler that performs its own
 * permission and argument checks.
 */
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	default:
		return -ENOTTY;
	}
}

2675 2676
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
2677 2678
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
2679
	struct blk_plug plug;
2680
	ssize_t ret;
2681

2682 2683 2684
	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
2685
		int err;
2686

2687 2688
		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);
2689

2690
		err = f2fs_preallocate_blocks(iocb, from);
2691 2692 2693
		if (err) {
			inode_unlock(inode);
			return err;
2694
		}
2695 2696 2697
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
2698
		clear_inode_flag(inode, FI_NO_PREALLOC);
Chao Yu's avatar
Chao Yu committed
2699 2700 2701

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
2702 2703 2704
	}
	inode_unlock(inode);

2705 2706
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
2707
	return ret;
2708 2709
}

2710 2711 2712 2713 2714 2715 2716 2717 2718 2719
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
2733
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
2734 2735
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
2736
	case F2FS_IOC_MOVE_RANGE:
2737
	case F2FS_IOC_FLUSH_DEVICE:
2738
	case F2FS_IOC_GET_FEATURES:
2739 2740
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
2741
		break;
2742 2743 2744 2745 2746 2747 2748
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

Jaegeuk Kim's avatar
Jaegeuk Kim committed
2749
/* Regular-file operations table for f2fs. */
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};