super.c 106 KB
Newer Older
Chao Yu's avatar
Chao Yu committed
1
// SPDX-License-Identifier: GPL-2.0
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2
/*
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
18
#include <linux/proc_fs.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
19 20
#include <linux/random.h>
#include <linux/exportfs.h>
21
#include <linux/blkdev.h>
22
#include <linux/quotaops.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
23
#include <linux/f2fs_fs.h>
24
#include <linux/sysfs.h>
Chao Yu's avatar
Chao Yu committed
25
#include <linux/quota.h>
26
#include <linux/unicode.h>
27
#include <linux/part_stat.h>
Jaegeuk Kim's avatar
Jaegeuk Kim committed
28 29 30

#include "f2fs.h"
#include "node.h"
31
#include "segment.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
32
#include "xattr.h"
33
#include "gc.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
34
#include "trace.h"
Jaegeuk Kim's avatar
Jaegeuk Kim committed
35

36 37 38
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

Jaegeuk Kim's avatar
Jaegeuk Kim committed
39 40
static struct kmem_cache *f2fs_inode_cachep;

41
#ifdef CONFIG_F2FS_FAULT_INJECTION
/* Human-readable names for each fault-injection site, indexed by FAULT_*. */
const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};

/*
 * Configure per-superblock fault injection.
 * @rate: when non-zero, reset the op counter and inject roughly one fault
 *	  every @rate operations.
 * @type: when non-zero, bitmask of FAULT_* sites eligible for injection.
 * Passing rate == 0 and type == 0 clears the whole fault_info, i.e.
 * disables injection entirely.
 */
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif

79 80 81 82 83 84 85
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,	/* reclaim cached objects */
	.count_objects = f2fs_shrink_count,	/* report reclaimable count */
	.seeks = DEFAULT_SEEKS,
};

Jaegeuk Kim's avatar
Jaegeuk Kim committed
86
/*
 * Mount-option tokens; each value pairs with a pattern string in
 * f2fs_tokens below and is dispatched in parse_options().
 */
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_atgc,
	Opt_err,
};

static match_table_t f2fs_tokens = {
154
	{Opt_gc_background, "background_gc=%s"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
155
	{Opt_disable_roll_forward, "disable_roll_forward"},
156
	{Opt_norecovery, "norecovery"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
157
	{Opt_discard, "discard"},
Chao Yu's avatar
Chao Yu committed
158
	{Opt_nodiscard, "nodiscard"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
159
	{Opt_noheap, "no_heap"},
160
	{Opt_heap, "heap"},
161
	{Opt_user_xattr, "user_xattr"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
162
	{Opt_nouser_xattr, "nouser_xattr"},
163
	{Opt_acl, "acl"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
164 165 166
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
167
	{Opt_inline_xattr, "inline_xattr"},
168
	{Opt_noinline_xattr, "noinline_xattr"},
169
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
170
	{Opt_inline_data, "inline_data"},
171
	{Opt_inline_dentry, "inline_dentry"},
172
	{Opt_noinline_dentry, "noinline_dentry"},
173
	{Opt_flush_merge, "flush_merge"},
174
	{Opt_noflush_merge, "noflush_merge"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
175
	{Opt_nobarrier, "nobarrier"},
176
	{Opt_fastboot, "fastboot"},
177
	{Opt_extent_cache, "extent_cache"},
178
	{Opt_noextent_cache, "noextent_cache"},
179
	{Opt_noinline_data, "noinline_data"},
180
	{Opt_data_flush, "data_flush"},
181
	{Opt_reserve_root, "reserve_root=%u"},
182 183
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
184
	{Opt_mode, "mode=%s"},
185
	{Opt_io_size_bits, "io_bits=%u"},
186
	{Opt_fault_injection, "fault_injection=%u"},
187
	{Opt_fault_type, "fault_type=%u"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
188 189
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
Chao Yu's avatar
Chao Yu committed
190 191
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
192 193
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
Chao Yu's avatar
Chao Yu committed
194
	{Opt_prjquota, "prjquota"},
Chao Yu's avatar
Chao Yu committed
195 196 197 198 199 200 201 202 203
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
204
	{Opt_whint, "whint_mode=%s"},
205
	{Opt_alloc, "alloc_mode=%s"},
206
	{Opt_fsync, "fsync_mode=%s"},
207
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
208
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
209
	{Opt_inlinecrypt, "inlinecrypt"},
210 211 212 213
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
Chao Yu's avatar
Chao Yu committed
214 215 216
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
217
	{Opt_atgc, "atgc"},
Jaegeuk Kim's avatar
Jaegeuk Kim committed
218 219 220
	{Opt_err, NULL},
};

221
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
222 223 224
{
	struct va_format vaf;
	va_list args;
225
	int level;
226 227

	va_start(args, fmt);
228 229 230

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
231
	vaf.va = &args;
232 233 234
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

235 236 237
	va_end(args);
}

238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267
#ifdef CONFIG_UNICODE
/* Map from the on-disk encoding magic to a utf8 name/version string. */
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

/*
 * Look up the casefolding encoding recorded in the superblock.
 * On success, point *@encoding at the matching table entry, store the
 * on-disk encoding flags in *@flags, and return 0.  Returns -EINVAL when
 * sb->s_encoding matches no known encoding.
 */
static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}
#endif

268 269
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
270 271
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);
272 273

	/* limit is 0.2% */
274 275 276
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
277 278
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
279
	}
280
	if (!test_opt(sbi, RESERVE_ROOT) &&
281
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
282
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
283
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
284
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
285 286 287 288 289
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
290 291
}

292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307
/*
 * Translate the "checkpoint=disable:X%%" percentage form of the mount
 * option into an absolute block count in unusable_cap.  No-op when the
 * percentage form was not given (unusable_cap_perc == 0).
 */
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	/* special-case 100%: the (count / 100) * perc form below rounds down */
	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
308 309 310 311 312 313 314
/*
 * One-time object constructor (presumably for the f2fs inode slab cache
 * declared above — confirm at the kmem_cache_create() call site):
 * initializes the embedded VFS inode.
 */
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

Chao Yu's avatar
Chao Yu committed
315 316 317 318 319 320 321 322 323 324
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
/*
 * Record the journaled quota file name for @qtype from the mount option
 * argument.  Rejects changes while quota is already loaded, ignores the
 * name when the quota_ino feature supersedes it, and refuses paths that
 * are not on the filesystem root.  Takes ownership of the strdup'd name
 * on success; frees it on every error path.
 */
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		/* re-specifying the identical name is harmless */
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

363
	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
364
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
Chao Yu's avatar
Chao Yu committed
365 366
		return -EINVAL;
	}
367
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
368
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
Chao Yu's avatar
Chao Yu committed
369 370 371 372 373 374 375 376 377 378
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
379
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
380
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
Chao Yu's avatar
Chao Yu committed
381 382
		return -1;
	}
383 384 385 386 387
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
Chao Yu's avatar
Chao Yu committed
388 389
			clear_opt(sbi, USRQUOTA);

390 391
		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
Chao Yu's avatar
Chao Yu committed
392 393
			clear_opt(sbi, GRPQUOTA);

394 395
		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
Chao Yu's avatar
Chao Yu committed
396 397 398 399
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
400
			f2fs_err(sbi, "old and new quota format mixing");
Chao Yu's avatar
Chao Yu committed
401 402 403
			return -1;
		}

404
		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
405
			f2fs_err(sbi, "journaled quota format not specified");
Chao Yu's avatar
Chao Yu committed
406 407 408
			return -1;
		}
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
409

410
	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
411
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
412
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
413
	}
Chao Yu's avatar
Chao Yu committed
414 415 416 417
	return 0;
}
#endif

418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437
/*
 * Handle the test_dummy_encryption mount option.  Requires the encrypt
 * feature; on remount only a no-op re-specification is permitted.  With
 * CONFIG_FS_ENCRYPTION disabled the option is accepted but ignored with
 * a warning.  @opt is the raw option text, used only for messages.
 */
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}

static int parse_options(struct super_block *sb, char *options, bool is_remount)
464 465 466
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
467
#ifdef CONFIG_F2FS_FS_COMPRESSION
Chao Yu's avatar
Chao Yu committed
468
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
469 470
	int ext_cnt;
#endif
471
	char *p, *name;
472
	int arg = 0;
473 474
	kuid_t uid;
	kgid_t gid;
Chao Yu's avatar
Chao Yu committed
475
	int ret;
476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
497
			if (!strcmp(name, "on")) {
Chao Yu's avatar
Chao Yu committed
498
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
499
			} else if (!strcmp(name, "off")) {
Chao Yu's avatar
Chao Yu committed
500
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
501
			} else if (!strcmp(name, "sync")) {
Chao Yu's avatar
Chao Yu committed
502
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
503
			} else {
504
				kfree(name);
505 506
				return -EINVAL;
			}
507
			kfree(name);
508 509 510 511
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
512 513
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
514
			set_opt(sbi, NORECOVERY);
515 516 517
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
518
		case Opt_discard:
519
			set_opt(sbi, DISCARD);
520
			break;
Chao Yu's avatar
Chao Yu committed
521
		case Opt_nodiscard:
522
			if (f2fs_sb_has_blkzoned(sbi)) {
523
				f2fs_warn(sbi, "discard is required for zoned block devices");
524 525
				return -EINVAL;
			}
Chao Yu's avatar
Chao Yu committed
526
			clear_opt(sbi, DISCARD);
527
			break;
528 529 530
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
531 532 533
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
534
#ifdef CONFIG_F2FS_FS_XATTR
535 536 537
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
538 539 540
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
541 542 543
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
544 545 546
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
547 548 549 550
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
551
			F2FS_OPTION(sbi).inline_xattr_size = arg;
552
			break;
553
#else
554
		case Opt_user_xattr:
555
			f2fs_info(sbi, "user_xattr options not supported");
556
			break;
557
		case Opt_nouser_xattr:
558
			f2fs_info(sbi, "nouser_xattr options not supported");
559
			break;
560
		case Opt_inline_xattr:
561
			f2fs_info(sbi, "inline_xattr options not supported");
562
			break;
563
		case Opt_noinline_xattr:
564
			f2fs_info(sbi, "noinline_xattr options not supported");
565
			break;
566 567
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
568 569 570
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
571 572 573 574
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
575
		case Opt_acl:
576
			f2fs_info(sbi, "acl options not supported");
577
			break;
578
		case Opt_noacl:
579
			f2fs_info(sbi, "noacl options not supported");
580 581 582 583 584
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
Chao Yu's avatar
Chao Yu committed
585 586
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
587
				return -EINVAL;
588
			F2FS_OPTION(sbi).active_logs = arg;
589 590 591 592
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
593 594 595
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
596 597 598
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
599 600 601
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
602 603 604
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
605 606 607
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
608 609 610
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
611 612 613
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
614 615 616
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
617 618 619
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
620 621 622
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
623 624 625
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
626 627 628 629
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
630 631
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
632
			} else {
633
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
634 635 636
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
637 638 639 640 641
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
642
				f2fs_err(sbi, "Invalid uid value %d", arg);
643 644
				return -EINVAL;
			}
645
			F2FS_OPTION(sbi).s_resuid = uid;
646 647 648 649 650 651
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
652
				f2fs_err(sbi, "Invalid gid value %d", arg);
653 654
				return -EINVAL;
			}
655
			F2FS_OPTION(sbi).s_resgid = gid;
656
			break;
657 658 659 660 661
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
662
			if (!strcmp(name, "adaptive")) {
663
				if (f2fs_sb_has_blkzoned(sbi)) {
664
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
665
					kfree(name);
666 667
					return -EINVAL;
				}
668
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
669
			} else if (!strcmp(name, "lfs")) {
670
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
671
			} else {
672
				kfree(name);
673 674
				return -EINVAL;
			}
675
			kfree(name);
676
			break;
677 678 679
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
680
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
681 682
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_PAGES);
683 684
				return -EINVAL;
			}
685
			F2FS_OPTION(sbi).write_io_size_bits = arg;
686
			break;
687
#ifdef CONFIG_F2FS_FAULT_INJECTION
688 689 690
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
691 692 693
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;
694

695 696 697 698
		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
699
			set_opt(sbi, FAULT_INJECTION);
700
			break;
701
#else
702
		case Opt_fault_injection:
703
			f2fs_info(sbi, "fault_injection options not supported");
704
			break;
705 706

		case Opt_fault_type:
707
			f2fs_info(sbi, "fault_type options not supported");
708 709
			break;
#endif
Jaegeuk Kim's avatar
Jaegeuk Kim committed
710
		case Opt_lazytime:
711
			sb->s_flags |= SB_LAZYTIME;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
712 713
			break;
		case Opt_nolazytime:
714
			sb->s_flags &= ~SB_LAZYTIME;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
715
			break;
716
#ifdef CONFIG_QUOTA
Chao Yu's avatar
Chao Yu committed
717
		case Opt_quota:
718 719 720 721 722 723
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
Chao Yu's avatar
Chao Yu committed
724 725 726
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
Chao Yu's avatar
Chao Yu committed
727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
758
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
Chao Yu's avatar
Chao Yu committed
759 760
			break;
		case Opt_jqfmt_vfsv0:
761
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
Chao Yu's avatar
Chao Yu committed
762 763
			break;
		case Opt_jqfmt_vfsv1:
764
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
Chao Yu's avatar
Chao Yu committed
765 766 767 768 769 770 771
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
772
#else
Chao Yu's avatar
Chao Yu committed
773
		case Opt_quota:
774 775
		case Opt_usrquota:
		case Opt_grpquota:
Chao Yu's avatar
Chao Yu committed
776
		case Opt_prjquota:
Chao Yu's avatar
Chao Yu committed
777 778 779 780 781 782 783 784 785 786
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
787
			f2fs_info(sbi, "quota operations not supported");
788 789
			break;
#endif
790 791 792 793
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
794
			if (!strcmp(name, "user-based")) {
795
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
796
			} else if (!strcmp(name, "off")) {
797
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
798
			} else if (!strcmp(name, "fs-based")) {
799
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
800
			} else {
801
				kfree(name);
802 803
				return -EINVAL;
			}
804
			kfree(name);
805
			break;
806 807 808 809 810
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

811
			if (!strcmp(name, "default")) {
812
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
813
			} else if (!strcmp(name, "reuse")) {
814
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
815
			} else {
816
				kfree(name);
817 818
				return -EINVAL;
			}
819
			kfree(name);
820
			break;
821 822 823 824
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
825
			if (!strcmp(name, "posix")) {
826
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
827
			} else if (!strcmp(name, "strict")) {
828
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
829
			} else if (!strcmp(name, "nobarrier")) {
830 831
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
832
			} else {
833
				kfree(name);
834 835
				return -EINVAL;
			}
836
			kfree(name);
837
			break;
838
		case Opt_test_dummy_encryption:
839 840 841 842
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
843
			break;
844 845 846 847 848 849 850
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
851 852
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
Daniel Rosenberg's avatar
Daniel Rosenberg committed
853
				return -EINVAL;
854 855
			if (arg < 0 || arg > 100)
				return -EINVAL;
856
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
857 858 859 860 861 862 863 864 865 866 867 868 869
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
870
			break;
871
#ifdef CONFIG_F2FS_FS_COMPRESSION
Chao Yu's avatar
Chao Yu committed
872 873
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
874 875
				f2fs_info(sbi, "Image doesn't support compression");
				break;
Chao Yu's avatar
Chao Yu committed
876 877 878 879
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
880
			if (!strcmp(name, "lzo")) {
Chao Yu's avatar
Chao Yu committed
881 882
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
883
			} else if (!strcmp(name, "lz4")) {
Chao Yu's avatar
Chao Yu committed
884 885
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
886
			} else if (!strcmp(name, "zstd")) {
887 888
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
889 890 891
			} else if (!strcmp(name, "lzo-rle")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
Chao Yu's avatar
Chao Yu committed
892 893 894 895 896 897 898 899
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
900 901
				f2fs_info(sbi, "Image doesn't support compression");
				break;
Chao Yu's avatar
Chao Yu committed
902 903 904 905 906 907 908 909 910 911 912 913 914
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
915 916
				f2fs_info(sbi, "Image doesn't support compression");
				break;
Chao Yu's avatar
Chao Yu committed
917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
937 938 939 940 941 942 943
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
944 945 946
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
947
		default:
948 949
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
950 951 952
			return -EINVAL;
		}
	}
Chao Yu's avatar
Chao Yu committed
953 954 955
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
956
#else
957
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
958
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
959 960
		return -EINVAL;
	}
961
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
962
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
963 964
		return -EINVAL;
	}
Chao Yu's avatar
Chao Yu committed
965
#endif
966 967 968 969 970 971 972
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
973 974 975 976 977 978 979 980 981 982 983
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
	}
#endif
984

985
	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
986 987
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
988 989
		return -EINVAL;
	}
990 991

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
992 993
		int min_size, max_size;

994 995
		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
996
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
997 998
			return -EINVAL;
		}
999
		if (!test_opt(sbi, INLINE_XATTR)) {
1000
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
1001 1002
			return -EINVAL;
		}
1003 1004

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
1005
		max_size = MAX_INLINE_XATTR_SIZE;
1006 1007 1008

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
1009 1010
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
1011 1012 1013
			return -EINVAL;
		}
	}
1014

1015
	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
1016
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1017 1018 1019
		return -EINVAL;
	}

1020
	/* Not pass down write hints if the number of active logs is lesser
Chao Yu's avatar
Chao Yu committed
1021
	 * than NR_CURSEG_PERSIST_TYPE.
1022
	 */
1023 1024
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
1025 1026 1027
	return 0;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1028 1029 1030 1031
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

1032
	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1033 1034 1035 1036 1037
	if (!fi)
		return NULL;

	init_once((void *) fi);

Masanari Iida's avatar
Masanari Iida committed
1038
	/* Initialize f2fs-specific inode info */
1039
	atomic_set(&fi->dirty_pages, 0);
1040
	atomic_set(&fi->i_compr_blocks, 0);
1041
	init_rwsem(&fi->i_sem);
1042
	spin_lock_init(&fi->i_size_lock);
1043
	INIT_LIST_HEAD(&fi->dirty_list);
1044
	INIT_LIST_HEAD(&fi->gdirty_list);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1045
	INIT_LIST_HEAD(&fi->inmem_ilist);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1046 1047
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
1048 1049
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
1050
	init_rwsem(&fi->i_mmap_sem);
1051
	init_rwsem(&fi->i_xattr_sem);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1052

1053 1054
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;
1055

1056 1057
	fi->ra_offset = -1;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1058 1059 1060
	return &fi->vfs_inode;
}

1061 1062
static int f2fs_drop_inode(struct inode *inode)
{
1063
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1064
	int ret;
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

1078 1079 1080 1081 1082 1083 1084
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
1085
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
1086
		if (!inode->i_nlink && !is_bad_inode(inode)) {
1087 1088
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
1089 1090 1091 1092
			spin_unlock(&inode->i_lock);

			/* some remained atomic pages should discarded */
			if (f2fs_is_atomic_file(inode))
Chao Yu's avatar
Chao Yu committed
1093
				f2fs_drop_inmem_pages(inode);
1094

1095 1096 1097
			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

1098
			sb_start_intwrite(inode->i_sb);
1099
			f2fs_i_size_write(inode, 0);
1100

1101 1102 1103 1104
			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

1105
			if (F2FS_HAS_BLOCKS(inode))
1106
				f2fs_truncate(inode);
1107 1108 1109 1110

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
1111
			atomic_dec(&inode->i_count);
1112
		}
1113
		trace_f2fs_drop_inode(inode, 0);
1114
		return 0;
1115
	}
1116
	ret = generic_drop_inode(inode);
1117 1118
	if (!ret)
		ret = fscrypt_drop_inode(inode);
1119 1120
	trace_f2fs_drop_inode(inode, ret);
	return ret;
1121 1122
}

1123
/*
 * Mark @inode's metadata dirty; returns 1 if it was already dirty, 0 if
 * this call made it dirty.  With @sync, also queue it on the global
 * DIRTY_META list so checkpoint can write it back.
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int was_dirty = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	} else {
		was_dirty = 1;
	}
	if (sync && list_empty(&fi->gdirty_list)) {
		list_add_tail(&fi->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	return was_dirty;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
1153 1154 1155 1156
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
1157
	clear_inode_flag(inode, FI_DIRTY_INODE);
1158
	clear_inode_flag(inode, FI_AUTO_RECOVER);
1159
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1160
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1161 1162
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

1182
	f2fs_inode_dirtied(inode, false);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1183 1184
}

Al Viro's avatar
Al Viro committed
1185
static void f2fs_free_inode(struct inode *inode)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1186
{
1187
	fscrypt_free_inode(inode);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1188 1189 1190
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

1191 1192
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
1193
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
1194
	percpu_counter_destroy(&sbi->total_valid_inode_count);
1195 1196
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1197 1198 1199 1200 1201 1202 1203
/* Close every backing block device and free the device array. */
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int idx;

	for (idx = 0; idx < sbi->s_ndevs; idx++) {
		blkdev_put(FDEV(idx).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		/* per-device zone tracking state */
		kvfree(FDEV(idx).blkz_seq);
		kfree(FDEV(idx).zone_capacity_blocks);
#endif
	}
	kvfree(sbi->devs);
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1211 1212 1213
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
Chao Yu's avatar
Chao Yu committed
1214
	int i;
1215
	bool dropped;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1216

Li Guifu's avatar
Li Guifu committed
1217 1218 1219
	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

1220
	f2fs_quota_off_umount(sb);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1221

1222 1223 1224
	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

1225 1226 1227 1228 1229
	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1230 1231
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
1232 1233 1234
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
Chao Yu's avatar
Chao Yu committed
1235
		f2fs_write_checkpoint(sbi, &cpc);
1236
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1237

1238
	/* be sure to wait for any on-going discard commands */
1239
	dropped = f2fs_issue_discard_timeout(sbi);
1240

1241 1242
	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
1243 1244 1245
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
Chao Yu's avatar
Chao Yu committed
1246
		f2fs_write_checkpoint(sbi, &cpc);
1247 1248
	}

1249 1250 1251 1252
	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
Chao Yu's avatar
Chao Yu committed
1253
	f2fs_release_ino_entry(sbi, true);
1254

1255 1256 1257
	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

1258
	/* our cp_error case, we can wait for any writeback page */
1259
	f2fs_flush_merged_writes(sbi);
1260

1261
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1262 1263 1264

	f2fs_bug_on(sbi, sbi->fsync_node_num);

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1265
	iput(sbi->node_inode);
1266 1267
	sbi->node_inode = NULL;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1268
	iput(sbi->meta_inode);
1269
	sbi->meta_inode = NULL;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1270

1271 1272 1273 1274 1275 1276
	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1277
	/* destroy f2fs internal modules */
Chao Yu's avatar
Chao Yu committed
1278 1279
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1280

Chao Yu's avatar
Chao Yu committed
1281 1282
	f2fs_destroy_post_read_wq(sbi);

1283
	kvfree(sbi->ckpt);
Chao Yu's avatar
Chao Yu committed
1284

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1285
	sb->s_fs_info = NULL;
1286 1287
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
1288
	kfree(sbi->raw_super);
1289

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1290
	destroy_device_list(sbi);
1291
	f2fs_destroy_page_array_cache(sbi);
1292
	f2fs_destroy_xattr_caches(sbi);
1293
	mempool_destroy(sbi->write_io_dummy);
Chao Yu's avatar
Chao Yu committed
1294 1295
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
1296
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
Chao Yu's avatar
Chao Yu committed
1297
#endif
1298
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
1299
	destroy_percpu_info(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1300
	for (i = 0; i < NR_PAGE_TYPE; i++)
1301
		kvfree(sbi->write_io[i]);
1302
#ifdef CONFIG_UNICODE
1303
	utf8_unload(sb->s_encoding);
1304
#endif
1305
	kfree(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1306 1307 1308 1309 1310
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
Chao Yu's avatar
Chao Yu committed
1311
	int err = 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1312

1313 1314
	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1315 1316
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;
1317

1318 1319
	trace_f2fs_sync_fs(sb, sync);

Chao Yu's avatar
Chao Yu committed
1320 1321 1322
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

1323
	if (sync) {
1324 1325
		struct cp_control cpc;

1326 1327
		cpc.reason = __get_cp_reason(sbi);

1328
		down_write(&sbi->gc_lock);
Chao Yu's avatar
Chao Yu committed
1329
		err = f2fs_write_checkpoint(sbi, &cpc);
1330
		up_write(&sbi->gc_lock);
1331
	}
1332
	f2fs_trace_ios(NULL, 1);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1333

Chao Yu's avatar
Chao Yu committed
1334
	return err;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1335 1336
}

1337 1338
static int f2fs_freeze(struct super_block *sb)
{
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1339
	if (f2fs_readonly(sb))
1340 1341
		return 0;

1342 1343 1344 1345 1346 1347 1348 1349
	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
1350 1351 1352 1353 1354 1355 1356
}

/* Nothing to undo on thaw; freeze left no transient state behind. */
static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

Chao Yu's avatar
Chao Yu committed
1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369
#ifdef CONFIG_QUOTA
/*
 * Clamp the statfs numbers in @buf to the project quota limits of
 * @projid, mirroring what other quota-aware filesystems report.
 */
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	/* block limit: the tighter non-zero of soft/hard, in fs blocks */
	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		u64 cur_blocks = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;

		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > cur_blocks) ?
			 (buf->f_blocks - cur_blocks) : 0;
	}

	/* inode limit: same tighter-of-soft/hard rule */
	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1402 1403 1404 1405 1406
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1407
	block_t total_count, user_block_count, start_count;
1408
	u64 avail_node_count;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1409 1410 1411 1412 1413 1414 1415 1416

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
1417
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1418
						sbi->current_reserved_blocks;
1419 1420

	spin_lock(&sbi->stat_lock);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1421 1422 1423 1424
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
1425
	spin_unlock(&sbi->stat_lock);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1426

1427 1428 1429
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
1430 1431
	else
		buf->f_bavail = 0;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1432

1433
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
1434 1435 1436 1437 1438 1439 1440 1441 1442

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1443

1444
	buf->f_namelen = F2FS_NAME_LEN;
1445
	buf->f_fsid    = u64_to_fsid(id);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1446

Chao Yu's avatar
Chao Yu committed
1447 1448 1449 1450 1451 1452
#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1453 1454 1455
	return 0;
}

Chao Yu's avatar
Chao Yu committed
1456 1457 1458 1459 1460 1461
/* Emit quota-related mount options into /proc/mounts output. */
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmt = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmt = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmt = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmt = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmt);
	}

	/* seq_show_option escapes the user-supplied quota file names */
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}

Chao Yu's avatar
Chao Yu committed
1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509
static inline void f2fs_show_compress_options(struct seq_file *seq,
							struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *algtype = "";
	int i;

	if (!f2fs_sb_has_compression(sbi))
		return;

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZO:
		algtype = "lzo";
		break;
	case COMPRESS_LZ4:
		algtype = "lz4";
		break;
1510 1511 1512
	case COMPRESS_ZSTD:
		algtype = "zstd";
		break;
1513 1514 1515
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
		break;
Chao Yu's avatar
Chao Yu committed
1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527
	}
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);
	}
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1528 1529 1530 1531
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

Chao Yu's avatar
Chao Yu committed
1532 1533 1534 1535 1536
	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
1537
		seq_printf(seq, ",background_gc=%s", "off");
Chao Yu's avatar
Chao Yu committed
1538

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1539 1540
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
1541 1542
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1543 1544
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
1545 1546
	else
		seq_puts(seq, ",nodiscard");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1547
	if (test_opt(sbi, NOHEAP))
1548 1549 1550
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1551 1552 1553 1554 1555
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
1556 1557
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
1558 1559
	else
		seq_puts(seq, ",noinline_xattr");
1560 1561
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
1562
					F2FS_OPTION(sbi).inline_xattr_size);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1563 1564 1565 1566 1567 1568 1569 1570
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
1571
		seq_puts(seq, ",disable_ext_identify");
1572 1573
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
1574 1575
	else
		seq_puts(seq, ",noinline_data");
1576 1577
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
1578 1579
	else
		seq_puts(seq, ",noinline_dentry");
1580
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
1581
		seq_puts(seq, ",flush_merge");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1582 1583
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
1584 1585
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
1586 1587
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
1588 1589
	else
		seq_puts(seq, ",noextent_cache");
1590 1591
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");
1592 1593

	seq_puts(seq, ",mode=");
1594
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
1595
		seq_puts(seq, "adaptive");
1596
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
1597
		seq_puts(seq, "lfs");
1598
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
1599
	if (test_opt(sbi, RESERVE_ROOT))
1600
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
1601 1602 1603 1604 1605
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
1606
	if (F2FS_IO_SIZE_BITS(sbi))
1607 1608
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
1609
#ifdef CONFIG_F2FS_FAULT_INJECTION
1610
	if (test_opt(sbi, FAULT_INJECTION)) {
1611
		seq_printf(seq, ",fault_injection=%u",
1612
				F2FS_OPTION(sbi).fault_info.inject_rate);
1613 1614 1615
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
1616
#endif
1617
#ifdef CONFIG_QUOTA
Chao Yu's avatar
Chao Yu committed
1618 1619
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
1620 1621 1622 1623
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
Chao Yu's avatar
Chao Yu committed
1624 1625
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
1626
#endif
Chao Yu's avatar
Chao Yu committed
1627
	f2fs_show_quota_options(seq, sbi->sb);
1628
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
1629
		seq_printf(seq, ",whint_mode=%s", "user-based");
1630
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
1631
		seq_printf(seq, ",whint_mode=%s", "fs-based");
1632 1633

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1634

1635 1636 1637
	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

1638
	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
1639
		seq_printf(seq, ",alloc_mode=%s", "default");
1640
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
1641
		seq_printf(seq, ",alloc_mode=%s", "reuse");
1642

Daniel Rosenberg's avatar
Daniel Rosenberg committed
1643
	if (test_opt(sbi, DISABLE_CHECKPOINT))
1644 1645
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
1646
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
1647
		seq_printf(seq, ",fsync_mode=%s", "posix");
1648
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
1649
		seq_printf(seq, ",fsync_mode=%s", "strict");
1650 1651
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
Chao Yu's avatar
Chao Yu committed
1652

1653
#ifdef CONFIG_F2FS_FS_COMPRESSION
Chao Yu's avatar
Chao Yu committed
1654
	f2fs_show_compress_options(seq, sbi->sb);
1655
#endif
1656 1657 1658

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1659 1660 1661
	return 0;
}

1662 1663 1664
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
Chao Yu's avatar
Chao Yu committed
1665
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
1666 1667 1668 1669
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
1670 1671
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
1672
	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
Chao Yu's avatar
Chao Yu committed
1673 1674
	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
	F2FS_OPTION(sbi).compress_ext_cnt = 0;
Chao Yu's avatar
Chao Yu committed
1675
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
1676

1677 1678
	sbi->sb->s_flags &= ~SB_INLINECRYPT;

1679
	set_opt(sbi, INLINE_XATTR);
1680
	set_opt(sbi, INLINE_DATA);
1681
	set_opt(sbi, INLINE_DENTRY);
1682
	set_opt(sbi, EXTENT_CACHE);
1683
	set_opt(sbi, NOHEAP);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1684
	clear_opt(sbi, DISABLE_CHECKPOINT);
1685
	F2FS_OPTION(sbi).unusable_cap = 0;
1686
	sbi->sb->s_flags |= SB_LAZYTIME;
1687
	set_opt(sbi, FLUSH_MERGE);
1688
	set_opt(sbi, DISCARD);
1689
	if (f2fs_sb_has_blkzoned(sbi))
1690
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
1691
	else
1692
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
1693 1694 1695 1696 1697 1698 1699

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
1700

1701
	f2fs_build_fault_attr(sbi, 0, 0);
1702 1703
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1704 1705 1706
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1707 1708 1709

static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
1710
	unsigned int s_flags = sbi->sb->s_flags;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1711
	struct cp_control cpc;
1712 1713
	int err = 0;
	int ret;
1714
	block_t unusable;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1715

1716
	if (s_flags & SB_RDONLY) {
1717
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
1718 1719
		return -EINVAL;
	}
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1720 1721 1722 1723 1724
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
1725
		down_write(&sbi->gc_lock);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1726
		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1727 1728
		if (err == -ENODATA) {
			err = 0;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1729
			break;
1730
		}
1731
		if (err && err != -EAGAIN)
1732
			break;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1733 1734
	}

1735 1736 1737 1738 1739
	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret: err;
		goto restore_flag;
	}
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1740

1741 1742
	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
1743 1744 1745
		err = -EAGAIN;
		goto restore_flag;
	}
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1746

1747
	down_write(&sbi->gc_lock);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1748 1749
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
1750 1751 1752
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1753

1754
	spin_lock(&sbi->stat_lock);
1755
	sbi->unusable_block_count = unusable;
1756 1757
	spin_unlock(&sbi->stat_lock);

1758
out_unlock:
1759
	up_write(&sbi->gc_lock);
1760
restore_flag:
Chao Yu's avatar
Chao Yu committed
1761
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
1762
	return err;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1763 1764 1765 1766
}

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
1767
	down_write(&sbi->gc_lock);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1768 1769 1770 1771
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
1772
	up_write(&sbi->gc_lock);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1773 1774 1775 1776

	f2fs_sync_fs(sbi->sb, 1);
}

1777 1778 1779 1780
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
1781
	unsigned long old_sb_flags;
1782
	int err;
1783 1784
	bool need_restart_gc = false;
	bool need_stop_gc = false;
1785
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1786
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
1787
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
1788
	bool no_atgc = !test_opt(sbi, ATGC);
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1789
	bool checkpoint_changed;
Chao Yu's avatar
Chao Yu committed
1790 1791 1792
#ifdef CONFIG_QUOTA
	int i, j;
#endif
1793 1794 1795 1796 1797 1798

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
1799
	old_sb_flags = sb->s_flags;
1800

Chao Yu's avatar
Chao Yu committed
1801
#ifdef CONFIG_QUOTA
1802
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
Chao Yu's avatar
Chao Yu committed
1803
	for (i = 0; i < MAXQUOTAS; i++) {
1804 1805 1806 1807 1808
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
Chao Yu's avatar
Chao Yu committed
1809
				for (j = 0; j < i; j++)
1810
					kfree(org_mount_opt.s_qf_names[j]);
Chao Yu's avatar
Chao Yu committed
1811 1812 1813
				return -ENOMEM;
			}
		} else {
1814
			org_mount_opt.s_qf_names[i] = NULL;
Chao Yu's avatar
Chao Yu committed
1815 1816 1817 1818
		}
	}
#endif

1819
	/* recover superblocks we couldn't write due to previous RO mount */
1820
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
1821
		err = f2fs_commit_super(sbi, false);
1822 1823
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
1824 1825 1826 1827
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

1828
	default_options(sbi);
1829

1830
	/* parse mount options */
1831
	err = parse_options(sb, data, true);
1832 1833
	if (err)
		goto restore_opts;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1834 1835
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
1836 1837 1838

	/*
	 * Previous and new state of filesystem is RO,
1839
	 * so skip checking GC and FLUSH_MERGE conditions.
1840
	 */
1841
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
1842 1843
		goto skip;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
1844
#ifdef CONFIG_QUOTA
1845
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
1846 1847 1848
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1849
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
1850
		/* dquot_resume needs RW */
1851
		sb->s_flags &= ~SB_RDONLY;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1852 1853
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
1854
		} else if (f2fs_sb_has_quota_ino(sbi)) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1855 1856 1857 1858
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
1859
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1860
#endif
1861 1862 1863 1864 1865 1866 1867
	/* disallow enable atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

1868 1869 1870
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
1871
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
1872 1873 1874
		goto restore_opts;
	}

1875 1876 1877 1878 1879 1880
	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

Daniel Rosenberg's avatar
Daniel Rosenberg committed
1881 1882
	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
1883
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
Daniel Rosenberg's avatar
Daniel Rosenberg committed
1884 1885 1886
		goto restore_opts;
	}

1887 1888 1889 1890 1891
	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
Chao Yu's avatar
Chao Yu committed
1892 1893
	if ((*flags & SB_RDONLY) ||
			F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) {
1894
		if (sbi->gc_thread) {
Chao Yu's avatar
Chao Yu committed
1895
			f2fs_stop_gc_thread(sbi);
1896
			need_restart_gc = true;
1897
		}
1898
	} else if (!sbi->gc_thread) {
Chao Yu's avatar
Chao Yu committed
1899
		err = f2fs_start_gc_thread(sbi);
1900 1901
		if (err)
			goto restore_opts;
1902 1903 1904
		need_stop_gc = true;
	}

1905 1906
	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
1907 1908 1909 1910 1911 1912 1913 1914
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

Daniel Rosenberg's avatar
Daniel Rosenberg committed
1915 1916 1917 1918 1919 1920 1921 1922 1923 1924
	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_gc;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

1925 1926 1927 1928
	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
1929
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
1930
		clear_opt(sbi, FLUSH_MERGE);
Chao Yu's avatar
Chao Yu committed
1931
		f2fs_destroy_flush_cmd_control(sbi, false);
1932
	} else {
Chao Yu's avatar
Chao Yu committed
1933
		err = f2fs_create_flush_cmd_control(sbi);
1934
		if (err)
1935
			goto restore_gc;
1936 1937
	}
skip:
Chao Yu's avatar
Chao Yu committed
1938 1939 1940
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
1941
		kfree(org_mount_opt.s_qf_names[i]);
Chao Yu's avatar
Chao Yu committed
1942
#endif
1943
	/* Update the POSIXACL Flag */
1944 1945
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
1946

1947
	limit_reserve_root(sbi);
1948
	adjust_unusable_cap_perc(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
1949
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
1950
	return 0;
1951 1952
restore_gc:
	if (need_restart_gc) {
Chao Yu's avatar
Chao Yu committed
1953
		if (f2fs_start_gc_thread(sbi))
1954
			f2fs_warn(sbi, "background gc thread has stopped");
1955
	} else if (need_stop_gc) {
Chao Yu's avatar
Chao Yu committed
1956
		f2fs_stop_gc_thread(sbi);
1957
	}
1958
restore_opts:
Chao Yu's avatar
Chao Yu committed
1959
#ifdef CONFIG_QUOTA
1960
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
Chao Yu's avatar
Chao Yu committed
1961
	for (i = 0; i < MAXQUOTAS; i++) {
1962
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
1963
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
Chao Yu's avatar
Chao Yu committed
1964 1965
	}
#endif
1966
	sbi->mount_opt = org_mount_opt;
1967
	sb->s_flags = old_sb_flags;
1968 1969 1970
	return err;
}

1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994
#ifdef CONFIG_QUOTA
/*
 * Read data from quotafile. Walks the quota inode's page cache block by
 * block, retrying page reads on -ENOMEM after a congestion wait, and
 * tags SBI_QUOTA_NEED_REPAIR on any hard read failure.
 */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	/* clamp the request to the quota file's current size */
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		/* page was truncated/invalidated under us: retry */
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/*
 * Write to quotafile. Goes through the address-space write_begin/write_end
 * operations one block at a time; -ENOMEM from write_begin is retried after
 * a congestion wait, other failures tag SBI_QUOTA_NEED_REPAIR.
 * Returns the number of bytes written, or an error if nothing was written.
 */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	/* nothing written at all: report the error (or 0) */
	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}

/* Return the per-inode dquot pointer array for the VFS quota core. */
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

/* Return the per-inode reserved-quota counter used by the quota core. */
static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

Chao Yu's avatar
Chao Yu committed
2093 2094
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
2095
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2096
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2097 2098 2099
		return 0;
	}

2100 2101
	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
Chao Yu's avatar
Chao Yu committed
2102 2103
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
2104
/*
 * Enable quotas at mount time. With the quota_ino feature on an RO mount,
 * enable the hidden quota inodes; otherwise turn on each named quota file.
 * Returns 1 if any quota got enabled, 0 otherwise (errors are logged,
 * not propagated).
 */
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}

/*
 * Enable one quota type backed by a hidden quota inode (quota_ino feature).
 * Looks up the reserved inode number for @type and loads it into the
 * quota subsystem.
 */
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}

static int f2fs_enable_quotas(struct super_block *sb)
{
2160
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2161 2162 2163
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
2164 2165 2166
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2167 2168
	};

2169
	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2170
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2171 2172 2173 2174 2175
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
2176 2177 2178 2179 2180 2181 2182
	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
2183 2184
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2185 2186
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
2187 2188
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2189 2190
				return err;
			}
Chao Yu's avatar
Chao Yu committed
2191 2192
		}
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2193
	return 0;
Chao Yu's avatar
Chao Yu committed
2194 2195
}

2196
int f2fs_quota_sync(struct super_block *sb, int type)
2197
{
2198
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2199 2200 2201 2202
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214
	/*
	 * do_quotactl
	 *  f2fs_quota_sync
	 *  down_read(quota_sem)
	 *  dquot_writeback_dquots()
	 *  f2fs_dquot_commit
	 *                            block_operation
	 *                            down_read(quota_sem)
	 */
	f2fs_lock_op(sbi);

	down_read(&sbi->quota_sem);
2215 2216
	ret = dquot_writeback_dquots(sb, type);
	if (ret)
2217
		goto out;
2218 2219 2220 2221 2222 2223

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2224 2225
		struct address_space *mapping;

2226 2227 2228 2229 2230
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

2231 2232 2233
		mapping = dqopt->files[cnt]->i_mapping;

		ret = filemap_fdatawrite(mapping);
2234
		if (ret)
2235 2236 2237 2238 2239 2240 2241 2242 2243
			goto out;

		/* if we are using journalled quota */
		if (is_journalled_quota(sbi))
			continue;

		ret = filemap_fdatawait(mapping);
		if (ret)
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2244 2245 2246 2247 2248

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
2249 2250 2251
out:
	if (ret)
		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2252 2253
	up_read(&sbi->quota_sem);
	f2fs_unlock_op(sbi);
2254
	return ret;
2255 2256 2257 2258 2259 2260 2261 2262
}

static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

2263 2264 2265 2266 2267 2268
	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

2269
	err = f2fs_quota_sync(sb, type);
2270 2271 2272 2273 2274 2275 2276 2277 2278 2279
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
2280
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
Chao Yu's avatar
Chao Yu committed
2281
	f2fs_set_inode_flags(inode);
2282 2283 2284 2285 2286 2287
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

2288
static int __f2fs_quota_off(struct super_block *sb, int type)
2289 2290 2291 2292 2293 2294 2295
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

2296 2297 2298
	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;
2299 2300

	err = dquot_quota_off(sb, type);
2301
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
2302 2303 2304
		goto out_put;

	inode_lock(inode);
2305
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
Chao Yu's avatar
Chao Yu committed
2306
	f2fs_set_inode_flags(inode);
2307 2308 2309 2310 2311 2312 2313
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330
/* quotactl Q_QUOTAOFF handler; see the journalled-quota note below. */
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shutdown journalled quota, result in inconsistence
	 * between quota record and fs data by following updates, tag the
	 * flag to let fsck be aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return err;
}

Chao Yu's avatar
Chao Yu committed
2331
void f2fs_quota_off_umount(struct super_block *sb)
2332 2333
{
	int type;
2334 2335 2336
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
2337
		err = __f2fs_quota_off(sb, type);
2338 2339
		if (err) {
			int ret = dquot_quota_off(sb, type);
2340

2341 2342
			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
2343
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2344 2345
		}
	}
2346 2347 2348 2349 2350 2351
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
2352 2353
}

2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365
/* Mark every active quota inode as synced (clears its dirty state). */
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}

2366 2367
static int f2fs_dquot_commit(struct dquot *dquot)
{
2368
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2369 2370
	int ret;

2371
	down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
2372 2373
	ret = dquot_commit(dquot);
	if (ret < 0)
2374 2375
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
2376 2377 2378 2379 2380
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
2381
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2382 2383
	int ret;

2384
	down_read(&sbi->quota_sem);
2385 2386
	ret = dquot_acquire(dquot);
	if (ret < 0)
2387 2388
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
2389 2390 2391 2392 2393
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
2394
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2395
	int ret = dquot_release(dquot);
2396 2397

	if (ret < 0)
2398
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2399 2400 2401 2402 2403 2404 2405
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2406
	int ret = dquot_mark_dquot_dirty(dquot);
2407 2408 2409 2410 2411 2412 2413 2414 2415 2416

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
2417
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2418
	int ret = dquot_commit_info(sb, type);
2419 2420

	if (ret < 0)
2421
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2422 2423
	return ret;
}
2424

2425
/* dquot_operations.get_projid: report the inode's project id. */
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

2431 2432
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
2433 2434 2435 2436 2437
	.write_dquot	= f2fs_dquot_commit,
	.acquire_dquot	= f2fs_dquot_acquire,
	.release_dquot	= f2fs_dquot_release,
	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
	.write_info	= f2fs_dquot_commit_info,
2438 2439
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
Chao Yu's avatar
Chao Yu committed
2440
	.get_projid	= f2fs_get_projid,
2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454
	.get_next_id	= dquot_get_next_id,
};

/* quotactl(2) entry points; generic dquot helpers where no f2fs hook is needed. */
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
/* CONFIG_QUOTA=n stubs so callers need no #ifdef guards. */
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

2465
static const struct super_operations f2fs_sops = {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2466
	.alloc_inode	= f2fs_alloc_inode,
Al Viro's avatar
Al Viro committed
2467
	.free_inode	= f2fs_free_inode,
2468
	.drop_inode	= f2fs_drop_inode,
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2469
	.write_inode	= f2fs_write_inode,
2470
	.dirty_inode	= f2fs_dirty_inode,
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2471
	.show_options	= f2fs_show_options,
2472 2473 2474 2475 2476
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2477 2478 2479
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
2480 2481
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2482
	.statfs		= f2fs_statfs,
2483
	.remount_fs	= f2fs_remount,
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2484 2485
};

2486
#ifdef CONFIG_FS_ENCRYPTION
/* fscrypt hook: read the encryption context xattr of @inode. */
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

/* fscrypt hook: store the encryption context xattr on @inode. */
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 *
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

2514
static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
2515
{
2516
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
2517 2518
}

2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530
/* fscrypt hook: f2fs inode numbers are stable (required for IV_INO_LBLK). */
static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}

/* fscrypt hook: report the bit widths of inode and logical block numbers. */
static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(nid_t);
	*lblk_bits_ret = 8 * sizeof(block_t);
}

2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549
/* fscrypt hook: number of block devices backing this filesystem. */
static int f2fs_get_num_devices(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (f2fs_is_multi_device(sbi))
		return sbi->s_ndevs;
	return 1;
}

/* fscrypt hook: fill @devs with the request queue of each backing device. */
static void f2fs_get_devices(struct super_block *sb,
			     struct request_queue **devs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		devs[i] = bdev_get_queue(FDEV(i).bdev);
}

2550
static const struct fscrypt_operations f2fs_cryptops = {
2551 2552 2553
	.key_prefix		= "f2fs:",
	.get_context		= f2fs_get_context,
	.set_context		= f2fs_set_context,
2554
	.get_dummy_policy	= f2fs_get_dummy_policy,
2555 2556 2557 2558
	.empty_dir		= f2fs_empty_dir,
	.max_namelen		= F2FS_NAME_LEN,
	.has_stable_inodes	= f2fs_has_stable_inodes,
	.get_ino_and_lblk_bits	= f2fs_get_ino_and_lblk_bits,
2559 2560
	.get_num_devices	= f2fs_get_num_devices,
	.get_devices		= f2fs_get_devices,
2561 2562 2563
};
#endif

Jaegeuk Kim's avatar
Jaegeuk Kim committed
2564 2565 2566 2567 2568 2569
/*
 * NFS export helper: resolve an (ino, generation) pair to an inode,
 * returning -ESTALE for out-of-range nids or generation mismatches.
 */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

/* export_operations.fh_to_dentry via the generic file-handle decoder. */
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

/* export_operations.fh_to_parent via the generic file-handle decoder. */
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

/* NFS export operations. */
static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

2609
static loff_t max_file_blocks(void)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2610
{
2611
	loff_t result = 0;
2612
	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
2613

2614 2615
	/*
	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
2616
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
2617 2618 2619 2620
	 * space in inode.i_addr, it will be more safe to reassign
	 * result as zero.
	 */

Jaegeuk Kim's avatar
Jaegeuk Kim committed
2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634
	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

2635 2636 2637 2638 2639 2640 2641 2642 2643 2644
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
2645
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2646 2647
}

2648
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2649
					struct buffer_head *bh)
2650
{
2651 2652
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
2653
	struct super_block *sb = sbi->sb;
2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2667 2668 2669 2670
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);
2671 2672

	if (segment0_blkaddr != cp_blkaddr) {
2673 2674
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
2675 2676 2677 2678 2679
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
2680 2681 2682
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
2683 2684 2685 2686 2687
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
2688 2689 2690
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
2691 2692 2693 2694 2695
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
2696 2697 2698
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
2699 2700 2701 2702 2703
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
2704 2705 2706
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
2707 2708 2709
		return true;
	}

2710
	if (main_end_blkaddr > seg_end_blkaddr) {
2711 2712
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
2713
			  segment_count_main << log_blocks_per_seg);
2714
		return true;
2715 2716 2717 2718 2719 2720 2721 2722 2723
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
2724
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2725 2726 2727 2728 2729
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
2730 2731
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
2732
			  segment_count_main << log_blocks_per_seg);
2733 2734
		if (err)
			return true;
2735 2736 2737 2738
	}
	return false;
}

2739
/*
 * Validate an on-disk f2fs superblock read from buffer head @bh before it
 * is trusted by the mount path.
 *
 * Checks, in order: magic number, optional superblock checksum, block/page
 * size assumptions, segment/section/zone geometry, per-device segment
 * accounting, extension list bounds, cp_payload sanity, reserved inode
 * numbers, and finally the CP/SIT/NAT/SSA/MAIN area boundaries.
 *
 * Returns 0 if the superblock looks valid, -EINVAL on a magic mismatch
 * (not an f2fs superblock at all), or -EFSCORRUPTED for an f2fs superblock
 * with inconsistent contents.
 */
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	/* raw image lives at F2FS_SUPER_OFFSET inside the block buffer */
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	unsigned int blocksize;
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		/* the crc field must sit exactly where the struct says */
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
			  PAGE_SIZE);
		return -EFSCORRUPTED;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
			  blocksize);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment: 2^9 = 512 blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	/* sectors-per-block and sectorsize must combine to the block size */
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	/* main area must factor exactly into sections */
	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	/* block_count >> 9 == segments implied by block_count (512 blk/seg) */
	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		/* multi-device image: segments must sum across all devices */
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
					segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		/* single device: BLKZONED feature requires a zoned bdev */
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	/* cold + hot extension lists must fit in the shared table */
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	/* cp_payload plus the checkpoint packs must fit in one segment */
	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_info(sbi, "Insane cp_payload (%u > %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info: node/meta/root are fixed at 1/2/3 */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}

Chao Yu's avatar
Chao Yu committed
2915
/*
 * Cross-check the loaded checkpoint against the superblock.
 *
 * Verifies meta-area segment accounting, user/valid block and node counts,
 * current segment numbers/offsets for all active logs (including pairwise
 * uniqueness), SIT/NAT version bitmap sizes, the summary start block inside
 * the checkpoint pack, and the deprecated large_nat_bitmap layout.
 *
 * Returns 0 when consistent, 1 when any inconsistency is found (callers
 * treat non-zero as "run fsck").
 */
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	int i, j;

	/* sum up all metadata segments: ckpt + SIT + NAT + reserved + SSA */
	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	/* metadata cannot occupy the whole volume */
	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	/* user blocks must be non-zero and strictly inside the main area */
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	/* node curseg: segno/blkoff in range, and segnos pairwise distinct */
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	/* data curseg: same range and uniqueness checks */
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	/* node and data cursegs must never share a segment */
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	/* half the segments (one of two copies), in blocks, in bits -> bytes */
	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	/* summary blocks must fit after cp_payload and before the pack end */
	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

/*
 * Populate the in-memory superblock info (sbi) from the validated raw
 * superblock and set runtime defaults: geometry, reserved inode numbers,
 * GC victim state, tuning intervals, per-type page counters, and the
 * locks/lists used for the lifetime of the mount.
 *
 * Called once during fill_super, before most subsystems are built.
 */
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	/* cache on-disk geometry in host-endian form */
	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	/* NAT keeps two copies, so only half the NAT segments hold entries */
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	/* no GC victim selected yet */
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	/* default tuning knobs; most are adjustable later via sysfs */
	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
	init_rwsem(&sbi->pin_sem);
}

3110 3111
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
3112
	int err;
3113

3114 3115 3116 3117
	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

3118
	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3119
								GFP_KERNEL);
3120 3121 3122 3123
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
3124 3125
}

3126
#ifdef CONFIG_BLK_DEV_ZONED
3127 3128 3129 3130 3131 3132

/*
 * Context passed to f2fs_report_zone_cb() through blkdev_report_zones():
 * @dev holds the per-device zone bitmaps/arrays being filled in, and
 * @zone_cap_mismatch records whether any zone's capacity differs from
 * its length (i.e. the device has unusable tail space in some zones).
 */
struct f2fs_report_zones_args {
	struct f2fs_dev_info *dev;
	bool zone_cap_mismatch;
};

/*
 * Per-zone callback for blkdev_report_zones(): records, for each
 * sequential-write zone, its index in the device's blkz_seq bitmap and
 * its capacity in f2fs blocks. Conventional zones are skipped.
 * Always returns 0 so the report iterates over every zone.
 */
static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct f2fs_report_zones_args *rz_args = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return 0;

	/* mark zone @idx as sequential-write-required */
	set_bit(idx, rz_args->dev->blkz_seq);
	/* zone->capacity is in sectors; convert to f2fs blocks */
	rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
						F2FS_LOG_SECTORS_PER_BLOCK;
	if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
		rz_args->zone_cap_mismatch = true;

	return 0;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3150
/*
 * Initialize zoned-block-device bookkeeping for device @devi: zone size,
 * zone count, the sequential-zone bitmap, and per-zone capacities obtained
 * via blkdev_report_zones().
 *
 * Requires all devices to share the same power-of-two zone size. Returns
 * 0 on success (or when the blkzoned feature is off), -EINVAL on a zone
 * size mismatch between devices, -ENOMEM on allocation failure, or the
 * error from blkdev_report_zones().
 *
 * NOTE(review): the allocations made here appear to be released by the
 * caller's device teardown path (not visible in this chunk) — confirm
 * before changing the early-return error handling.
 */
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	struct f2fs_report_zones_args rep_zone_arg;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	/* every device must report the same zone size */
	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	/* ... and the same (power-of-two) log of that size */
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	/* round up for a trailing partial zone */
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	/* Get block zones type and zone-capacity */
	FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
					FDEV(devi).nr_blkz * sizeof(block_t),
					GFP_KERNEL);
	if (!FDEV(devi).zone_capacity_blocks)
		return -ENOMEM;

	rep_zone_arg.dev = &FDEV(devi);
	rep_zone_arg.zone_cap_mismatch = false;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;

	/* all zones fully usable: drop the per-zone capacity table */
	if (!rep_zone_arg.zone_cap_mismatch) {
		kfree(FDEV(devi).zone_capacity_blocks);
		FDEV(devi).zone_capacity_blocks = NULL;
	}

	return 0;
}
#endif

3204 3205
/*
 * Read f2fs raw super block.
3206 3207 3208
 * Because we have two copies of super block, so read both of them
 * to get the first valid one. If any one of them is broken, we pass
 * them recovery flag back to the caller.
3209
 */
3210
/*
 * Read and validate both superblock copies (blocks 0 and 1).
 *
 * Keeps the FIRST copy that passes sanity_check_raw_super() in a freshly
 * allocated buffer returned through *raw_super, and records its index in
 * *valid_super_block. If either copy is unreadable or invalid, *recovery
 * is set so the caller can rewrite the bad copy later. Only when neither
 * copy is usable does this return the last error; otherwise it returns 0
 * (ownership of *raw_super passes to the caller, who must kfree it).
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			*recovery = 1;
			/* keep going: the other copy may still be good */
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		/* remember the first valid copy only */
		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;	/* one good copy is enough */

	return err;
}

3262
/*
 * Write the in-memory superblock back to disk.
 *
 * When @recover is false this is a normal commit: the CRC is refreshed
 * (if the sb_chksum feature is on), the backup copy is written first so
 * a crash mid-commit still leaves one good copy, then the current valid
 * copy. When @recover is true only the previously-invalid copy is
 * rewritten, and the CRC is left as-is.
 *
 * Returns 0 on success, -EROFS when the fs/bdev is read-only (the
 * SBI_NEED_SB_WRITE flag is set so the write can happen on remount),
 * -EIO on buffer read failure, or the error from __f2fs_commit_super().
 */
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3301 3302 3303
/*
 * Open and register every block device backing this filesystem.
 *
 * Plain single-device mounts need no extra state and return early.
 * Otherwise an f2fs_dev_info array is built: one entry per device listed
 * in the superblock (multi-device), or a single entry for a zoned bdev.
 * Each device's segment range is computed, the bdev is opened, and zoned
 * devices additionally get their zone info initialized.
 *
 * Returns 0 on success, -ENOMEM, -EINVAL, or the blkdev_get_* error.
 * sbi->s_ndevs is bumped as devices open so a failure mid-loop still
 * lets the caller release the ones already acquired.
 */
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				/* first device also carries segment0 offset */
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				/* subsequent devices follow contiguously */
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		/* host-managed zoned devices are unusable without BLKZONED */
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}

3393 3394 3395
/*
 * Load the Unicode casefolding tables named by the superblock, if the
 * casefold feature is enabled and no encoding is attached to the VFS
 * super_block yet.
 *
 * Returns 0 on success or when casefold is not in use; -EINVAL when the
 * superblock names an unknown encoding (or when casefold is requested
 * but the kernel lacks CONFIG_UNICODE); or the utf8_load() error when
 * the requested table version is unsupported.
 */
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		/* map the superblock's encoding id to a table descriptor */
		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
					  &encoding_flags)) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

		/* hand the loaded map to the VFS for dentry comparisons */
		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}

3433 3434 3435 3436 3437 3438
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3439
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3440 3441 3442
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}
3443 3444

	sbi->readdir_ra = 1;
3445 3446
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3447 3448 3449
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
3450
	struct f2fs_super_block *raw_super;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3451
	struct inode *root;
3452
	int err;
3453
	bool skip_recovery = false, need_fsck = false;
3454
	char *options = NULL;
3455
	int recovery, i, valid_super_block;
3456
	struct curseg_info *seg_i;
3457
	int retry_cnt = 1;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3458

3459
try_onemore:
3460 3461
	err = -EINVAL;
	raw_super = NULL;
3462
	valid_super_block = -1;
3463 3464
	recovery = 0;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3465 3466 3467 3468 3469
	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

3470 3471
	sbi->sb = sb;

3472 3473 3474
	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
3475
		f2fs_err(sbi, "Cannot load crc32 driver.");
3476 3477 3478 3479 3480
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

3481
	/* set a block size */
3482
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3483
		f2fs_err(sbi, "unable to set blocksize");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3484
		goto free_sbi;
3485
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3486

3487
	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3488
								&recovery);
3489 3490 3491
	if (err)
		goto free_sbi;

3492
	sb->s_fs_info = sbi;
3493 3494
	sbi->raw_super = raw_super;

Chao Yu's avatar
Chao Yu committed
3495
	/* precompute checksum seed for metadata */
3496
	if (f2fs_sb_has_inode_chksum(sbi))
Chao Yu's avatar
Chao Yu committed
3497 3498 3499
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

3500
	default_options(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3501
	/* parse mount options */
3502 3503 3504
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3505
		goto free_sb_buf;
3506 3507
	}

3508
	err = parse_options(sb, options, false);
3509 3510
	if (err)
		goto free_options;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3511

3512 3513 3514
	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3515 3516
	sb->s_max_links = F2FS_LINK_MAX;

3517 3518 3519 3520
	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

3521 3522
#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
3523
	sb->s_qcop = &f2fs_quotactl_ops;
Chao Yu's avatar
Chao Yu committed
3524
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3525

3526
	if (f2fs_sb_has_quota_ino(sbi)) {
3527 3528 3529 3530 3531
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
3532 3533
#endif

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3534
	sb->s_op = &f2fs_sops;
3535
#ifdef CONFIG_FS_ENCRYPTION
3536
	sb->s_cop = &f2fs_cryptops;
Eric Biggers's avatar
Eric Biggers committed
3537 3538 3539
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
3540
#endif
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3541 3542 3543 3544
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
3545 3546
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3547
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
3548
	sb->s_iflags |= SB_I_CGROUPWB;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3549 3550

	/* init f2fs-specific super block info */
3551
	sbi->valid_super_block = valid_super_block;
3552
	init_rwsem(&sbi->gc_lock);
3553
	mutex_init(&sbi->writepages);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3554
	mutex_init(&sbi->cp_mutex);
3555
	init_rwsem(&sbi->node_write);
3556
	init_rwsem(&sbi->node_change);
3557 3558 3559

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3560
	spin_lock_init(&sbi->stat_lock);
3561

Chao Yu's avatar
Chao Yu committed
3562 3563 3564
	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;
3565
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
Chao Yu's avatar
Chao Yu committed
3566

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3567
	for (i = 0; i < NR_PAGE_TYPE; i++) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3568 3569 3570
		int n = (i == META) ? 1: NR_TEMP_TYPE;
		int j;

3571 3572 3573 3574 3575
		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
3576 3577
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
3578
			goto free_bio_info;
3579
		}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3580 3581 3582 3583 3584

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
3585 3586
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
Chao Yu's avatar
Chao Yu committed
3587 3588
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_rwsem(&sbi->write_io[i][j].bio_list_lock);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3589
		}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3590
	}
3591

3592
	init_rwsem(&sbi->cp_rwsem);
3593
	init_rwsem(&sbi->quota_sem);
3594
	init_waitqueue_head(&sbi->cp_wait);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3595 3596
	init_sb_info(sbi);

3597 3598
	err = init_percpu_info(sbi);
	if (err)
3599
		goto free_bio_info;
3600

3601
	if (F2FS_IO_ALIGNED(sbi)) {
3602
		sbi->write_io_dummy =
3603
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
3604 3605
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
3606
			goto free_percpu;
3607
		}
3608 3609
	}

3610 3611 3612 3613
	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_io_dummy;
3614 3615 3616
	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;
3617

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3618 3619 3620
	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
3621
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3622
		err = PTR_ERR(sbi->meta_inode);
3623
		goto free_page_array_cache;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3624 3625
	}

Chao Yu's avatar
Chao Yu committed
3626
	err = f2fs_get_valid_checkpoint(sbi);
3627
	if (err) {
3628
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3629
		goto free_meta_inode;
3630
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3631

3632 3633
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3634 3635 3636 3637
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}
3638

3639 3640 3641
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3642 3643 3644
	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
3645
		f2fs_err(sbi, "Failed to find devices");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3646 3647 3648
		goto free_devices;
	}

Chao Yu's avatar
Chao Yu committed
3649 3650 3651 3652 3653 3654
	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3655 3656
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
3657 3658
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3659 3660 3661 3662
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
3663
	sbi->reserved_blocks = 0;
3664
	sbi->current_reserved_blocks = 0;
3665
	limit_reserve_root(sbi);
3666
	adjust_unusable_cap_perc(sbi);
3667

3668 3669 3670 3671
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
3672
	mutex_init(&sbi->flush_lock);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3673

Chao Yu's avatar
Chao Yu committed
3674
	f2fs_init_extent_cache_info(sbi);
Chao Yu's avatar
Chao Yu committed
3675

Chao Yu's avatar
Chao Yu committed
3676
	f2fs_init_ino_entry_info(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3677

3678 3679
	f2fs_init_fsync_node_info(sbi);

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3680
	/* setup f2fs internal modules */
Chao Yu's avatar
Chao Yu committed
3681
	err = f2fs_build_segment_manager(sbi);
3682
	if (err) {
3683 3684
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3685
		goto free_sm;
3686
	}
Chao Yu's avatar
Chao Yu committed
3687
	err = f2fs_build_node_manager(sbi);
3688
	if (err) {
3689 3690
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3691
		goto free_nm;
3692
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3693

3694 3695 3696
	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
3697 3698
			(u64)part_stat_read(sb->s_bdev->bd_part,
					    sectors[STAT_WRITE]);
3699 3700 3701 3702 3703

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
3704
			le64_to_cpu(seg_i->journal->info.kbytes_written);
3705

Chao Yu's avatar
Chao Yu committed
3706
	f2fs_build_gc_manager(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3707

3708 3709 3710 3711
	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3712 3713 3714
	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
3715
		f2fs_err(sbi, "Failed to read node inode");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3716
		err = PTR_ERR(sbi->node_inode);
3717
		goto free_stats;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3718 3719 3720 3721 3722
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
3723
		f2fs_err(sbi, "Failed to read root inode");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3724
		err = PTR_ERR(root);
3725
		goto free_node_inode;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3726
	}
3727 3728
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
3729
		iput(root);
3730
		err = -EINVAL;
3731
		goto free_node_inode;
3732
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3733 3734 3735 3736

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
3737
		goto free_node_inode;
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3738 3739
	}

3740
	err = f2fs_register_sysfs(sbi);
3741
	if (err)
Chao Yu's avatar
Chao Yu committed
3742
		goto free_root_inode;
3743

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3744
#ifdef CONFIG_QUOTA
3745
	/* Enable quota usage during mount */
3746
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3747
		err = f2fs_enable_quotas(sb);
3748
		if (err)
3749
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3750 3751
	}
#endif
Chao Yu's avatar
Chao Yu committed
3752
	/* if there are any orphan inodes, free them */
Chao Yu's avatar
Chao Yu committed
3753
	err = f2fs_recover_orphan_inodes(sbi);
Chao Yu's avatar
Chao Yu committed
3754
	if (err)
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3755
		goto free_meta;
Chao Yu's avatar
Chao Yu committed
3756

Daniel Rosenberg's avatar
Daniel Rosenberg committed
3757
	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
3758
		goto reset_checkpoint;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
3759

3760
	/* recover fsynced data */
3761 3762
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
3763 3764 3765 3766
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
3767 3768 3769
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = -EROFS;
3770
				f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
3771 3772
				goto free_meta;
			}
3773
			f2fs_info(sbi, "write access unavailable, skipping recovery");
3774
			goto reset_checkpoint;
3775
		}
3776 3777 3778 3779

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

3780 3781
		if (skip_recovery)
			goto reset_checkpoint;
3782

Chao Yu's avatar
Chao Yu committed
3783
		err = f2fs_recover_fsync_data(sbi, false);
3784
		if (err < 0) {
3785 3786
			if (err != -ENOMEM)
				skip_recovery = true;
3787
			need_fsck = true;
3788 3789
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
Chao Yu's avatar
Chao Yu committed
3790
			goto free_meta;
3791
		}
3792
	} else {
Chao Yu's avatar
Chao Yu committed
3793
		err = f2fs_recover_fsync_data(sbi, true);
3794 3795 3796

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
3797
			f2fs_err(sbi, "Need to recover fsync data");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3798
			goto free_meta;
3799
		}
3800
	}
3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811

	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_check_write_pointer(sbi);
		if (err)
			goto free_meta;
	}

3812
reset_checkpoint:
3813 3814
	f2fs_init_inmem_curseg(sbi);

Chao Yu's avatar
Chao Yu committed
3815
	/* f2fs_recover_fsync_data() cleared this already */
3816
	clear_sbi_flag(sbi, SBI_POR_DOING);
3817

Daniel Rosenberg's avatar
Daniel Rosenberg committed
3818 3819 3820
	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
3821
			goto sync_free_meta;
Daniel Rosenberg's avatar
Daniel Rosenberg committed
3822 3823 3824 3825
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

3826 3827 3828 3829
	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
Chao Yu's avatar
Chao Yu committed
3830
	if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
3831
		/* After POR, we can run background GC thread.*/
Chao Yu's avatar
Chao Yu committed
3832
		err = f2fs_start_gc_thread(sbi);
3833
		if (err)
3834
			goto sync_free_meta;
3835
	}
3836
	kvfree(options);
3837 3838

	/* recover broken superblock */
3839
	if (recovery) {
3840
		err = f2fs_commit_super(sbi, true);
3841 3842
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
3843 3844
	}

3845 3846
	f2fs_join_shrinker(sbi);

3847 3848
	f2fs_tuning_parameters(sbi);

3849 3850
	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
3851
	f2fs_update_time(sbi, CP_TIME);
3852
	f2fs_update_time(sbi, REQ_TIME);
3853
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3854
	return 0;
3855

3856 3857 3858
sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
3859
	retry_cnt = 0;
3860

Chao Yu's avatar
Chao Yu committed
3861
free_meta:
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3862
#ifdef CONFIG_QUOTA
3863
	f2fs_truncate_quota_inode_pages(sb);
3864
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3865 3866
		f2fs_quota_off_umount(sbi->sb);
#endif
Chao Yu's avatar
Chao Yu committed
3867
	/*
Chao Yu's avatar
Chao Yu committed
3868
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
Chao Yu's avatar
Chao Yu committed
3869
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
Chao Yu's avatar
Chao Yu committed
3870 3871
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
Chao Yu's avatar
Chao Yu committed
3872 3873
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
3874 3875
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
3876
	f2fs_unregister_sysfs(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3877 3878 3879 3880
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
Chao Yu's avatar
Chao Yu committed
3881
	f2fs_release_ino_entry(sbi, true);
3882
	truncate_inode_pages_final(NODE_MAPPING(sbi));
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3883
	iput(sbi->node_inode);
3884
	sbi->node_inode = NULL;
3885 3886
free_stats:
	f2fs_destroy_stats(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3887
free_nm:
Chao Yu's avatar
Chao Yu committed
3888
	f2fs_destroy_node_manager(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3889
free_sm:
Chao Yu's avatar
Chao Yu committed
3890
	f2fs_destroy_segment_manager(sbi);
Chao Yu's avatar
Chao Yu committed
3891
	f2fs_destroy_post_read_wq(sbi);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3892 3893
free_devices:
	destroy_device_list(sbi);
3894
	kvfree(sbi->ckpt);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3895 3896 3897
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
3898
	sbi->meta_inode = NULL;
3899 3900
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
3901 3902
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
3903 3904
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
3905 3906 3907
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3908
	for (i = 0; i < NR_PAGE_TYPE; i++)
3909
		kvfree(sbi->write_io[i]);
3910 3911

#ifdef CONFIG_UNICODE
3912
	utf8_unload(sb->s_encoding);
3913
	sb->s_encoding = NULL;
3914
#endif
3915
free_options:
Chao Yu's avatar
Chao Yu committed
3916 3917
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
3918
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
Chao Yu's avatar
Chao Yu committed
3919
#endif
3920
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
3921
	kvfree(options);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3922
free_sb_buf:
3923
	kfree(raw_super);
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3924
free_sbi:
3925 3926
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
3927
	kfree(sbi);
3928 3929

	/* give only one another chance */
3930 3931
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
3932 3933 3934
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3935 3936 3937 3938 3939 3940 3941 3942 3943
	return err;
}

/*
 * Mount entry point for the "f2fs" filesystem type.
 *
 * Delegates to the generic block-device mount helper, which opens the
 * device named by @dev_name and calls f2fs_fill_super() to populate the
 * superblock.  @data carries the raw mount option string.
 */
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	struct dentry *root;

	root = mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
	return root;
}

3944 3945
static void kill_f2fs_super(struct super_block *sb)
{
3946
	if (sb->s_root) {
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}
3960 3961 3962

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
3963
	}
3964 3965 3966
	kill_block_super(sb);
}

Jaegeuk Kim's avatar
Jaegeuk Kim committed
3967 3968 3969 3970
/*
 * Filesystem type descriptor registered with the VFS.  f2fs requires a
 * backing block device (FS_REQUIRES_DEV) and uses a custom kill_sb so a
 * final checkpoint can be written on unmount.
 */
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
Jaegeuk Kim's avatar
Jaegeuk Kim committed
3975

3976
/*
 * Create the slab cache backing struct f2fs_inode_info allocations.
 *
 * SLAB_RECLAIM_ACCOUNT marks the cache as reclaimable for VM accounting
 * and SLAB_ACCOUNT enables memcg charging of the objects.
 *
 * Return: 0 on success, -ENOMEM if the cache cannot be created.
 */
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);

	return f2fs_inode_cachep ? 0 : -ENOMEM;
}

/* Destroy the f2fs inode slab cache created by init_inodecache(). */
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

/*
 * Module init: set up every global resource f2fs needs before any mount
 * can happen — slab caches, sysfs entries, the memory shrinker, the
 * filesystem type registration, and the post-read/bio/compression
 * infrastructure.  On any failure the goto ladder below unwinds exactly
 * the steps that have already succeeded, in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init init_f2fs_fs(void)
{
	int err;

	/* f2fs on-disk block size must match the machine page size */
	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk(KERN_ERR "F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	/* debugfs stats are best-effort; this helper returns void */
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_post_read;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	return 0;
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

/*
 * Module unload: undo everything init_f2fs_fs() set up, in strict
 * reverse order of initialization.  Do not reorder these calls — e.g.
 * the filesystem type must be unregistered (so no new mounts can start)
 * before the global slab caches it depends on are destroyed.
 */
static void __exit exit_f2fs_fs(void)
{
	/* compression support (page cache + mempool) */
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	/* bio infrastructure used by the read/write paths */
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	/* global slab caches shared by all f2fs instances */
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

/* Hook init/exit for both built-in and loadable-module builds. */
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
4109