// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
36 37 38
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
39 40
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
41
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
42
	       bh->b_folio->mapping, bh->b_folio->flags);
43
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
44 45
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
46
	gfs2_lm(sdp, "AIL error\n");
47
	gfs2_withdraw_delayed(sdp);
48 49
}

50
/**
Steven Whitehouse's avatar
Steven Whitehouse committed
51
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
52
 * @gl: the glock
53
 * @fsync: set when called from fsync (not all buffers will be clean)
54
 * @nr_revokes: Number of buffers to revoke
55 56 57 58
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

59 60
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
61
{
62
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
63
	struct list_head *head = &gl->gl_ail_list;
64
	struct gfs2_bufdata *bd, *tmp;
65
	struct buffer_head *bh;
66
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
67

68
	gfs2_log_lock(sdp);
Dave Chinner's avatar
Dave Chinner committed
69
	spin_lock(&sdp->sd_ail_lock);
70 71 72
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
73
		bh = bd->bd_bh;
74 75 76
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
77
			gfs2_ail_error(gl, bh);
78
		}
79
		gfs2_trans_add_revoke(sdp, bd);
80
		nr_revokes--;
81
	}
82
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
Dave Chinner's avatar
Dave Chinner committed
83
	spin_unlock(&sdp->sd_ail_lock);
84
	gfs2_log_unlock(sdp);
Steven Whitehouse's avatar
Steven Whitehouse committed
85 86 87
}


88
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
Steven Whitehouse's avatar
Steven Whitehouse committed
89
{
90
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehouse's avatar
Steven Whitehouse committed
91
	struct gfs2_trans tr;
92
	unsigned int revokes;
93
	int ret = 0;
Steven Whitehouse's avatar
Steven Whitehouse committed
94

95
	revokes = atomic_read(&gl->gl_ail_count);
Steven Whitehouse's avatar
Steven Whitehouse committed
96

97
	if (!revokes) {
98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
121
		return 0;
122
	}
Steven Whitehouse's avatar
Steven Whitehouse committed
123

124 125 126
	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
127 128
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
129
		goto flush;
130
	}
131
	__gfs2_ail_flush(gl, 0, revokes);
Steven Whitehouse's avatar
Steven Whitehouse committed
132
	gfs2_trans_end(sdp);
133

134
flush:
135 136 137
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
138
	return ret;
Steven Whitehouse's avatar
Steven Whitehouse committed
139
}
140

141
/*
 * gfs2_ail_flush - revoke and flush all AIL buffers of a glock
 * @gl: the glock
 * @fsync: set when called from fsync (busy buffers are then tolerated)
 *
 * Best effort: if no transaction can be started, the flush is skipped.
 */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes)
		return;
	if (gfs2_trans_begin(sdp, 0, revokes))
		return;

	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}
Steven Whitehouse's avatar
Steven Whitehouse committed
158

159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176
/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
177
	WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
178 179 180 181 182 183
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

Steven Whitehouse's avatar
Steven Whitehouse committed
184
/**
185
 * rgrp_go_sync - sync out the metadata for this glock
David Teigland's avatar
David Teigland committed
186 187 188 189
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
190
 * return to caller to demote/unlock the glock until I/O is complete.
David Teigland's avatar
David Teigland committed
191 192
 */

193
static int rgrp_go_sync(struct gfs2_glock *gl)
David Teigland's avatar
David Teigland committed
194
{
195
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
196
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
197 198
	int error;

199
	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
200
		return 0;
201
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
202

203 204
	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
205
	error = gfs2_rgrp_metasync(gl);
206 207
	if (!error)
		error = gfs2_ail_empty_gl(gl);
Bob Peterson's avatar
Bob Peterson committed
208
	gfs2_free_clones(rgd);
209
	return error;
David Teigland's avatar
David Teigland committed
210 211 212
}

/**
213
 * rgrp_go_inval - invalidate the metadata for this glock
David Teigland's avatar
David Teigland committed
214 215 216
 * @gl: the glock
 * @flags:
 *
217 218 219
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
David Teigland's avatar
David Teigland committed
220 221
 */

222
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
David Teigland's avatar
David Teigland committed
223
{
224
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
225
	struct address_space *mapping = &sdp->sd_aspace;
226
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
Bob Peterson's avatar
Bob Peterson committed
227
	const unsigned bsize = sdp->sd_sb.sb_bsize;
228
	loff_t start, end;
229

230 231 232 233
	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
Bob Peterson's avatar
Bob Peterson committed
234
	gfs2_rgrp_brelse(rgd);
235
	WARN_ON_ONCE(!(flags & DIO_METADATA));
Bob Peterson's avatar
Bob Peterson committed
236
	truncate_inode_pages_range(mapping, start, end);
David Teigland's avatar
David Teigland committed
237 238
}

239
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
240 241
			      const char *fs_id_buf)
{
242
	struct gfs2_rgrpd *rgd = gl->gl_object;
243 244 245 246 247

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

248 249 250 251 252 253 254 255 256 257 258 259
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

260 261 262 263 264 265 266 267 268 269 270
/* Fetch the resource group attached to @gl under the lockref spinlock. */
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);
	return rgd;
}

271 272 273 274 275 276 277 278 279
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

280
/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Writes back and waits on the glock's metadata address space; records
 * an I/O error on the superblock if the wait fails.
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
299 300 301 302
 * @gl: the glock protecting the inode
 *
 */

303
static int inode_go_sync(struct gfs2_glock *gl)
304
{
305 306
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
307
	struct address_space *metamapping = gfs2_glock2aspace(gl);
308
	int error = 0, ret;
309

310
	if (isreg) {
311 312 313 314
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
315
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
316
		goto out;
317

318
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
319

320 321
	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
322
	filemap_fdatawrite(metamapping);
323
	if (isreg) {
324 325 326 327
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
328
	}
329
	ret = gfs2_inode_metasync(gl);
330 331
	if (!error)
		error = ret;
332 333 334
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
335 336 337 338
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
339
	smp_mb__before_atomic();
340
	clear_bit(GLF_DIRTY, &gl->gl_flags);
341 342 343

out:
	gfs2_clear_glop_pending(ip);
344
	return error;
345 346
}

David Teigland's avatar
David Teigland committed
347 348 349 350
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags:
351 352
 *
 * Normally we invalidate everything, but if we are moving into
353 354
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
David Teigland's avatar
David Teigland committed
355 356 357 358 359
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
360
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
David Teigland's avatar
David Teigland committed
361

362
	if (flags & DIO_METADATA) {
363
		struct address_space *mapping = gfs2_glock2aspace(gl);
364
		truncate_inode_pages(mapping, 0);
365
		if (ip) {
366
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
367
			forget_all_cached_acls(&ip->i_inode);
368
			security_inode_invalidate_secctx(&ip->i_inode);
369
			gfs2_dir_hash_inval(ip);
370
		}
371 372
	}

373
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
374
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
375 376
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
377
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
378
	}
379
	if (ip && S_ISREG(ip->i_inode.i_mode))
380
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
381 382

	gfs2_clear_glop_pending(ip);
David Teigland's avatar
David Teigland committed
383 384 385 386 387 388 389 390 391
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

392
static int inode_go_demote_ok(const struct gfs2_glock *gl)
David Teigland's avatar
David Teigland committed
393
{
394
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
395

396 397
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
398

399
	return 1;
David Teigland's avatar
David Teigland committed
400 401
}

402 403
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
404
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
405
	const struct gfs2_dinode *str = buf;
406
	struct timespec64 atime, iatime;
407
	u16 height, depth;
408
	umode_t mode = be32_to_cpu(str->di_mode);
409 410
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;
411

412 413 414 415 416 417 418 419
	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
420
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
421
	inode->i_mode = mode;
422
	if (is_new) {
423
		inode->i_rdev = 0;
424 425 426
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
427 428
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
429 430
			break;
		}
431
	}
432

433 434 435 436 437
	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
438 439
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
440 441 442 443 444
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
445 446
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));
447 448 449 450 451

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
452 453
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
454
	gfs2_set_inode_flags(inode);
455
	height = be16_to_cpu(str->di_height);
456 457 458 459
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
460 461 462
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
463 464 465 466
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
467 468 469
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

470 471 472 473
	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
474 475
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);
476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

David Teigland's avatar
David Teigland committed
501
/**
502
 * inode_go_instantiate - read in an inode if necessary
503
 * @gl: The glock
David Teigland's avatar
David Teigland committed
504 505 506 507
 *
 * Returns: errno
 */

508
static int inode_go_instantiate(struct gfs2_glock *gl)
David Teigland's avatar
David Teigland committed
509
{
510
	struct gfs2_inode *ip = gl->gl_object;
David Teigland's avatar
David Teigland committed
511

512
	if (!ip) /* no inode to populate - read it in later */
513
		return 0;
David Teigland's avatar
David Teigland committed
514

515 516 517 518 519 520 521 522 523 524 525
	return gfs2_inode_refresh(ip);
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;
David Teigland's avatar
David Teigland committed
526

527 528 529
	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

530
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
David Teigland's avatar
David Teigland committed
531
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
532 533
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);
David Teigland's avatar
David Teigland committed
534 535 536 537

	return error;
}

538 539 540
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
541
 * @gl: The glock
542
 * @fs_id_buf: file system id (may be empty)
543 544 545
 *
 */

546
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
547
			  const char *fs_id_buf)
548
{
549
	struct gfs2_inode *ip = gl->gl_object;
550
	const struct inode *inode = &ip->i_inode;
551

552
	if (ip == NULL)
553
		return;
554

555 556
	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
557 558
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
559
		  IF2DT(inode->i_mode), ip->i_flags,
560
		  (unsigned int)ip->i_diskflags,
561 562
		  (unsigned long long)i_size_read(inode),
		  inode->i_data.nrpages);
563 564
}

David Teigland's avatar
David Teigland committed
565
/**
566
 * freeze_go_callback - A cluster node is requesting a freeze
David Teigland's avatar
David Teigland committed
567
 * @gl: the glock
568
 * @remote: true if this came from a different cluster node
David Teigland's avatar
David Teigland committed
569 570
 */

571
static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
David Teigland's avatar
David Teigland committed
572
{
573
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
574 575 576
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
577 578
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
579 580
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;
David Teigland's avatar
David Teigland committed
581

582
	/*
583
	 * Try to get an active super block reference to prevent racing with
584 585 586
	 * unmount (see super_trylock_shared()).  But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
587
	 */
588 589 590 591 592
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
David Teigland's avatar
David Teigland committed
593 594 595 596
	}
}

/**
597
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
David Teigland's avatar
David Teigland committed
598 599
 * @gl: the glock
 */
600
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
David Teigland's avatar
David Teigland committed
601
{
602
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
603
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
604
	struct gfs2_glock *j_gl = ip->i_gl;
605
	struct gfs2_log_header_host head;
David Teigland's avatar
David Teigland committed
606 607
	int error;

608
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
609
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
David Teigland's avatar
David Teigland committed
610

611
		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
612 613 614 615 616 617 618
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
David Teigland's avatar
David Teigland committed
619
	}
620
	return 0;
David Teigland's avatar
David Teigland committed
621 622
}

623 624 625
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
626
 * @remote: true if this came from a different cluster node
627
 *
628
 * gl_lockref.lock lock is held while calling this
629
 */
630
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
631
{
632
	struct gfs2_inode *ip = gl->gl_object;
633
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
634

635
	if (!remote || sb_rdonly(sdp->sd_vfs) ||
636
	    test_bit(SDF_KILL, &sdp->sd_flags))
637
		return;
638 639

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
640
	    gl->gl_state == LM_ST_SHARED && ip) {
641
		gl->gl_lockref.count++;
642
		if (!gfs2_queue_try_to_evict(gl))
643
			gl->gl_lockref.count--;
644 645 646
	}
}

647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (test_bit(GLF_FREEING, &gl->gl_flags)) {
		clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
		wake_up_bit(&gl->gl_flags, GLF_FREEING);
	}
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

714
const struct gfs2_glock_operations gfs2_meta_glops = {
715
	.go_type = LM_TYPE_META,
716
	.go_flags = GLOF_NONDISK,
David Teigland's avatar
David Teigland committed
717 718
};

719
const struct gfs2_glock_operations gfs2_inode_glops = {
720
	.go_sync = inode_go_sync,
David Teigland's avatar
David Teigland committed
721 722
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
723
	.go_instantiate = inode_go_instantiate,
724
	.go_held = inode_go_held,
725
	.go_dump = inode_go_dump,
726
	.go_type = LM_TYPE_INODE,
727
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
728
	.go_free = inode_go_free,
David Teigland's avatar
David Teigland committed
729 730
};

731
const struct gfs2_glock_operations gfs2_rgrp_glops = {
732
	.go_sync = rgrp_go_sync,
733
	.go_inval = rgrp_go_inval,
734
	.go_instantiate = gfs2_rgrp_go_instantiate,
735
	.go_dump = gfs2_rgrp_go_dump,
736
	.go_type = LM_TYPE_RGRP,
737
	.go_flags = GLOF_LVB,
David Teigland's avatar
David Teigland committed
738 739
};

740 741
const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
742
	.go_callback = freeze_go_callback,
743
	.go_type = LM_TYPE_NONDISK,
744
	.go_flags = GLOF_NONDISK,
David Teigland's avatar
David Teigland committed
745 746
};

747
const struct gfs2_glock_operations gfs2_iopen_glops = {
748
	.go_type = LM_TYPE_IOPEN,
749
	.go_callback = iopen_go_callback,
750
	.go_dump = inode_go_dump,
751
	.go_flags = GLOF_LRU | GLOF_NONDISK,
752
	.go_subclass = 1,
David Teigland's avatar
David Teigland committed
753 754
};

755
const struct gfs2_glock_operations gfs2_flock_glops = {
756
	.go_type = LM_TYPE_FLOCK,
757
	.go_flags = GLOF_LRU | GLOF_NONDISK,
David Teigland's avatar
David Teigland committed
758 759
};

760
const struct gfs2_glock_operations gfs2_nondisk_glops = {
761
	.go_type = LM_TYPE_NONDISK,
762
	.go_flags = GLOF_NONDISK,
763
	.go_callback = nondisk_go_callback,
David Teigland's avatar
David Teigland committed
764 765
};

766
const struct gfs2_glock_operations gfs2_quota_glops = {
767
	.go_type = LM_TYPE_QUOTA,
768
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
David Teigland's avatar
David Teigland committed
769 770
};

771
const struct gfs2_glock_operations gfs2_journal_glops = {
772
	.go_type = LM_TYPE_JOURNAL,
773
	.go_flags = GLOF_NONDISK,
David Teigland's avatar
David Teigland committed
774 775
};

776 777 778 779 780 781 782 783 784 785 786
/* Lookup table from lock type number to its glock operations. */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};