Commit f6e1e1d1 authored by Linus Torvalds

Merge tag 'gfs2-for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Log space and revoke accounting rework to fix some failed asserts.

 - Local resource group glock sharing for better local performance (a usage
   sketch follows this list).

 - Add support for version 1802 filesystems: trusted xattr support and
   '-o rgrplvb' mounts by default.

 - Actually synchronize on the inode glock's FREEING bit during withdraw
   ("gfs2: fix glock confusion in function signal_our_withdraw").

 - Fix parallel recovery of multiple journals ("gfs2: keep bios separate
   for each journal").

 - Various other bug fixes.
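
To make the new node-scope glock sharing concrete, here is a minimal, illustrative sketch (not part of the pull request) of how a holder takes a resource group glock exclusively toward other cluster nodes while still sharing it with other holders on the same node. The gfs2_glock_nq_init()/gfs2_glock_dq_uninit() calls and the LM_FLAG_NODE_SCOPE flag are taken from the diffs below; example_touch_rgrp() and the reduced error handling are hypothetical.

static int example_touch_rgrp(struct gfs2_rgrpd *rgd)
{
	struct gfs2_holder rgd_gh;
	int error;

	/* EX toward other cluster nodes; other local holders that also pass
	 * LM_FLAG_NODE_SCOPE may share the lock on this node. */
	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &rgd_gh);
	if (error)
		return error;

	/* ... inspect or modify the resource group here ... */

	gfs2_glock_dq_uninit(&rgd_gh);
	return 0;
}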

* tag 'gfs2-for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (49 commits)
  gfs2: Don't get stuck with I/O plugged in gfs2_ail1_flush
  gfs2: Per-revoke accounting in transactions
  gfs2: Rework the log space allocation logic
  gfs2: Minor calc_reserved cleanup
  gfs2: Use resource group glock sharing
  gfs2: Allow node-wide exclusive glock sharing
  gfs2: Add local resource group locking
  gfs2: Add per-reservation reserved block accounting
  gfs2: Rename rs_{free -> requested} and rd_{reserved -> requested}
  gfs2: Check for active reservation in gfs2_release
  gfs2: Don't search for unreserved space twice
  gfs2: Only pass reservation down to gfs2_rbm_find
  gfs2: Also reflect single-block allocations in rgd->rd_extfail_pt
  gfs2: Recursive gfs2_quota_hold in gfs2_iomap_end
  gfs2: Add trusted xattr support
  gfs2: Enable rgrplvb for sb_fs_format 1802
  gfs2: Don't skip dlm unlock if glock has an lvb
  gfs2: Lock imbalance on error path in gfs2_recover_one
  gfs2: Move function gfs2_ail_empty_tr
  gfs2: Get rid of current_tail()
  ...
parents 7d6beb71 17d77684
@@ -1230,6 +1230,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 	gfs2_inplace_release(ip);
+	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+		gfs2_quota_unlock(ip);
 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
 		/* Deallocate blocks that were just allocated. */
 		loff_t blockmask = i_blocksize(inode) - 1;
@@ -1242,9 +1245,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 		}
 	}
-	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
-		gfs2_quota_unlock(ip);
 	if (unlikely(!written))
 		goto out_unlock;
@@ -1538,13 +1538,13 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
 			goto out;
 		}
 		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
-					 0, rd_gh);
+					 LM_FLAG_NODE_SCOPE, rd_gh);
 		if (ret)
 			goto out;
 		/* Must be done with the rgrp glock held: */
 		if (gfs2_rs_active(&ip->i_res) &&
-		    rgd == ip->i_res.rs_rbm.rgd)
+		    rgd == ip->i_res.rs_rgd)
 			gfs2_rs_deltree(&ip->i_res);
 	}
...
@@ -716,10 +716,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
 	kfree(file->private_data);
 	file->private_data = NULL;
-	if (file->f_mode & FMODE_WRITE) {
+	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_delete(ip, &inode->i_writecount);
+	if (file->f_mode & FMODE_WRITE)
 		gfs2_qa_put(ip);
-	}
 	return 0;
 }
@@ -1112,8 +1112,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 		goto out_qunlock;
 	/* check if the selected rgrp limits our max_blks further */
-	if (ap.allowed && ap.allowed < max_blks)
-		max_blks = ap.allowed;
+	if (ip->i_res.rs_reserved < max_blks)
+		max_blks = ip->i_res.rs_reserved;
 	/* Almost done. Calculate bytes that can be written using
 	 * max_blks. We also recompute max_bytes, data_blocks and
...
@@ -313,9 +313,23 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 {
 	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
-	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
-		return 0;
+	if (gh != gh_head) {
+		/**
+		 * Here we make a special exception to grant holders who agree
+		 * to share the EX lock with other holders who also have the
+		 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit
+		 * is set, we grant more holders with the bit set.
+		 */
+		if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
+		    (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
+		     gh->gh_state == LM_ST_EXCLUSIVE &&
+		    (gh->gh_flags & LM_FLAG_NODE_SCOPE))
+			return 1;
+		if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+		     gh_head->gh_state == LM_ST_EXCLUSIVE))
+			return 0;
+	}
 	if (gl->gl_state == gh->gh_state)
 		return 1;
 	if (gh->gh_flags & GL_EXACT)
@@ -2030,6 +2044,8 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
 		*p++ = 'A';
 	if (flags & LM_FLAG_PRIORITY)
 		*p++ = 'p';
+	if (flags & LM_FLAG_NODE_SCOPE)
+		*p++ = 'n';
 	if (flags & GL_ASYNC)
 		*p++ = 'a';
 	if (flags & GL_EXACT)
...
@@ -75,6 +75,11 @@ enum {
  * request and directly join the other shared lock. A shared lock request
  * without the priority flag might be forced to wait until the deferred
  * requested had acquired and released the lock.
+ *
+ * LM_FLAG_NODE_SCOPE
+ * This holder agrees to share the lock within this node. In other words,
+ * the glock is held in EX mode according to DLM, but local holders on the
+ * same node can share it.
  */
 #define LM_FLAG_TRY		0x0001
@@ -82,6 +87,7 @@ enum {
 #define LM_FLAG_NOEXP		0x0004
 #define LM_FLAG_ANY		0x0008
 #define LM_FLAG_PRIORITY	0x0010
+#define LM_FLAG_NODE_SCOPE	0x0020
 #define GL_ASYNC		0x0040
 #define GL_EXACT		0x0080
 #define GL_SKIP			0x0100
...
@@ -86,16 +86,12 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
+	unsigned int revokes;
 	int ret;
-	memset(&tr, 0, sizeof(tr));
-	INIT_LIST_HEAD(&tr.tr_buf);
-	INIT_LIST_HEAD(&tr.tr_databuf);
-	INIT_LIST_HEAD(&tr.tr_ail1_list);
-	INIT_LIST_HEAD(&tr.tr_ail2_list);
-	tr.tr_revokes = atomic_read(&gl->gl_ail_count);
-	if (!tr.tr_revokes) {
+	revokes = atomic_read(&gl->gl_ail_count);
+	if (!revokes) {
 		bool have_revokes;
 		bool log_in_flight;
@@ -122,20 +118,14 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 		return 0;
 	}
-	/* A shortened, inline version of gfs2_trans_begin()
-	 * tr->alloced is not set since the transaction structure is
-	 * on the stack */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
-	tr.tr_ip = _RET_IP_;
-	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
-	if (ret < 0)
-		return ret;
-	WARN_ON_ONCE(current->journal_info);
-	current->journal_info = &tr;
-	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
+	memset(&tr, 0, sizeof(tr));
+	set_bit(TR_ONSTACK, &tr.tr_flags);
+	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
+	if (ret)
+		goto flush;
+	__gfs2_ail_flush(gl, 0, revokes);
 	gfs2_trans_end(sdp);
 flush:
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_EMPTY_GL);
@@ -146,19 +136,15 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned int revokes = atomic_read(&gl->gl_ail_count);
-	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 	int ret;
 	if (!revokes)
 		return;
-	while (revokes > max_revokes)
-		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
-	ret = gfs2_trans_begin(sdp, 0, max_revokes);
+	ret = gfs2_trans_begin(sdp, 0, revokes);
 	if (ret)
 		return;
-	__gfs2_ail_flush(gl, fsync, max_revokes);
+	__gfs2_ail_flush(gl, fsync, revokes);
 	gfs2_trans_end(sdp);
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_FLUSH);
...
@@ -20,6 +20,7 @@
 #include <linux/percpu.h>
 #include <linux/lockref.h>
 #include <linux/rhashtable.h>
+#include <linux/mutex.h>
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
@@ -106,7 +107,8 @@ struct gfs2_rgrpd {
 	u32 rd_data;		/* num of data blocks in rgrp */
 	u32 rd_bitbytes;	/* number of bytes in data bitmaps */
 	u32 rd_free;
-	u32 rd_reserved;	/* number of blocks reserved */
+	u32 rd_requested;	/* number of blocks in rd_rstree */
+	u32 rd_reserved;	/* number of reserved blocks */
 	u32 rd_free_clone;
 	u32 rd_dinodes;
 	u64 rd_igeneration;
@@ -122,34 +124,10 @@ struct gfs2_rgrpd {
 #define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
 #define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
 	spinlock_t rd_rsspin;	/* protects reservation related vars */
+	struct mutex rd_mutex;
 	struct rb_root rd_rstree;	/* multi-block reservation tree */
 };
-struct gfs2_rbm {
-	struct gfs2_rgrpd *rgd;
-	u32 offset;	/* The offset is bitmap relative */
-	int bii;	/* Bitmap index */
-};
-static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
-{
-	return rbm->rgd->rd_bits + rbm->bii;
-}
-static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
-{
-	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
-	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
-		rbm->offset;
-}
-static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
-			       const struct gfs2_rbm *rbm2)
-{
-	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
-	       (rbm1->offset == rbm2->offset);
-}
 enum gfs2_state_bits {
 	BH_Pinned = BH_PrivateStart,
 	BH_Escaped = BH_PrivateStart + 1,
@@ -313,9 +291,11 @@ struct gfs2_qadata { /* quota allocation data */
  */
 struct gfs2_blkreserv {
-	struct rb_node rs_node;	/* link to other block reservations */
-	struct gfs2_rbm rs_rbm;	/* Start of reservation */
-	u32 rs_free;		/* how many blocks are still free */
+	struct rb_node rs_node;	/* node within rd_rstree */
+	struct gfs2_rgrpd *rs_rgd;
+	u64 rs_start;
+	u32 rs_requested;
+	u32 rs_reserved;	/* number of reserved blocks */
 };
 /*
@@ -490,7 +470,7 @@ struct gfs2_quota_data {
 enum {
 	TR_TOUCHED = 1,
 	TR_ATTACHED = 2,
-	TR_ALLOCED = 3,
+	TR_ONSTACK = 3,
 };
 struct gfs2_trans {
@@ -506,7 +486,6 @@ struct gfs2_trans {
 	unsigned int tr_num_buf_rm;
 	unsigned int tr_num_databuf_rm;
 	unsigned int tr_num_revoke;
-	unsigned int tr_num_revoke_rm;
 	struct list_head tr_list;
 	struct list_head tr_databuf;
@@ -531,6 +510,7 @@ struct gfs2_jdesc {
 	unsigned int nr_extents;
 	struct work_struct jd_work;
 	struct inode *jd_inode;
+	struct bio *jd_log_bio;
 	unsigned long jd_flags;
 #define JDF_RECOVERY 1
 	unsigned int jd_jid;
@@ -585,6 +565,7 @@ struct gfs2_args {
 	unsigned int ar_errors:2;	/* errors=withdraw | panic */
 	unsigned int ar_nobarrier:1;	/* do not send barriers */
 	unsigned int ar_rgrplvb:1;	/* use lvbs for rgrp info */
+	unsigned int ar_got_rgrplvb:1;	/* Was the rgrplvb opt given? */
 	unsigned int ar_loccookie:1;	/* use location based readdir
 					   cookies */
 	s32 ar_commit;			/* Commit interval */
@@ -821,7 +802,6 @@ struct gfs2_sbd {
 	struct gfs2_trans *sd_log_tr;
 	unsigned int sd_log_blks_reserved;
-	int sd_log_committed_revoke;
 	atomic_t sd_log_pinned;
 	unsigned int sd_log_num_revoke;
@@ -834,24 +814,22 @@ struct gfs2_sbd {
 	atomic_t sd_log_thresh2;
 	atomic_t sd_log_blks_free;
 	atomic_t sd_log_blks_needed;
+	atomic_t sd_log_revokes_available;
 	wait_queue_head_t sd_log_waitq;
 	wait_queue_head_t sd_logd_waitq;
 	u64 sd_log_sequence;
-	unsigned int sd_log_head;
-	unsigned int sd_log_tail;
 	int sd_log_idle;
 	struct rw_semaphore sd_log_flush_lock;
 	atomic_t sd_log_in_flight;
-	struct bio *sd_log_bio;
 	wait_queue_head_t sd_log_flush_wait;
 	int sd_log_error; /* First log error */
 	wait_queue_head_t sd_withdraw_wait;
-	atomic_t sd_reserving_log;
-	wait_queue_head_t sd_reserving_log_wait;
+	unsigned int sd_log_tail;
+	unsigned int sd_log_flush_tail;
+	unsigned int sd_log_head;
 	unsigned int sd_log_flush_head;
 	spinlock_t sd_ail_lock;
...
@@ -1147,7 +1147,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
 	if (!rgd)
 		goto out_inodes;
-	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
+	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
 	error = gfs2_glock_nq(ghs); /* parent */
@@ -1453,8 +1453,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 			error = -ENOENT;
 			goto out_gunlock;
 		}
-		error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
-					   &rd_gh);
+		error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE,
					   LM_FLAG_NODE_SCOPE, &rd_gh);
 		if (error)
 			goto out_gunlock;
 	}
...
@@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	int lvb_needs_unlock = 0;
 	int error;
 	if (gl->gl_lksb.sb_lkid == 0) {
@@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_update_request_times(gl);
-	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
-	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
-		lvb_needs_unlock = 1;
+	/* don't want to skip dlm_unlock writing the lvb when lock has one */
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    !lvb_needs_unlock) {
+	    !gl->gl_lksb.sb_lvbptr) {
 		gfs2_glock_free(gl);
 		return;
 	}
...
@@ -13,6 +13,13 @@
 #include "incore.h"
 #include "inode.h"
+/*
+ * The minimum amount of log space required for a log flush is one block for
+ * revokes and one block for the log header. Log flushes other than
+ * GFS2_LOG_HEAD_FLUSH_NORMAL may write one or two more log headers.
+ */
+#define GFS2_LOG_FLUSH_MIN_BLOCKS 4
 /**
  * gfs2_log_lock - acquire the right to mess with the log manager
  * @sdp: the filesystem
@@ -43,7 +50,9 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
 	if (++value == sdp->sd_jdesc->jd_blocks) {
 		value = 0;
 	}
-	sdp->sd_log_head = sdp->sd_log_tail = value;
+	sdp->sd_log_tail = value;
+	sdp->sd_log_flush_tail = value;
+	sdp->sd_log_head = value;
 }
 static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
@@ -64,8 +73,13 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
 extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
 extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
 extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
 extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
+extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+				 unsigned int *extra_revokes);
+extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			     unsigned int *extra_revokes);
 extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
				  u64 seq, u32 tail, u32 lblock, u32 flags,
				  int op_flags);
@@ -78,6 +92,6 @@ extern void log_flush_wait(struct gfs2_sbd *sdp);
 extern int gfs2_logd(void *data);
 extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
 #endif /* __LOG_DOT_H__ */
@@ -76,15 +76,20 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
+	rgrp_lock_local(rgd);
 	if (bi->bi_clone == NULL)
-		return;
+		goto out;
 	if (sdp->sd_args.ar_discard)
 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
 	memcpy(bi->bi_clone + bi->bi_offset,
 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
 	clear_bit(GBF_FULL, &bi->bi_flags);
 	rgd->rd_free_clone = rgd->rd_free;
+	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
 	rgd->rd_extfail_pt = rgd->rd_free;
+out:
+	rgrp_unlock_local(rgd);
 }
 /**
@@ -322,17 +327,18 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
  * then add the page segment to that.
  */
-void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
-		    unsigned size, unsigned offset, u64 blkno)
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+		    struct page *page, unsigned size, unsigned offset,
+		    u64 blkno)
 {
 	struct bio *bio;
 	int ret;
-	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
+	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
 	ret = bio_add_page(bio, page, size, offset);
 	if (ret == 0) {
-		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
+		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
 		ret = bio_add_page(bio, page, size, offset);
 		WARN_ON(ret == 0);
@@ -355,7 +361,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 	gfs2_log_incr_head(sdp);
-	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
+	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
+		       bh_offset(bh), dblock);
 }
 /**
@@ -369,14 +376,14 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
 * the page may be freed at any time.
 */
-void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
+static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
 {
 	struct super_block *sb = sdp->sd_vfs;
 	u64 dblock;
 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 	gfs2_log_incr_head(sdp);
-	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
+	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
 }
 /**
@@ -845,7 +852,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	struct page *page;
 	unsigned int length;
-	gfs2_write_revokes(sdp);
+	gfs2_flush_revokes(sdp);
 	if (!sdp->sd_log_num_revoke)
 		return;
@@ -857,7 +864,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 		sdp->sd_log_num_revoke--;
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
 			gfs2_log_write_page(sdp, page);
 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 			mh = page_address(page);
...
@@ -10,37 +10,24 @@
 #include <linux/list.h>
 #include "incore.h"
-#define BUF_OFFSET \
-	((sizeof(struct gfs2_log_descriptor) + sizeof(__be64) - 1) & \
-	 ~(sizeof(__be64) - 1))
-#define DATABUF_OFFSET \
-	((sizeof(struct gfs2_log_descriptor) + (2 * sizeof(__be64) - 1)) & \
-	 ~(2 * sizeof(__be64) - 1))
 extern const struct gfs2_log_operations *gfs2_log_ops[];
 extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
 extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
-			   unsigned size, unsigned offset, u64 blkno);
-extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
+extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+			   struct page *page, unsigned size, unsigned offset,
+			   u64 blkno);
 extern void gfs2_log_submit_bio(struct bio **biop, int opf);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
			    struct gfs2_log_header_host *head, bool keep_cache);
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
-	unsigned int limit;
-	limit = (sdp->sd_sb.sb_bsize - BUF_OFFSET) / sizeof(__be64);
-	return limit;
+	return sdp->sd_ldptrs;
 }
 static inline unsigned int databuf_limit(struct gfs2_sbd *sdp)
 {
-	unsigned int limit;
-	limit = (sdp->sd_sb.sb_bsize - DATABUF_OFFSET) / (2 * sizeof(__be64));
-	return limit;
+	return sdp->sd_ldptrs / 2;
 }
 static inline void lops_before_commit(struct gfs2_sbd *sdp,
...
@@ -98,7 +98,7 @@ static int __init init_gfs2_fs(void)
 	error = -ENOMEM;
 	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
-					      0, 0,
+					      0, SLAB_RECLAIM_ACCOUNT,
					      gfs2_init_glock_once);
 	if (!gfs2_glock_cachep)
 		goto fail_cachep1;
@@ -134,7 +134,7 @@ static int __init init_gfs2_fs(void)
 	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
-					       0, 0, NULL);
+					       0, SLAB_RECLAIM_ACCOUNT, NULL);
 	if (!gfs2_quotad_cachep)
 		goto fail_cachep6;
...
@@ -136,8 +136,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	init_rwsem(&sdp->sd_log_flush_lock);
 	atomic_set(&sdp->sd_log_in_flight, 0);
-	atomic_set(&sdp->sd_reserving_log, 0);
-	init_waitqueue_head(&sdp->sd_reserving_log_wait);
 	init_waitqueue_head(&sdp->sd_log_flush_wait);
 	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 	mutex_init(&sdp->sd_freeze_mutex);
@@ -171,7 +169,8 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 		return -EINVAL;
 	}
-	if (sb->sb_fs_format != GFS2_FORMAT_FS ||
+	if (sb->sb_fs_format < GFS2_FS_FORMAT_MIN ||
+	    sb->sb_fs_format > GFS2_FS_FORMAT_MAX ||
 	    sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
 		fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 		return -EINVAL;
@@ -179,7 +178,7 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 	if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
 	    (sb->sb_bsize & (sb->sb_bsize - 1))) {
-		pr_warn("Invalid superblock size\n");
+		pr_warn("Invalid block size\n");
 		return -EINVAL;
 	}
@@ -317,6 +316,13 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
			  sizeof(struct gfs2_meta_header))
			  * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
+	/*
+	 * We always keep at least one block reserved for revokes in
+	 * transactions. This greatly simplifies allocating additional
+	 * revoke blocks.
+	 */
+	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
 	/* Compute maximum reservation required to add a entry to a directory */
 	hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
@@ -488,6 +494,19 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
 		goto out;
 	}
+	switch(sdp->sd_sb.sb_fs_format) {
+	case GFS2_FS_FORMAT_MAX:
+		sb->s_xattr = gfs2_xattr_handlers_max;
+		break;
+	case GFS2_FS_FORMAT_MIN:
+		sb->s_xattr = gfs2_xattr_handlers_min;
+		break;
+	default:
+		BUG();
+	}
 	/* Set up the buffer cache and SB for real */
 	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
 		ret = -EINVAL;
@@ -1032,13 +1051,14 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
 	}
 	if (lm->lm_mount == NULL) {
-		fs_info(sdp, "Now mounting FS...\n");
+		fs_info(sdp, "Now mounting FS (format %u)...\n", sdp->sd_sb.sb_fs_format);
 		complete_all(&sdp->sd_locking_init);
 		return 0;
 	}
 	ret = lm->lm_mount(sdp, table);
 	if (ret == 0)
-		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+		fs_info(sdp, "Joined cluster. Now mounting FS (format %u)...\n",
+			sdp->sd_sb.sb_fs_format);
 	complete_all(&sdp->sd_locking_init);
 	return ret;
 }
@@ -1084,6 +1104,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	int silent = fc->sb_flags & SB_SILENT;
 	struct gfs2_sbd *sdp;
 	struct gfs2_holder mount_gh;
+	struct gfs2_holder freeze_gh;
 	int error;
 	sdp = init_sbd(sb);
@@ -1107,7 +1128,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	sb->s_op = &gfs2_super_ops;
 	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
-	sb->s_xattr = gfs2_xattr_handlers;
 	sb->s_qcop = &gfs2_quotactl_ops;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
@@ -1156,6 +1176,10 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	if (error)
 		goto fail_locking;
+	/* Turn rgrplvb on by default if fs format is recent enough */
+	if (!sdp->sd_args.ar_got_rgrplvb && sdp->sd_sb.sb_fs_format > 1801)
+		sdp->sd_args.ar_rgrplvb = 1;
 	error = wait_on_journal(sdp);
 	if (error)
 		goto fail_sb;
@@ -1195,25 +1219,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 		goto fail_per_node;
 	}
-	if (sb_rdonly(sb)) {
-		struct gfs2_holder freeze_gh;
-		error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-					   LM_FLAG_NOEXP | GL_EXACT,
-					   &freeze_gh);
-		if (error) {
-			fs_err(sdp, "can't make FS RO: %d\n", error);
-			goto fail_per_node;
-		}
-		gfs2_glock_dq_uninit(&freeze_gh);
-	} else {
-		error = gfs2_make_fs_rw(sdp);
-		if (error) {
-			fs_err(sdp, "can't make FS RW: %d\n", error);
-			goto fail_per_node;
-		}
-	}
+	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+	if (error)
+		goto fail_per_node;
+	if (!sb_rdonly(sb))
+		error = gfs2_make_fs_rw(sdp);
+	gfs2_freeze_unlock(&freeze_gh);
+	if (error) {
+		fs_err(sdp, "can't make FS RW: %d\n", error);
+		goto fail_per_node;
+	}
 	gfs2_glock_dq_uninit(&mount_gh);
 	gfs2_online_uevent(sdp);
 	return 0;
@@ -1456,6 +1473,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		break;
 	case Opt_rgrplvb:
 		args->ar_rgrplvb = result.boolean;
+		args->ar_got_rgrplvb = 1;
 		break;
 	case Opt_loccookie:
 		args->ar_loccookie = result.boolean;
@@ -1514,6 +1532,12 @@ static int gfs2_reconfigure(struct fs_context *fc)
 		fc->sb_flags |= SB_RDONLY;
 	if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
+		struct gfs2_holder freeze_gh;
+		error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+		if (error)
+			return -EINVAL;
 		if (fc->sb_flags & SB_RDONLY) {
 			error = gfs2_make_fs_ro(sdp);
 			if (error)
@@ -1523,6 +1547,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
 			if (error)
 				errorfc(fc, "unable to remount read-write");
 		}
+		gfs2_freeze_unlock(&freeze_gh);
 	}
 	sdp->sd_args = *newargs;
...
@@ -470,9 +470,7 @@ void gfs2_recover_func(struct work_struct *work)
 	/* Acquire a shared hold on the freeze lock */
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-				   GL_EXACT, &thaw_gh);
+	error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
 	if (error)
 		goto fail_gunlock_ji;
@@ -507,22 +505,24 @@ void gfs2_recover_func(struct work_struct *work)
 	/* We take the sd_log_flush_lock here primarily to prevent log
	 * flushes and simultaneous journal replays from stomping on
-	 * each other wrt sd_log_bio. */
+	 * each other wrt jd_log_bio. */
 	down_read(&sdp->sd_log_flush_lock);
 	for (pass = 0; pass < 2; pass++) {
 		lops_before_scan(jd, &head, pass);
 		error = foreach_descriptor(jd, head.lh_tail,
					   head.lh_blkno, pass);
 		lops_after_scan(jd, error, pass);
-		if (error)
+		if (error) {
+			up_read(&sdp->sd_log_flush_lock);
 			goto fail_gunlock_thaw;
+		}
 	}
 	recover_local_statfs(jd, &head);
 	clean_journal(jd, &head);
 	up_read(&sdp->sd_log_flush_lock);
-	gfs2_glock_dq_uninit(&thaw_gh);
+	gfs2_freeze_unlock(&thaw_gh);
 	t_rep = ktime_get();
 	fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
		"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
@@ -544,7 +544,7 @@ void gfs2_recover_func(struct work_struct *work)
 	goto done;
 fail_gunlock_thaw:
-	gfs2_glock_dq_uninit(&thaw_gh);
+	gfs2_freeze_unlock(&thaw_gh);
 fail_gunlock_ji:
 	if (jlocked) {
 		gfs2_glock_dq_uninit(&ji_gh);
...
@@ -77,7 +77,7 @@ extern int gfs2_fitrim(struct file *filp, void __user *argp);
 /* This is how to tell if a reservation is in the rgrp tree: */
 static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
 {
-	return rs && !RB_EMPTY_NODE(&rs->rs_node);
+	return !RB_EMPTY_NODE(&rs->rs_node);
 }
 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
@@ -88,4 +88,8 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
 }
 extern void check_and_update_goal(struct gfs2_inode *ip);
+extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
 #endif /* __RGRP_DOT_H__ */
@@ -81,19 +81,12 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
 {
 	struct gfs2_jdesc *jd;
-	int found = 0;
 	list_for_each_entry(jd, head, jd_list) {
-		if (jd->jd_jid == jid) {
-			found = 1;
-			break;
-		}
+		if (jd->jd_jid == jid)
+			return jd;
 	}
-	if (!found)
-		jd = NULL;
-	return jd;
+	return NULL;
 }
 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
@@ -165,7 +158,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
-	struct gfs2_holder freeze_gh;
 	struct gfs2_log_header_host head;
 	int error;
@@ -173,12 +165,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 	if (error)
 		return error;
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | GL_EXACT,
-				   &freeze_gh);
-	if (error)
-		goto fail_threads;
 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 	if (gfs2_withdrawn(sdp)) {
 		error = -EIO;
@@ -205,13 +191,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
-	gfs2_glock_dq_uninit(&freeze_gh);
 	return 0;
 fail:
-	gfs2_glock_dq_uninit(&freeze_gh);
-fail_threads:
 	if (sdp->sd_quotad_process)
 		kthread_stop(sdp->sd_quotad_process);
 	sdp->sd_quotad_process = NULL;
@@ -452,7 +434,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 	}
 	if (error)
-		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
out:
 	while (!list_empty(&list)) {
@@ -607,30 +589,9 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
 int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 {
-	struct gfs2_holder freeze_gh;
 	int error = 0;
 	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
-	gfs2_holder_mark_uninitialized(&freeze_gh);
-	if (sdp->sd_freeze_gl &&
-	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
-		if (!log_write_allowed) {
-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-						   LM_ST_SHARED, LM_FLAG_TRY |
-						   LM_FLAG_NOEXP | GL_EXACT,
-						   &freeze_gh);
-			if (error == GLR_TRYFAILED)
-				error = 0;
-		} else {
-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-						   LM_ST_SHARED,
-						   LM_FLAG_NOEXP | GL_EXACT,
-						   &freeze_gh);
-			if (error && !gfs2_withdrawn(sdp))
-				return error;
-		}
-	}
 	gfs2_flush_delete_work(sdp);
 	if (!log_write_allowed && current == sdp->sd_quotad_process)
 		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
@@ -650,18 +611,15 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
-		wait_event(sdp->sd_reserving_log_wait,
-			   atomic_read(&sdp->sd_reserving_log) == 0);
-		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
-				 sdp->sd_jdesc->jd_blocks);
+		wait_event_timeout(sdp->sd_log_waitq,
+				   gfs2_log_is_empty(sdp),
+				   HZ * 5);
+		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
 	} else {
-		wait_event_timeout(sdp->sd_reserving_log_wait,
-				   atomic_read(&sdp->sd_reserving_log) == 0,
+		wait_event_timeout(sdp->sd_log_waitq,
+				   gfs2_log_is_empty(sdp),
				   HZ * 5);
 	}
-	if (gfs2_holder_initialized(&freeze_gh))
-		gfs2_glock_dq_uninit(&freeze_gh);
 	gfs2_quota_cleanup(sdp);
 	if (!log_write_allowed)
@@ -770,10 +728,8 @@ void gfs2_freeze_func(struct work_struct *work)
 	struct super_block *sb = sdp->sd_vfs;
 	atomic_inc(&sb->s_active);
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
+	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
 	if (error) {
-		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
 		gfs2_assert_withdraw(sdp, 0);
 	} else {
 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
@@ -783,7 +739,7 @@ void gfs2_freeze_func(struct work_struct *work)
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
-		gfs2_glock_dq_uninit(&freeze_gh);
+		gfs2_freeze_unlock(&freeze_gh);
 	}
 	deactivate_super(sb);
 	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
@@ -851,7 +807,7 @@ static int gfs2_unfreeze(struct super_block *sb)
 		return 0;
 	}
-	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 	mutex_unlock(&sdp->sd_freeze_mutex);
 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
 }
@@ -1227,7 +1183,8 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 		goto out_qs;
 	}
-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
 	if (error)
 		goto out_qs;
...
@@ -11,6 +11,10 @@
 #include <linux/dcache.h>
 #include "incore.h"
+/* Supported fs format version range */
+#define GFS2_FS_FORMAT_MIN (1801)
+#define GFS2_FS_FORMAT_MAX (1802)
 extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
 static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
@@ -54,7 +58,9 @@ extern struct file_system_type gfs2meta_fs_type;
 extern const struct export_operations gfs2_export_ops;
 extern const struct super_operations gfs2_super_ops;
 extern const struct dentry_operations gfs2_dops;
-extern const struct xattr_handler *gfs2_xattr_handlers[];
+extern const struct xattr_handler *gfs2_xattr_handlers_max[];
+extern const struct xattr_handler **gfs2_xattr_handlers_min;
 #endif /* __SUPER_DOT_H__ */
@@ -560,6 +560,7 @@ TRACE_EVENT(gfs2_block_alloc,
		__field(	u8,	block_state	)
		__field(	u64,	rd_addr		)
		__field(	u32,	rd_free_clone	)
+		__field(	u32,	rd_requested	)
		__field(	u32,	rd_reserved	)
	),
@@ -571,17 +572,20 @@ TRACE_EVENT(gfs2_block_alloc,
		__entry->block_state	= block_state;
		__entry->rd_addr	= rgd->rd_addr;
		__entry->rd_free_clone	= rgd->rd_free_clone;
+		__entry->rd_requested	= rgd->rd_requested;
		__entry->rd_reserved	= rgd->rd_reserved;
	),
-	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
+	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rq:%u rr:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long)__entry->len,
		  block_state_name(__entry->block_state),
		  (unsigned long long)__entry->rd_addr,
-		  __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
+		  __entry->rd_free_clone,
+		  __entry->rd_requested,
+		  __entry->rd_reserved)
 );
 /* Keep track of multi-block reservations as they are allocated/freed */
@@ -595,33 +599,40 @@ TRACE_EVENT(gfs2_rs,
		__field(	dev_t,	dev		)
		__field(	u64,	rd_addr		)
		__field(	u32,	rd_free_clone	)
+		__field(	u32,	rd_requested	)
		__field(	u32,	rd_reserved	)
		__field(	u64,	inum		)
		__field(	u64,	start		)
-		__field(	u32,	free		)
+		__field(	u32,	requested	)
+		__field(	u32,	reserved	)
		__field(	u8,	func		)
	),
	TP_fast_assign(
-		__entry->dev		= rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
-		__entry->rd_addr	= rs->rs_rbm.rgd->rd_addr;
-		__entry->rd_free_clone	= rs->rs_rbm.rgd->rd_free_clone;
-		__entry->rd_reserved	= rs->rs_rbm.rgd->rd_reserved;
+		__entry->dev		= rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
+		__entry->rd_addr	= rs->rs_rgd->rd_addr;
+		__entry->rd_free_clone	= rs->rs_rgd->rd_free_clone;
+		__entry->rd_requested	= rs->rs_rgd->rd_requested;
+		__entry->rd_reserved	= rs->rs_rgd->rd_reserved;
		__entry->inum		= container_of(rs, struct gfs2_inode,
						       i_res)->i_no_addr;
-		__entry->start		= gfs2_rbm_to_block(&rs->rs_rbm);
-		__entry->free		= rs->rs_free;
+		__entry->start		= rs->rs_start;
+		__entry->requested	= rs->rs_requested;
+		__entry->reserved	= rs->rs_reserved;
		__entry->func		= func;
	),
-	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
+	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%u rq:%u rr:%u %s q:%u r:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long long)__entry->rd_addr,
-		  (unsigned long)__entry->rd_free_clone,
-		  (unsigned long)__entry->rd_reserved,
-		  rs_func_name(__entry->func), (unsigned long)__entry->free)
+		  __entry->rd_free_clone,
+		  __entry->rd_requested,
+		  __entry->rd_reserved,
+		  rs_func_name(__entry->func),
+		  __entry->requested,
+		  __entry->reserved)
 );
 #endif /* _TRACE_GFS2_H */
...
@@ -31,17 +31,17 @@ static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
 	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
		test_bit(TR_TOUCHED, &tr->tr_flags));
-	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
-		tr->tr_num_revoke, tr->tr_num_revoke_rm);
+		tr->tr_num_revoke);
 }
-int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
-		     unsigned int revokes)
+int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+		       unsigned int blocks, unsigned int revokes,
+		       unsigned long ip)
 {
-	struct gfs2_trans *tr;
-	int error;
+	unsigned int extra_revokes;
 	if (current->journal_info) {
		gfs2_print_trans(sdp, current->journal_info);
@@ -52,39 +52,72 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;
-	tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
-	if (!tr)
-		return -ENOMEM;
-	tr->tr_ip = _RET_IP_;
+	tr->tr_ip = ip;
 	tr->tr_blocks = blocks;
 	tr->tr_revokes = revokes;
-	tr->tr_reserved = 1;
-	set_bit(TR_ALLOCED, &tr->tr_flags);
-	if (blocks)
-		tr->tr_reserved += 6 + blocks;
-	if (revokes)
-		tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
+	tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
+	if (blocks) {
+		/*
+		 * The reserved blocks are either used for data or metadata.
+		 * We can have mixed data and metadata, each with its own log
+		 * descriptor block; see calc_reserved().
+		 */
+		tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
+	}
 	INIT_LIST_HEAD(&tr->tr_databuf);
 	INIT_LIST_HEAD(&tr->tr_buf);
 	INIT_LIST_HEAD(&tr->tr_list);
 	INIT_LIST_HEAD(&tr->tr_ail1_list);
 	INIT_LIST_HEAD(&tr->tr_ail2_list);
+	if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks))
+		return -EINVAL;
 	sb_start_intwrite(sdp->sd_vfs);
-	error = gfs2_log_reserve(sdp, tr->tr_reserved);
-	if (error)
-		goto fail;
+	/*
+	 * Try the reservations under sd_log_flush_lock to prevent log flushes
+	 * from creating inconsistencies between the number of allocated and
+	 * reserved revokes. If that fails, do a full-block allocation outside
+	 * of the lock to avoid stalling log flushes. Then, allot the
+	 * appropriate number of blocks to revokes, use as many revokes locally
+	 * as needed, and "release" the surplus into the revokes pool.
+	 */
+	down_read(&sdp->sd_log_flush_lock);
+	if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
+		goto reserved;
+	up_read(&sdp->sd_log_flush_lock);
+	gfs2_log_reserve(sdp, tr, &extra_revokes);
+	down_read(&sdp->sd_log_flush_lock);
+reserved:
+	gfs2_log_release_revokes(sdp, extra_revokes);
+	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+		gfs2_log_release_revokes(sdp, tr->tr_revokes);
+		up_read(&sdp->sd_log_flush_lock);
+		gfs2_log_release(sdp, tr->tr_reserved);
+		sb_end_intwrite(sdp->sd_vfs);
+		return -EROFS;
+	}
 	current->journal_info = tr;
 	return 0;
-fail:
-	sb_end_intwrite(sdp->sd_vfs);
-	kmem_cache_free(gfs2_trans_cachep, tr);
+}
+int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+		     unsigned int revokes)
+{
+	struct gfs2_trans *tr;
+	int error;
+	tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
+	if (!tr)
+		return -ENOMEM;
+	error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_);
+	if (error)
+		kmem_cache_free(gfs2_trans_cachep, tr);
 	return error;
 }
...@@ -92,37 +125,39 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) ...@@ -92,37 +125,39 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
{ {
struct gfs2_trans *tr = current->journal_info; struct gfs2_trans *tr = current->journal_info;
s64 nbuf; s64 nbuf;
int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);
current->journal_info = NULL; current->journal_info = NULL;
if (!test_bit(TR_TOUCHED, &tr->tr_flags)) { if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
gfs2_log_release_revokes(sdp, tr->tr_revokes);
up_read(&sdp->sd_log_flush_lock);
gfs2_log_release(sdp, tr->tr_reserved); gfs2_log_release(sdp, tr->tr_reserved);
if (alloced) { if (!test_bit(TR_ONSTACK, &tr->tr_flags))
gfs2_trans_free(sdp, tr); gfs2_trans_free(sdp, tr);
sb_end_intwrite(sdp->sd_vfs); sb_end_intwrite(sdp->sd_vfs);
}
return; return;
} }
gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);
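/*
 * Illustrative numbers (assumed, not from this commit): a transaction that
 * asked for tr_revokes == 10 but only issued tr_num_revoke == 7 revokes
 * hands the remaining 3 back to the revoke pool here, keeping the new
 * per-revoke accounting balanced.
 */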
nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new; nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
nbuf -= tr->tr_num_buf_rm; nbuf -= tr->tr_num_buf_rm;
nbuf -= tr->tr_num_databuf_rm; nbuf -= tr->tr_num_databuf_rm;
if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) && if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) ||
(tr->tr_num_revoke <= tr->tr_revokes))) gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes))
gfs2_print_trans(sdp, tr); gfs2_print_trans(sdp, tr);
gfs2_log_commit(sdp, tr); gfs2_log_commit(sdp, tr);
if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags)) if (!test_bit(TR_ONSTACK, &tr->tr_flags) &&
!test_bit(TR_ATTACHED, &tr->tr_flags))
gfs2_trans_free(sdp, tr); gfs2_trans_free(sdp, tr);
up_read(&sdp->sd_log_flush_lock); up_read(&sdp->sd_log_flush_lock);
if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS) if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_TRANS_END); GFS2_LFC_TRANS_END);
if (alloced) sb_end_intwrite(sdp->sd_vfs);
sb_end_intwrite(sdp->sd_vfs);
} }
static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
...@@ -262,7 +297,6 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) ...@@ -262,7 +297,6 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
{ {
struct gfs2_bufdata *bd, *tmp; struct gfs2_bufdata *bd, *tmp;
struct gfs2_trans *tr = current->journal_info;
unsigned int n = len; unsigned int n = len;
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
...@@ -274,7 +308,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) ...@@ -274,7 +308,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
if (bd->bd_gl) if (bd->bd_gl)
gfs2_glock_remove_revoke(bd->bd_gl); gfs2_glock_remove_revoke(bd->bd_gl);
kmem_cache_free(gfs2_bufdata_cachep, bd); kmem_cache_free(gfs2_bufdata_cachep, bd);
tr->tr_num_revoke_rm++; gfs2_log_release_revokes(sdp, 1);
if (--n == 0) if (--n == 0)
break; break;
} }
......
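Splitting gfs2_trans_begin() into a thin wrapper around __gfs2_trans_begin() lets a caller supply its own struct gfs2_trans instead of having one allocated from gfs2_trans_cachep. A minimal sketch of such a caller follows; the function name is made up for illustration, and it relies on the TR_ONSTACK handling in gfs2_trans_end() shown above.

/* Hypothetical caller: keep the transaction on the stack and mark it
 * TR_ONSTACK so gfs2_trans_end() will not try to free it. */
static int example_onstack_trans(struct gfs2_sbd *sdp, unsigned int blocks)
{
	struct gfs2_trans tr;
	int error;

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	error = __gfs2_trans_begin(&tr, sdp, blocks, 0, _RET_IP_);
	if (error)
		return error;

	/* ... journal metadata here, e.g. via gfs2_trans_add_meta() ... */

	gfs2_trans_end(sdp);
	return 0;
}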
...@@ -27,13 +27,16 @@ struct gfs2_glock; ...@@ -27,13 +27,16 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */ * block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested) static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
{ {
struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd; struct gfs2_rgrpd *rgd = ip->i_res.rs_rgd;
if (requested < rgd->rd_length) if (requested < rgd->rd_length)
return requested + 1; return requested + 1;
return rgd->rd_length; return rgd->rd_length;
} }
extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
unsigned int blocks, unsigned int revokes,
unsigned long ip);
extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
unsigned int revokes); unsigned int revokes);
......
...@@ -91,12 +91,39 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, ...@@ -91,12 +91,39 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
return error; return error;
} }
/**
* gfs2_freeze_lock - hold the freeze glock
* @sdp: the superblock
* @freeze_gh: pointer to the requested holder
* @caller_flags: any additional flags needed by the caller
*/
int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
int caller_flags)
{
int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
int error;
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
freeze_gh);
if (error && error != GLR_TRYFAILED)
fs_err(sdp, "can't lock the freeze lock: %d\n", error);
return error;
}
void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
{
if (gfs2_holder_initialized(freeze_gh))
gfs2_glock_dq_uninit(freeze_gh);
}
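The two helpers above wrap acquiring and dropping the freeze glock so callers no longer repeat the flag and error handling. A hedged sketch of a typical caller follows; the function name is hypothetical, and gfs2_make_fs_ro() appears only because the withdraw path below calls it.

static int example_go_readonly(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error;

	/* Take the freeze glock shared; no extra caller flags needed here. */
	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
	if (error)
		return error;

	error = gfs2_make_fs_ro(sdp);

	gfs2_freeze_unlock(&freeze_gh);
	return error;
}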
static void signal_our_withdraw(struct gfs2_sbd *sdp) static void signal_our_withdraw(struct gfs2_sbd *sdp)
{ {
struct gfs2_glock *gl = sdp->sd_live_gh.gh_gl; struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
struct inode *inode = sdp->sd_jdesc->jd_inode; struct inode *inode = sdp->sd_jdesc->jd_inode;
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *i_gl = ip->i_gl;
u64 no_formal_ino = ip->i_no_formal_ino; u64 no_formal_ino = ip->i_no_formal_ino;
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
int ret = 0; int ret = 0;
int tries; int tries;
...@@ -117,8 +144,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) ...@@ -117,8 +144,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
* therefore we need to clear SDF_JOURNAL_LIVE manually. * therefore we need to clear SDF_JOURNAL_LIVE manually.
*/ */
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
if (!sb_rdonly(sdp->sd_vfs)) if (!sb_rdonly(sdp->sd_vfs)) {
ret = gfs2_make_fs_ro(sdp); struct gfs2_holder freeze_gh;
gfs2_holder_mark_uninitialized(&freeze_gh);
if (sdp->sd_freeze_gl &&
!gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
ret = gfs2_freeze_lock(sdp, &freeze_gh,
log_write_allowed ? 0 : LM_FLAG_TRY);
if (ret == GLR_TRYFAILED)
ret = 0;
}
if (!ret)
ret = gfs2_make_fs_ro(sdp);
gfs2_freeze_unlock(&freeze_gh);
}
if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */ if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
if (!ret) if (!ret)
...@@ -141,7 +181,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) ...@@ -141,7 +181,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
thaw_super(sdp->sd_vfs); thaw_super(sdp->sd_vfs);
} else { } else {
wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
TASK_UNINTERRUPTIBLE);
} }
/* /*
...@@ -161,15 +202,15 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) ...@@ -161,15 +202,15 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
* on other nodes to be successful, otherwise we remain the owner of * on other nodes to be successful, otherwise we remain the owner of
* the glock as far as dlm is concerned. * the glock as far as dlm is concerned.
*/ */
if (gl->gl_ops->go_free) { if (i_gl->gl_ops->go_free) {
set_bit(GLF_FREEING, &gl->gl_flags); set_bit(GLF_FREEING, &i_gl->gl_flags);
wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE); wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
} }
/* /*
* Dequeue the "live" glock, but keep a reference so it's never freed. * Dequeue the "live" glock, but keep a reference so it's never freed.
*/ */
gfs2_glock_hold(gl); gfs2_glock_hold(live_gl);
gfs2_glock_dq_wait(&sdp->sd_live_gh); gfs2_glock_dq_wait(&sdp->sd_live_gh);
/* /*
* We enqueue the "live" glock in EX so that all other nodes * We enqueue the "live" glock in EX so that all other nodes
...@@ -208,7 +249,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) ...@@ -208,7 +249,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
gfs2_glock_nq(&sdp->sd_live_gh); gfs2_glock_nq(&sdp->sd_live_gh);
} }
gfs2_glock_queue_put(gl); /* drop the extra reference we acquired */ gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/* /*
......
...@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, ...@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
bool verbose); bool verbose);
extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
struct gfs2_holder *freeze_gh, int caller_flags);
extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
#define gfs2_io_error(sdp) \ #define gfs2_io_error(sdp) \
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__) gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
......
...@@ -70,6 +70,20 @@ static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize) ...@@ -70,6 +70,20 @@ static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
return 0; return 0;
} }
static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type)
{
switch(sdp->sd_sb.sb_fs_format) {
case GFS2_FS_FORMAT_MAX:
return true;
case GFS2_FS_FORMAT_MIN:
return type <= GFS2_EATYPE_SECURITY;
default:
return false;
}
}
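To make the effect of gfs2_eatype_valid() concrete, here is a small behaviour sketch; the sdp values are hypothetical, one with sb_fs_format == GFS2_FS_FORMAT_MAX and one with GFS2_FS_FORMAT_MIN.

/*
 * For an sdp whose sb_fs_format is GFS2_FS_FORMAT_MAX:
 *     gfs2_eatype_valid(sdp, GFS2_EATYPE_TRUSTED)  -> true
 * For an sdp whose sb_fs_format is GFS2_FS_FORMAT_MIN:
 *     gfs2_eatype_valid(sdp, GFS2_EATYPE_TRUSTED)  -> false
 *     gfs2_eatype_valid(sdp, GFS2_EATYPE_SECURITY) -> true
 *
 * A trusted entry found on an old-format filesystem therefore makes
 * ea_foreach_i() below take its fail path instead of calling ea_call().
 */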
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh, typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, void *private); struct gfs2_ea_header *prev, void *private);
...@@ -77,6 +91,7 @@ typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -77,6 +91,7 @@ typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
ea_call_t ea_call, void *data) ea_call_t ea_call, void *data)
{ {
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_header *ea, *prev = NULL; struct gfs2_ea_header *ea, *prev = NULL;
int error = 0; int error = 0;
...@@ -89,9 +104,8 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -89,9 +104,8 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <= if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
bh->b_data + bh->b_size)) bh->b_data + bh->b_size))
goto fail; goto fail;
if (!GFS2_EATYPE_VALID(ea->ea_type)) if (!gfs2_eatype_valid(sdp, ea->ea_type))
goto fail; goto fail;
error = ea_call(ip, bh, ea, prev, data); error = ea_call(ip, bh, ea, prev, data);
if (error) if (error)
return error; return error;
...@@ -259,7 +273,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -259,7 +273,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
return -EIO; return -EIO;
} }
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh); error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
LM_FLAG_NODE_SCOPE, &rg_gh);
if (error) if (error)
return error; return error;
...@@ -344,6 +359,7 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -344,6 +359,7 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private) void *private)
{ {
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct ea_list *ei = private; struct ea_list *ei = private;
struct gfs2_ea_request *er = ei->ei_er; struct gfs2_ea_request *er = ei->ei_er;
unsigned int ea_size; unsigned int ea_size;
...@@ -353,6 +369,8 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -353,6 +369,8 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
if (ea->ea_type == GFS2_EATYPE_UNUSED) if (ea->ea_type == GFS2_EATYPE_UNUSED)
return 0; return 0;
BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY &&
sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN);
switch (ea->ea_type) { switch (ea->ea_type) {
case GFS2_EATYPE_USR: case GFS2_EATYPE_USR:
prefix = "user."; prefix = "user.";
...@@ -366,8 +384,12 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -366,8 +384,12 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
prefix = "security."; prefix = "security.";
l = 9; l = 9;
break; break;
case GFS2_EATYPE_TRUSTED:
prefix = "trusted.";
l = 8;
break;
default: default:
BUG(); return 0;
} }
ea_size = l + ea->ea_name_len + 1; ea_size = l + ea->ea_name_len + 1;
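/*
 * Example (name chosen for illustration): a trusted xattr stored as "foo"
 * is listed as "trusted.foo", so ea_size = 8 + 3 + 1 = 12, the prefix plus
 * the name plus the terminating NUL.
 */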
...@@ -1386,7 +1408,8 @@ static int ea_dealloc_block(struct gfs2_inode *ip) ...@@ -1386,7 +1408,8 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
return -EIO; return -EIO;
} }
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh); error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
LM_FLAG_NODE_SCOPE, &gh);
if (error) if (error)
return error; return error;
...@@ -1464,7 +1487,25 @@ static const struct xattr_handler gfs2_xattr_security_handler = { ...@@ -1464,7 +1487,25 @@ static const struct xattr_handler gfs2_xattr_security_handler = {
.set = gfs2_xattr_set, .set = gfs2_xattr_set,
}; };
const struct xattr_handler *gfs2_xattr_handlers[] = { static bool
gfs2_xattr_trusted_list(struct dentry *dentry)
{
return capable(CAP_SYS_ADMIN);
}
static const struct xattr_handler gfs2_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.flags = GFS2_EATYPE_TRUSTED,
.list = gfs2_xattr_trusted_list,
.get = gfs2_xattr_get,
.set = gfs2_xattr_set,
};
const struct xattr_handler *gfs2_xattr_handlers_max[] = {
/* GFS2_FS_FORMAT_MAX */
&gfs2_xattr_trusted_handler,
/* GFS2_FS_FORMAT_MIN */
&gfs2_xattr_user_handler, &gfs2_xattr_user_handler,
&gfs2_xattr_security_handler, &gfs2_xattr_security_handler,
&posix_acl_access_xattr_handler, &posix_acl_access_xattr_handler,
...@@ -1472,3 +1513,4 @@ const struct xattr_handler *gfs2_xattr_handlers[] = { ...@@ -1472,3 +1513,4 @@ const struct xattr_handler *gfs2_xattr_handlers[] = {
NULL, NULL,
}; };
const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
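Because gfs2_xattr_handlers_min points one entry past the trusted handler, the mount path can hand either table to the VFS depending on the filesystem format. That wiring is not part of this hunk, so the sketch below is an assumption about how a caller might select the table; the function name is made up.

/* Assumed wiring, for illustration only: newer-format filesystems get the
 * full handler table including "trusted.*"; older ones get the table that
 * starts just past it. */
static void example_set_xattr_handlers(struct super_block *sb,
				       struct gfs2_sbd *sdp)
{
	if (sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MAX)
		sb->s_xattr = gfs2_xattr_handlers_max;
	else
		sb->s_xattr = gfs2_xattr_handlers_min;
}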
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
#define GFS2_FORMAT_DE 1200 #define GFS2_FORMAT_DE 1200
#define GFS2_FORMAT_QU 1500 #define GFS2_FORMAT_QU 1500
/* These are part of the superblock */ /* These are part of the superblock */
#define GFS2_FORMAT_FS 1801 #define GFS2_FORMAT_FS 1802
#define GFS2_FORMAT_MULTI 1900 #define GFS2_FORMAT_MULTI 1900
/* /*
...@@ -389,8 +389,9 @@ struct gfs2_leaf { ...@@ -389,8 +389,9 @@ struct gfs2_leaf {
#define GFS2_EATYPE_USR 1 #define GFS2_EATYPE_USR 1
#define GFS2_EATYPE_SYS 2 #define GFS2_EATYPE_SYS 2
#define GFS2_EATYPE_SECURITY 3 #define GFS2_EATYPE_SECURITY 3
#define GFS2_EATYPE_TRUSTED 4
#define GFS2_EATYPE_LAST 3 #define GFS2_EATYPE_LAST 4
#define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST) #define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST)
#define GFS2_EAFLAG_LAST 0x01 /* last ea in block */ #define GFS2_EAFLAG_LAST 0x01 /* last ea in block */
......