Commit 11d8b79e authored by Kees Cook, committed by Andreas Gruenbacher

gfs2: Use container_of() for gfs2_glock(aspace)

Clang's structure layout randomization feature gets upset when it sees
struct address_space (which is randomized) cast to struct gfs2_glock.
This is due to seeing the mapping pointer as being treated as an array
of gfs2_glock, rather than "something else, before struct address_space":

In file included from fs/gfs2/acl.c:23:
fs/gfs2/meta_io.h:44:12: error: casting from randomized structure pointer type 'struct address_space *' to 'struct gfs2_glock *'
	return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
		^

Replace the instances of open-coded pointer math with container_of()
usage, and update the allocator to match.

Some cleanups and conversion of gfs2_glock_get() and
gfs2_glock_dealloc() by Andreas.
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/lkml/202205041550.naKxwCBj-lkp@intel.com
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bill Wendling <morbo@google.com>
Cc: cluster-devel@redhat.com
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent 53bb540f
...@@ -127,9 +127,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu) ...@@ -127,9 +127,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
kfree(gl->gl_lksb.sb_lvbptr); kfree(gl->gl_lksb.sb_lvbptr);
if (gl->gl_ops->go_flags & GLOF_ASPACE) if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl); struct gfs2_glock_aspace *gla =
else container_of(gl, struct gfs2_glock_aspace, glock);
kmem_cache_free(gfs2_glock_aspace_cachep, gla);
} else
kmem_cache_free(gfs2_glock_cachep, gl); kmem_cache_free(gfs2_glock_cachep, gl);
} }
...@@ -1159,7 +1161,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, ...@@ -1159,7 +1161,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
.ln_sbd = sdp }; .ln_sbd = sdp };
struct gfs2_glock *gl, *tmp; struct gfs2_glock *gl, *tmp;
struct address_space *mapping; struct address_space *mapping;
struct kmem_cache *cachep;
int ret = 0; int ret = 0;
gl = find_insert_glock(&name, NULL); gl = find_insert_glock(&name, NULL);
...@@ -1170,20 +1171,24 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, ...@@ -1170,20 +1171,24 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!create) if (!create)
return -ENOENT; return -ENOENT;
if (glops->go_flags & GLOF_ASPACE) if (glops->go_flags & GLOF_ASPACE) {
cachep = gfs2_glock_aspace_cachep; struct gfs2_glock_aspace *gla =
else kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
cachep = gfs2_glock_cachep; if (!gla)
gl = kmem_cache_alloc(cachep, GFP_NOFS); return -ENOMEM;
gl = &gla->glock;
} else {
gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
if (!gl) if (!gl)
return -ENOMEM; return -ENOMEM;
}
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
gl->gl_ops = glops;
if (glops->go_flags & GLOF_LVB) { if (glops->go_flags & GLOF_LVB) {
gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) { if (!gl->gl_lksb.sb_lvbptr) {
kmem_cache_free(cachep, gl); gfs2_glock_dealloc(&gl->gl_rcu);
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -1197,7 +1202,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, ...@@ -1197,7 +1202,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_state = LM_ST_UNLOCKED; gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED; gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE; gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_ops = glops;
gl->gl_dstamp = 0; gl->gl_dstamp = 0;
preempt_disable(); preempt_disable();
/* We use the global stats to estimate the initial per-glock stats */ /* We use the global stats to estimate the initial per-glock stats */
...@@ -1234,8 +1238,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, ...@@ -1234,8 +1238,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
*glp = tmp; *glp = tmp;
out_free: out_free:
kfree(gl->gl_lksb.sb_lvbptr); gfs2_glock_dealloc(&gl->gl_rcu);
kmem_cache_free(cachep, gl);
if (atomic_dec_and_test(&sdp->sd_glock_disposal)) if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait); wake_up(&sdp->sd_glock_wait);
......
...@@ -138,6 +138,11 @@ struct lm_lockops { ...@@ -138,6 +138,11 @@ struct lm_lockops {
const match_table_t *lm_tokens; const match_table_t *lm_tokens;
}; };
/*
 * A glock and its page-cache address space, allocated together as one
 * object (from gfs2_glock_aspace_cachep).  Keeping both in a named
 * struct lets code convert between them with container_of() instead of
 * the old open-coded pointer arithmetic ("(gl + 1)" /
 * "((struct gfs2_glock *)mapping) - 1"), which broke under Clang's
 * structure layout randomization of struct address_space.
 */
struct gfs2_glock_aspace {
struct gfs2_glock glock;      /* must be converted via container_of(), not casts */
struct address_space mapping; /* page cache for this glock's metadata */
};
extern struct workqueue_struct *gfs2_delete_workqueue; extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{ {
...@@ -179,8 +184,11 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl) ...@@ -179,8 +184,11 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl) static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{ {
if (gl->gl_ops->go_flags & GLOF_ASPACE) if (gl->gl_ops->go_flags & GLOF_ASPACE) {
return (struct address_space *)(gl + 1); struct gfs2_glock_aspace *gla =
container_of(gl, struct gfs2_glock_aspace, glock);
return &gla->mapping;
}
return NULL; return NULL;
} }
......
...@@ -62,11 +62,10 @@ static void gfs2_init_glock_once(void *foo) ...@@ -62,11 +62,10 @@ static void gfs2_init_glock_once(void *foo)
static void gfs2_init_gl_aspace_once(void *foo) static void gfs2_init_gl_aspace_once(void *foo)
{ {
struct gfs2_glock *gl = foo; struct gfs2_glock_aspace *gla = foo;
struct address_space *mapping = (struct address_space *)(gl + 1);
gfs2_init_glock_once(gl); gfs2_init_glock_once(&gla->glock);
address_space_init_once(mapping); address_space_init_once(&gla->mapping);
} }
/** /**
...@@ -104,8 +103,7 @@ static int __init init_gfs2_fs(void) ...@@ -104,8 +103,7 @@ static int __init init_gfs2_fs(void)
goto fail_cachep1; goto fail_cachep1;
gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)", gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
sizeof(struct gfs2_glock) + sizeof(struct gfs2_glock_aspace),
sizeof(struct address_space),
0, 0, gfs2_init_gl_aspace_once); 0, 0, gfs2_init_gl_aspace_once);
if (!gfs2_glock_aspace_cachep) if (!gfs2_glock_aspace_cachep)
......
...@@ -40,9 +40,11 @@ extern const struct address_space_operations gfs2_rgrp_aops; ...@@ -40,9 +40,11 @@ extern const struct address_space_operations gfs2_rgrp_aops;
static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
if (mapping->a_ops == &gfs2_meta_aops) if (mapping->a_ops == &gfs2_meta_aops) {
return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd; struct gfs2_glock_aspace *gla =
else if (mapping->a_ops == &gfs2_rgrp_aops) container_of(mapping, struct gfs2_glock_aspace, mapping);
return gla->glock.gl_name.ln_sbd;
} else if (mapping->a_ops == &gfs2_rgrp_aops)
return container_of(mapping, struct gfs2_sbd, sd_aspace); return container_of(mapping, struct gfs2_sbd, sd_aspace);
else else
return inode->i_sb->s_fs_info; return inode->i_sb->s_fs_info;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment