Commit af1e76d6 authored by Mingming Cao, committed by Theodore Ts'o

JBD2: jbd2 slab allocation cleanups

JBD2: Replace slab allocations with page allocations

JBD2 allocates memory for committed_data and frozen_data from the slab. However,
JBD2 should not pass slab pages down to the block layer. Use page allocator
pages instead. This will also prepare JBD2 for the large blocksize patchset.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
parent c089d490
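
The heart of the patch is the pair of inline helpers added to include/linux/jbd2.h (the last file in the diff below): journal buffers now come from the page allocator instead of per-blocksize slab caches, so they are always page-aligned and can never straddle a page boundary, which slab objects can do under SLAB_DEBUG, as the removed comment in journal.c notes. A sketch of the replacement with the page-order arithmetic annotated (the 4 KiB PAGE_SIZE in the comments is an illustrative assumption, not part of the patch):

/* The helpers from the jbd2.h hunk, with explanatory comments added.
 * get_order(size) rounds size up to a power-of-two number of pages;
 * assuming 4 KiB pages: 1k, 2k and 4k buffers need order 0 (one page)
 * and 8k buffers need order 1 (two contiguous pages). The tradeoff is
 * that a sub-page blocksize now pins a whole page per buffer. */
static inline void *jbd2_alloc(size_t size, gfp_t flags)
{
        return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd2_free(void *ptr, size_t size)
{
        free_pages((unsigned long)ptr, get_order(size));
}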
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -384,7 +384,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 			struct buffer_head *bh = jh2bh(jh);
 
 			jbd_lock_bh_state(bh);
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			jbd_unlock_bh_state(bh);
 		}
@@ -801,14 +801,14 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		 * Otherwise, we can just throw away the frozen data now.
 		 */
 		if (jh->b_committed_data) {
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			if (jh->b_frozen_data) {
 				jh->b_committed_data = jh->b_frozen_data;
 				jh->b_frozen_data = NULL;
 			}
 		} else if (jh->b_frozen_data) {
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 			jh->b_frozen_data = NULL;
 		}
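
Background for the two commit.c hunks above: jh->b_frozen_data holds the frozen pre-modification copy of a buffer that is written to the journal, and jh->b_committed_data holds the last-committed copy that jbd2_journal_get_undo_access() exposes for bitmap-style buffers. The second hunk changes only the allocator; the handoff it preserves, restated with comments:

if (jh->b_committed_data) {
        /* The undo copy from the previous commit is now stale. */
        jbd2_free(jh->b_committed_data, bh->b_size);
        jh->b_committed_data = NULL;
        if (jh->b_frozen_data) {
                /* The copy that just hit the journal becomes the
                 * new last-committed state for undo access. */
                jh->b_committed_data = jh->b_frozen_data;
                jh->b_frozen_data = NULL;
        }
} else if (jh->b_frozen_data) {
        /* Nobody wants an undo copy; drop the journal copy. */
        jbd2_free(jh->b_frozen_data, bh->b_size);
        jh->b_frozen_data = NULL;
}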
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -84,7 +84,6 @@ EXPORT_SYMBOL(jbd2_journal_force_commit);
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int jbd2_journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -335,10 +334,10 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd2_slab_free(tmp, bh_in->b_size);
+			jbd2_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
@@ -1096,13 +1095,6 @@ int jbd2_journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (jbd2_journal_recover(journal))
@@ -1635,77 +1627,6 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 	return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
 
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size)  (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
-};
-
-static void jbd2_journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int jbd2_journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd2_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd2_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
 /*
  * Journal_head storage management
  */
@@ -1893,13 +1814,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 			printk(KERN_WARNING "%s: freeing "
 					"b_frozen_data\n",
 					__FUNCTION__);
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 		}
 		if (jh->b_committed_data) {
 			printk(KERN_WARNING "%s: freeing "
 					"b_committed_data\n",
 					__FUNCTION__);
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
@@ -2040,7 +1961,6 @@ static void jbd2_journal_destroy_caches(void)
 	jbd2_journal_destroy_revoke_caches();
 	jbd2_journal_destroy_jbd2_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
-	jbd2_journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
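
A note on the large block removed from journal.c above: the old scheme picked a cache with JBD_SLAB_INDEX(size), defined as size >> 11, which is also why jbd_slab_names has a NULL hole at index 3. Spelling out the arithmetic for the supported blocksizes:

/* JBD_SLAB_INDEX(size) == size >> 11:
 *   1024 >> 11 == 0  ->  "jbd2_1k"
 *   2048 >> 11 == 1  ->  "jbd2_2k"
 *   4096 >> 11 == 2  ->  "jbd2_4k"
 *   (no power-of-two blocksize shifts to 3, hence the NULL entry)
 *   8192 >> 11 == 4  ->  "jbd2_8k"
 * The page-allocator helpers need none of this bookkeeping, so the
 * create/destroy machinery and the journal_load call site go away. */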
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -236,7 +236,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle)
 /* Allocate a new handle.  This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
 	if (!handle)
 		return NULL;
 	memset(handle, 0, sizeof(*handle));
@@ -282,7 +282,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 
 	err = start_this_handle(journal, handle);
 	if (err < 0) {
-		jbd_free_handle(handle);
+		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
 	}
@@ -668,7 +668,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 			JBUFFER_TRACE(jh, "allocate memory for buffer");
 			jbd_unlock_bh_state(bh);
 			frozen_buffer =
-				jbd2_slab_alloc(jh2bh(jh)->b_size,
+				jbd2_alloc(jh2bh(jh)->b_size,
 						GFP_NOFS);
 			if (!frozen_buffer) {
 				printk(KERN_EMERG
@@ -728,7 +728,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd2_slab_free(frozen_buffer, bh->b_size);
+		jbd2_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -881,7 +881,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 				__FUNCTION__);
@@ -908,7 +908,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 out:
 	jbd2_journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd2_slab_free(committed_data, bh->b_size);
+		jbd2_free(committed_data, bh->b_size);
 	return err;
 }
@@ -1411,7 +1411,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	jbd_free_handle(handle);
+	jbd2_free_handle(handle);
 	return err;
 }
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -72,14 +72,22 @@ extern u8 jbd2_journal_enable_debug;
 #endif
 
 extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
-extern void jbd2_slab_free(void *ptr, size_t size);
 #define jbd_kmalloc(size, flags) \
 	__jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \
 	__jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
 
+static inline void *jbd2_alloc(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd2_free(void *ptr, size_t size)
+{
+	free_pages((unsigned long)ptr, get_order(size));
+};
+
 #define JBD2_MIN_JOURNAL_BLOCKS 1024
 
 #ifdef __KERNEL__
@@ -959,12 +967,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
  */
 extern struct kmem_cache *jbd2_handle_cache;
 
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
+static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
 {
 	return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
 }
 
-static inline void jbd_free_handle(handle_t *handle)
+static inline void jbd2_free_handle(handle_t *handle)
 {
 	kmem_cache_free(jbd2_handle_cache, handle);
 }
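
Finally, a hedged usage sketch of the new helpers, modeled on the do_get_write_access() hunks in transaction.c above; jh and bh are the usual journal_head/buffer_head pair, and locking plus the retry loop are omitted for brevity:

char *frozen_buffer;

/* GFP_NOFS: we are in the write path, so memory reclaim must not
 * recurse back into the filesystem. */
frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!frozen_buffer)
        return -ENOMEM;

/* ... snapshot the buffer contents into frozen_buffer ... */

/* Free with the matching size so the same page order is released. */
jbd2_free(frozen_buffer, bh->b_size);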