Commit ea6d6fc7 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] mbcache: add gfp_mask parameter to free() callback

Patch from Andreas Gruenbacher <agruen@suse.de>

Add a gfp_mask parameter to the free() callback so that the callback can
safely do I/O, etc. The free callback can now also fail.  This will be
needed by reiserfs.

The order of entries on the cache entry lru is reversed so that
list_for_each_safe() can be used. Several helper functions that don't
make the code any better are removed. Finally, there are a couple of
cosmetic cleanups.
parent 0828e38f
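For illustration only (not part of this patch): a minimal sketch of what a client's free() callback might look like under the new signature. The names are hypothetical, and the __GFP_FS check is just one plausible policy. A non-zero return tells mbcache that the entry could not be freed right now, so the entry goes back on the lru list and freeing is retried later.

/*
 * Hypothetical example -- not from this patch.  A client's free()
 * callback under the new int (*free)(struct mb_cache_entry *, int)
 * signature.  Returning non-zero means "could not free now"; mbcache
 * then puts the entry back on the lru list and tries again later.
 */
static int example_cache_entry_free(struct mb_cache_entry *ce, int gfp_mask)
{
        if (!(gfp_mask & __GFP_FS))
                return -1;      /* caller forbids fs recursion/I/O -- defer */

        /* ... release per-entry state here; blocking and I/O are allowed ... */
        return 0;               /* tell mbcache the entry may be freed */
}

static struct mb_cache_op example_cache_op = {
        .free = example_cache_entry_free,
};

Such a cache_op would then be passed to mb_cache_create(), which (per the hunk below) now defaults c_op.free to NULL when no operations are given.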
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -12,7 +12,7 @@
  *
  * There can only be one cache entry in a cache per device and block number.
  * Additional indexes need not be unique in this sense. The number of
- * additional indexes (=other criteria) can be hardwired (at compile time)
+ * additional indexes (=other criteria) can be hardwired at compile time
  * or specified at cache create time.
  *
  * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
@@ -22,7 +22,8 @@
  *
  * A valid cache entry is only in the lru list if no handles refer to it.
  * Invalid cache entries will be freed when the last handle to the cache
- * entry is released.
+ * entry is released. Entries that cannot be freed immediately are put
+ * back on the lru list.
  */

 #include <linux/kernel.h>
@@ -76,8 +77,8 @@ EXPORT_SYMBOL(mb_cache_entry_find_next);
 /*
  * Global data: list of all mbcache's, lru list, and a spinlock for
- * accessing cache data structures on SMP machines. (The lru list is
- * global across all mbcaches.)
+ * accessing cache data structures on SMP machines. The lru list is
+ * global across all mbcaches.
  */
 static LIST_HEAD(mb_cache_list);
@@ -101,90 +102,43 @@ mb_cache_indexes(struct mb_cache *cache)
 static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);

-static inline void
-__mb_cache_entry_takeout_lru(struct mb_cache_entry *ce)
-{
-        if (!list_empty(&ce->e_lru_list))
-                list_del_init(&ce->e_lru_list);
-}
-
-static inline void
-__mb_cache_entry_into_lru(struct mb_cache_entry *ce)
-{
-        list_add(&ce->e_lru_list, &mb_cache_lru_list);
-}
-
 static inline int
-__mb_cache_entry_in_lru(struct mb_cache_entry *ce)
+__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 {
-        return (!list_empty(&ce->e_lru_list));
-}
-
-/*
- * Insert the cache entry into all hashes.
- */
-static inline void
-__mb_cache_entry_link(struct mb_cache_entry *ce)
-{
-        struct mb_cache *cache = ce->e_cache;
-        unsigned int bucket;
-        int n;
-
-        bucket = hash_long((unsigned long)ce->e_bdev +
-                           (ce->e_block & 0xffffff), cache->c_bucket_bits);
-        list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
-        for (n=0; n<mb_cache_indexes(cache); n++) {
-                bucket = hash_long(ce->e_indexes[n].o_key,
-                                   cache->c_bucket_bits);
-                list_add(&ce->e_indexes[n].o_list,
-                         &cache->c_indexes_hash[n][bucket]);
-        }
+        return !list_empty(&ce->e_block_list);
 }

-/*
- * Remove the cache entry from all hashes.
- */
 static inline void
-__mb_cache_entry_unlink(struct mb_cache_entry *ce)
+__mb_cache_entry_unhash(struct mb_cache_entry *ce)
 {
         int n;

-        list_del_init(&ce->e_block_list);
-        for (n = 0; n < mb_cache_indexes(ce->e_cache); n++)
-                list_del(&ce->e_indexes[n].o_list);
-}
-
-static inline int
-__mb_cache_entry_is_linked(struct mb_cache_entry *ce)
-{
-        return (!list_empty(&ce->e_block_list));
-}
-
-static inline struct mb_cache_entry *
-__mb_cache_entry_read(struct mb_cache_entry *ce)
-{
-        __mb_cache_entry_takeout_lru(ce);
-        atomic_inc(&ce->e_used);
-        return ce;
+        if (__mb_cache_entry_is_hashed(ce)) {
+                list_del_init(&ce->e_block_list);
+                for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
+                        list_del(&ce->e_indexes[n].o_list);
+        }
 }

 static inline void
-__mb_cache_entry_forget(struct mb_cache_entry *ce)
+__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
 {
         struct mb_cache *cache = ce->e_cache;

         mb_assert(atomic_read(&ce->e_used) == 0);
-        atomic_dec(&cache->c_entry_count);
-        if (cache->c_op.free)
-                cache->c_op.free(ce);
-        kmem_cache_free(cache->c_entry_cache, ce);
+        if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
+                /* free failed -- put back on the lru list
+                   for freeing later. */
+                spin_lock(&mb_cache_spinlock);
+                list_add(&ce->e_lru_list, &mb_cache_lru_list);
+                spin_unlock(&mb_cache_spinlock);
+        } else {
+                kmem_cache_free(cache->c_entry_cache, ce);
+                atomic_dec(&cache->c_entry_count);
+        }
 }
@@ -192,15 +146,15 @@ static inline void
 __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
 {
         if (atomic_dec_and_test(&ce->e_used)) {
-                if (!__mb_cache_entry_is_linked(ce))
+                if (!__mb_cache_entry_is_hashed(ce))
                         goto forget;
-                __mb_cache_entry_into_lru(ce);
+                list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
         }
         spin_unlock(&mb_cache_spinlock);
         return;
 forget:
         spin_unlock(&mb_cache_spinlock);
-        __mb_cache_entry_forget(ce);
+        __mb_cache_entry_forget(ce, GFP_KERNEL);
 }
@@ -219,11 +173,11 @@ static int
 mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
 {
         LIST_HEAD(free_list);
-        struct list_head *l;
+        struct list_head *l, *ltmp;
         int count = 0;

         spin_lock(&mb_cache_spinlock);
-        list_for_each_prev(l, &mb_cache_list) {
+        list_for_each(l, &mb_cache_list) {
                 struct mb_cache *cache =
                         list_entry(l, struct mb_cache, c_cache_list);
                 mb_debug("cache %s (%d)", cache->c_name,
@@ -235,26 +189,19 @@ mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
                 spin_unlock(&mb_cache_spinlock);
                 goto out;
         }
-        while (nr_to_scan && !list_empty(&mb_cache_lru_list)) {
+        while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                 struct mb_cache_entry *ce =
-                        list_entry(mb_cache_lru_list.prev,
+                        list_entry(mb_cache_lru_list.next,
                                    struct mb_cache_entry, e_lru_list);
-                list_move(&ce->e_lru_list, &free_list);
-                if (__mb_cache_entry_is_linked(ce))
-                        __mb_cache_entry_unlink(ce);
-                nr_to_scan--;
+                list_move_tail(&ce->e_lru_list, &free_list);
+                __mb_cache_entry_unhash(ce);
         }
         spin_unlock(&mb_cache_spinlock);
-        l = free_list.prev;
-        while (l != &free_list) {
-                struct mb_cache_entry *ce = list_entry(l,
-                        struct mb_cache_entry, e_lru_list);
-                l = l->prev;
-                __mb_cache_entry_forget(ce);
-                count--;
+        list_for_each_safe(l, ltmp, &free_list) {
+                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
+                                                   e_lru_list), gfp_mask);
         }
 out:
-        mb_debug("%d remaining entries ", count);
         return count;
 }
@@ -292,10 +239,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
         if (!cache)
                 goto fail;
         cache->c_name = name;
+        cache->c_op.free = NULL;
         if (cache_op)
                 cache->c_op.free = cache_op->free;
-        else
-                cache->c_op.free = NULL;
         atomic_set(&cache->c_entry_count, 0);
         cache->c_bucket_bits = bucket_bits;
 #ifdef MB_CACHE_INDEXES_COUNT
@@ -354,27 +300,21 @@ void
 mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
 {
         LIST_HEAD(free_list);
-        struct list_head *l;
+        struct list_head *l, *ltmp;

         spin_lock(&mb_cache_spinlock);
-        l = mb_cache_lru_list.prev;
-        while (l != &mb_cache_lru_list) {
+        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                 struct mb_cache_entry *ce =
                         list_entry(l, struct mb_cache_entry, e_lru_list);
-                l = l->prev;
                 if (ce->e_bdev == bdev) {
-                        list_move(&ce->e_lru_list, &free_list);
-                        if (__mb_cache_entry_is_linked(ce))
-                                __mb_cache_entry_unlink(ce);
+                        list_move_tail(&ce->e_lru_list, &free_list);
+                        __mb_cache_entry_unhash(ce);
                 }
         }
         spin_unlock(&mb_cache_spinlock);
-        l = free_list.prev;
-        while (l != &free_list) {
-                struct mb_cache_entry *ce =
-                        list_entry(l, struct mb_cache_entry, e_lru_list);
-                l = l->prev;
-                __mb_cache_entry_forget(ce);
+        list_for_each_safe(l, ltmp, &free_list) {
+                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
+                                                   e_lru_list), GFP_KERNEL);
         }
 }
@@ -390,30 +330,24 @@ void
 mb_cache_destroy(struct mb_cache *cache)
 {
         LIST_HEAD(free_list);
-        struct list_head *l;
+        struct list_head *l, *ltmp;
         int n;

         spin_lock(&mb_cache_spinlock);
-        l = mb_cache_lru_list.prev;
-        while (l != &mb_cache_lru_list) {
+        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                 struct mb_cache_entry *ce =
                         list_entry(l, struct mb_cache_entry, e_lru_list);
-                l = l->prev;
                 if (ce->e_cache == cache) {
-                        list_move(&ce->e_lru_list, &free_list);
-                        if (__mb_cache_entry_is_linked(ce))
-                                __mb_cache_entry_unlink(ce);
+                        list_move_tail(&ce->e_lru_list, &free_list);
+                        __mb_cache_entry_unhash(ce);
                 }
         }
         list_del(&cache->c_cache_list);
         spin_unlock(&mb_cache_spinlock);
-        l = free_list.prev;
-        while (l != &free_list) {
-                struct mb_cache_entry *ce =
-                        list_entry(l, struct mb_cache_entry, e_lru_list);
-                l = l->prev;
-                __mb_cache_entry_forget(ce);
+        list_for_each_safe(l, ltmp, &free_list) {
+                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
+                                                   e_lru_list), GFP_KERNEL);
         }

         if (atomic_read(&cache->c_entry_count) > 0) {
@@ -427,7 +361,6 @@ mb_cache_destroy(struct mb_cache *cache)
         for (n=0; n < mb_cache_indexes(cache); n++)
                 kfree(cache->c_indexes_hash[n]);
         kfree(cache->c_block_hash);
-
         kfree(cache);
 }
@@ -481,8 +414,8 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
         struct list_head *l;
         int error = -EBUSY, n;

         bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                            cache->c_bucket_bits);
         spin_lock(&mb_cache_spinlock);
         list_for_each_prev(l, &cache->c_block_hash[bucket]) {
                 struct mb_cache_entry *ce =
@@ -490,12 +423,16 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
                 if (ce->e_bdev == bdev && ce->e_block == block)
                         goto out;
         }
-        mb_assert(!__mb_cache_entry_is_linked(ce));
+        __mb_cache_entry_unhash(ce);
         ce->e_bdev = bdev;
         ce->e_block = block;
-        for (n=0; n<mb_cache_indexes(cache); n++)
+        list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
+        for (n=0; n<mb_cache_indexes(cache); n++) {
                 ce->e_indexes[n].o_key = keys[n];
-        __mb_cache_entry_link(ce);
+                bucket = hash_long(keys[n], cache->c_bucket_bits);
+                list_add(&ce->e_indexes[n].o_list,
+                         &cache->c_indexes_hash[n][bucket]);
+        }
 out:
         spin_unlock(&mb_cache_spinlock);
         return error;
@@ -528,9 +465,8 @@ void
 mb_cache_entry_takeout(struct mb_cache_entry *ce)
 {
         spin_lock(&mb_cache_spinlock);
-        mb_assert(!__mb_cache_entry_in_lru(ce));
-        if (__mb_cache_entry_is_linked(ce))
-                __mb_cache_entry_unlink(ce);
+        mb_assert(list_empty(&ce->e_lru_list));
+        __mb_cache_entry_unhash(ce);
         spin_unlock(&mb_cache_spinlock);
 }
@@ -545,9 +481,8 @@ void
 mb_cache_entry_free(struct mb_cache_entry *ce)
 {
         spin_lock(&mb_cache_spinlock);
-        mb_assert(!__mb_cache_entry_in_lru(ce));
-        if (__mb_cache_entry_is_linked(ce))
-                __mb_cache_entry_unlink(ce);
+        mb_assert(list_empty(&ce->e_lru_list));
+        __mb_cache_entry_unhash(ce);
         __mb_cache_entry_release_unlock(ce);
 }
@@ -587,7 +522,9 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
         list_for_each(l, &cache->c_block_hash[bucket]) {
                 ce = list_entry(l, struct mb_cache_entry, e_block_list);
                 if (ce->e_bdev == bdev && ce->e_block == block) {
-                        ce = __mb_cache_entry_read(ce);
+                        if (!list_empty(&ce->e_lru_list))
+                                list_del_init(&ce->e_lru_list);
+                        atomic_inc(&ce->e_used);
                         goto cleanup;
                 }
         }
@@ -608,11 +545,11 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
                 struct mb_cache_entry *ce =
                         list_entry(l, struct mb_cache_entry,
                                    e_indexes[index].o_list);
-                if (ce->e_bdev == bdev &&
-                    ce->e_indexes[index].o_key == key) {
-                        ce = __mb_cache_entry_read(ce);
-                        if (ce)
-                                return ce;
+                if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
+                        if (!list_empty(&ce->e_lru_list))
+                                list_del_init(&ce->e_lru_list);
+                        atomic_inc(&ce->e_used);
+                        return ce;
                 }
                 l = l->next;
         }
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -10,7 +10,7 @@
 struct mb_cache_entry;

 struct mb_cache_op {
-        void (*free)(struct mb_cache_entry *);
+        int (*free)(struct mb_cache_entry *, int);
 };

 struct mb_cache {