Commit cc330b55 authored by David S. Miller

Merge branch 'rhashtable-next'

Herbert Xu says:

====================
rhashtable: Multiple rehashing

This series introduces multiple rehashing.

Recall that the original implementation in br_multicast used
two list pointers per hash node and was therefore limited to at
most one rehash at a time, since you need one list pointer for
the old table and one for the new table.

Thanks to Josh Triplett's suggestion of using a single list pointer
we're no longer limited by that.  So it is perfectly OK to have
an arbitrary number of tables in existence at any one time.

Readers and removals simply have to walk from the oldest table
to the newest table in order not to miss anything.  Insertions
without lookup are just as easy: we simply go to the newest table
that we can find and add the entry there.
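
To make that walk concrete, here is a stand-alone user-space sketch
of the idea (the struct and function names below are simplified
stand-ins chosen for illustration, not the kernel's rhashtable types):

#include <stddef.h>
#include <string.h>

/* Hypothetical, simplified stand-ins for the kernel structures: each
 * node carries a single next pointer, and each table points at the
 * table that superseded it (if any).
 */
struct node {
	struct node *next;
	const char *key;
};

struct table {
	struct table *future_tbl;	/* newer table, NULL if newest */
	unsigned int size;		/* power of two */
	struct node **buckets;
};

static unsigned int bucket_of(const struct table *t, const char *key)
{
	unsigned int h = 0;

	while (*key)
		h = h * 31 + (unsigned char)*key++;
	return h & (t->size - 1);
}

/* Readers (and removals) walk every table from oldest to newest; an
 * entry that has already been moved out of an old table will be found
 * in a newer one, so nothing is missed.
 */
static struct node *lookup(struct table *oldest, const char *key)
{
	struct table *t;

	for (t = oldest; t; t = t->future_tbl) {
		struct node *n;

		for (n = t->buckets[bucket_of(t, key)]; n; n = n->next)
			if (!strcmp(n->key, key))
				return n;
	}
	return NULL;
}

/* An insertion that needs no uniqueness lookup just targets the newest
 * table in the chain.
 */
static struct table *newest_table(struct table *oldest)
{
	struct table *t = oldest;

	while (t->future_tbl)
		t = t->future_tbl;
	return t;
}

A removal works the same way: walk until the entry is found and unlink
it from whichever table it currently lives in.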

However, insertion with uniqueness lookup is more complicated
because we need to ensure that two simultaneous insertions of the
same key do not both succeed.  To achieve this, all insertions,
including those without lookups, are required to obtain the bucket
lock from the oldest hash table that is still alive.  This is
determined by having the rehasher (there is only one rehashing
thread in the system) keep a pointer to how far it has progressed.  If
a bucket has already been rehashed then it is dead, i.e., there
cannot be any more insertions to it; otherwise it is considered
alive.  This guarantees that the same key cannot be inserted
into two different tables in parallel.
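
A rough user-space model of the "oldest live bucket" rule is sketched
below (hypothetical, simplified types; the real code shares each lock
among several buckets and rehashes with a per-table hash seed):

#include <pthread.h>
#include <stddef.h>

/* Simplified model: one lock per bucket, and tbl->rehash is the
 * rehasher's progress pointer -- every bucket below it has already
 * been emptied into a newer table and is therefore dead.
 */
struct table {
	struct table *future_tbl;	/* newer table, NULL if newest */
	unsigned int size;		/* power of two */
	unsigned int rehash;		/* first bucket not yet rehashed */
	pthread_mutex_t *locks;		/* one lock per bucket here */
};

static unsigned int bucket_of(const struct table *t, unsigned int key_hash)
{
	return key_hash & (t->size - 1);
}

/* Lock the bucket for @key_hash in the oldest table where that bucket
 * is still alive, and return that table.  Two inserters of the same key
 * end up contending on the same lock, so duplicates cannot slip into
 * two different tables at once.
 */
static struct table *lock_oldest_live_bucket(struct table *oldest,
					     unsigned int key_hash)
{
	struct table *tbl = oldest;

	for (;;) {
		unsigned int hash = bucket_of(tbl, key_hash);

		pthread_mutex_lock(&tbl->locks[hash]);
		if (tbl->rehash <= hash)
			return tbl;	/* not rehashed yet: bucket is alive */

		/* Bucket already rehashed (dead); try the newer table. */
		pthread_mutex_unlock(&tbl->locks[hash]);
		tbl = tbl->future_tbl;
	}
}

The new __rhashtable_insert_fast() in this series performs the same
walk, using tbl->rehash as the progress pointer, as can be seen in the
diff below.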

Patch 1 is actually a bug fix for the walker.

Patches 2-5 eliminate unnecessary out-of-line copies of jhash.

Patch 6 makes rhashtable_shrink shrink to fit.

Patch 7 introduces multiple rehashing.  This means that if we
decide to grow then we will grow regardless of whether the previous
rehash has finished.  However, this is still asynchronous, meaning
that if insertions come fast enough we may still end up with a
table that is overutilised.
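
Conceptually the grow path now just appends another table to the end
of the chain, along the lines of this hypothetical sketch (simplified
types, no bucket handling):

#include <stdlib.h>

/* A grow decision no longer waits for an earlier rehash to finish; a
 * larger table is simply appended to the chain and the single rehasher
 * thread drains the chain from oldest to newest in the background.
 */
struct table {
	struct table *future_tbl;	/* newer table, NULL if newest */
	unsigned int size;
};

static int grow(struct table *oldest)
{
	struct table *newest = oldest;
	struct table *t;

	while (newest->future_tbl)
		newest = newest->future_tbl;

	t = calloc(1, sizeof(*t));
	if (!t)
		return -1;
	t->size = newest->size * 2;

	/* Even if the rehash of an older table is still in flight, the
	 * new table just extends the chain; insertions head for the
	 * newest table while the rehasher catches up.
	 */
	newest->future_tbl = t;
	return 0;
}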

Patch 8 adds support for GFP_ATOMIC allocations of struct bucket_table.

Finally patch 9 enables immediate rehashing.  This is done either
when the table reaches 100% utilisation, or when the chain length
exceeds 16 (the latter can be disabled on request, e.g., for
nft_hash).
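
For a user that wants the chain-length trigger disabled, the
parameters would look roughly like the hypothetical example below
(struct my_obj and its fields are made up for illustration;
insecure_elasticity is the knob added in this series):

#include <linux/rhashtable.h>

/* Hypothetical object; only the parameter block matters here. */
struct my_obj {
	struct rhash_head node;
	u32 key;
};

static const struct rhashtable_params my_params = {
	.head_offset		= offsetof(struct my_obj, node),
	.key_offset		= offsetof(struct my_obj, key),
	.key_len		= sizeof(u32),
	/* Opt out of the chain-length trigger; the 100% utilisation
	 * trigger for immediate rehashing still applies.
	 */
	.insecure_elasticity	= true,
};

Note that .hashfn is left unset on purpose: as the updated @hashfn
documentation in the diff says, jhash (or jhash2 when the key length
is a multiple of 4 bytes) is now the default, which is also why the
explicit .hashfn = jhash settings disappear from netlink and tipc
below.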

With these patches the system should no longer have any trouble
dealing with fast insertions on a small table.  In the worst
case you end up with a list of tables that's log N in length
while the rehasher catches up.

v3 restores rhashtable_shrink and fixes a number of bugs in the
multiple rehashing patches (7 and 9).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e167359b ccd57b1b
@@ -19,6 +19,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
+#include <linux/jhash.h>
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
@@ -102,8 +103,9 @@ struct rhashtable;
  * @max_size: Maximum size while expanding
  * @min_size: Minimum size while shrinking
  * @nulls_base: Base value to generate nulls marker
+ * @insecure_elasticity: Set to true to disable chain length checks
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @hashfn: Function to hash key
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
  * @obj_hashfn: Function to hash object
  * @obj_cmpfn: Function to compare key with object
  */
@@ -115,6 +117,7 @@ struct rhashtable_params {
 	unsigned int max_size;
 	unsigned int min_size;
 	u32 nulls_base;
+	bool insecure_elasticity;
 	size_t locks_mul;
 	rht_hashfn_t hashfn;
 	rht_obj_hashfn_t obj_hashfn;
@@ -125,6 +128,8 @@ struct rhashtable_params {
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
  * @nelems: Number of elements in table
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
@@ -134,6 +139,8 @@ struct rhashtable {
 	struct bucket_table __rcu *tbl;
 	atomic_t nelems;
 	bool being_destroyed;
+	unsigned int key_len;
+	unsigned int elasticity;
 	struct rhashtable_params p;
 	struct work_struct run_work;
 	struct mutex mutex;
@@ -199,9 +206,31 @@ static inline unsigned int rht_key_hashfn(
 	struct rhashtable *ht, const struct bucket_table *tbl,
 	const void *key, const struct rhashtable_params params)
 {
-	return rht_bucket_index(tbl, params.hashfn(key, params.key_len ?:
-						   ht->p.key_len,
-						   tbl->hash_rnd));
+	unsigned hash;
+
+	/* params must be equal to ht->p if it isn't constant. */
+	if (!__builtin_constant_p(params.key_len))
+		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+	else if (params.key_len) {
+		unsigned key_len = params.key_len;
+
+		if (params.hashfn)
+			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+		else if (key_len & (sizeof(u32) - 1))
+			hash = jhash(key, key_len, tbl->hash_rnd);
+		else
+			hash = jhash2(key, key_len / sizeof(u32),
+				      tbl->hash_rnd);
+	} else {
+		unsigned key_len = ht->p.key_len;
+
+		if (params.hashfn)
+			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+		else
+			hash = jhash(key, key_len, tbl->hash_rnd);
+	}
+
+	return rht_bucket_index(tbl, hash);
 }
 
 static inline unsigned int rht_head_hashfn(
@@ -241,6 +270,17 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
 	       tbl->size > ht->p.min_size;
 }
 
+/**
+ * rht_grow_above_100 - returns true if nelems > table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_100(const struct rhashtable *ht,
+				      const struct bucket_table *tbl)
+{
+	return atomic_read(&ht->nelems) > tbl->size;
+}
+
 /* The bucket lock is selected based on the hash and protects mutations
  * on a group of hash buckets.
  *
@@ -282,9 +322,7 @@ int rhashtable_init(struct rhashtable *ht,
 int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 			   struct rhash_head *obj,
 			   struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht);
 
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
-
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -507,43 +545,64 @@ static inline int __rhashtable_insert_fast(
 		.ht = ht,
 		.key = key,
 	};
-	int err = -EEXIST;
 	struct bucket_table *tbl, *new_tbl;
 	struct rhash_head *head;
 	spinlock_t *lock;
+	unsigned elasticity;
 	unsigned hash;
+	int err;
 
+restart:
 	rcu_read_lock();
 
 	tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = rht_head_hashfn(ht, tbl, obj, params);
-	lock = rht_bucket_lock(tbl, hash);
 
-	spin_lock_bh(lock);
+	/* All insertions must grab the oldest table containing
+	 * the hashed bucket that is yet to be rehashed.
+	 */
+	for (;;) {
+		hash = rht_head_hashfn(ht, tbl, obj, params);
+		lock = rht_bucket_lock(tbl, hash);
+		spin_lock_bh(lock);
 
-	/* Because we have already taken the bucket lock in tbl,
-	 * if we find that future_tbl is not yet visible then
-	 * that guarantees all other insertions of the same entry
-	 * will also grab the bucket lock in tbl because until
-	 * the rehash completes ht->tbl won't be changed.
-	 */
+		if (tbl->rehash <= hash)
+			break;
+
+		spin_unlock_bh(lock);
+		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	}
+
 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (unlikely(new_tbl)) {
 		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+		if (err == -EAGAIN)
+			goto slow_path;
 		goto out;
 	}
 
-	if (!key)
-		goto skip_lookup;
+	if (unlikely(rht_grow_above_100(ht, tbl))) {
+slow_path:
+		spin_unlock_bh(lock);
+		rcu_read_unlock();
+		err = rhashtable_insert_rehash(ht);
+		if (err)
+			return err;
+
+		goto restart;
+	}
 
+	err = -EEXIST;
+	elasticity = ht->elasticity;
 	rht_for_each(head, tbl, hash) {
-		if (unlikely(!(params.obj_cmpfn ?
+		if (key &&
+		    unlikely(!(params.obj_cmpfn ?
 			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
 			       rhashtable_compare(&arg, rht_obj(ht, head)))))
 			goto out;
+		if (!--elasticity)
+			goto slow_path;
 	}
 
-skip_lookup:
 	err = 0;
 
 	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
......
@@ -155,30 +155,6 @@ static int __init test_rhashtable(struct rhashtable *ht)
 	test_rht_lookup(ht);
 	rcu_read_unlock();
 
-	for (i = 0; i < TEST_NEXPANDS; i++) {
-		pr_info("  Table expansion iteration %u...\n", i);
-		mutex_lock(&ht->mutex);
-		rhashtable_expand(ht);
-		mutex_unlock(&ht->mutex);
-
-		rcu_read_lock();
-		pr_info("  Verifying lookups...\n");
-		test_rht_lookup(ht);
-		rcu_read_unlock();
-	}
-
-	for (i = 0; i < TEST_NEXPANDS; i++) {
-		pr_info("  Table shrinkage iteration %u...\n", i);
-		mutex_lock(&ht->mutex);
-		rhashtable_shrink(ht);
-		mutex_unlock(&ht->mutex);
-
-		rcu_read_lock();
-		pr_info("  Verifying lookups...\n");
-		test_rht_lookup(ht);
-		rcu_read_unlock();
-	}
-
 	rcu_read_lock();
 	test_bucket_stats(ht, true);
 	rcu_read_unlock();
......
@@ -3133,13 +3133,12 @@ static inline u32 netlink_hash(const void *data, u32 seed)
 	struct netlink_compare_arg arg;
 
 	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
-	return jhash(&arg, netlink_compare_arg_len, seed);
+	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
 }
 
 static const struct rhashtable_params netlink_rhashtable_params = {
 	.head_offset = offsetof(struct netlink_sock, node),
 	.key_len = netlink_compare_arg_len,
-	.hashfn = jhash,
 	.obj_hashfn = netlink_hash,
 	.obj_cmpfn = netlink_compare,
 	.max_size = 65536,
......
@@ -35,7 +35,6 @@
  */
 
 #include <linux/rhashtable.h>
-#include <linux/jhash.h>
 #include "core.h"
 #include "name_table.h"
 #include "node.h"
@@ -2294,7 +2293,6 @@ static const struct rhashtable_params tsk_rht_params = {
 	.head_offset = offsetof(struct tipc_sock, node),
 	.key_offset = offsetof(struct tipc_sock, portid),
 	.key_len = sizeof(u32), /* portid */
-	.hashfn = jhash,
 	.max_size = 1048576,
 	.min_size = 256,
 };
......