Commit 5cb5ce33 authored by David S. Miller

Merge branch 'rhash-cleanups'

NeilBrown says:

====================
A few rhashtables cleanups

2 patches fix documentation
1 fixes a bug in rhashtable_walk_start()
1 improves rhashtable_walk stability.

All reviewed and Acked.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8c2320e8 5d240a89
@@ -836,9 +836,8 @@ static inline void *__rhashtable_insert_fast(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhashtable_insert_fast(
 	struct rhashtable *ht, struct rhash_head *obj,
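
For context, a minimal sketch of how this documented behaviour is exercised. The struct, params, and helper names (my_obj, my_params, my_add) are hypothetical; with the default parameters, inserts that push residency past 70% schedule a deferred expansion rather than resizing inline.

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical object stored in the table. */
struct my_obj {
	u32 key;			/* lookup key */
	struct rhash_head node;		/* linkage owned by the rhashtable */
	struct rcu_head rcu;		/* for kfree_rcu() on removal */
};

static const struct rhashtable_params my_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct my_obj, key),
	.head_offset		= offsetof(struct my_obj, node),
	.automatic_shrinking	= true,	/* also allow the 30% shrink path */
};

/* Table set up once elsewhere with: rhashtable_init(&my_ht, &my_params); */

static int my_add(struct rhashtable *ht, struct my_obj *obj)
{
	/* Safe in atomic context: crossing ~70% residency only schedules
	 * deferred work to grow the table; nothing resizes synchronously.
	 */
	return rhashtable_insert_fast(ht, &obj->node, my_params);
}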
@@ -866,9 +865,8 @@ static inline int rhashtable_insert_fast(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhltable_insert_key(
 	struct rhltable *hlt, const void *key, struct rhlist_head *list,
@@ -890,9 +888,8 @@ static inline int rhltable_insert_key(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhltable_insert(
 	struct rhltable *hlt, struct rhlist_head *list,
@@ -922,9 +919,8 @@ static inline int rhltable_insert(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhashtable_lookup_insert_fast(
 	struct rhashtable *ht, struct rhash_head *obj,
@@ -981,9 +977,8 @@ static inline void *rhashtable_lookup_get_insert_fast(
  *
  * Lookups may occur in parallel with hashtable mutations and resizing.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  *
  * Returns zero on success.
  */
@@ -1134,8 +1129,8 @@ static inline int __rhashtable_remove_fast(
  * walk the bucket chain upon removal. The removal operation is thus
  * considerably slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
  *
  * Returns zero on success, -ENOENT if the entry could not be found.
  */
@@ -1156,8 +1151,8 @@ static inline int rhashtable_remove_fast(
  * walk the bucket chain upon removal. The removal operation is thus
  * considerably slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
  *
  * Returns zero on success, -ENOENT if the entry could not be found.
  */
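
And the matching removal path, again a sketch using the hypothetical my_obj/my_params above: with .automatic_shrinking set, deletions that drop residency below 30% schedule a deferred shrink; without it the table never shrinks.

static int my_del(struct rhashtable *ht, struct my_obj *obj)
{
	int err = rhashtable_remove_fast(ht, &obj->node, my_params);

	/* Readers may still hold RCU references, so free only after a
	 * grace period; 'rcu' is the rcu_head member from the sketch above.
	 */
	if (!err)
		kfree_rcu(obj, rcu);
	return err;
}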
@@ -1273,8 +1268,9 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
  * For a completely stable walk you should construct your own data
  * structure outside the hash table.
  *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
  *
  * You must call rhashtable_walk_exit after this function returns.
  */
@@ -668,8 +668,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * For a completely stable walk you should construct your own data
  * structure outside the hash table.
  *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
  *
  * You must call rhashtable_walk_exit after this function returns.
  */
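
A sketch of the walk API these comments document (my_obj as above): enter/exit bracket the whole iteration and may be called from any process context, while start/stop bracket each RCU-protected burst; an ERR_PTR(-EAGAIN) from rhashtable_walk_next() means a resize was hit and some entries may be seen twice.

static void my_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);	/* any process context, even non-preemptable */
	rhashtable_walk_start(&iter);		/* takes rcu_read_lock() */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize in progress; keep going */
			break;
		}
		/* ... process obj; must not sleep until walk_stop ... */
	}

	rhashtable_walk_stop(&iter);		/* drops rcu_read_lock() */
	rhashtable_walk_exit(&iter);
}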
@@ -726,6 +727,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
+	bool rhlist = ht->rhlist;
 
 	rcu_read_lock();
...@@ -734,11 +736,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter) ...@@ -734,11 +736,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
list_del(&iter->walker.list); list_del(&iter->walker.list);
spin_unlock(&ht->lock); spin_unlock(&ht->lock);
if (!iter->walker.tbl && !iter->end_of_table) { if (iter->end_of_table)
return 0;
if (!iter->walker.tbl) {
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
iter->slot = 0;
iter->skip = 0;
return -EAGAIN; return -EAGAIN;
} }
if (iter->p && !rhlist) {
/*
* We need to validate that 'p' is still in the table, and
* if so, update 'skip'
*/
struct rhash_head *p;
int skip = 0;
rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
skip++;
if (p == iter->p) {
iter->skip = skip;
goto found;
}
}
iter->p = NULL;
} else if (iter->p && rhlist) {
/* Need to validate that 'list' is still in the table, and
* if so, update 'skip' and 'p'.
*/
struct rhash_head *p;
struct rhlist_head *list;
int skip = 0;
rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
for (list = container_of(p, struct rhlist_head, rhead);
list;
list = rcu_dereference(list->next)) {
skip++;
if (list == iter->list) {
iter->p = p;
skip = skip;
goto found;
}
}
}
iter->p = NULL;
}
found:
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
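
The pattern this improves, sketched under the same assumptions (msleep needs <linux/delay.h>): a walker that drops out of RCU to sleep and then resumes. Previously the resume restarted the current bucket chain, repeating or missing entries if the chain had changed; with the revalidation above, iter->p (or iter->list) is searched for again and 'skip' is recomputed, so the walk continues exactly where it stopped whenever the current object is still in the table.

static void my_slow_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;		/* -EAGAIN after a resize */
		/* ... use obj under RCU here ... */
		rhashtable_walk_stop(&iter);	/* leave RCU so we can sleep */
		msleep(10);			/* hypothetical slow work */
		rhashtable_walk_start(&iter);	/* revalidates iter->p, rebuilds skip */
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}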
@@ -914,8 +957,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 	iter->walker.tbl = NULL;
 	spin_unlock(&ht->lock);
 
-	iter->p = NULL;
-
 out:
 	rcu_read_unlock();
 }