Commit 44430612 authored by Matthew Wilcox, committed by Linus Torvalds

rxrpc: abstract away knowledge of IDR internals

Add idr_get_cursor() / idr_set_cursor() APIs, and remove the reference
to IDR_SIZE.

Link: http://lkml.kernel.org/r/1480369871-5271-65-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Tested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 37f4915f
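
For illustration only (not part of this commit): a minimal sketch of driving the cyclic allocator through the new accessors. The IDR name conn_ids, the helper example_seed(), and the ID range are hypothetical.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/printk.h>

static DEFINE_IDR(conn_ids);

/* Seed the cyclic allocator, allocate one ID, and read the cursor back. */
static int example_seed(void *conn)
{
	int id;

	idr_set_cursor(&conn_ids, 100);		/* the next search starts at 100 */

	id = idr_alloc_cyclic(&conn_ids, conn, 1, 0x40000000, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOMEM, or -ENOSPC if the range is full */

	/* The cursor now sits just past the ID that was handed out. */
	pr_info("allocated %d, cursor now %u\n", id, idr_get_cursor(&conn_ids));
	return 0;
}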
include/linux/idr.h
@@ -55,6 +55,32 @@ struct idr {
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
+/**
+ * idr_get_cursor - Return the current position of the cyclic allocator
+ * @idr: idr handle
+ *
+ * The value returned is the value that will be next returned from
+ * idr_alloc_cyclic() if it is free (otherwise the search will start from
+ * this position).
+ */
+static inline unsigned int idr_get_cursor(struct idr *idr)
+{
+	return READ_ONCE(idr->cur);
+}
+
+/**
+ * idr_set_cursor - Set the current position of the cyclic allocator
+ * @idr: idr handle
+ * @val: new position
+ *
+ * The next call to idr_alloc_cyclic() will return @val if it is free
+ * (otherwise the search will start from this position).
+ */
+static inline void idr_set_cursor(struct idr *idr, unsigned int val)
+{
+	WRITE_ONCE(idr->cur, val);
+}
+
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
...
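
The "(otherwise the search will start from this position)" caveat above means a rewound cursor can never hand out a duplicate ID. A sketch of using that to checkpoint and restore the allocation position; example_rewind() and its parameters are hypothetical:

/* Rewind the allocator to a checkpointed position.  If that ID is
 * already in use, idr_alloc_cyclic() skips ahead to the next free
 * slot instead of returning it twice. */
static int example_rewind(struct idr *idr, void *ptr, unsigned int checkpoint)
{
	unsigned int saved = idr_get_cursor(idr);
	int id;

	idr_set_cursor(idr, checkpoint);
	id = idr_alloc_cyclic(idr, ptr, 1, 0, GFP_KERNEL);	/* end <= 0: no upper bound */
	if (id < 0)
		idr_set_cursor(idr, saved);	/* restore the old position on failure */
	return id;
}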
net/rxrpc/af_rxrpc.c
@@ -762,16 +762,17 @@ static const struct net_proto_family rxrpc_family_ops = {
 static int __init af_rxrpc_init(void)
 {
 	int ret = -1;
+	unsigned int tmp;
 
 	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
 
 	get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
 	rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
-	get_random_bytes(&rxrpc_client_conn_ids.cur,
-			 sizeof(rxrpc_client_conn_ids.cur));
-	rxrpc_client_conn_ids.cur &= 0x3fffffff;
-	if (rxrpc_client_conn_ids.cur == 0)
-		rxrpc_client_conn_ids.cur = 1;
+	get_random_bytes(&tmp, sizeof(tmp));
+	tmp &= 0x3fffffff;
+	if (tmp == 0)
+		tmp = 1;
+	idr_set_cursor(&rxrpc_client_conn_ids, tmp);
 
 	ret = -ENOMEM;
 	rxrpc_call_jar = kmem_cache_create(
...
net/rxrpc/conn_client.c
@@ -263,12 +263,12 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
 	 * times the maximum number of client conns away from the current
 	 * allocation point to try and keep the IDs concentrated.
 	 */
-	id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur);
+	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
 	id = conn->proto.cid >> RXRPC_CIDSHIFT;
 	distance = id - id_cursor;
 	if (distance < 0)
 		distance = -distance;
 
-	limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4;
+	limit = max(rxrpc_max_client_connections * 4, 1024U);
 	if (distance > limit)
 		goto mark_dont_reuse;
...
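
For reference, the reuse window the hunk above computes, pulled out as a hypothetical standalone predicate (example_id_in_window() is illustrative; the 4x multiplier and the 1024 floor come from the new limit calculation):

#include <linux/kernel.h>	/* max() */
#include <linux/types.h>	/* bool */

/* True if @id lies within the reuse window around the allocation cursor. */
static bool example_id_in_window(unsigned int id, unsigned int cursor,
				 unsigned int max_conns)
{
	unsigned int distance = id > cursor ? id - cursor : cursor - id;
	unsigned int limit = max(max_conns * 4, 1024U);

	return distance <= limit;
}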