Commit f6d8cb2e authored by Eric Dumazet, committed by David S. Miller

inet: reduce TLB pressure for listeners

It seems overkill to use vmalloc() for typical listeners with fewer than
2048 hash buckets. Try kmalloc() first and fall back to vmalloc() to reduce
TLB pressure.

Use the kvfree() helper, as it is now available.
Use ilog2() instead of a loop.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bb446c19
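
A minimal sketch of the allocation pattern this patch applies, shown as a standalone pair of kernel helpers rather than the patched function itself: try kzalloc() while the request stays below the costly-order threshold so the table sits in the kernel's linear mapping (fewer TLB misses), fall back to vzalloc() otherwise, and release either kind of memory with kvfree(). The helper names table_zalloc()/table_free() are hypothetical and only for illustration; the APIs and flags are the ones the diff below uses.

/*
 * Sketch only (not part of the patch): kmalloc-first allocation with a
 * vmalloc fallback, freed uniformly with kvfree().
 */
#include <linux/slab.h>		/* kzalloc(), GFP_* flags */
#include <linux/vmalloc.h>	/* vzalloc() */
#include <linux/mm.h>		/* kvfree(), PAGE_ALLOC_COSTLY_ORDER */

static void *table_zalloc(size_t size)	/* hypothetical helper name */
{
	void *p = NULL;

	/* Small enough to be a non-costly page allocation: try kzalloc(),
	 * but do not retry hard or warn, since vzalloc() can still succeed.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!p)
		p = vzalloc(size);
	return p;	/* NULL on failure */
}

static void table_free(void *p)		/* hypothetical helper name */
{
	kvfree(p);	/* handles both kzalloc()ed and vzalloc()ed memory */
}
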
@@ -41,27 +41,27 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 		      unsigned int nr_table_entries)
 {
 	size_t lopt_size = sizeof(struct listen_sock);
-	struct listen_sock *lopt;
+	struct listen_sock *lopt = NULL;
 
 	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
 	nr_table_entries = max_t(u32, nr_table_entries, 8);
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
-	if (lopt_size > PAGE_SIZE)
+
+	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		lopt = kzalloc(lopt_size, GFP_KERNEL |
+					  __GFP_NOWARN |
+					  __GFP_NORETRY);
+	if (!lopt)
 		lopt = vzalloc(lopt_size);
-	else
-		lopt = kzalloc(lopt_size, GFP_KERNEL);
-	if (lopt == NULL)
+	if (!lopt)
 		return -ENOMEM;
 
-	for (lopt->max_qlen_log = 3;
-	     (1 << lopt->max_qlen_log) < nr_table_entries;
-	     lopt->max_qlen_log++);
-
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	rwlock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = NULL;
 	lopt->nr_table_entries = nr_table_entries;
+	lopt->max_qlen_log = ilog2(nr_table_entries);
 
 	write_lock_bh(&queue->syn_wait_lock);
 	queue->listen_opt = lopt;
@@ -72,22 +72,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 void __reqsk_queue_destroy(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt;
-	size_t lopt_size;
-
-	/*
-	 * this is an error recovery path only
-	 * no locking needed and the lopt is not NULL
-	 */
-	lopt = queue->listen_opt;
-	lopt_size = sizeof(struct listen_sock) +
-		lopt->nr_table_entries * sizeof(struct request_sock *);
-
-	if (lopt_size > PAGE_SIZE)
-		vfree(lopt);
-	else
-		kfree(lopt);
+	/* This is an error recovery path only, no locking needed */
+	kvfree(queue->listen_opt);
 }
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -107,8 +93,6 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 {
 	/* make all the listen_opt local to us */
 	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-	size_t lopt_size = sizeof(struct listen_sock) +
-		lopt->nr_table_entries * sizeof(struct request_sock *);
 
 	if (lopt->qlen != 0) {
 		unsigned int i;
@@ -125,10 +109,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 	}
 
 	WARN_ON(lopt->qlen != 0);
-	if (lopt_size > PAGE_SIZE)
-		vfree(lopt);
-	else
-		kfree(lopt);
+	kvfree(lopt);
 }
 
 /*
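
As a side note on the ilog2() change: at the point max_qlen_log is set, nr_table_entries has already been clamped and rounded up to a power of two of at least 16, so ilog2() returns exactly the exponent the removed loop searched for. Below is a small sketch of that equivalence; the helper names max_qlen_log_old()/max_qlen_log_new() are hypothetical and exist only to compare the two forms.

/* Sketch only: the open-coded loop versus ilog2() for a power of two >= 16. */
#include <linux/log2.h>

static unsigned int max_qlen_log_old(unsigned int nr_table_entries)
{
	unsigned int log;

	/* smallest log >= 3 such that 2^log >= nr_table_entries */
	for (log = 3; (1U << log) < nr_table_entries; log++)
		;
	return log;
}

static unsigned int max_qlen_log_new(unsigned int nr_table_entries)
{
	/* exact exponent, since nr_table_entries is a power of two here */
	return ilog2(nr_table_entries);
}

/* Example: nr_table_entries == 256 gives 8 from both helpers. */
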