Commit 9859a790 authored by Pavel Emelyanov's avatar Pavel Emelyanov Committed by David S. Miller

[NET]: Compact sk_stream_mem_schedule() code

This function references sk->sk_prot->xxx many times.
It turned out that there is so much code in it that gcc
cannot always optimize access to sk->sk_prot's fields.

After saving the sk->sk_prot on the stack and comparing
disassembled code, it turned out that the function became
~10 bytes shorter and made less dereferences (on i386 and
x86_64). Stack consumption didn't grow.

Besides, this patch drives most of this function into the
80 columns limit.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3ef1355d
...@@ -210,35 +210,36 @@ EXPORT_SYMBOL(__sk_stream_mem_reclaim); ...@@ -210,35 +210,36 @@ EXPORT_SYMBOL(__sk_stream_mem_reclaim);
int sk_stream_mem_schedule(struct sock *sk, int size, int kind) int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
{ {
int amt = sk_stream_pages(size); int amt = sk_stream_pages(size);
struct proto *prot = sk->sk_prot;
sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM; sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
atomic_add(amt, sk->sk_prot->memory_allocated); atomic_add(amt, prot->memory_allocated);
/* Under limit. */ /* Under limit. */
if (atomic_read(sk->sk_prot->memory_allocated) < sk->sk_prot->sysctl_mem[0]) { if (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]) {
if (*sk->sk_prot->memory_pressure) if (*prot->memory_pressure)
*sk->sk_prot->memory_pressure = 0; *prot->memory_pressure = 0;
return 1; return 1;
} }
/* Over hard limit. */ /* Over hard limit. */
if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[2]) { if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[2]) {
sk->sk_prot->enter_memory_pressure(); prot->enter_memory_pressure();
goto suppress_allocation; goto suppress_allocation;
} }
/* Under pressure. */ /* Under pressure. */
if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[1]) if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[1])
sk->sk_prot->enter_memory_pressure(); prot->enter_memory_pressure();
if (kind) { if (kind) {
if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_prot->sysctl_rmem[0]) if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
return 1; return 1;
} else if (sk->sk_wmem_queued < sk->sk_prot->sysctl_wmem[0]) } else if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
return 1; return 1;
if (!*sk->sk_prot->memory_pressure || if (!*prot->memory_pressure ||
sk->sk_prot->sysctl_mem[2] > atomic_read(sk->sk_prot->sockets_allocated) * prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
sk_stream_pages(sk->sk_wmem_queued + sk_stream_pages(sk->sk_wmem_queued +
atomic_read(&sk->sk_rmem_alloc) + atomic_read(&sk->sk_rmem_alloc) +
sk->sk_forward_alloc)) sk->sk_forward_alloc))
...@@ -258,7 +259,7 @@ int sk_stream_mem_schedule(struct sock *sk, int size, int kind) ...@@ -258,7 +259,7 @@ int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
/* Alas. Undo changes. */ /* Alas. Undo changes. */
sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM; sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
atomic_sub(amt, sk->sk_prot->memory_allocated); atomic_sub(amt, prot->memory_allocated);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment