[NET] Generalise tcp_{writequeue_purge,rmem_schedule,alloc_{pskb,page}}

The only new requirement was to add a max_header field to struct proto,
reached through struct sock's sk_prot member.
Signed-off-by: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
parent 3ac2a2d4
......@@ -553,6 +553,7 @@ struct proto {
int sysctl_mem[3];
int sysctl_wmem[3];
int sysctl_rmem[3];
int max_header;
char name[32];
......@@ -660,6 +661,21 @@ static inline void sk_stream_mem_reclaim(struct sock *sk)
__sk_stream_mem_reclaim(sk);
}
/* Drop every skb still queued on the socket's write queue and return
 * the forward-allocated memory to the protocol's global pool.
 * Protocol-independent replacement for the old tcp_writequeue_purge().
 */
static inline void sk_stream_writequeue_purge(struct sock *sk)
{
struct sk_buff *skb;
/* Dequeue until empty, releasing each skb's send-buffer charge. */
while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
sk_stream_free_skb(sk, skb);
/* Hand any now-surplus forward_alloc quota back to the pool. */
sk_stream_mem_reclaim(sk);
}
/* May @skb be charged to this socket's receive memory?  Succeeds if the
 * socket's existing forward_alloc quota already covers skb->truesize, or
 * if more memory can be scheduled (the final argument 1 presumably
 * selects receive-side accounting, matching the _rmem_ name -- the
 * sk_stream_mem_schedule() contract is defined elsewhere).
 * Generalised replacement for the old tcp_rmem_schedule().
 */
static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
return (int)skb->truesize <= sk->sk_forward_alloc ||
sk_stream_mem_schedule(sk, skb->truesize, 1);
}
/* Used by processes to "lock" a socket state, so that
* interrupts and bottom half handlers won't change it
* from under us. It essentially blocks any incoming
......@@ -1141,6 +1157,46 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
}
}
/* Allocate a send-side skb of @size payload bytes plus the protocol's
 * maximum header (sk_prot->max_header) of headroom, charging an extra
 * @mem bytes of truesize (e.g. for externally attached page data).
 * On success the headroom is already reserved.  On failure -- either
 * alloc_skb() returning NULL or the memory-accounting check failing --
 * NULL is returned; the allocator-failure path additionally signals
 * memory pressure and shrinks the send buffer.
 * Generalised replacement for the old tcp_alloc_pskb().
 */
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
int size, int mem, int gfp)
{
struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);
if (skb) {
/* Account the caller-supplied extra memory before the check. */
skb->truesize += mem;
if (sk->sk_forward_alloc >= (int)skb->truesize ||
sk_stream_mem_schedule(sk, skb->truesize, 0)) {
skb_reserve(skb, sk->sk_prot->max_header);
return skb;
}
/* Accounting refused the charge: free without uncharging. */
__kfree_skb(skb);
} else {
/* Page allocator failure: enter pressure, trim sndbuf. */
sk->sk_prot->enter_memory_pressure();
sk_stream_moderate_sndbuf(sk);
}
return NULL;
}
/* Convenience wrapper: allocate a send skb with no extra truesize
 * charge (mem == 0).  See sk_stream_alloc_pskb().
 */
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
int size, int gfp)
{
return sk_stream_alloc_pskb(sk, size, 0, gfp);
}
/* Allocate one page for send-side data, charged against the socket's
 * memory accounting.  Returns NULL on failure.
 *
 * Fix vs. the first generalised version: the TCP original
 * (tcp_alloc_page) entered memory pressure and moderated the send
 * buffer on EVERY failure path, including alloc_pages() itself
 * returning NULL after the accounting check succeeded.  The
 * generalised version only did so when scheduling failed, silently
 * returning NULL on allocator failure.  Restore the original
 * behaviour: signal pressure whenever we are about to return NULL.
 */
static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	/* Only touch the allocator if the charge is accepted. */
	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
		page = alloc_pages(sk->sk_allocation, 0);

	if (page == NULL) {
		/* Either the charge was refused or the allocator failed:
		 * enter memory pressure and shrink the send buffer, as
		 * the pre-generalisation TCP code did.
		 */
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}
#define sk_stream_for_retrans_queue(skb, sk) \
for (skb = (sk)->sk_write_queue.next; \
(skb != (sk)->sk_send_head) && \
......
......@@ -1862,52 +1862,6 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
extern void tcp_enter_memory_pressure(void);
/* Pre-generalisation TCP-specific version (removed by this patch in
 * favour of sk_stream_alloc_pskb): allocate a send skb with
 * MAX_TCP_HEADER headroom and @mem extra truesize charged against the
 * socket's send-buffer accounting.  NULL on failure; allocator failure
 * additionally enters TCP memory pressure and trims the send buffer.
 */
static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
{
struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);
if (skb) {
/* Charge the extra memory before the accounting check. */
skb->truesize += mem;
if (sk->sk_forward_alloc >= (int)skb->truesize ||
sk_stream_mem_schedule(sk, skb->truesize, 0)) {
skb_reserve(skb, MAX_TCP_HEADER);
return skb;
}
/* Charge refused: free without uncharging. */
__kfree_skb(skb);
} else {
tcp_enter_memory_pressure();
sk_stream_moderate_sndbuf(sk);
}
return NULL;
}
/* Wrapper removed by this patch: allocate a TCP send skb with no extra
 * truesize charge (mem == 0).  See tcp_alloc_pskb().
 */
static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
return tcp_alloc_pskb(sk, size, 0, gfp);
}
/* Pre-generalisation TCP-specific version (removed by this patch in
 * favour of sk_stream_alloc_page): allocate one page charged to the
 * socket's send accounting.  Note that it enters memory pressure on
 * EVERY failure path -- accounting refusal AND alloc_pages() failure.
 */
static inline struct page * tcp_alloc_page(struct sock *sk)
{
if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
sk_stream_mem_schedule(sk, PAGE_SIZE, 0)) {
struct page *page = alloc_pages(sk->sk_allocation, 0);
if (page)
return page;
}
/* Charge refused or allocator failed: signal pressure. */
tcp_enter_memory_pressure();
sk_stream_moderate_sndbuf(sk);
return NULL;
}
/* Pre-generalisation TCP-specific version (removed by this patch in
 * favour of sk_stream_writequeue_purge): free every skb on the write
 * queue, then reclaim surplus forward-allocated memory.
 */
static inline void tcp_writequeue_purge(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
sk_stream_free_skb(sk, skb);
sk_stream_mem_reclaim(sk);
}
extern void tcp_listen_wlock(void);
/* - We may sleep inside this lock.
......
......@@ -639,7 +639,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
skb = sk_stream_alloc_pskb(sk, 0, tp->mss_cache,
sk->sk_allocation);
if (!skb)
goto wait_for_memory;
......@@ -806,7 +806,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = tcp_alloc_pskb(sk, select_size(sk, tp),
skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
......@@ -868,7 +868,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (!page) {
/* Allocate new cache page. */
if (!(page = tcp_alloc_page(sk)))
if (!(page = sk_stream_alloc_page(sk)))
goto wait_for_memory;
off = 0;
}
......@@ -1778,7 +1778,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_writequeue_purge(sk);
sk_stream_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
inet->dport = 0;
......
......@@ -3390,12 +3390,6 @@ static void tcp_ofo_queue(struct sock *sk)
}
}
/* Pre-generalisation TCP-specific version (removed by this patch in
 * favour of sk_stream_rmem_schedule): may @skb be charged to the
 * socket's receive memory?  Identical logic to its replacement.
 */
static inline int tcp_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
return (int)skb->truesize <= sk->sk_forward_alloc ||
sk_stream_mem_schedule(sk, skb->truesize, 1);
}
static int tcp_prune_queue(struct sock *sk);
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
......@@ -3449,8 +3443,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
queue_and_out:
if (eaten < 0 &&
(atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!tcp_rmem_schedule(sk, skb))) {
if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
!sk_stream_rmem_schedule(sk, skb))) {
if (tcp_prune_queue(sk) < 0 ||
!sk_stream_rmem_schedule(sk, skb))
goto drop;
}
sk_stream_set_owner_r(skb, sk);
......@@ -3522,8 +3517,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
TCP_ECN_check_ce(tp, skb);
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!tcp_rmem_schedule(sk, skb)) {
if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
!sk_stream_rmem_schedule(sk, skb)) {
if (tcp_prune_queue(sk) < 0 ||
!sk_stream_rmem_schedule(sk, skb))
goto drop;
}
......
......@@ -2101,7 +2101,7 @@ static int tcp_v4_destroy_sock(struct sock *sk)
tcp_clear_xmit_timers(sk);
/* Cleanup up the write buffer. */
tcp_writequeue_purge(sk);
sk_stream_writequeue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
__skb_queue_purge(&tp->out_of_order_queue);
......@@ -2602,6 +2602,7 @@ struct proto tcp_prot = {
.enter_memory_pressure = tcp_enter_memory_pressure,
.sysctl_wmem = { 4 * 1024, 16 * 1024, 128 * 1024 },
.sysctl_rmem = { 4 * 1024, 87380, 87380 * 2 },
.max_header = MAX_TCP_HEADER,
};
......
......@@ -372,7 +372,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
return -ENOMEM;
/* Get a new skb... force flag on. */
buff = tcp_alloc_skb(sk, nsize, GFP_ATOMIC);
buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
if (buff == NULL)
return -ENOMEM; /* We'll just try again later. */
sk_charge_skb(sk, buff);
......
......@@ -1898,7 +1898,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
tcp_clear_xmit_timers(sk);
/* Cleanup up the write buffer. */
tcp_writequeue_purge(sk);
sk_stream_writequeue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
__skb_queue_purge(&tp->out_of_order_queue);
......@@ -2096,6 +2096,7 @@ struct proto tcpv6_prot = {
.hash = tcp_v6_hash,
.unhash = tcp_unhash,
.get_port = tcp_v6_get_port,
.max_header = MAX_TCP_HEADER,
};
static struct inet6_protocol tcpv6_protocol = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment