Commit 9dde27de authored by Xin Long, committed by David S. Miller

sctp: implement memory accounting on rx path

sk_forward_alloc is also updated on the rx path; to be consistent,
we change sctp_skb_set_owner_r() to use sk_mem_charge().
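
For reference, sk_mem_charge() is a small include/net/sock.h helper; around the time of this commit it looks roughly as below, so on sockets with memory accounting the call has the same effect as the old open-coded subtraction:

	/* Rough sketch of the include/net/sock.h helper circa v5.1. */
	static inline void sk_mem_charge(struct sock *sk, int size)
	{
		if (!sk_has_account(sk))	/* protocol has no memory accounting */
			return;
		sk->sk_forward_alloc -= size;	/* same as the old open-coded line */
	}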

In sctp_eat_data(), checking sctp_memory_pressure alone is not enough,
since it doesn't cover the mem_cgroup_sockets_enabled case, so we change
to use sk_under_memory_pressure().
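
Unlike a bare sctp_memory_pressure check, sk_under_memory_pressure() also consults the socket's memcg. A rough sketch of the include/net/sock.h helper as of this commit's era:

	static inline bool sk_under_memory_pressure(const struct sock *sk)
	{
		if (!sk->sk_prot->memory_pressure)
			return false;

		/* the case a bare sctp_memory_pressure check misses */
		if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
		    mem_cgroup_under_socket_pressure(sk->sk_memcg))
			return true;

		return !!*sk->sk_prot->memory_pressure;
	}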

When the socket is under memory pressure, sk_mem_reclaim() and
sk_rmem_schedule() should be called on both the RENEGE and the CHUNK
DELIVERY paths, so that the socket exits the memory pressure state as
soon as possible.
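
For reference, sk_mem_reclaim() hands the socket's cached forward allocation back to the protocol's global counter, which is what allows the pressure state to clear; roughly (a sketch of the include/net/sock.h helper at the time):

	static inline void sk_mem_reclaim(struct sock *sk)
	{
		if (!sk_has_account(sk))
			return;
		if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
			/* returns pages to the global counter and may clear
			 * the pressure flag via sk_leave_memory_pressure()
			 */
			__sk_mem_reclaim(sk, sk->sk_forward_alloc);
	}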

Note that sk_rmem_schedule() uses datalen there to keep things simple.
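
For reference, sk_rmem_schedule() charges the given size against sk_forward_alloc, falling back to __sk_mem_schedule(); passing datalen rather than skb->truesize charges only the chunk's wire length. A rough sketch of the helper as of this commit:

	static inline bool
	sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
	{
		if (!sk_has_account(sk))
			return true;
		return size <= sk->sk_forward_alloc ||	/* room already cached */
		       __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		       skb_pfmemalloc(skb);
	}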
Reported-by: Matteo Croce <mcroce@redhat.com>
Tested-by: Matteo Croce <mcroce@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1033990a
include/net/sctp/sctp.h
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	/*
 	 * This mimics the behavior of skb_set_owner_r
 	 */
-	sk->sk_forward_alloc -= event->rmem_len;
+	sk_mem_charge(sk, event->rmem_len);
 }

 /* Tests if the list has one and only one entry. */
net/sctp/sm_statefuns.c
@@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
 	 * memory usage too much
 	 */
-	if (*sk->sk_prot_creator->memory_pressure) {
+	if (sk_under_memory_pressure(sk)) {
 		if (sctp_tsnmap_has_gap(map) &&
 		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
 			pr_debug("%s: under pressure, reneging for tsn:%u\n",
 				 __func__, tsn);
 			deliver = SCTP_CMD_RENEGE;
-		}
+		} else {
+			sk_mem_reclaim(sk);
+		}
 	}

 	/*
net/sctp/ulpevent.c
@@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 						gfp_t gfp)
 {
 	struct sctp_ulpevent *event = NULL;
-	struct sk_buff *skb;
-	size_t padding, len;
+	struct sk_buff *skb = chunk->skb;
+	struct sock *sk = asoc->base.sk;
+	size_t padding, datalen;
 	int rx_count;

 	/*
@@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	if (asoc->ep->rcvbuf_policy)
 		rx_count = atomic_read(&asoc->rmem_alloc);
 	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+		rx_count = atomic_read(&sk->sk_rmem_alloc);

-	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
-
-		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
-				       chunk->skb->truesize)))
-			goto fail;
-	}
+	datalen = ntohs(chunk->chunk_hdr->length);
+
+	if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
+		goto fail;

 	/* Clone the original skb, sharing the data.  */
 	skb = skb_clone(chunk->skb, gfp);
@@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	 * The sender should never pad with more than 3 bytes.  The receiver
 	 * MUST ignore the padding bytes.
 	 */
-	len = ntohs(chunk->chunk_hdr->length);
-	padding = SCTP_PAD4(len) - len;
+	padding = SCTP_PAD4(datalen) - datalen;

 	/* Fixup cloned skb with just this chunks data.  */
 	skb_trim(skb, chunk->chunk_end - padding - skb->data);
net/sctp/ulpqueue.c
@@ -1104,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
 	}
 	/* If able to free enough room, accept this chunk. */
-	if (freed >= needed) {
+	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
+	    freed >= needed) {
 		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
 		/*
 		 * Enter partial delivery if chunk has not been