Commit 9eb0a5d1 authored by Daniel Pieczko, committed by David S. Miller

sfc: free multiple Rx buffers when required

When Rx packet data must be dropped, all the buffers
associated with that Rx packet must be freed. Extend
and rename efx_free_rx_buffer() to efx_free_rx_buffers()
and loop through all the fragments.
By doing so, this patch fixes a possible memory leak.
Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fdf7c644
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 	}
 }
 
-static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+				struct efx_rx_buffer *rx_buf,
+				unsigned int num_bufs)
 {
-	if (rx_buf->page) {
-		put_page(rx_buf->page);
-		rx_buf->page = NULL;
-	}
+	do {
+		if (rx_buf->page) {
+			put_page(rx_buf->page);
+			rx_buf->page = NULL;
+		}
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+	} while (--num_bufs);
 }
 
 /* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 	/* If this is the last buffer in a page, unmap and free it. */
 	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
 		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
-		efx_free_rx_buffer(rx_buf);
+		efx_free_rx_buffers(rx_queue, rx_buf, 1);
 	}
 	rx_buf->page = NULL;
 }
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
 
 	efx_recycle_rx_pages(channel, rx_buf, n_frags);
 
-	do {
-		efx_free_rx_buffer(rx_buf);
-		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
-	} while (--n_frags);
+	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 }
 
 /**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 
 	skb = napi_get_frags(napi);
 	if (unlikely(!skb)) {
-		while (n_frags--) {
-			put_page(rx_buf->page);
-			rx_buf->page = NULL;
-			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
-		}
+		struct efx_rx_queue *rx_queue;
+
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 		return;
 	}
 
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 
 	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
 	if (unlikely(skb == NULL)) {
-		efx_free_rx_buffer(rx_buf);
+		struct efx_rx_queue *rx_queue;
+
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 		return;
 	}
 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
 	 * loopback layer, and free the rx_buf here
 	 */
 	if (unlikely(efx->loopback_selftest)) {
+		struct efx_rx_queue *rx_queue;
+
 		efx_loopback_rx_packet(efx, eh, rx_buf->len);
-		efx_free_rx_buffer(rx_buf);
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_free_rx_buffers(rx_queue, rx_buf,
+				    channel->rx_pkt_n_frags);
 		goto out;
 	}
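The key change is that every fragment of a dropped packet is now released, stepping through the Rx ring with efx_rx_buf_next(). Below is a minimal standalone sketch of that pattern; the rx_buffer/rx_queue types and the rx_buf_next()/free_rx_buffers() helpers are simplified stand-ins invented for illustration (malloc/free model the driver's page references), not the driver's real structures.

/* Standalone model of the multi-buffer free (hypothetical types; not the
 * driver's code). Fragments of one packet occupy consecutive ring slots,
 * so freeing must advance with wraparound, as efx_rx_buf_next() does.
 */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8			/* power of two, so a mask can wrap */

struct rx_buffer {
	void *page;			/* stands in for struct page * */
};

struct rx_queue {
	struct rx_buffer ring[RING_SIZE];
};

/* Model of efx_rx_buf_next(): step to the next slot, wrapping at the end. */
static struct rx_buffer *rx_buf_next(struct rx_queue *q, struct rx_buffer *buf)
{
	unsigned int index = (unsigned int)(buf - q->ring);

	return &q->ring[(index + 1) & (RING_SIZE - 1)];
}

/* Mirrors efx_free_rx_buffers(): release every fragment of one packet.
 * num_bufs must be at least 1, as at the driver's call sites.
 */
static void free_rx_buffers(struct rx_queue *q, struct rx_buffer *buf,
			    unsigned int num_bufs)
{
	do {
		if (buf->page) {
			free(buf->page);	/* put_page() in the driver */
			buf->page = NULL;
		}
		buf = rx_buf_next(q, buf);
	} while (--num_bufs);
}

int main(void)
{
	struct rx_queue q = { 0 };
	unsigned int i;

	/* A three-fragment packet that wraps: slots 6, 7 and 0. */
	for (i = 6; i != 1; i = (i + 1) & (RING_SIZE - 1))
		q.ring[i].page = malloc(64);

	free_rx_buffers(&q, &q.ring[6], 3);

	for (i = 0; i < RING_SIZE; i++)
		printf("slot %u: %s\n", i, q.ring[i].page ? "leaked" : "free");
	return 0;
}

As in the driver's efx_free_rx_buffers(), the do/while form assumes the count is at least 1, which holds at every call site in the diff above; freeing only the first fragment, as the old single-buffer helper did on these error paths, would leave the remaining n_frags - 1 pages referenced.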