Commit db339569 authored by Ben Hutchings

sfc: Replace efx_rx_buffer::is_page and other booleans with a flags field

Replace checksummed and discard booleans from efx_handle_rx_event()
with a bitmask, added to the flags field.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent 1ddceb4c
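
The whole commit is one mechanical transformation applied across the RX path: three independent booleans (efx_rx_buffer::is_page, plus the checksummed and discard values threaded through as parameters) become bits in a single u16 flags word carried by the buffer itself. A minimal standalone sketch of the pattern, using the flag values this commit introduces (the struct here is a pared-down stand-in for illustration, not the real efx_rx_buffer):

```c
#include <stdint.h>
#include <stdio.h>

/* Flag bits, matching the values added to net_driver.h below */
#define EFX_RX_BUF_PAGE		0x0001	/* buffer is page-based, not skb-based */
#define EFX_RX_PKT_CSUMMED	0x0002	/* hardware validated the checksum */
#define EFX_RX_PKT_DISCARD	0x0004	/* packet must be dropped */

struct rx_buffer {			/* pared-down stand-in */
	uint16_t flags;
};

int main(void)
{
	struct rx_buffer buf = { .flags = EFX_RX_BUF_PAGE };

	buf.flags |= EFX_RX_PKT_CSUMMED;	/* set a per-packet bit */
	if (buf.flags & EFX_RX_BUF_PAGE)	/* test a single bit */
		printf("page-based buffer\n");
	buf.flags &= ~EFX_RX_PKT_CSUMMED;	/* clear a bit again */
	return 0;
}
```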
@@ -229,8 +229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 	/* Deliver last RX packet. */
 	if (channel->rx_pkt) {
-		__efx_rx_packet(channel, channel->rx_pkt,
-				channel->rx_pkt_csummed);
+		__efx_rx_packet(channel, channel->rx_pkt);
 		channel->rx_pkt = NULL;
 	}
...
@@ -40,9 +40,9 @@ extern void efx_rx_strategy(struct efx_channel *channel);
 extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
 extern void efx_rx_slow_fill(unsigned long context);
 extern void __efx_rx_packet(struct efx_channel *channel,
-			    struct efx_rx_buffer *rx_buf, bool checksummed);
+			    struct efx_rx_buffer *rx_buf);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-			  unsigned int len, bool checksummed, bool discard);
+			  unsigned int len, u16 flags);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_MAX_DMAQ_SIZE 4096UL
...
@@ -205,12 +205,12 @@ struct efx_tx_queue {
 /**
  * struct efx_rx_buffer - An Efx RX data buffer
  * @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer, if any.
- *	If both this and page are %NULL, the buffer slot is currently free.
- * @page: The associated page buffer, if any.
- *	If both this and skb are %NULL, the buffer slot is currently free.
+ * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
+ *	Will be %NULL if the buffer slot is currently free.
+ * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
+ *	Will be %NULL if the buffer slot is currently free.
  * @len: Buffer length, in bytes.
- * @is_page: Indicates if @page is valid. If false, @skb is valid.
+ * @flags: Flags for buffer and packet state.
  */
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
@@ -219,8 +219,11 @@ struct efx_rx_buffer {
 		struct page *page;
 	} u;
 	unsigned int len;
-	bool is_page;
+	u16 flags;
 };
+#define EFX_RX_BUF_PAGE		0x0001
+#define EFX_RX_PKT_CSUMMED	0x0002
+#define EFX_RX_PKT_DISCARD	0x0004
 
 /**
  * struct efx_rx_page_state - Page-based rx buffer state
@@ -378,7 +381,6 @@ struct efx_channel {
 	 * access with prefetches.
 	 */
 	struct efx_rx_buffer *rx_pkt;
-	bool rx_pkt_csummed;
 	struct efx_rx_queue rx_queue;
 	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
...
@@ -743,10 +743,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 }
 
 /* Detect errors included in the rx_evt_pkt_ok bit. */
-static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
-				 const efx_qword_t *event,
-				 bool *rx_ev_pkt_ok,
-				 bool *discard)
+static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+				const efx_qword_t *event)
 {
 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
 	struct efx_nic *efx = rx_queue->efx;
@@ -791,10 +789,6 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 		++channel->n_rx_tcp_udp_chksum_err;
 	}
 
-	/* The frame must be discarded if any of these are true. */
-	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
-		    rx_ev_tobe_disc | rx_ev_pause_frm);
-
 	/* TOBE_DISC is expected on unicast mismatches; don't print out an
 	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
 	 * to a FIFO overflow.
@@ -817,6 +811,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 			  rx_ev_pause_frm ? " [PAUSE]" : "");
 	}
 #endif
+
+	/* The frame must be discarded if any of these are true. */
+	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+		rx_ev_tobe_disc | rx_ev_pause_frm) ?
+		EFX_RX_PKT_DISCARD : 0;
 }
 
 /* Handle receive events that are not in-order. */
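
Returning a flags mask also tidies the call site: instead of filling bool out-parameters through pointers, the checker computes its verdict and the caller just assigns it. A hedged sketch of the same shape (the error-condition parameters are illustrative stand-ins, not the driver's real event fields):

```c
#include <stdint.h>
#include <stdbool.h>

#define EFX_RX_PKT_DISCARD	0x0004

/* Illustrative stand-in for efx_handle_rx_not_ok(): any fatal
 * error condition selects the DISCARD bit, otherwise 0. */
static uint16_t check_rx_errors(bool crc_err, bool frm_trunc, bool pause_frm)
{
	return (crc_err | frm_trunc | pause_frm) ? EFX_RX_PKT_DISCARD : 0;
}

uint16_t example_caller(void)
{
	/* Caller simply assigns; no &discard out-parameter needed */
	uint16_t flags = check_rx_errors(false, true, false);
	return flags;		/* EFX_RX_PKT_DISCARD here */
}
```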
@@ -849,7 +848,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned expected_ptr;
-	bool rx_ev_pkt_ok, discard = false, checksummed;
+	bool rx_ev_pkt_ok;
+	u16 flags;
 	struct efx_rx_queue *rx_queue;
 
 	/* Basic packet information */
@@ -872,12 +872,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 		/* If packet is marked as OK and packet type is TCP/IP or
 		 * UDP/IP, then we can rely on the hardware checksum.
 		 */
-		checksummed =
-			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
-			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
+		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
+			EFX_RX_PKT_CSUMMED : 0;
 	} else {
-		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
-		checksummed = false;
+		flags = efx_handle_rx_not_ok(rx_queue, event);
 	}
 
 	/* Detect multicast packets that didn't match the filter */
@@ -888,15 +887,14 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 		if (unlikely(!rx_ev_mcast_hash_match)) {
 			++channel->n_rx_mcast_mismatch;
-			discard = true;
+			flags |= EFX_RX_PKT_DISCARD;
 		}
 	}
 
 	channel->irq_mod_score += 2;
 
 	/* Handle received packet */
-	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
-		      checksummed, discard);
+	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
 }
 
 static void
...
@@ -108,7 +108,7 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
-	if (buf->is_page)
+	if (buf->flags & EFX_RX_BUF_PAGE)
 		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
 	else
 		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
@@ -158,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		/* Adjust the SKB for padding and checksum */
 		skb_reserve(skb, NET_IP_ALIGN);
 		rx_buf->len = skb_len - NET_IP_ALIGN;
-		rx_buf->is_page = false;
+		rx_buf->flags = 0;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
@@ -227,7 +227,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-		rx_buf->is_page = true;
+		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
 		++rx_queue->alloc_page_count;
 		++state->refcnt;
@@ -248,7 +248,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->is_page && rx_buf->u.page) {
+	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
 
 		state = page_address(rx_buf->u.page);
@@ -258,7 +258,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 			       efx_rx_buf_size(efx),
 			       PCI_DMA_FROMDEVICE);
 		}
-	} else if (!rx_buf->is_page && rx_buf->u.skb) {
+	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
 				 rx_buf->len, PCI_DMA_FROMDEVICE);
 	}
@@ -267,10 +267,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_free_rx_buffer(struct efx_nic *efx,
 			       struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->is_page && rx_buf->u.page) {
+	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
 		rx_buf->u.page = NULL;
-	} else if (!rx_buf->is_page && rx_buf->u.skb) {
+	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dev_kfree_skb_any(rx_buf->u.skb);
 		rx_buf->u.skb = NULL;
 	}
@@ -310,7 +310,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
 	new_buf->u.page = rx_buf->u.page;
 	new_buf->len = rx_buf->len;
-	new_buf->is_page = true;
+	new_buf->flags = EFX_RX_BUF_PAGE;
 	++rx_queue->added_count;
 }
@@ -324,7 +324,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 	struct efx_rx_buffer *new_buf;
 	unsigned index;
 
-	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	rx_buf->flags &= EFX_RX_BUF_PAGE;
+
+	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
+	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
 	    page_count(rx_buf->u.page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
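
Note the new line at the top of efx_recycle_rx_buffer(): rx_buf->flags &= EFX_RX_BUF_PAGE keeps only the buffer-type bit when a buffer is recycled, so stale per-packet state (CSUMMED, DISCARD) cannot leak into the next packet that lands in the same buffer. The masking in isolation:

```c
#include <stdint.h>
#include <assert.h>

#define EFX_RX_BUF_PAGE		0x0001
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004

int main(void)
{
	/* A page buffer whose last packet was checksummed and discarded */
	uint16_t flags = EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED | EFX_RX_PKT_DISCARD;

	flags &= EFX_RX_BUF_PAGE;	/* recycle: drop per-packet bits */

	assert(flags == EFX_RX_BUF_PAGE);
	return 0;
}
```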
@@ -411,8 +414,7 @@ void efx_rx_slow_fill(unsigned long context)
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 				     struct efx_rx_buffer *rx_buf,
-				     int len, bool *discard,
-				     bool *leak_packet)
+				     int len, bool *leak_packet)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -423,7 +425,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 		/* The packet must be discarded, but this is only a fatal error
 		 * if the caller indicated it was
 		 */
-		*discard = true;
+		rx_buf->flags |= EFX_RX_PKT_DISCARD;
 
 		if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
 			if (net_ratelimit())
@@ -436,7 +438,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 			 * data at the end of the skb will be trashed. So
 			 * we have no choice but to leak the fragment.
 			 */
-			*leak_packet = !rx_buf->is_page;
+			*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
 			efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
 		} else {
 			if (net_ratelimit())
@@ -456,13 +458,13 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
 			      struct efx_rx_buffer *rx_buf,
-			      const u8 *eh, bool checksummed)
+			      const u8 *eh)
 {
 	struct napi_struct *napi = &channel->napi_str;
 	gro_result_t gro_result;
 
 	/* Pass the skb/page into the GRO engine */
-	if (rx_buf->is_page) {
+	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
 		struct efx_nic *efx = channel->efx;
 		struct page *page = rx_buf->u.page;
 		struct sk_buff *skb;
@@ -484,8 +486,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 		skb->len = rx_buf->len;
 		skb->data_len = rx_buf->len;
 		skb->truesize += rx_buf->len;
-		skb->ip_summed =
-			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 
 		skb_record_rx_queue(skb, channel->channel);
@@ -493,7 +495,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 	} else {
 		struct sk_buff *skb = rx_buf->u.skb;
 
-		EFX_BUG_ON_PARANOID(!checksummed);
+		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
 		rx_buf->u.skb = NULL;
 
 		gro_result = napi_gro_receive(napi, skb);
@@ -508,7 +510,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 }
 
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-		   unsigned int len, bool checksummed, bool discard)
+		   unsigned int len, u16 flags)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
@@ -516,6 +518,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	bool leak_packet = false;
 
 	rx_buf = efx_rx_buffer(rx_queue, index);
+	rx_buf->flags |= flags;
 
 	/* This allows the refill path to post another buffer.
 	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
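
The OR in rx_buf->flags |= flags is the hand-off point: bits derived from the completion event (CSUMMED/DISCARD) are merged into the buffer's own flags, on top of the buffer-type bit set at refill time. From here on every stage reads one place instead of trailing bool parameters. Roughly, under the same stand-in struct as above:

```c
#include <stdint.h>

#define EFX_RX_BUF_PAGE		0x0001
#define EFX_RX_PKT_CSUMMED	0x0002

struct rx_buffer { uint16_t flags; };	/* stand-in for efx_rx_buffer */

/* Sketch of the merge done at the top of efx_rx_packet() */
void merge_event_flags(struct rx_buffer *buf, uint16_t event_flags)
{
	/* buf->flags already holds EFX_RX_BUF_PAGE (or 0) from refill;
	 * event-derived bits are OR'd on top of it. */
	buf->flags |= event_flags;
}
```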
@@ -524,18 +527,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	rx_queue->removed_count++;
 
 	/* Validate the length encoded in the event vs the descriptor pushed */
-	efx_rx_packet__check_len(rx_queue, rx_buf, len,
-				 &discard, &leak_packet);
+	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
 
 	netif_vdbg(efx, rx_status, efx->net_dev,
 		   "RX queue %d received id %x at %llx+%x %s%s\n",
 		   efx_rx_queue_index(rx_queue), index,
 		   (unsigned long long)rx_buf->dma_addr, len,
-		   (checksummed ? " [SUMMED]" : ""),
-		   (discard ? " [DISCARD]" : ""));
+		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
 
 	/* Discard packet, if instructed to do so */
-	if (unlikely(discard)) {
+	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
 		if (unlikely(leak_packet))
 			channel->n_skbuff_leaks++;
 		else
@@ -562,10 +564,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
 	if (channel->rx_pkt)
-		__efx_rx_packet(channel,
-				channel->rx_pkt, channel->rx_pkt_csummed);
+		__efx_rx_packet(channel, channel->rx_pkt);
 	channel->rx_pkt = rx_buf;
-	channel->rx_pkt_csummed = checksummed;
 }
 
 static void efx_rx_deliver(struct efx_channel *channel,
@@ -588,8 +588,7 @@ static void efx_rx_deliver(struct efx_channel *channel,
 }
 
 /* Handle a received packet.  Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel,
-		     struct efx_rx_buffer *rx_buf, bool checksummed)
+void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = channel->efx;
 	u8 *eh = efx_rx_buf_eh(efx, rx_buf);
@@ -603,7 +602,7 @@ void __efx_rx_packet(struct efx_channel *channel,
 		return;
 	}
 
-	if (!rx_buf->is_page) {
+	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
 		struct sk_buff *skb = rx_buf->u.skb;
 
 		prefetch(skb_shinfo(skb));
@@ -622,10 +621,10 @@ void __efx_rx_packet(struct efx_channel *channel,
 	}
 
 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
-		checksummed = false;
+		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-	if (likely(checksummed || rx_buf->is_page))
-		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
+	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+		efx_rx_packet_gro(channel, rx_buf, eh);
 	else
 		efx_rx_deliver(channel, rx_buf);
 }
...
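
A nice side effect shows in the final hunk: the old two-variable test checksummed || rx_buf->is_page collapses into a single mask test, flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED), which is non-zero iff either bit is set. A quick exhaustive check of the equivalence:

```c
#include <assert.h>
#include <stdint.h>

#define EFX_RX_BUF_PAGE		0x0001
#define EFX_RX_PKT_CSUMMED	0x0002

int main(void)
{
	/* Compare the old and new predicates over all relevant flag states */
	for (uint16_t flags = 0; flags < 8; flags++) {
		int is_page  = !!(flags & EFX_RX_BUF_PAGE);
		int csummed  = !!(flags & EFX_RX_PKT_CSUMMED);
		int old_test = csummed || is_page;
		int new_test = !!(flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED));
		assert(old_test == new_test);
	}
	return 0;
}
```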