Commit 97d48a10 authored by Alexandre Rames, committed by Ben Hutchings

sfc: Remove rx_alloc_method SKB

[bwh: Remove more dead code, and make efx_ptp_rx() pull the data it
 needs into the header area.]
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent 9230451a
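
For background: the rx_alloc_method machinery deleted here let each channel switch RX buffer allocation between skbs and pages, steered by a per-channel watermark (rx_alloc_level) that GRO success nudged one way and non-GRO delivery the other. After this commit the driver allocates page-backed buffers unconditionally. A rough sketch of the deleted heuristic, paraphrased from the pre-patch efx_rx_strategy() (constants and exact arithmetic are approximate, not a verbatim copy):

/* Paraphrased sketch, not the verbatim driver code: clamp the per-channel
 * watermark and flip the refill path to pages only while GRO is paying off.
 */
static void efx_rx_strategy_sketch(struct efx_channel *channel)
{
	/* Constrain the watermark to its valid range */
	if (channel->rx_alloc_level < 0)
		channel->rx_alloc_level = 0;
	else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
		channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

	/* Refill with pages only while GRO is merging enough packets */
	channel->rx_alloc_push_pages =
		(channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO);
}

Dropping this also removes the per-packet accounting that fed the watermark, which is why the fields and counters in the hunks below disappear.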
@@ -247,11 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 			__efx_rx_packet(channel, channel->rx_pkt);
 			channel->rx_pkt = NULL;
 		}
-		if (rx_queue->enabled) {
-			efx_rx_strategy(channel);
+		if (rx_queue->enabled)
 			efx_fast_push_rx_descriptors(rx_queue);
-		}
 	}
 
 	return spent;
 }
@@ -655,16 +653,12 @@ static void efx_start_datapath(struct efx_nic *efx)
 		efx_for_each_channel_tx_queue(tx_queue, channel)
 			efx_init_tx_queue(tx_queue);
 
-		/* The rx buffer allocation strategy is MTU dependent */
-		efx_rx_strategy(channel);
-
 		efx_for_each_channel_rx_queue(rx_queue, channel) {
 			efx_init_rx_queue(rx_queue);
 			efx_nic_generate_fill_event(rx_queue);
 		}
 
 		WARN_ON(channel->rx_pkt != NULL);
-		efx_rx_strategy(channel);
 	}
 
 	if (netif_device_present(efx->net_dev))
...
@@ -37,7 +37,6 @@ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_rx_strategy(struct efx_channel *channel);
 extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
 extern void efx_rx_slow_fill(unsigned long context);
 extern void __efx_rx_packet(struct efx_channel *channel,
...
@@ -206,25 +206,19 @@ struct efx_tx_queue {
 /**
  * struct efx_rx_buffer - An Efx RX data buffer
  * @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
- *	Will be %NULL if the buffer slot is currently free.
- * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
+ * @page: The associated page buffer.
  *	Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
+ * @page_offset: Offset within page
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
-	union {
-		struct sk_buff *skb;
-		struct page *page;
-	} u;
+	struct page *page;
 	u16 page_offset;
 	u16 len;
 	u16 flags;
 };
-#define EFX_RX_BUF_PAGE		0x0001
 #define EFX_RX_PKT_CSUMMED	0x0002
 #define EFX_RX_PKT_DISCARD	0x0004
 
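
With the union gone, every RX buffer is page-backed, so completion-path code can compute the payload address directly instead of branching on EFX_RX_BUF_PAGE. A minimal sketch of the resulting access pattern (the helper name is illustrative, not from the driver):

/* Illustrative helper, assuming the buffer's page is a lowmem page
 * (so page_address() is valid): payload is always page + offset,
 * with no skb-vs-page branch needed any more.
 */
static inline u8 *rx_buf_data_sketch(struct efx_rx_buffer *rx_buf)
{
	return (u8 *)page_address(rx_buf->page) + rx_buf->page_offset;
}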
@@ -266,8 +260,6 @@ struct efx_rx_page_state {
  * @min_fill: RX descriptor minimum non-zero fill level.
  *	This records the minimum fill level observed when a ring
  *	refill was triggered.
- * @alloc_page_count: RX allocation strategy counter.
- * @alloc_skb_count: RX allocation strategy counter.
  * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
  */
 struct efx_rx_queue {
@@ -286,8 +278,6 @@ struct efx_rx_queue {
 	unsigned int fast_fill_trigger;
 	unsigned int min_fill;
 	unsigned int min_overfill;
-	unsigned int alloc_page_count;
-	unsigned int alloc_skb_count;
 	struct timer_list slow_fill;
 	unsigned int slow_fill_count;
 };
@@ -336,10 +326,6 @@ enum efx_rx_alloc_method {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
- * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
- *	and diagnostic counters
- * @rx_alloc_push_pages: RX allocation method currently in use for pushing
- *	descriptors
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -371,9 +357,6 @@ struct efx_channel {
 	unsigned int rfs_filters_added;
 #endif
 
-	int rx_alloc_level;
-	int rx_alloc_push_pages;
-
 	unsigned n_rx_tobe_disc;
 	unsigned n_rx_ip_hdr_chksum_err;
 	unsigned n_rx_tcp_udp_chksum_err;
...
@@ -1000,7 +1000,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
 
 	/* Correct version? */
 	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
-		if (skb->len < PTP_V1_MIN_LENGTH) {
+		if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
 			return false;
 		}
 		version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
@@ -1014,7 +1014,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
 		match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
 		match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
 	} else {
-		if (skb->len < PTP_V2_MIN_LENGTH) {
+		if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
 			return false;
 		}
 		version = skb->data[PTP_V2_VERSION_OFFSET];
...
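
The two ptp.c hunks above replace plain skb->len checks with pskb_may_pull() because page-based RX can leave packet payload in page fragments: skb->data only covers bytes in the linear header area, so a length check alone no longer guarantees that the subsequent skb->data[...] reads are safe. pskb_may_pull() checks the length and, when needed, copies fragment data into the header area. A self-contained sketch of the pattern (MIN_HDR_LEN and VERSION_OFF are illustrative stand-ins for the PTP constants):

#include <linux/skbuff.h>

#define MIN_HDR_LEN	16	/* stand-in for PTP_V*_MIN_LENGTH */
#define VERSION_OFF	2	/* stand-in for PTP_V*_VERSION_OFFSET */

static int read_version_sketch(struct sk_buff *skb)
{
	/* Fails if the packet is too short; otherwise guarantees the
	 * first MIN_HDR_LEN bytes are linear and safe to dereference. */
	if (!pskb_may_pull(skb, MIN_HDR_LEN))
		return -EINVAL;
	return ntohs(*(__be16 *)&skb->data[VERSION_OFF]);
}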