Commit 1ee910d7 authored by Anton Blanchard's avatar Anton Blanchard Committed by David S. Miller

[NET]: Allow IP header alignment to be overridden.

Author: Anton Blanchard <anton@samba.org>
Signed-off-by: David S. Miller <davem@redhat.com>
parent 335769bc
...@@ -1004,11 +1004,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) ...@@ -1004,11 +1004,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb; struct sk_buff *skb;
if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + 2, GFP_KERNEL))) { if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
GFP_KERNEL))) {
ret_val = 6; ret_val = 6;
goto err_nomem; goto err_nomem;
} }
skb_reserve(skb, 2); skb_reserve(skb, NET_IP_ALIGN);
rxdr->buffer_info[i].skb = skb; rxdr->buffer_info[i].skb = skb;
rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
rxdr->buffer_info[i].dma = rxdr->buffer_info[i].dma =
......
...@@ -2367,7 +2367,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter) ...@@ -2367,7 +2367,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
struct e1000_rx_desc *rx_desc; struct e1000_rx_desc *rx_desc;
struct e1000_buffer *buffer_info; struct e1000_buffer *buffer_info;
struct sk_buff *skb; struct sk_buff *skb;
int reserve_len = 2;
unsigned int i; unsigned int i;
i = rx_ring->next_to_use; i = rx_ring->next_to_use;
...@@ -2376,7 +2375,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter) ...@@ -2376,7 +2375,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
while(!buffer_info->skb) { while(!buffer_info->skb) {
rx_desc = E1000_RX_DESC(*rx_ring, i); rx_desc = E1000_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len); skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
if(!skb) { if(!skb) {
/* Better luck next round */ /* Better luck next round */
...@@ -2387,7 +2386,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter) ...@@ -2387,7 +2386,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
* this will result in a 16 byte aligned IP header after * this will result in a 16 byte aligned IP header after
* the 14 byte MAC header is removed * the 14 byte MAC header is removed
*/ */
skb_reserve(skb, reserve_len); skb_reserve(skb, NET_IP_ALIGN);
skb->dev = netdev; skb->dev = netdev;
......
...@@ -1876,7 +1876,6 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) ...@@ -1876,7 +1876,6 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
struct ixgb_rx_desc *rx_desc; struct ixgb_rx_desc *rx_desc;
struct ixgb_buffer *buffer_info; struct ixgb_buffer *buffer_info;
struct sk_buff *skb; struct sk_buff *skb;
int reserve_len = 2;
unsigned int i; unsigned int i;
int num_group_tail_writes; int num_group_tail_writes;
long cleancount; long cleancount;
...@@ -1895,7 +1894,7 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) ...@@ -1895,7 +1894,7 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
while (--cleancount > 0) { while (--cleancount > 0) {
rx_desc = IXGB_RX_DESC(*rx_ring, i); rx_desc = IXGB_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len); skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
if (unlikely(!skb)) { if (unlikely(!skb)) {
/* Better luck next round */ /* Better luck next round */
...@@ -1906,7 +1905,7 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) ...@@ -1906,7 +1905,7 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
* this will result in a 16 byte aligned IP header after * this will result in a 16 byte aligned IP header after
* the 14 byte MAC header is removed * the 14 byte MAC header is removed
*/ */
skb_reserve(skb, reserve_len); skb_reserve(skb, NET_IP_ALIGN);
skb->dev = netdev; skb->dev = netdev;
......
...@@ -1425,13 +1425,13 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no) ...@@ -1425,13 +1425,13 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
goto end; goto end;
} }
skb = dev_alloc_skb(size + HEADER_ALIGN_LAYER_3); skb = dev_alloc_skb(size + NET_IP_ALIGN);
if (!skb) { if (!skb) {
DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
return -ENOMEM; return -ENOMEM;
} }
skb_reserve(skb, HEADER_ALIGN_LAYER_3); skb_reserve(skb, NET_IP_ALIGN);
memset(rxdp, 0, sizeof(RxD_t)); memset(rxdp, 0, sizeof(RxD_t));
rxdp->Buffer0_ptr = pci_map_single rxdp->Buffer0_ptr = pci_map_single
(nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE); (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
......
...@@ -411,7 +411,6 @@ struct config_param { ...@@ -411,7 +411,6 @@ struct config_param {
#define HEADER_802_2_SIZE 3 #define HEADER_802_2_SIZE 3
#define HEADER_SNAP_SIZE 5 #define HEADER_SNAP_SIZE 5
#define HEADER_VLAN_SIZE 4 #define HEADER_VLAN_SIZE 4
#define HEADER_ALIGN_LAYER_3 2
#define MIN_MTU 46 #define MIN_MTU 46
#define MAX_PYLD 1500 #define MAX_PYLD 1500
......
...@@ -277,5 +277,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -277,5 +277,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
(unsigned long)_n_, sizeof(*(ptr))); \ (unsigned long)_n_, sizeof(*(ptr))); \
}) })
/*
* We handle most unaligned accesses in hardware. On the other hand
* unaligned DMA can be very expensive on some ppc64 IO chips (it does
* powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
*/
#define NET_IP_ALIGN 0
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
...@@ -816,6 +816,30 @@ static inline void skb_reserve(struct sk_buff *skb, unsigned int len) ...@@ -816,6 +816,30 @@ static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
skb->tail += len; skb->tail += len;
} }
/*
* CPUs often take a performance hit when accessing unaligned memory
* locations. The actual performance hit varies, it can be small if the
* hardware handles it or large if we have to take an exception and fix it
* in software.
*
* Since an ethernet header is 14 bytes network drivers often end up with
* the IP header at an unaligned offset. The IP header can be aligned by
* shifting the start of the packet by 2 bytes. Drivers should do this
* with:
*
 * skb_reserve(skb, NET_IP_ALIGN);
*
* The downside to this alignment of the IP header is that the DMA is now
* unaligned. On some architectures the cost of an unaligned DMA is high
* and this cost outweighs the gains made by aligning the IP header.
*
* Since this trade off varies between architectures, we allow NET_IP_ALIGN
* to be overridden.
*/
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len) static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment