Commit 0bdadad1 authored by Ben Hutchings, committed by David S. Miller

sfc: Replace TSOH_OFFSET with the equivalent NET_IP_ALIGN

If CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined then NET_IP_ALIGN
will be defined as 0, so this macro is redundant.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 92d8f766
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * Requires TX checksum offload support.
  */
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
-
 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))

 /**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
 	EFX_BUG_ON_PARANOID(buffer->flags);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);

-	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+	if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
 		unsigned index =
 			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
 		struct efx_buffer *page_buf =
 			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
 		unsigned offset =
-			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;

 		if (unlikely(!page_buf->addr) &&
 		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
 	} else {
 		tx_queue->tso_long_headers++;

-		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+		buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
 		if (unlikely(!buffer->heap_buf))
 			return NULL;
-		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+		result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
 		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
 	}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment