Commit 5f6e9800 authored by Ido Shamay's avatar Ido Shamay Committed by David S. Miller

net/mlx4_en: Remove RX buffers alignment to IP_ALIGN

When NET_IP_ALIGN has a nonzero value, the hardware will write to an
unaligned address. The only reader of this address is the code that copies
the header from the first frag into the linear buffer (any further access
to the IP header is from the linear buffer, in which the headers are
aligned). Since the penalty of unaligned access by the hardware is greater
than that of the software memcpy, change frag_align to always be 0.
Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0a984556
...@@ -74,7 +74,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, ...@@ -74,7 +74,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
page_alloc->page_size = PAGE_SIZE << order; page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page; page_alloc->page = page;
page_alloc->dma = dma; page_alloc->dma = dma;
page_alloc->page_offset = frag_info->frag_align; page_alloc->page_offset = 0;
/* Not doing get_page() for each frag is a big win /* Not doing get_page() for each frag is a big win
* on asymetric workloads. Note we can not use atomic_set(). * on asymetric workloads. Note we can not use atomic_set().
*/ */
...@@ -945,15 +945,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) ...@@ -945,15 +945,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
(eff_mtu > buf_size + frag_sizes[i]) ? (eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size; frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size; priv->frag_info[i].frag_prefix_size = buf_size;
if (!i) { priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
priv->frag_info[i].frag_align = NET_IP_ALIGN; SMP_CACHE_BYTES);
priv->frag_info[i].frag_stride =
ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
} else {
priv->frag_info[i].frag_align = 0;
priv->frag_info[i].frag_stride =
ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
}
buf_size += priv->frag_info[i].frag_size; buf_size += priv->frag_info[i].frag_size;
i++; i++;
} }
...@@ -966,11 +959,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) ...@@ -966,11 +959,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
eff_mtu, priv->num_frags); eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) { for (i = 0; i < priv->num_frags; i++) {
en_err(priv, en_err(priv,
" frag:%d - size:%d prefix:%d align:%d stride:%d\n", " frag:%d - size:%d prefix:%d stride:%d\n",
i, i,
priv->frag_info[i].frag_size, priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size, priv->frag_info[i].frag_prefix_size,
priv->frag_info[i].frag_align,
priv->frag_info[i].frag_stride); priv->frag_info[i].frag_stride);
} }
} }
......
...@@ -481,7 +481,6 @@ struct mlx4_en_frag_info { ...@@ -481,7 +481,6 @@ struct mlx4_en_frag_info {
u16 frag_size; u16 frag_size;
u16 frag_prefix_size; u16 frag_prefix_size;
u16 frag_stride; u16 frag_stride;
u16 frag_align;
}; };
#ifdef CONFIG_MLX4_EN_DCB #ifdef CONFIG_MLX4_EN_DCB
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment