Commit b909cfc8 authored by Anjali Singhai Jain, committed by Greg Kroah-Hartman

i40e: Fix Rx hash reported to the stack by our driver

[ Upstream commit 857942fd ]

If the driver calls skb_set_hash even with a zero hash, that
indicates to the stack that the hash calculation is offloaded
in hardware, so the stack does not compute a software hash. That
software hash is required for load balancing if the user decides
to turn off rx-hashing on our device.

This patch fixes the path so that we do not call skb_set_hash
if the feature is disabled.
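
For context, here is a simplified sketch of the skbuff.h helpers the
above refers to, approximately as they look in kernels of this era
(exact field names vary between versions). It is not part of the
patch; it only illustrates why even a zero hash suppresses the
software fallback:

/* Simplified sketch of include/linux/skbuff.h (~v4.1); illustration
 * only, not part of this patch.
 */
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
        /* Any call records a driver-provided hash; for L4 packet
         * types the stack trusts it unconditionally, even if it is 0.
         */
        skb->l4_hash = (type == PKT_HASH_TYPE_L4);
        skb->sw_hash = 0;
        skb->hash = hash;
}

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
        /* The software fallback (flow dissector) runs only when no
         * hardware (l4_hash) or software (sw_hash) hash is recorded.
         * The old driver path called skb_set_hash(skb, 0, <L4 type>)
         * for TCP/UDP packets even with rx-hashing disabled, so the
         * fallback never ran and those flows all hashed to 0.
         */
        if (!skb->l4_hash && !skb->sw_hash)
                __skb_get_hash(skb);

        return skb->hash;
}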

Change-ID: Ic4debfa4ff91b5a72e447348a75768ed7a2d3e1b
Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 42622b1b
drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1425,31 +1425,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-                               union i40e_rx_desc *rx_desc)
-{
-        const __le64 rss_mask =
-                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-        if ((ring->netdev->features & NETIF_F_RXHASH) &&
-            (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-                return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-        else
-                return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
         struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1466,6 +1447,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
         return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+                                union i40e_rx_desc *rx_desc,
+                                struct sk_buff *skb,
+                                u8 rx_ptype)
+{
+        u32 hash;
+        const __le64 rss_mask =
+                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+        if (!(ring->netdev->features & NETIF_F_RXHASH))
+                return;
+
+        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+                hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+                skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+        }
+}
+
 /**
  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring: rx ring to clean
@@ -1615,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
                 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
                         i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
                                              I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1745,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
                 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
                         i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
                                              I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -879,31 +879,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-                               union i40e_rx_desc *rx_desc)
-{
-        const __le64 rss_mask =
-                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-        if ((ring->netdev->features & NETIF_F_RXHASH) &&
-            (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-                return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-        else
-                return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
         struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -920,6 +901,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
         return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+                                union i40e_rx_desc *rx_desc,
+                                struct sk_buff *skb,
+                                u8 rx_ptype)
+{
+        u32 hash;
+        const __le64 rss_mask =
+                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+        if (!(ring->netdev->features & NETIF_F_RXHASH))
+                return;
+
+        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+                hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+                skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+        }
+}
+
 /**
  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring: rx ring to clean
@@ -1061,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
                 /* probably a little skewed due to removing CRC */
                 total_rx_bytes += skb->len;
                 total_rx_packets++;
@@ -1179,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
                 /* probably a little skewed due to removing CRC */
                 total_rx_bytes += skb->len;
                 total_rx_packets++;
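
With the fix applied, turning hash offload off (for example with
"ethtool -K eth0 rxhash off", which clears NETIF_F_RXHASH) makes
i40e_rx_hash() return before touching the skb, so skb_get_hash() falls
back to the software flow dissector and flow-based load balancing
keeps working. Making i40e_rx_hash() a void helper that conditionally
fills in the skb, rather than a getter whose result was always passed
to skb_set_hash(), is what lets the feature check gate the
skb_set_hash() call in both the packet-split and single-buffer
receive paths.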