Commit 45b9f509 authored by Alexander Duyck, committed by David S. Miller

ixgbe: update ntuple filter configuration

This change fixes several issues in ntuple filtering that I found while
working on the ATR refactor.

Specifically, I updated the masks to work correctly with the latest version
of ethtool, cleaned up the error handling and added detailed error output
when a filter is rejected, and corrected several bits that were set
incorrectly in ixgbe_type.h.

The previous version of this patch included a printk left over from
debugging the filter setup; this version does not include that printk.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 69830529
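For context on the mask handling this patch reworks: the masks supplied through the ethtool ntuple interface mark bits to ignore, while the 82599 perfect filters expect masks whose set bits mark bits that must match. The new code below therefore inverts each user mask and, when a value is given without a mask, defaults to matching every bit of that field. The following is a minimal standalone sketch of that per-field convention; it is not driver code, and to_hw_mask is a hypothetical helper named only for illustration, assuming a 32-bit field such as an IPv4 address.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper (illustration only, not part of the driver):
 * mirrors the convention used in ixgbe_set_rx_ntuple() below.
 * Set bits in user_mask mean "ignore this bit"; set bits in the
 * returned hardware mask mean "this bit must match".
 * Returns 0 when the field should stay completely masked out.
 */
static uint32_t to_hw_mask(uint32_t value, uint32_t user_mask)
{
	/* No value and a mask that ignores every bit: skip the field. */
	if (!value && user_mask == 0xFFFFFFFFu)
		return 0;
	/* A value but no mask: default to matching every bit. */
	if (!user_mask)
		return 0xFFFFFFFFu;
	/* Otherwise the hardware match mask is the inverted user mask. */
	return ~user_mask;
}

int main(void)
{
	/* Match 192.168.1.0/24: the user mask ignores the host byte. */
	printf("hw mask = 0x%08x\n",
	       (unsigned int)to_hw_mask(0xC0A80100u, 0x000000FFu));
	/* Exact match on a value supplied without any mask. */
	printf("hw mask = 0x%08x\n",
	       (unsigned int)to_hw_mask(0xC0A80101u, 0));
	return 0;
}

The diff applies the same inversion per field (source/destination IPv4 address, L4 ports, VLAN ID, flex bytes), with the VLAN and flex masks additionally restricted to the all-or-nothing patterns the hardware supports.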
@@ -533,20 +533,6 @@ extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue);
extern s32 ixgbe_atr_set_vlan_id_82599(union ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(union ixgbe_atr_input *input,
u32 src_addr);
extern s32 ixgbe_atr_set_dst_ipv4_82599(union ixgbe_atr_input *input,
u32 dst_addr);
extern s32 ixgbe_atr_set_src_port_82599(union ixgbe_atr_input *input,
u16 src_port);
extern s32 ixgbe_atr_set_dst_port_82599(union ixgbe_atr_input *input,
u16 dst_port);
extern s32 ixgbe_atr_set_flex_byte_82599(union ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(union ixgbe_atr_input *input,
u8 l4type);
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
......
@@ -2277,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
struct ethtool_rx_ntuple *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
union ixgbe_atr_input input_struct;
struct ixgbe_atr_input_masks input_masks;
int target_queue;
int err;
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
return -EOPNOTSUPP;
@@ -2289,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
* Don't allow programming if the action is a queue greater than
* the number of online Tx queues.
*/
if ((fs.action >= adapter->num_tx_queues) ||
(fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
if ((fs->action >= adapter->num_tx_queues) ||
(fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
return -EINVAL;
memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
input_masks.vlan_id_mask = fs.vlan_tag_mask;
/* only use the lowest 2 bytes for flex bytes */
input_masks.data_mask = (fs.data_mask & 0xffff);
switch (fs.flow_type) {
/* record flow type */
switch (fs->flow_type) {
case IPV4_FLOW:
input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
case TCP_V4_FLOW:
ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
default:
return -1;
}
/* Mask bits from the inputs based on user-supplied mask */
ixgbe_atr_set_src_ipv4_82599(&input_struct,
(fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
ixgbe_atr_set_dst_ipv4_82599(&input_struct,
(fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
/* 82599 expects these to be byte-swapped for perfect filtering */
ixgbe_atr_set_src_port_82599(&input_struct,
((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
ixgbe_atr_set_dst_port_82599(&input_struct,
((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
/* VLAN and Flex bytes are either completely masked or not */
if (!fs.vlan_tag_mask)
ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
if (!input_masks.data_mask)
/* make sure we only use the first 2 bytes of user data */
ixgbe_atr_set_flex_byte_82599(&input_struct,
(fs.data & 0xffff));
/* copy vlan tag minus the CFI bit */
if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
if (!fs->vlan_tag_mask) {
input_masks.vlan_id_mask = htons(0xEFFF);
} else {
switch (~fs->vlan_tag_mask & 0xEFFF) {
/* all of these are valid vlan-mask values */
case 0xEFFF:
case 0xE000:
case 0x0FFF:
case 0x0000:
input_masks.vlan_id_mask =
htons(~fs->vlan_tag_mask);
break;
/* exit with error if vlan-mask is invalid */
default:
e_err(drv, "Partial VLAN ID or "
"priority mask in vlan-mask is not "
"supported by hardware\n");
return -1;
}
}
}
/* make sure we only use the first 2 bytes of user data */
if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
if (!(fs->data_mask & 0xFFFF)) {
input_masks.flex_mask = 0xFFFF;
} else if (~fs->data_mask & 0xFFFF) {
e_err(drv, "Partial user-def-mask is not "
"supported by hardware\n");
return -1;
}
}
/*
* Copy input into formatted structures
*
* These assignments are based on the following logic
* If neither input or mask are set assume value is masked out.
* If input is set, but mask is not mask should default to accept all.
* If input is not set, but mask is set then mask likely results in 0.
* If input is set and mask is set then assign both.
*/
if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
if (!fs->m_u.tcp_ip4_spec.ip4src)
input_masks.src_ip_mask[0] = 0xFFFFFFFF;
else
input_masks.src_ip_mask[0] =
~fs->m_u.tcp_ip4_spec.ip4src;
}
if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
if (!fs->m_u.tcp_ip4_spec.ip4dst)
input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
else
input_masks.dst_ip_mask[0] =
~fs->m_u.tcp_ip4_spec.ip4dst;
}
if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
if (!fs->m_u.tcp_ip4_spec.psrc)
input_masks.src_port_mask = 0xFFFF;
else
input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
}
if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
if (!fs->m_u.tcp_ip4_spec.pdst)
input_masks.dst_port_mask = 0xFFFF;
else
input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
}
/* determine if we need to drop or route the packet */
if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
target_queue = MAX_RX_QUEUES - 1;
else
target_queue = fs.action;
target_queue = fs->action;
spin_lock(&adapter->fdir_perfect_lock);
ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
&input_masks, 0, target_queue);
err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
&input_struct,
&input_masks, 0,
target_queue);
spin_unlock(&adapter->fdir_perfect_lock);
return 0;
return err ? -1 : 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
......
@@ -4821,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
e_err(probe,
"Flow Director is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
@@ -5126,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
if (dev->features & NETIF_F_NTUPLE) {
/* Flow Director perfect filter enabled */
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
spin_lock_init(&adapter->fdir_perfect_lock);
} else {
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
}
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0;
......
@@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
#define IXGBE_FDIRM_L3P 0x00000008
#define IXGBE_FDIRM_L4P 0x00000010
#define IXGBE_FDIRM_FLEX 0x00000020
#define IXGBE_FDIRM_DIPv6 0x00000040
#define IXGBE_FDIRM_L4P 0x00000008
#define IXGBE_FDIRM_FLEX 0x00000010
#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -2215,12 +2214,13 @@ union ixgbe_atr_hash_dword {
};
struct ixgbe_atr_input_masks {
__be32 src_ip_mask;
__be32 dst_ip_mask;
__be16 rsvd0;
__be16 vlan_id_mask;
__be32 dst_ip_mask[4];
__be32 src_ip_mask[4];
__be16 src_port_mask;
__be16 dst_port_mask;
__be16 vlan_id_mask;
__be16 data_mask;
__be16 flex_mask;
};
enum ixgbe_eeprom_type {
......