Commit fa663c09 authored by Jian Shen, committed by David S. Miller

net: hns3: split out hclge_get_fd_rule_info()

hclge_get_fd_rule_info() is bloated, so this patch splits the
per-flow-type spec filling out into several standalone helper
functions to improve readability and maintainability.
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 736fc0e1
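
For readers skimming the diff below, here is a minimal, self-contained sketch of the refactoring pattern this patch applies: one helper per flow type fills a value/mask spec pair, and the top-level function only dispatches on the flow type. All names and types in this sketch (demo_rule, demo_ip4_spec, demo_get_ip4_info, and so on) are hypothetical simplifications for illustration, not the hns3 driver's actual structures; the real code follows in the diff.

#include <stdint.h>

enum demo_flow_type { DEMO_TCP_V4, DEMO_UDP_V4, DEMO_OTHER };

struct demo_rule {
        enum demo_flow_type flow_type;
        uint32_t src_ip;
        uint32_t dst_ip;
        uint32_t unused_tuple;  /* a set bit means "tuple unused", so its mask is 0 */
};

struct demo_ip4_spec {
        uint32_t ip4src;
        uint32_t ip4dst;
};

#define DEMO_BIT_SRC_IP 0x1u
#define DEMO_BIT_DST_IP 0x2u

/* Helper split out of the dispatcher: fills value and mask for one flow type. */
static void demo_get_ip4_info(const struct demo_rule *rule,
                              struct demo_ip4_spec *spec,
                              struct demo_ip4_spec *spec_mask)
{
        spec->ip4src = rule->src_ip;
        spec_mask->ip4src = (rule->unused_tuple & DEMO_BIT_SRC_IP) ? 0 : 0xffffffffu;

        spec->ip4dst = rule->dst_ip;
        spec_mask->ip4dst = (rule->unused_tuple & DEMO_BIT_DST_IP) ? 0 : 0xffffffffu;
}

/* The top-level function stays small: it only dispatches on the flow type. */
static int demo_get_rule_info(const struct demo_rule *rule,
                              struct demo_ip4_spec *spec,
                              struct demo_ip4_spec *spec_mask)
{
        switch (rule->flow_type) {
        case DEMO_TCP_V4:
        case DEMO_UDP_V4:
                demo_get_ip4_info(rule, spec, spec_mask);
                return 0;
        default:
                return -1;      /* unsupported flow type in this sketch */
        }
}

int main(void)
{
        struct demo_rule rule = { DEMO_TCP_V4, 0x0a000001u, 0x0a000002u, DEMO_BIT_DST_IP };
        struct demo_ip4_spec spec, mask;

        return demo_get_rule_info(&rule, &spec, &mask);
}
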
@@ -5938,178 +5938,131 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
 	return 0;
 }
 
-static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
-				  struct ethtool_rxnfc *cmd)
-{
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_fd_rule *rule = NULL;
-	struct hclge_dev *hdev = vport->back;
-	struct ethtool_rx_flow_spec *fs;
-	struct hlist_node *node2;
-
-	if (!hnae3_dev_fd_supported(hdev))
-		return -EOPNOTSUPP;
-
-	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
-
-	spin_lock_bh(&hdev->fd_rule_lock);
-
-	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
-		if (rule->location >= fs->location)
-			break;
-	}
-
-	if (!rule || fs->location != rule->location) {
-		spin_unlock_bh(&hdev->fd_rule_lock);
-		return -ENOENT;
-	}
-
-	fs->flow_type = rule->flow_type;
-	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
-	case SCTP_V4_FLOW:
-	case TCP_V4_FLOW:
-	case UDP_V4_FLOW:
-		fs->h_u.tcp_ip4_spec.ip4src =
-				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
-		fs->m_u.tcp_ip4_spec.ip4src =
-			rule->unused_tuple & BIT(INNER_SRC_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-		fs->h_u.tcp_ip4_spec.ip4dst =
-				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
-		fs->m_u.tcp_ip4_spec.ip4dst =
-			rule->unused_tuple & BIT(INNER_DST_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
-		fs->m_u.tcp_ip4_spec.psrc =
-			rule->unused_tuple & BIT(INNER_SRC_PORT) ?
-			0 : cpu_to_be16(rule->tuples_mask.src_port);
-		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
-		fs->m_u.tcp_ip4_spec.pdst =
-			rule->unused_tuple & BIT(INNER_DST_PORT) ?
-			0 : cpu_to_be16(rule->tuples_mask.dst_port);
-		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
-		fs->m_u.tcp_ip4_spec.tos =
-			rule->unused_tuple & BIT(INNER_IP_TOS) ?
-			0 : rule->tuples_mask.ip_tos;
-
-		break;
-	case IP_USER_FLOW:
-		fs->h_u.usr_ip4_spec.ip4src =
-				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
-		fs->m_u.tcp_ip4_spec.ip4src =
-			rule->unused_tuple & BIT(INNER_SRC_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-		fs->h_u.usr_ip4_spec.ip4dst =
-				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
-		fs->m_u.usr_ip4_spec.ip4dst =
-			rule->unused_tuple & BIT(INNER_DST_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
-		fs->m_u.usr_ip4_spec.tos =
-			rule->unused_tuple & BIT(INNER_IP_TOS) ?
-			0 : rule->tuples_mask.ip_tos;
-		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
-		fs->m_u.usr_ip4_spec.proto =
-			rule->unused_tuple & BIT(INNER_IP_PROTO) ?
-			0 : rule->tuples_mask.ip_proto;
-		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
-
-		break;
-	case SCTP_V6_FLOW:
-	case TCP_V6_FLOW:
-	case UDP_V6_FLOW:
-		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
-				  rule->tuples.src_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_SRC_IP))
-			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
-					  rule->tuples_mask.src_ip, IPV6_SIZE);
-		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
-				  rule->tuples.dst_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_DST_IP))
-			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
-					  rule->tuples_mask.dst_ip, IPV6_SIZE);
-		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
-		fs->m_u.tcp_ip6_spec.psrc =
-			rule->unused_tuple & BIT(INNER_SRC_PORT) ?
-			0 : cpu_to_be16(rule->tuples_mask.src_port);
-		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
-		fs->m_u.tcp_ip6_spec.pdst =
-			rule->unused_tuple & BIT(INNER_DST_PORT) ?
-			0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
-		break;
-	case IPV6_USER_FLOW:
-		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
-				  rule->tuples.src_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_SRC_IP))
-			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
-					  rule->tuples_mask.src_ip, IPV6_SIZE);
-		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
-				  rule->tuples.dst_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_DST_IP))
-			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
-					  rule->tuples_mask.dst_ip, IPV6_SIZE);
-		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
-		fs->m_u.usr_ip6_spec.l4_proto =
-			rule->unused_tuple & BIT(INNER_IP_PROTO) ?
-			0 : rule->tuples_mask.ip_proto;
-
-		break;
-	case ETHER_FLOW:
-		ether_addr_copy(fs->h_u.ether_spec.h_source,
-				rule->tuples.src_mac);
-		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
-			eth_zero_addr(fs->m_u.ether_spec.h_source);
-		else
-			ether_addr_copy(fs->m_u.ether_spec.h_source,
-					rule->tuples_mask.src_mac);
-		ether_addr_copy(fs->h_u.ether_spec.h_dest,
-				rule->tuples.dst_mac);
-		if (rule->unused_tuple & BIT(INNER_DST_MAC))
-			eth_zero_addr(fs->m_u.ether_spec.h_dest);
-		else
-			ether_addr_copy(fs->m_u.ether_spec.h_dest,
-					rule->tuples_mask.dst_mac);
-		fs->h_u.ether_spec.h_proto =
-				cpu_to_be16(rule->tuples.ether_proto);
-		fs->m_u.ether_spec.h_proto =
-			rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
-			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
-
-		break;
-	default:
-		spin_unlock_bh(&hdev->fd_rule_lock);
-		return -EOPNOTSUPP;
-	}
-
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+				     struct ethtool_tcpip4_spec *spec,
+				     struct ethtool_tcpip4_spec *spec_mask)
+{
+	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+	spec->psrc = cpu_to_be16(rule->tuples.src_port);
+	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+			0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+			0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+	spec->tos = rule->tuples.ip_tos;
+	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+			0 : rule->tuples_mask.ip_tos;
+}
+
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+				  struct ethtool_usrip4_spec *spec,
+				  struct ethtool_usrip4_spec *spec_mask)
+{
+	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+	spec->tos = rule->tuples.ip_tos;
+	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+			0 : rule->tuples_mask.ip_tos;
+
+	spec->proto = rule->tuples.ip_proto;
+	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+			0 : rule->tuples_mask.ip_proto;
+
+	spec->ip_ver = ETH_RX_NFC_IP4;
+}
+
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+				     struct ethtool_tcpip6_spec *spec,
+				     struct ethtool_tcpip6_spec *spec_mask)
+{
+	cpu_to_be32_array(spec->ip6src,
+			  rule->tuples.src_ip, IPV6_SIZE);
+	cpu_to_be32_array(spec->ip6dst,
+			  rule->tuples.dst_ip, IPV6_SIZE);
+	if (rule->unused_tuple & BIT(INNER_SRC_IP))
+		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+	else
+		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+				  IPV6_SIZE);
+
+	if (rule->unused_tuple & BIT(INNER_DST_IP))
+		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+	else
+		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+				  IPV6_SIZE);
+
+	spec->psrc = cpu_to_be16(rule->tuples.src_port);
+	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+			0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+			0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
+
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+				  struct ethtool_usrip6_spec *spec,
+				  struct ethtool_usrip6_spec *spec_mask)
+{
+	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+	if (rule->unused_tuple & BIT(INNER_SRC_IP))
+		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+	else
+		cpu_to_be32_array(spec_mask->ip6src,
+				  rule->tuples_mask.src_ip, IPV6_SIZE);
+
+	if (rule->unused_tuple & BIT(INNER_DST_IP))
+		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+	else
+		cpu_to_be32_array(spec_mask->ip6dst,
+				  rule->tuples_mask.dst_ip, IPV6_SIZE);
+
+	spec->l4_proto = rule->tuples.ip_proto;
+	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+			0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+				    struct ethhdr *spec,
+				    struct ethhdr *spec_mask)
+{
+	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
+
+	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+		eth_zero_addr(spec_mask->h_source);
+	else
+		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
+
+	if (rule->unused_tuple & BIT(INNER_DST_MAC))
+		eth_zero_addr(spec_mask->h_dest);
+	else
+		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
+
+	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
+
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+				  struct hclge_fd_rule *rule)
+{
 	if (fs->flow_type & FLOW_EXT) {
 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
 		fs->m_ext.vlan_tci =
@@ -6126,6 +6079,68 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 		ether_addr_copy(fs->m_u.ether_spec.h_dest,
 				rule->tuples_mask.dst_mac);
 	}
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+				  struct ethtool_rxnfc *cmd)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_fd_rule *rule = NULL;
+	struct hclge_dev *hdev = vport->back;
+	struct ethtool_rx_flow_spec *fs;
+	struct hlist_node *node2;
+
+	if (!hnae3_dev_fd_supported(hdev))
+		return -EOPNOTSUPP;
+
+	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	spin_lock_bh(&hdev->fd_rule_lock);
+
+	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+		if (rule->location >= fs->location)
+			break;
+	}
+
+	if (!rule || fs->location != rule->location) {
+		spin_unlock_bh(&hdev->fd_rule_lock);
+		return -ENOENT;
+	}
+
+	fs->flow_type = rule->flow_type;
+	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case SCTP_V4_FLOW:
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+					 &fs->m_u.tcp_ip4_spec);
+		break;
+	case IP_USER_FLOW:
+		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+				      &fs->m_u.usr_ip4_spec);
+		break;
+	case SCTP_V6_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+					 &fs->m_u.tcp_ip6_spec);
+		break;
+	case IPV6_USER_FLOW:
+		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+				      &fs->m_u.usr_ip6_spec);
+		break;
+	/* The flow type of fd rule has been checked before adding in to rule
+	 * list. As other flow types have been handled, it must be ETHER_FLOW
+	 * for the default case
+	 */
+	default:
+		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+					&fs->m_u.ether_spec);
+		break;
+	}
+
+	hclge_fd_get_ext_info(fs, rule);
+
 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
 		fs->ring_cookie = RX_CLS_FLOW_DISC;
...