Commit 8fb0d2ce authored by David S. Miller

Merge branch 'nfp-ipv6-tunnel'

John Hurley says:

====================
Add ipv6 tunnel support to NFP

The following patches add support for IPv6 tunnel offload to the NFP
driver.

Patches 1-2 do some code tidy up and prepare existing code for reuse in
IPv6 tunnels.
Patches 3-4 handle IPv6 tunnel decap (match) rules.
Patches 5-8 handle encap (action) rules.
Patch 9 adds IPv6 support to the merge and pre-tunnel rule functions.

v1->v2:
- fix compiler warning when building without CONFIG_IPV6 set -
  Jakub Kicinski (patch 7)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a886ca6f 78346160
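Throughout the series, the IPv6 paths are gated on a new firmware feature bit, NFP_FL_FEATS_IPV6_TUN (BIT(7) in the main.h hunk below). A minimal, userspace-runnable sketch of that negotiation check, with the driver context stubbed out in plain C (the helper name and the errno stand-in are illustrative, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                (1U << (n))
#define NFP_FL_FEATS_IPV6_TUN BIT(7) /* from flower/main.h in this series */
#define EOPNOTSUPP            95     /* Linux errno value, stand-in for -EOPNOTSUPP */

/* Reject an IPv6 tunnel rule unless the loaded firmware advertises support. */
static int check_ipv6_tun(uint32_t flower_ext_feats, int rule_is_ipv6)
{
    if (rule_is_ipv6 && !(flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
        return -EOPNOTSUPP;
    return 0;
}

int main(void)
{
    /* Firmware without the bit: the rule stays in software. */
    printf("%d\n", check_ipv6_tun(0, 1));                     /* -95 */
    /* Firmware with the bit: the rule may be offloaded. */
    printf("%d\n", check_ipv6_tun(NFP_FL_FEATS_IPV6_TUN, 1)); /* 0 */
    return 0;
}
```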
@@ -22,8 +22,9 @@
 #define NFP_FL_TUNNEL_CSUM		cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY		cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT	cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
+						 IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
 						 NFP_FL_TUNNEL_KEY | \
 						 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -394,19 +395,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
 }

 static int
-nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
-		    const struct flow_action_entry *act,
-		    struct nfp_fl_pre_tunnel *pre_tun,
-		    enum nfp_flower_tun_type tun_type,
-		    struct net_device *netdev, struct netlink_ext_ack *extack)
+nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
+	       const struct flow_action_entry *act,
+	       struct nfp_fl_pre_tunnel *pre_tun,
+	       enum nfp_flower_tun_type tun_type,
+	       struct net_device *netdev, struct netlink_ext_ack *extack)
 {
-	size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
 	const struct ip_tunnel_info *ip_tun = act->tunnel;
+	bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
+	size_t act_size = sizeof(struct nfp_fl_set_tun);
 	struct nfp_flower_priv *priv = app->priv;
 	u32 tmp_set_ip_tun_type_index = 0;
 	/* Currently support one pre-tunnel so index is always 0. */
 	int pretun_idx = 0;

+	if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
+		return -EOPNOTSUPP;
+
+	if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
+		return -EOPNOTSUPP;
+
 	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
 		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
 		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
@@ -417,19 +425,35 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
 		return -EOPNOTSUPP;
 	}

-	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
 	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

 	/* Set tunnel type and pre-tunnel index. */
 	tmp_set_ip_tun_type_index |=
-		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
-		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+		FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
+		FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);

 	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
 	set_tun->tun_id = ip_tun->key.tun_id;

 	if (ip_tun->key.ttl) {
 		set_tun->ttl = ip_tun->key.ttl;
+#ifdef CONFIG_IPV6
+	} else if (ipv6) {
+		struct net *net = dev_net(netdev);
+		struct flowi6 flow = {};
+		struct dst_entry *dst;
+
+		flow.daddr = ip_tun->key.u.ipv6.dst;
+		flow.flowi4_proto = IPPROTO_UDP;
+		dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
+		if (!IS_ERR(dst)) {
+			set_tun->ttl = ip6_dst_hoplimit(dst);
+			dst_release(dst);
+		} else {
+			set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+		}
+#endif
 	} else {
 		struct net *net = dev_net(netdev);
 		struct flowi4 flow = {};
@@ -455,7 +479,7 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
 	set_tun->tos = ip_tun->key.tos;

 	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
-	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
 		return -EOPNOTSUPP;
 	}
@@ -467,7 +491,12 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
 	}

 	/* Complete pre_tunnel action. */
-	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+	if (ipv6) {
+		pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
+		pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
+	} else {
+		pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+	}

 	return 0;
 }
@@ -956,8 +985,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 		       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
 		       struct netlink_ext_ack *extack, int act_idx)
 {
-	struct nfp_fl_set_ipv4_tun *set_tun;
 	struct nfp_fl_pre_tunnel *pre_tun;
+	struct nfp_fl_set_tun *set_tun;
 	struct nfp_fl_push_vlan *psh_v;
 	struct nfp_fl_push_mpls *psh_m;
 	struct nfp_fl_pop_vlan *pop_v;
@@ -1032,7 +1061,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 		 * If none, the packet falls back before applying other actions.
 		 */
 		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-		    sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+		    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
 			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
 			return -EOPNOTSUPP;
 		}
@@ -1046,11 +1075,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 			return err;

 		set_tun = (void *)&nfp_fl->action_data[*a_len];
-		err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
-					  *tun_type, netdev, extack);
+		err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
+				     netdev, extack);
 		if (err)
 			return err;
-		*a_len += sizeof(struct nfp_fl_set_ipv4_tun);
+		*a_len += sizeof(struct nfp_fl_set_tun);
 		}
 		break;
 	case FLOW_ACTION_TUNNEL_DECAP:
......
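When the encap action above has no fixed TTL, the IPv6 path derives one from a route lookup and otherwise falls back to the namespace-wide default hop limit (net->ipv6.devconf_all->hop_limit). A rough userspace analogue of that fallback, reading the sysctl that backs the same value (the proc path is the standard one; the parsing is illustrative, not driver code):

```c
#include <stdio.h>

/* Fallback hop limit if the sysctl cannot be read; 64 mirrors the
 * kernel's IPV6_DEFAULT_HOPLIMIT.
 */
static int default_ipv6_hoplimit(void)
{
    FILE *f = fopen("/proc/sys/net/ipv6/conf/all/hop_limit", "r");
    int hl = 64;

    if (f) {
        if (fscanf(f, "%d", &hl) != 1)
            hl = 64;
        fclose(f);
    }
    return hl;
}

int main(void)
{
    printf("fallback hop limit: %d\n", default_ipv6_hoplimit());
    return 0;
}
```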
@@ -270,11 +270,17 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 		}
 		goto err_default;
 	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
-		nfp_tunnel_request_route(app, skb);
+		nfp_tunnel_request_route_v4(app, skb);
+		break;
+	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
+		nfp_tunnel_request_route_v6(app, skb);
 		break;
 	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
 		nfp_tunnel_keep_alive(app, skb);
 		break;
+	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
+		nfp_tunnel_keep_alive_v6(app, skb);
+		break;
 	case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
 		nfp_flower_stats_rlim_reply(app, skb);
 		break;
@@ -361,7 +367,8 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 		   nfp_flower_process_mtu_ack(app, skb)) {
 		/* Handle MTU acks outside wq to prevent RTNL conflict. */
 		dev_consume_skb_any(skb);
-	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+		   cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
 		/* Acks from the NFP that the route is added - ignore. */
 		dev_consume_skb_any(skb);
 	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
......
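The dispatch above relies on four new control-message type values, added to the nfp_flower_cmsg_type_port enum in cmsg.h further down. A compile-time sketch that the new IPv6 values stay below the existing MAX sentinel (values copied from that enum):

```c
#include <assert.h>

/* Subset of nfp_flower_cmsg_type_port, values from the cmsg.h hunk below. */
enum cmsg_type {
    CMSG_TYPE_TUN_IPS_V6     = 22,
    CMSG_TYPE_NO_NEIGH_V6    = 23,
    CMSG_TYPE_TUN_NEIGH_V6   = 24,
    CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
    CMSG_TYPE_MAX            = 32,
};

static_assert(CMSG_TYPE_ACTIVE_TUNS_V6 < CMSG_TYPE_MAX,
              "IPv6 message types must fit in the existing type space");

int main(void) { return 0; }
```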
@@ -26,6 +26,7 @@
 #define NFP_FLOWER_LAYER2_GRE		BIT(0)
 #define NFP_FLOWER_LAYER2_GENEVE	BIT(5)
 #define NFP_FLOWER_LAYER2_GENEVE_OP	BIT(6)
+#define NFP_FLOWER_LAYER2_TUN_IPV6	BIT(7)

 #define NFP_FLOWER_MASK_VLAN_PRIO	GENMASK(15, 13)
 #define NFP_FLOWER_MASK_VLAN_PRESENT	BIT(12)
@@ -63,6 +64,7 @@
 #define NFP_FL_MAX_GENEVE_OPT_ACT	32
 #define NFP_FL_MAX_GENEVE_OPT_CNT	64
 #define NFP_FL_MAX_GENEVE_OPT_KEY	32
+#define NFP_FL_MAX_GENEVE_OPT_KEY_V6	8

 /* Action opcodes */
 #define NFP_FL_ACTION_OPCODE_OUTPUT		0
@@ -70,7 +72,7 @@
 #define NFP_FL_ACTION_OPCODE_POP_VLAN		2
 #define NFP_FL_ACTION_OPCODE_PUSH_MPLS		3
 #define NFP_FL_ACTION_OPCODE_POP_MPLS		4
-#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL	6
+#define NFP_FL_ACTION_OPCODE_SET_TUNNEL		6
 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET	7
 #define NFP_FL_ACTION_OPCODE_SET_MPLS		8
 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS	9
@@ -99,8 +101,8 @@
 /* Tunnel ports */
 #define NFP_FL_PORT_TYPE_TUN		0x50000000
-#define NFP_FL_IPV4_TUNNEL_TYPE		GENMASK(7, 4)
-#define NFP_FL_IPV4_PRE_TUN_INDEX	GENMASK(2, 0)
+#define NFP_FL_TUNNEL_TYPE		GENMASK(7, 4)
+#define NFP_FL_PRE_TUN_INDEX		GENMASK(2, 0)

 #define NFP_FLOWER_WORKQ_MAX_SKBS	30000
@@ -206,13 +208,16 @@ struct nfp_fl_pre_lag {
 struct nfp_fl_pre_tunnel {
 	struct nfp_fl_act_head head;
-	__be16 reserved;
-	__be32 ipv4_dst;
-	/* reserved for use with IPv6 addresses */
-	__be32 extra[3];
+	__be16 flags;
+	union {
+		__be32 ipv4_dst;
+		struct in6_addr ipv6_dst;
+	};
 };

-struct nfp_fl_set_ipv4_tun {
+#define NFP_FL_PRE_TUN_IPV6	BIT(0)
+
+struct nfp_fl_set_tun {
 	struct nfp_fl_act_head head;
 	__be16 reserved;
 	__be64 tun_id __packed;
@@ -387,6 +392,11 @@ struct nfp_flower_tun_ipv4 {
 	__be32 dst;
 };

+struct nfp_flower_tun_ipv6 {
+	struct in6_addr src;
+	struct in6_addr dst;
+};
+
 struct nfp_flower_tun_ip_ext {
 	u8 tos;
 	u8 ttl;
@@ -416,6 +426,42 @@ struct nfp_flower_ipv4_udp_tun {
 	__be32 tun_id;
 };

+/* Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B)
+ * -----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,   31 - 0                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,  63 - 32                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,  95 - 64                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src, 127 - 96                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,   31 - 0                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,  63 - 32                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,  95 - 64                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst, 127 - 96                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |            Reserved           |      tos      |      ttl      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                            Reserved                           |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                     VNI                       |    Reserved   |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_udp_tun {
+	struct nfp_flower_tun_ipv6 ipv6;
+	__be16 reserved1;
+	struct nfp_flower_tun_ip_ext ip_ext;
+	__be32 reserved2;
+	__be32 tun_id;
+};
+
 /* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
  * -----------------------------------------------------------------
  *    3                   2                   1
@@ -445,6 +491,46 @@ struct nfp_flower_ipv4_gre_tun {
 	__be32 reserved2;
 };

+/* Flow Frame GRE TUNNEL V6 --> Tunnel details (12W/48B)
+ * -----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,   31 - 0                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,  63 - 32                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,  95 - 64                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src, 127 - 96                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,   31 - 0                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,  63 - 32                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,  95 - 64                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst, 127 - 96                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |           tun_flags           |      tos      |      ttl      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |            Reserved           |           Ethertype           |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                              Key                              |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                           Reserved                            |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_gre_tun {
+	struct nfp_flower_tun_ipv6 ipv6;
+	__be16 tun_flags;
+	struct nfp_flower_tun_ip_ext ip_ext;
+	__be16 reserved1;
+	__be16 ethertype;
+	__be32 tun_key;
+	__be32 reserved2;
+};
+
 struct nfp_flower_geneve_options {
 	u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
 };
@@ -485,6 +571,10 @@ enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_QOS_DEL =		19,
 	NFP_FLOWER_CMSG_TYPE_QOS_STATS =	20,
 	NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE =	21,
+	NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 =	22,
+	NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 =	23,
+	NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 =	24,
+	NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 =	25,
 	NFP_FLOWER_CMSG_TYPE_MAX =		32,
 };
......
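The two new match structures above advertise fixed frame sizes: 11 words/44 bytes for the IPv6 UDP tunnel and 12 words/48 bytes for IPv6 GRE. A standalone C sketch that mirrors them with fixed-width stand-ins for __be16/__be32 and struct in6_addr, and checks both sizes at compile time (the mirror types are assumptions for illustration, not the driver's definitions):

```c
#include <assert.h>
#include <stdint.h>

struct in6 { uint8_t s6_addr[16]; };      /* stand-in for struct in6_addr */
struct tun_ipv6 { struct in6 src, dst; }; /* mirrors nfp_flower_tun_ipv6 */
struct ip_ext { uint8_t tos, ttl; };      /* mirrors nfp_flower_tun_ip_ext */

struct ipv6_udp_tun {        /* mirrors nfp_flower_ipv6_udp_tun */
    struct tun_ipv6 ipv6;    /* 32 bytes */
    uint16_t reserved1;      /*  2 bytes */
    struct ip_ext ip_ext;    /*  2 bytes */
    uint32_t reserved2;      /*  4 bytes */
    uint32_t tun_id;         /*  4 bytes */
};

struct ipv6_gre_tun {        /* mirrors nfp_flower_ipv6_gre_tun */
    struct tun_ipv6 ipv6;    /* 32 bytes */
    uint16_t tun_flags;
    struct ip_ext ip_ext;
    uint16_t reserved1;
    uint16_t ethertype;
    uint32_t tun_key;
    uint32_t reserved2;
};

static_assert(sizeof(struct ipv6_udp_tun) == 44, "11 words / 44 bytes");
static_assert(sizeof(struct ipv6_gre_tun) == 48, "12 words / 48 bytes");

int main(void) { return 0; }
```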
@@ -43,6 +43,7 @@ struct nfp_app;
 #define NFP_FL_FEATS_VF_RLIM		BIT(4)
 #define NFP_FL_FEATS_FLOW_MOD		BIT(5)
 #define NFP_FL_FEATS_PRE_TUN_RULES	BIT(6)
+#define NFP_FL_FEATS_IPV6_TUN		BIT(7)
 #define NFP_FL_FEATS_FLOW_MERGE		BIT(30)
 #define NFP_FL_FEATS_LAG		BIT(31)
@@ -62,18 +63,26 @@ struct nfp_fl_stats_id {
  * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
  * @offloaded_macs:	Hashtable of the offloaded MAC addresses
  * @ipv4_off_list:	List of IPv4 addresses to offload
- * @neigh_off_list:	List of neighbour offloads
+ * @ipv6_off_list:	List of IPv6 addresses to offload
+ * @neigh_off_list_v4:	List of IPv4 neighbour offloads
+ * @neigh_off_list_v6:	List of IPv6 neighbour offloads
  * @ipv4_off_lock:	Lock for the IPv4 address list
- * @neigh_off_lock:	Lock for the neighbour address list
+ * @ipv6_off_lock:	Lock for the IPv6 address list
+ * @neigh_off_lock_v4:	Lock for the IPv4 neighbour address list
+ * @neigh_off_lock_v6:	Lock for the IPv6 neighbour address list
  * @mac_off_ids:	IDA to manage id assignment for offloaded MACs
  * @neigh_nb:		Notifier to monitor neighbour state
  */
 struct nfp_fl_tunnel_offloads {
 	struct rhashtable offloaded_macs;
 	struct list_head ipv4_off_list;
-	struct list_head neigh_off_list;
+	struct list_head ipv6_off_list;
+	struct list_head neigh_off_list_v4;
+	struct list_head neigh_off_list_v6;
 	struct mutex ipv4_off_lock;
-	spinlock_t neigh_off_lock;
+	struct mutex ipv6_off_lock;
+	spinlock_t neigh_off_lock_v4;
+	spinlock_t neigh_off_lock_v6;
 	struct ida mac_off_ids;
 	struct notifier_block neigh_nb;
 };
@@ -273,12 +282,25 @@ struct nfp_fl_stats {
 	u64 used;
 };

+/**
+ * struct nfp_ipv6_addr_entry - cached IPv6 addresses
+ * @ipv6_addr:	IP address
+ * @ref_count:	number of rules currently using this IP
+ * @list:	list pointer
+ */
+struct nfp_ipv6_addr_entry {
+	struct in6_addr ipv6_addr;
+	int ref_count;
+	struct list_head list;
+};
+
 struct nfp_fl_payload {
 	struct nfp_fl_rule_metadata meta;
 	unsigned long tc_flower_cookie;
 	struct rhash_head fl_node;
 	struct rcu_head rcu;
 	__be32 nfp_tun_ipv4_addr;
+	struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
 	struct net_device *ingress_dev;
 	char *unmasked_data;
 	char *mask_data;
@@ -396,8 +418,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
 				 unsigned long event, void *ptr);
 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
 void nfp_flower_lag_init(struct nfp_fl_lag *lag);
 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
 int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
......
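The nfp_tunnel_add_ipv6_off()/nfp_tunnel_put_ipv6_off() pair declared above implements reference-counted offload of tunnel endpoint addresses: each rule using an address takes a reference on an nfp_ipv6_addr_entry, and the address is only withdrawn from the firmware when the last user puts it. A hedged userspace sketch of that add/put pattern, with list handling simplified to a singly linked list (all names here are illustrative):

```c
#include <stdlib.h>
#include <string.h>

struct in6 { unsigned char s6_addr[16]; };

struct addr_entry {               /* analogue of nfp_ipv6_addr_entry */
    struct in6 ipv6_addr;
    int ref_count;
    struct addr_entry *next;
};

static struct addr_entry *off_list; /* analogue of ipv6_off_list */

/* Take a reference, allocating (and notionally offloading) on first use. */
static struct addr_entry *addr_add(const struct in6 *a)
{
    struct addr_entry *e;

    for (e = off_list; e; e = e->next)
        if (!memcmp(&e->ipv6_addr, a, sizeof(*a))) {
            e->ref_count++;
            return e;
        }
    e = calloc(1, sizeof(*e));
    if (!e)
        return NULL;
    e->ipv6_addr = *a;
    e->ref_count = 1;
    e->next = off_list;
    off_list = e;
    /* here the driver would push the updated address list to the NFP */
    return e;
}

/* Drop a reference; unlink and free (notionally un-offload) at zero. */
static void addr_put(struct addr_entry *entry)
{
    struct addr_entry **p;

    if (--entry->ref_count)
        return;
    for (p = &off_list; *p; p = &(*p)->next)
        if (*p == entry) {
            *p = entry->next;
            break;
        }
    free(entry);
}

int main(void)
{
    struct in6 a = { { 0x20, 0x01, 0x0d, 0xb8 } };
    struct addr_entry *e1 = addr_add(&a);
    struct addr_entry *e2 = addr_add(&a); /* same entry, ref_count == 2 */

    addr_put(e2);
    addr_put(e1); /* last put removes the entry */
    return 0;
}
```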
@@ -10,9 +10,8 @@
 static void
 nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
 			    struct nfp_flower_meta_tci *msk,
-			    struct flow_cls_offload *flow, u8 key_type)
+			    struct flow_rule *rule, u8 key_type)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	u16 tmp_tci;

 	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -77,11 +76,8 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 static void
 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
-		       struct nfp_flower_mac_mpls *msk,
-		       struct flow_cls_offload *flow)
+		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
 	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -130,10 +126,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
 static void
 nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
 			 struct nfp_flower_tp_ports *msk,
-			 struct flow_cls_offload *flow)
+			 struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
 	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -150,11 +144,8 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
 static void
 nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
-			  struct nfp_flower_ip_ext *msk,
-			  struct flow_cls_offload *flow)
+			  struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 		struct flow_match_basic match;
@@ -224,10 +215,8 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
 static void
 nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
-			struct nfp_flower_ipv4 *msk,
-			struct flow_cls_offload *flow)
+			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	struct flow_match_ipv4_addrs match;

 	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -241,16 +230,13 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
 		msk->ipv4_dst = match.mask->dst;
 	}

-	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
 }

 static void
 nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
-			struct nfp_flower_ipv6 *msk,
-			struct flow_cls_offload *flow)
+			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
 	memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -264,16 +250,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
 		msk->ipv6_dst = match.mask->dst;
 	}

-	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
 }

 static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk,
-			      struct flow_cls_offload *flow)
+nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
 {
 	struct flow_match_enc_opts match;

-	flow_rule_match_enc_opts(flow->rule, &match);
+	flow_rule_match_enc_opts(rule, &match);
 	memcpy(ext, match.key->data, match.key->len);
 	memcpy(msk, match.mask->data, match.mask->len);
@@ -283,10 +268,8 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
 static void
 nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
 				  struct nfp_flower_tun_ipv4 *msk,
-				  struct flow_cls_offload *flow)
+				  struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
 		struct flow_match_ipv4_addrs match;
@@ -298,13 +281,27 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
 	}
 }

+static void
+nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
+				  struct nfp_flower_tun_ipv6 *msk,
+				  struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_enc_ipv6_addrs(rule, &match);
+		ext->src = match.key->src;
+		ext->dst = match.key->dst;
+		msk->src = match.mask->src;
+		msk->dst = match.mask->dst;
+	}
+}
+
 static void
 nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
 			      struct nfp_flower_tun_ip_ext *msk,
-			      struct flow_cls_offload *flow)
+			      struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
 		struct flow_match_ip match;
@@ -317,57 +314,97 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
 }

 static void
-nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
-				struct nfp_flower_ipv4_gre_tun *msk,
-				struct flow_cls_offload *flow)
+nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
+			       struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
-	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
-
-	/* NVGRE is the only supported GRE tunnel type */
-	ext->ethertype = cpu_to_be16(ETH_P_TEB);
-	msk->ethertype = cpu_to_be16(~0);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
+		u32 vni;
+
+		flow_rule_match_enc_keyid(rule, &match);
+		vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+		*key = cpu_to_be32(vni);
+		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+		*key_msk = cpu_to_be32(vni);
+	}
+}

+static void
+nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
+			       __be16 *flags_msk, struct flow_rule *rule)
+{
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
 		struct flow_match_enc_keyid match;

 		flow_rule_match_enc_keyid(rule, &match);
-		ext->tun_key = match.key->keyid;
-		msk->tun_key = match.mask->keyid;
+		*key = match.key->keyid;
+		*key_msk = match.mask->keyid;

-		ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
-		msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
 	}
+}

-	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
-	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+				struct nfp_flower_ipv4_gre_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+	/* NVGRE is the only supported GRE tunnel type */
+	ext->ethertype = cpu_to_be16(ETH_P_TEB);
+	msk->ethertype = cpu_to_be16(~0);
+
+	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+				       &ext->tun_flags, &msk->tun_flags, rule);
 }

 static void
 nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
 				struct nfp_flower_ipv4_udp_tun *msk,
-				struct flow_cls_offload *flow)
+				struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
 	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
 	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

-	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-		struct flow_match_enc_keyid match;
-		u32 temp_vni;
-
-		flow_rule_match_enc_keyid(rule, &match);
-		temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
-		ext->tun_id = cpu_to_be32(temp_vni);
-		temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
-		msk->tun_id = cpu_to_be32(temp_vni);
-	}
-
-	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
-	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+				struct nfp_flower_ipv6_udp_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+
+	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+				struct nfp_flower_ipv6_gre_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+
+	/* NVGRE is the only supported GRE tunnel type */
+	ext->ethertype = cpu_to_be16(ETH_P_TEB);
+	msk->ethertype = cpu_to_be16(~0);
+
+	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+				       &ext->tun_flags, &msk->tun_flags, rule);
 }

 int nfp_flower_compile_flow_match(struct nfp_app *app,
@@ -378,6 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 				  enum nfp_flower_tun_type tun_type,
 				  struct netlink_ext_ack *extack)
 {
+	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	u32 port_id;
 	int err;
 	u8 *ext;
@@ -393,7 +431,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
 				    (struct nfp_flower_meta_tci *)msk,
-				    flow, key_ls->key_layer);
+				    rule, key_ls->key_layer);
 	ext += sizeof(struct nfp_flower_meta_tci);
 	msk += sizeof(struct nfp_flower_meta_tci);
@@ -425,7 +463,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
 		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
 				       (struct nfp_flower_mac_mpls *)msk,
-				       flow);
+				       rule);
 		ext += sizeof(struct nfp_flower_mac_mpls);
 		msk += sizeof(struct nfp_flower_mac_mpls);
 	}
@@ -433,7 +471,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
 		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
 					 (struct nfp_flower_tp_ports *)msk,
-					 flow);
+					 rule);
 		ext += sizeof(struct nfp_flower_tp_ports);
 		msk += sizeof(struct nfp_flower_tp_ports);
 	}
@@ -441,7 +479,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
 		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
 					(struct nfp_flower_ipv4 *)msk,
-					flow);
+					rule);
 		ext += sizeof(struct nfp_flower_ipv4);
 		msk += sizeof(struct nfp_flower_ipv4);
 	}
@@ -449,43 +487,83 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
 		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
 					(struct nfp_flower_ipv6 *)msk,
-					flow);
+					rule);
 		ext += sizeof(struct nfp_flower_ipv6);
 		msk += sizeof(struct nfp_flower_ipv6);
 	}

 	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
-		__be32 tun_dst;
-
-		nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
-		tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
-		ext += sizeof(struct nfp_flower_ipv4_gre_tun);
-		msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+			struct nfp_flower_ipv6_gre_tun *gre_match;
+			struct nfp_ipv6_addr_entry *entry;
+			struct in6_addr *dst;
+
+			nfp_flower_compile_ipv6_gre_tun((void *)ext,
+							(void *)msk, rule);
+			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
+			dst = &gre_match->ipv6.dst;
+			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
+			msk += sizeof(struct nfp_flower_ipv6_gre_tun);

-		/* Store the tunnel destination in the rule data.
-		 * This must be present and be an exact match.
-		 */
-		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
-		nfp_tunnel_add_ipv4_off(app, tun_dst);
+			entry = nfp_tunnel_add_ipv6_off(app, dst);
+			if (!entry)
+				return -EOPNOTSUPP;
+
+			nfp_flow->nfp_tun_ipv6 = entry;
+		} else {
+			__be32 dst;
+
+			nfp_flower_compile_ipv4_gre_tun((void *)ext,
+							(void *)msk, rule);
+			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+			msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+			/* Store the tunnel destination in the rule data.
+			 * This must be present and be an exact match.
+			 */
+			nfp_flow->nfp_tun_ipv4_addr = dst;
+			nfp_tunnel_add_ipv4_off(app, dst);
+		}
 	}

 	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
 	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
-		__be32 tun_dst;
-
-		nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
-		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
-		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
-		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+			struct nfp_flower_ipv6_udp_tun *udp_match;
+			struct nfp_ipv6_addr_entry *entry;
+			struct in6_addr *dst;
+
+			nfp_flower_compile_ipv6_udp_tun((void *)ext,
+							(void *)msk, rule);
+			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
+			dst = &udp_match->ipv6.dst;
+			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
+			msk += sizeof(struct nfp_flower_ipv6_udp_tun);

-		/* Store the tunnel destination in the rule data.
-		 * This must be present and be an exact match.
-		 */
-		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
-		nfp_tunnel_add_ipv4_off(app, tun_dst);
+			entry = nfp_tunnel_add_ipv6_off(app, dst);
+			if (!entry)
+				return -EOPNOTSUPP;
+
+			nfp_flow->nfp_tun_ipv6 = entry;
+		} else {
+			__be32 dst;
+
+			nfp_flower_compile_ipv4_udp_tun((void *)ext,
+							(void *)msk, rule);
+			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
+			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+			msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+			/* Store the tunnel destination in the rule data.
+			 * This must be present and be an exact match.
+			 */
+			nfp_flow->nfp_tun_ipv4_addr = dst;
+			nfp_tunnel_add_ipv4_off(app, dst);
+		}

 		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
-			err = nfp_flower_compile_geneve_opt(ext, msk, flow);
+			err = nfp_flower_compile_geneve_opt(ext, msk, rule);
 			if (err)
 				return err;
 		}
......
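The new nfp_flower_compile_tun_udp_key() helper above shifts the 24-bit VNI into the upper bytes of the 32-bit tunnel-ID word before converting back to big-endian. A runnable sketch of that packing; NFP_FL_TUN_VNI_OFFSET is assumed to be 8 here, and htonl/ntohl stand in for cpu_to_be32/be32_to_cpu:

```c
#include <arpa/inet.h> /* htonl/ntohl */
#include <assert.h>
#include <stdint.h>

#define NFP_FL_TUN_VNI_OFFSET 8 /* assumed value from flower/main.h */

/* Pack a flower keyid (big-endian VNI) into the tun_id match word. */
static uint32_t pack_vni(uint32_t keyid_be)
{
    uint32_t vni = ntohl(keyid_be) << NFP_FL_TUN_VNI_OFFSET;

    return htonl(vni);
}

int main(void)
{
    /* VNI 42 ends up in the three high bytes of the word. */
    assert(pack_vni(htonl(42)) == htonl(42u << NFP_FL_TUN_VNI_OFFSET));
    return 0;
}
```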
@@ -54,6 +54,10 @@
 	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
+	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+
 #define NFP_FLOWER_MERGE_FIELDS \
 	(NFP_FLOWER_LAYER_PORT | \
 	 NFP_FLOWER_LAYER_MAC | \
@@ -64,7 +68,8 @@
 #define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
 	(NFP_FLOWER_LAYER_PORT | \
 	 NFP_FLOWER_LAYER_MAC | \
-	 NFP_FLOWER_LAYER_IPV4)
+	 NFP_FLOWER_LAYER_IPV4 | \
+	 NFP_FLOWER_LAYER_IPV6)

 struct nfp_flower_merge_check {
 	union {
@@ -146,10 +151,11 @@ static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
 static int
 nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
-			  u32 *key_layer_two, int *key_size,
+			  u32 *key_layer_two, int *key_size, bool ipv6,
 			  struct netlink_ext_ack *extack)
 {
-	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
+	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
 		return -EOPNOTSUPP;
 	}
@@ -167,7 +173,7 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
 			      struct flow_dissector_key_enc_opts *enc_op,
 			      u32 *key_layer_two, u8 *key_layer, int *key_size,
 			      struct nfp_flower_priv *priv,
-			      enum nfp_flower_tun_type *tun_type,
+			      enum nfp_flower_tun_type *tun_type, bool ipv6,
 			      struct netlink_ext_ack *extack)
 {
 	int err;
@@ -176,7 +182,15 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
 	case htons(IANA_VXLAN_UDP_PORT):
 		*tun_type = NFP_FL_TUNNEL_VXLAN;
 		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
-		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+		if (ipv6) {
+			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			*key_size += sizeof(struct nfp_flower_ext_meta);
+			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+		} else {
+			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+		}

 		if (enc_op) {
 			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
@@ -192,7 +206,13 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
 		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
 		*key_size += sizeof(struct nfp_flower_ext_meta);
 		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
-		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+		if (ipv6) {
+			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+		} else {
+			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+		}

 		if (!enc_op)
 			break;
@@ -200,8 +220,8 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
 			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
 			return -EOPNOTSUPP;
 		}
-		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
-						key_size, extack);
+		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
+						ipv6, extack);
 		if (err)
 			return err;
 		break;
@@ -237,6 +257,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 	/* If any tun dissector is used then the required set must be used. */
 	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
+	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
 	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
 	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
@@ -268,8 +290,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
 		struct flow_match_enc_opts enc_op = { NULL, NULL };
 		struct flow_match_ipv4_addrs ipv4_addrs;
+		struct flow_match_ipv6_addrs ipv6_addrs;
 		struct flow_match_control enc_ctl;
 		struct flow_match_ports enc_ports;
+		bool ipv6_tun = false;

 		flow_rule_match_enc_control(rule, &enc_ctl);
@@ -277,38 +301,62 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
 			return -EOPNOTSUPP;
 		}
-		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
+
+		ipv6_tun = enc_ctl.key->addr_type ==
+				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+		if (ipv6_tun &&
+		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
 			return -EOPNOTSUPP;
 		}

-		/* These fields are already verified as used. */
-		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
-		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
-			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+		if (!ipv6_tun &&
+		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
 			return -EOPNOTSUPP;
 		}

+		if (ipv6_tun) {
+			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
+			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
+				       sizeof(ipv6_addrs.mask->dst))) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
+				return -EOPNOTSUPP;
+			}
+		} else {
+			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+				return -EOPNOTSUPP;
+			}
+		}
+
 		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
 			flow_rule_match_enc_opts(rule, &enc_op);

 		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
 			/* check if GRE, which has no enc_ports */
-			if (netif_is_gretap(netdev)) {
-				*tun_type = NFP_FL_TUNNEL_GRE;
-				key_layer |= NFP_FLOWER_LAYER_EXT_META;
-				key_size += sizeof(struct nfp_flower_ext_meta);
-				key_layer_two |= NFP_FLOWER_LAYER2_GRE;
-				key_size +=
-					sizeof(struct nfp_flower_ipv4_gre_tun);
-
-				if (enc_op.key) {
-					NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
-					return -EOPNOTSUPP;
-				}
-			} else {
+			if (!netif_is_gretap(netdev)) {
 				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
 				return -EOPNOTSUPP;
 			}
+
+			*tun_type = NFP_FL_TUNNEL_GRE;
+			key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			key_size += sizeof(struct nfp_flower_ext_meta);
+			key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+
+			if (ipv6_tun) {
+				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+				key_size +=
+					sizeof(struct nfp_flower_ipv6_udp_tun);
+			} else {
+				key_size +=
+					sizeof(struct nfp_flower_ipv4_udp_tun);
+			}
+
+			if (enc_op.key) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+				return -EOPNOTSUPP;
+			}
 		} else {
@@ -323,7 +371,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 							  &key_layer_two,
 							  &key_layer,
 							  &key_size, priv,
-							  tun_type, extack);
+							  tun_type, ipv6_tun,
+							  extack);
 			if (err)
 				return err;
@@ -491,6 +540,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 		goto err_free_mask;

 	flow_pay->nfp_tun_ipv4_addr = 0;
+	flow_pay->nfp_tun_ipv6 = NULL;
 	flow_pay->meta.flags = 0;
 	INIT_LIST_HEAD(&flow_pay->linked_flows);
 	flow_pay->in_hw = false;
@@ -517,10 +567,12 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
 	struct nfp_fl_set_ip4_addrs *ipv4_add;
 	struct nfp_fl_set_ipv6_addr *ipv6_add;
 	struct nfp_fl_push_vlan *push_vlan;
+	struct nfp_fl_pre_tunnel *pre_tun;
 	struct nfp_fl_set_tport *tport;
 	struct nfp_fl_set_eth *eth;
 	struct nfp_fl_act_head *a;
 	unsigned int act_off = 0;
+	bool ipv6_tun = false;
 	u8 act_id = 0;
 	u8 *ports;
 	int i;
@@ -542,14 +594,18 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
 		case NFP_FL_ACTION_OPCODE_POP_VLAN:
 			merge->tci = cpu_to_be16(0);
 			break;
-		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
 			/* New tunnel header means l2 to l4 can be matched. */
 			eth_broadcast_addr(&merge->l2.mac_dst[0]);
 			eth_broadcast_addr(&merge->l2.mac_src[0]);
 			memset(&merge->l4, 0xff,
 			       sizeof(struct nfp_flower_tp_ports));
-			memset(&merge->ipv4, 0xff,
-			       sizeof(struct nfp_flower_ipv4));
+			if (ipv6_tun)
+				memset(&merge->ipv6, 0xff,
+				       sizeof(struct nfp_flower_ipv6));
+			else
+				memset(&merge->ipv4, 0xff,
+				       sizeof(struct nfp_flower_ipv4));
 			break;
 		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
 			eth = (struct nfp_fl_set_eth *)a;
@@ -597,6 +653,10 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
 				ports[i] |= tport->tp_port_mask[i];
 			break;
 		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+			pre_tun = (struct nfp_fl_pre_tunnel *)a;
+			ipv6_tun = be16_to_cpu(pre_tun->flags) &
+					NFP_FL_PRE_TUN_IPV6;
+			break;
 		case NFP_FL_ACTION_OPCODE_PRE_LAG:
 		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
 			break;
@@ -765,15 +825,15 @@ nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
 static int
 nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
 {
-	struct nfp_fl_set_ipv4_tun *tun;
+	struct nfp_fl_set_tun *tun;
 	struct nfp_fl_act_head *a;
 	unsigned int act_off = 0;

 	while (act_off < len) {
 		a = (struct nfp_fl_act_head *)&acts[act_off];

-		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
-			tun = (struct nfp_fl_set_ipv4_tun *)a;
+		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
+			tun = (struct nfp_fl_set_tun *)a;
 			tun->outer_vlan_tpid = vlan->vlan_tpid;
 			tun->outer_vlan_tci = vlan->vlan_tci;
@@ -1058,15 +1118,22 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
 		return -EOPNOTSUPP;
 	}

-	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
+	    key_layer & NFP_FLOWER_LAYER_IPV6) {
+		/* Flags and proto fields have same offset in IPv4 and IPv6. */
 		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
 		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+		int size;
 		int i;

+		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
+		       sizeof(struct nfp_flower_ipv4) :
+		       sizeof(struct nfp_flower_ipv6);
+
 		mask += sizeof(struct nfp_flower_mac_mpls);

 		/* Ensure proto and flags are the only IP layer fields. */
-		for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+		for (i = 0; i < size; i++)
 			if (mask[i] && i != ip_flags && i != ip_proto) {
 				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
 				return -EOPNOTSUPP;
@@ -1195,6 +1262,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 err_release_metadata:
 	nfp_modify_flow_metadata(app, flow_pay);
 err_destroy_flow:
+	if (flow_pay->nfp_tun_ipv6)
+		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
 	kfree(flow_pay->action_data);
 	kfree(flow_pay->mask_data);
 	kfree(flow_pay->unmasked_data);
@@ -1311,6 +1380,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 	if (nfp_flow->nfp_tun_ipv4_addr)
 		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

+	if (nfp_flow->nfp_tun_ipv6)
+		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
+
 	if (!nfp_flow->in_hw) {
 		err = 0;
 		goto err_free_merge_flow;
......
@@ -54,6 +54,25 @@ struct nfp_tun_active_tuns {
 	} tun_info[];
 };

+/**
+ * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
+ * @seq:		sequence number of the message
+ * @count:		number of tunnels reported in message
+ * @flags:		options part of the request
+ * @tun_info.ipv6:	dest IPv6 address of active route
+ * @tun_info.egress_port:	port the encapsulated packet egressed
+ * @tun_info:		tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns_v6 {
+	__be32 seq;
+	__be32 count;
+	__be32 flags;
+	struct route_ip_info_v6 {
+		struct in6_addr ipv6;
+		__be32 egress_port;
+	} tun_info[];
+};
+
 /**
  * struct nfp_tun_neigh - neighbour/route entry on the NFP
  * @dst_ipv4:	destination IPv4 address
@@ -70,6 +89,22 @@ struct nfp_tun_neigh {
 	__be32 port_id;
 };

+/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
+ * @dst_ipv6:	destination IPv6 address
+ * @src_ipv6:	source IPv6 address
+ * @dst_addr:	destination MAC address
+ * @src_addr:	source MAC address
+ * @port_id:	NFP port to output packet on - associated with source IPv6
+ */
+struct nfp_tun_neigh_v6 {
+	struct in6_addr dst_ipv6;
+	struct in6_addr src_ipv6;
+	u8 dst_addr[ETH_ALEN];
+	u8 src_addr[ETH_ALEN];
+	__be32 port_id;
+};
+
 /**
  * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
  * @ingress_port:	ingress port of packet that signalled request
@@ -83,13 +118,23 @@ struct nfp_tun_req_route_ipv4 {
 };

 /**
- * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
- * @ipv4_addr:	destination of route
+ * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
+ * @ingress_port:	ingress port of packet that signalled request
+ * @ipv6_addr:	destination ipv6 address for route
+ */
+struct nfp_tun_req_route_ipv6 {
+	__be32 ingress_port;
+	struct in6_addr ipv6_addr;
+};
+
+/**
+ * struct nfp_offloaded_route - routes that are offloaded to the NFP
  * @list:	list pointer
+ * @ip_add:	destination of route - can be IPv4 or IPv6
  */
-struct nfp_ipv4_route_entry {
-	__be32 ipv4_addr;
+struct nfp_offloaded_route {
 	struct list_head list;
+	u8 ip_add[];
 };

 #define NFP_FL_IPV4_ADDRS_MAX		32
@@ -116,6 +161,18 @@ struct nfp_ipv4_addr_entry {
 	struct list_head list;
 };

+#define NFP_FL_IPV6_ADDRS_MAX		4
+
+/**
+ * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
+ * @count:	number of IPs populated in the array
+ * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
+ */
+struct nfp_tun_ipv6_addr {
+	__be32 count;
+	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
+};
+
 #define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

 /**
@@ -206,6 +263,49 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
 	rcu_read_unlock();
 }

+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	struct nfp_tun_active_tuns_v6 *payload;
+	struct net_device *netdev;
+	int count, i, pay_len;
+	struct neighbour *n;
+	void *ipv6_add;
+	u32 port;
+
+	payload = nfp_flower_cmsg_get_data(skb);
+	count = be32_to_cpu(payload->count);
+	if (count > NFP_FL_IPV6_ADDRS_MAX) {
+		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
+		return;
+	}
+
+	pay_len = nfp_flower_cmsg_get_data_len(skb);
+	if (pay_len != struct_size(payload, tun_info, count)) {
+		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+		return;
+	}
+
+	rcu_read_lock();
+	for (i = 0; i < count; i++) {
+		ipv6_add = &payload->tun_info[i].ipv6;
+		port = be32_to_cpu(payload->tun_info[i].egress_port);
+		netdev = nfp_app_dev_get(app, port, NULL);
+		if (!netdev)
+			continue;
+
+		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
+		if (!n)
+			continue;
+
+		/* Update the used timestamp of neighbour */
+		neigh_event_send(n, NULL);
+		neigh_release(n);
+	}
+	rcu_read_unlock();
+#endif
+}
+
 static int
 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
 			 gfp_t flag)
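Note on nfp_tunnel_keep_alive_v6() above: it accepts nothing from the firmware on trust. It bounds count before using it, then requires the payload length to equal struct_size(payload, tun_info, count) exactly before touching the flexible array. A self-contained sketch of those two checks, assuming a simplified host-endian layout (the real message uses big-endian __be32 fields, and struct_size() additionally guards against multiplication overflow):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ADDRS_MAX 4

struct tun_entry {
	uint8_t ipv6[16];
	uint32_t egress_port;
};

struct keep_alive_msg {
	uint32_t seq;
	uint32_t count;
	uint32_t flags;
	struct tun_entry tun_info[];	/* flexible array member */
};

/* Host-endian stand-in for struct_size(payload, tun_info, count). */
static size_t msg_size(uint32_t count)
{
	return sizeof(struct keep_alive_msg) +
	       (size_t)count * sizeof(struct tun_entry);
}

static bool keep_alive_valid(const void *buf, size_t buf_len)
{
	struct keep_alive_msg hdr;

	if (buf_len < sizeof(hdr))
		return false;
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.count > ADDRS_MAX)		/* bound the advertised count */
		return false;
	return buf_len == msg_size(hdr.count);	/* exact length match */
}

int main(void)
{
	unsigned char buf[sizeof(struct keep_alive_msg) +
			  2 * sizeof(struct tun_entry)] = { 0 };
	struct keep_alive_msg hdr = { .count = 2 };

	memcpy(buf, &hdr, sizeof(hdr));
	return keep_alive_valid(buf, sizeof(buf)) ? 0 : 1;
}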
@@ -224,71 +324,126 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
 	return 0;
 }

-static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+static bool
+__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
+		    void *add, int add_len)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *entry;
-	struct list_head *ptr, *storage;
+	struct nfp_offloaded_route *entry;

-	spin_lock_bh(&priv->tun.neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
-		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
-		if (entry->ipv4_addr == ipv4_addr) {
-			spin_unlock_bh(&priv->tun.neigh_off_lock);
+	spin_lock_bh(list_lock);
+	list_for_each_entry(entry, route_list, list)
+		if (!memcmp(entry->ip_add, add, add_len)) {
+			spin_unlock_bh(list_lock);
 			return true;
 		}
-	}
-	spin_unlock_bh(&priv->tun.neigh_off_lock);
+	spin_unlock_bh(list_lock);
 	return false;
 }

-static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+static int
+__nfp_tun_add_route_to_cache(struct list_head *route_list,
+			     spinlock_t *list_lock, void *add, int add_len)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *entry;
-	struct list_head *ptr, *storage;
+	struct nfp_offloaded_route *entry;

-	spin_lock_bh(&priv->tun.neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
-		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
-		if (entry->ipv4_addr == ipv4_addr) {
-			spin_unlock_bh(&priv->tun.neigh_off_lock);
-			return;
+	spin_lock_bh(list_lock);
+	list_for_each_entry(entry, route_list, list)
+		if (!memcmp(entry->ip_add, add, add_len)) {
+			spin_unlock_bh(list_lock);
+			return 0;
 		}
-	}

-	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
 	if (!entry) {
-		spin_unlock_bh(&priv->tun.neigh_off_lock);
-		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
-		return;
+		spin_unlock_bh(list_lock);
+		return -ENOMEM;
 	}

-	entry->ipv4_addr = ipv4_addr;
-	list_add_tail(&entry->list, &priv->tun.neigh_off_list);
-	spin_unlock_bh(&priv->tun.neigh_off_lock);
+	memcpy(entry->ip_add, add, add_len);
+	list_add_tail(&entry->list, route_list);
+	spin_unlock_bh(list_lock);
+
+	return 0;
 }

-static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+static void
+__nfp_tun_del_route_from_cache(struct list_head *route_list,
+			       spinlock_t *list_lock, void *add, int add_len)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *entry;
-	struct list_head *ptr, *storage;
+	struct nfp_offloaded_route *entry;

-	spin_lock_bh(&priv->tun.neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
-		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
-		if (entry->ipv4_addr == ipv4_addr) {
+	spin_lock_bh(list_lock);
+	list_for_each_entry(entry, route_list, list)
+		if (!memcmp(entry->ip_add, add, add_len)) {
 			list_del(&entry->list);
 			kfree(entry);
 			break;
 		}
-	}
-	spin_unlock_bh(&priv->tun.neigh_off_lock);
+	spin_unlock_bh(list_lock);
+}
+
+static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
+				   &priv->tun.neigh_off_lock_v4, ipv4_addr,
+				   sizeof(*ipv4_addr));
+}
+
+static bool
+nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
+				   &priv->tun.neigh_off_lock_v6, ipv6_addr,
+				   sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
+				     &priv->tun.neigh_off_lock_v4, ipv4_addr,
+				     sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
+				     &priv->tun.neigh_off_lock_v6, ipv6_addr,
+				     sizeof(*ipv6_addr));
 }

 static void
-nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
-		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
+				       &priv->tun.neigh_off_lock_v4, ipv4_addr,
+				       sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
+				       &priv->tun.neigh_off_lock_v6, ipv6_addr,
+				       sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
+		       struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
 {
 	struct nfp_tun_neigh payload;
 	u32 port_id;
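Note on the refactor above: the three per-family list walkers collapse into generic helpers keyed only by an address pointer and its length, so one memcmp()-based implementation serves 4-byte IPv4 and 16-byte IPv6 keys, with thin _v4/_v6 wrappers supplying the per-family list, lock, and key size. A stripped-down user-space sketch of the pattern (single singly linked list storing its own key length, no locking; names are illustrative):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct route {
	struct route *next;
	size_t len;
	unsigned char ip[];	/* IPv4 (4B) or IPv6 (16B) key */
};

static bool has_route(struct route *head, const void *ip, size_t len)
{
	for (struct route *r = head; r; r = r->next)
		if (r->len == len && !memcmp(r->ip, ip, len))
			return true;
	return false;
}

static int add_route(struct route **head, const void *ip, size_t len)
{
	if (has_route(*head, ip, len))
		return 0;	/* already cached */

	struct route *r = malloc(sizeof(*r) + len);	/* flexible array */
	if (!r)
		return -1;
	r->len = len;
	memcpy(r->ip, ip, len);
	r->next = *head;
	*head = r;
	return 0;
}

static void del_route(struct route **head, const void *ip, size_t len)
{
	for (struct route **p = head; *p; p = &(*p)->next)
		if ((*p)->len == len && !memcmp((*p)->ip, ip, len)) {
			struct route *dead = *p;

			*p = dead->next;
			free(dead);
			return;
		}
}

int main(void)
{
	struct route *head = NULL;
	unsigned char v4[4] = { 10, 0, 0, 1 };
	unsigned char v6[16] = { 0x20, 0x01 };

	add_route(&head, v4, sizeof(v4));
	add_route(&head, v6, sizeof(v6));
	del_route(&head, v4, sizeof(v4));
	return has_route(head, v6, sizeof(v6)) ? 0 : 1;
}

One design difference worth noting: the driver keeps separate v4 and v6 lists with their own locks, so entries never need to record their own length; the sketch stores len only to let a single list hold both key sizes.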
@@ -302,7 +457,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 	/* If entry has expired send dst IP with all other fields 0. */
 	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
-		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+		nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
 		/* Trigger ARP to verify invalid neighbour state. */
 		neigh_event_send(neigh, NULL);
 		goto send_msg;
@@ -314,7 +469,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
 	payload.port_id = cpu_to_be32(port_id);
 	/* Add destination of new route to NFP cache. */
-	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+	nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
 send_msg:
 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
@@ -322,16 +477,54 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 				 (unsigned char *)&payload, flag);
 }

+static void
+nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
+		       struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
+{
+	struct nfp_tun_neigh_v6 payload;
+	u32 port_id;
+
+	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+	if (!port_id)
+		return;
+
+	memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
+	payload.dst_ipv6 = flow->daddr;
+
+	/* If entry has expired send dst IP with all other fields 0. */
+	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
+		nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
+		/* Trigger probe to verify invalid neighbour state. */
+		neigh_event_send(neigh, NULL);
+		goto send_msg;
+	}
+
+	/* Have a valid neighbour so populate rest of entry. */
+	payload.src_ipv6 = flow->saddr;
+	ether_addr_copy(payload.src_addr, netdev->dev_addr);
+	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+	payload.port_id = cpu_to_be32(port_id);
+	/* Add destination of new route to NFP cache. */
+	nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
+
+send_msg:
+	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+				 sizeof(struct nfp_tun_neigh_v6),
+				 (unsigned char *)&payload, flag);
+}
+
 static int
 nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 			    void *ptr)
 {
 	struct nfp_flower_priv *app_priv;
 	struct netevent_redirect *redir;
-	struct flowi4 flow = {};
+	struct flowi4 flow4 = {};
+	struct flowi6 flow6 = {};
 	struct neighbour *n;
 	struct nfp_app *app;
 	struct rtable *rt;
+	bool ipv6 = false;
 	int err;

 	switch (event) {
@@ -346,7 +539,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 		return NOTIFY_DONE;
 	}

-	flow.daddr = *(__be32 *)n->primary_key;
+	if (n->tbl->family == AF_INET6)
+		ipv6 = true;
+
+	if (ipv6)
+		flow6.daddr = *(struct in6_addr *)n->primary_key;
+	else
+		flow4.daddr = *(__be32 *)n->primary_key;

 	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
 	app = app_priv->app;
@@ -356,28 +555,46 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 		return NOTIFY_DONE;

 	/* Only concerned with changes to routes already added to NFP. */
-	if (!nfp_tun_has_route(app, flow.daddr))
+	if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
+	    (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
 		return NOTIFY_DONE;

 #if IS_ENABLED(CONFIG_INET)
-	/* Do a route lookup to populate flow data. */
-	rt = ip_route_output_key(dev_net(n->dev), &flow);
-	err = PTR_ERR_OR_ZERO(rt);
-	if (err)
-		return NOTIFY_DONE;
+	if (ipv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+		struct dst_entry *dst;
+
+		dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
+						      &flow6, NULL);
+		if (IS_ERR(dst))
+			return NOTIFY_DONE;
+
+		dst_release(dst);
+		flow6.flowi6_proto = IPPROTO_UDP;
+		nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+#else
+		return NOTIFY_DONE;
+#endif /* CONFIG_IPV6 */
+	} else {
+		/* Do a route lookup to populate flow data. */
+		rt = ip_route_output_key(dev_net(n->dev), &flow4);
+		err = PTR_ERR_OR_ZERO(rt);
+		if (err)
+			return NOTIFY_DONE;

-	ip_rt_put(rt);
+		ip_rt_put(rt);
+		flow4.flowi4_proto = IPPROTO_UDP;
+		nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+	}
 #else
 	return NOTIFY_DONE;
-#endif
+#endif /* CONFIG_INET */

-	flow.flowi4_proto = IPPROTO_UDP;
-	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
-
 	return NOTIFY_OK;
 }

-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_tun_req_route_ipv4 *payload;
 	struct net_device *netdev;
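Note on the notifier above: it decides between the flowi4 and flowi6 paths from n->tbl->family before dereferencing the neighbour's primary key, which is only as wide as the family implies. A minimal sketch of that dispatch in plain user-space C (the struct is a hypothetical stand-in for struct neighbour):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

struct neigh_key {
	int family;			/* AF_INET or AF_INET6 */
	unsigned char primary_key[16];	/* wide enough for either family */
};

static void handle_event(const struct neigh_key *n)
{
	if (n->family == AF_INET6) {
		struct in6_addr daddr;

		memcpy(&daddr, n->primary_key, sizeof(daddr)); /* 16 bytes */
		printf("IPv6 route lookup path\n");
	} else {
		struct in_addr daddr;

		memcpy(&daddr, n->primary_key, sizeof(daddr)); /* 4 bytes */
		printf("IPv4 route lookup path\n");
	}
}

int main(void)
{
	struct neigh_key k = { .family = AF_INET6 };

	handle_event(&k);
	return 0;
}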
@@ -411,7 +628,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
 	ip_rt_put(rt);
 	if (!n)
 		goto fail_rcu_unlock;
-	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+	nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
 	neigh_release(n);
 	rcu_read_unlock();
 	return;
@@ -421,6 +638,48 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
 	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
 }

+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_tun_req_route_ipv6 *payload;
+	struct net_device *netdev;
+	struct flowi6 flow = {};
+	struct dst_entry *dst;
+	struct neighbour *n;
+
+	payload = nfp_flower_cmsg_get_data(skb);
+
+	rcu_read_lock();
+	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+	if (!netdev)
+		goto fail_rcu_unlock;
+
+	flow.daddr = payload->ipv6_addr;
+	flow.flowi6_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
+					      NULL);
+	if (IS_ERR(dst))
+		goto fail_rcu_unlock;
+#else
+	goto fail_rcu_unlock;
+#endif
+
+	n = dst_neigh_lookup(dst, &flow.daddr);
+	dst_release(dst);
+	if (!n)
+		goto fail_rcu_unlock;
+
+	nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+	neigh_release(n);
+	rcu_read_unlock();
+	return;
+
+fail_rcu_unlock:
+	rcu_read_unlock();
+	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
+}
+
 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -502,6 +761,78 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
 	nfp_tun_write_ipv4_list(app);
 }

+static void nfp_tun_write_ipv6_list(struct nfp_app *app)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_ipv6_addr_entry *entry;
+	struct nfp_tun_ipv6_addr payload;
+	int count = 0;
+
+	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
+	mutex_lock(&priv->tun.ipv6_off_lock);
+	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
+		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
+			break;
+		}
+		payload.ipv6_addr[count++] = entry->ipv6_addr;
+	}
+	mutex_unlock(&priv->tun.ipv6_off_lock);
+	payload.count = cpu_to_be32(count);
+
+	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
+				 sizeof(struct nfp_tun_ipv6_addr),
+				 &payload, GFP_KERNEL);
+}
+
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_ipv6_addr_entry *entry;
+
+	mutex_lock(&priv->tun.ipv6_off_lock);
+	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
+		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
+			entry->ref_count++;
+			mutex_unlock(&priv->tun.ipv6_off_lock);
+			return entry;
+		}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		mutex_unlock(&priv->tun.ipv6_off_lock);
+		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+		return NULL;
+	}
+	entry->ipv6_addr = *ipv6;
+	entry->ref_count = 1;
+	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
+	mutex_unlock(&priv->tun.ipv6_off_lock);
+
+	nfp_tun_write_ipv6_list(app);
+
+	return entry;
+}
+
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	bool freed = false;
+
+	mutex_lock(&priv->tun.ipv6_off_lock);
+	if (!--entry->ref_count) {
+		list_del(&entry->list);
+		kfree(entry);
+		freed = true;
+	}
+	mutex_unlock(&priv->tun.ipv6_off_lock);
+
+	if (freed)
+		nfp_tun_write_ipv6_list(app);
+}
+
 static int
 __nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
 {
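Note on the pair above: nfp_tunnel_add_ipv6_off()/nfp_tunnel_put_ipv6_off() reference-count each offloaded endpoint address and rewrite the firmware list only when an entry first appears or is finally released. The same get/put shape in a self-contained sketch (locking and the firmware write are reduced to comments; names are illustrative):

#include <stdlib.h>
#include <string.h>

struct addr_entry {
	struct addr_entry *next;
	unsigned char addr[16];
	int ref_count;
};

static struct addr_entry *off_list;

static struct addr_entry *addr_get(const unsigned char addr[16])
{
	for (struct addr_entry *e = off_list; e; e = e->next)
		if (!memcmp(e->addr, addr, 16)) {
			e->ref_count++;	/* already offloaded, share it */
			return e;
		}

	struct addr_entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	memcpy(e->addr, addr, 16);
	e->ref_count = 1;
	e->next = off_list;
	off_list = e;
	/* first user: push the updated list to the device here */
	return e;
}

static void addr_put(struct addr_entry *entry)
{
	if (--entry->ref_count)
		return;

	for (struct addr_entry **p = &off_list; *p; p = &(*p)->next)
		if (*p == entry) {
			*p = entry->next;
			break;
		}
	free(entry);
	/* last user gone: push the updated list to the device here */
}

int main(void)
{
	unsigned char a[16] = { 0x20, 0x01 };
	struct addr_entry *e1 = addr_get(a);
	struct addr_entry *e2 = addr_get(a);	/* same entry, ref_count == 2 */

	if (!e1 || e1 != e2)
		return 1;
	addr_put(e2);
	addr_put(e1);	/* last put: entry unlinked and freed */
	return 0;
}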
@@ -1013,13 +1344,17 @@ int nfp_tunnel_config_start(struct nfp_app *app)
 	ida_init(&priv->tun.mac_off_ids);

-	/* Initialise priv data for IPv4 offloading. */
+	/* Initialise priv data for IPv4/v6 offloading. */
 	mutex_init(&priv->tun.ipv4_off_lock);
 	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
+	mutex_init(&priv->tun.ipv6_off_lock);
+	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

 	/* Initialise priv data for neighbour offloading. */
-	spin_lock_init(&priv->tun.neigh_off_lock);
-	INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+	spin_lock_init(&priv->tun.neigh_off_lock_v4);
+	INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
+	spin_lock_init(&priv->tun.neigh_off_lock_v6);
+	INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);

 	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
 	err = register_netevent_notifier(&priv->tun.neigh_nb);
@@ -1034,9 +1369,11 @@ int nfp_tunnel_config_start(struct nfp_app *app)
 void nfp_tunnel_config_stop(struct nfp_app *app)
 {
+	struct nfp_offloaded_route *route_entry, *temp;
 	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *route_entry;
 	struct nfp_ipv4_addr_entry *ip_entry;
+	struct nfp_tun_neigh_v6 ipv6_route;
+	struct nfp_tun_neigh ipv4_route;
 	struct list_head *ptr, *storage;

 	unregister_netevent_notifier(&priv->tun.neigh_nb);
@@ -1050,12 +1387,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
 		kfree(ip_entry);
 	}

-	/* Free any memory that may be occupied by the route list. */
-	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
-		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
-					 list);
+	mutex_destroy(&priv->tun.ipv6_off_lock);
+
+	/* Free memory in the route list and remove entries from fw cache. */
+	list_for_each_entry_safe(route_entry, temp,
+				 &priv->tun.neigh_off_list_v4, list) {
+		memset(&ipv4_route, 0, sizeof(ipv4_route));
+		memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
+		       sizeof(ipv4_route.dst_ipv4));
 		list_del(&route_entry->list);
 		kfree(route_entry);
+
+		nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+					 sizeof(struct nfp_tun_neigh),
+					 (unsigned char *)&ipv4_route,
+					 GFP_KERNEL);
+	}
+
+	list_for_each_entry_safe(route_entry, temp,
+				 &priv->tun.neigh_off_list_v6, list) {
+		memset(&ipv6_route, 0, sizeof(ipv6_route));
+		memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
+		       sizeof(ipv6_route.dst_ipv6));
+		list_del(&route_entry->list);
+		kfree(route_entry);
+
+		nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+					 sizeof(struct nfp_tun_neigh_v6),
+					 (unsigned char *)&ipv6_route,
+					 GFP_KERNEL);
 	}

 	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
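Note on the teardown above: it must use list_for_each_entry_safe() because each entry is freed inside the loop body; the _safe variant caches the next pointer before the body runs, and a zeroed neighbour entry is sent per route so the firmware drops its cached copy. The same delete-while-iterating pattern on a plain singly linked list:

#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node **head)
{
	struct node *n = *head;

	while (n) {
		struct node *temp = n->next;	/* read next before freeing */

		/* ...tell the device to forget this entry here... */
		free(n);
		n = temp;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	free_all(&head);
	return 0;
}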
...