Commit d49b9b07 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: allow matching on meta data

Michal Swiatkowski says:

This patchset is intended to improve the usability of the switchdev
slow path. Without matching on meta data values, the slow path works
based on the VF's MAC addresses. This causes a problem when the VF wants
to use more than one MAC address (e.g. when it is in trusted mode).

Parse all meta data in the same place where protocol type fields are
parsed. Add description for the currently implemented meta data. It is
important to note that, depending on the DDP, not all of the described meta
data may be available. Using unavailable meta data leads to an error being
returned by the function that looks up the correct words in the profiles
read from the DDP.

There is also one small improvement: removal of the rx field from the rule
info structure (patch 2), as it is redundant.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents efc3001f 0ef4479d
......@@ -10,16 +10,15 @@
#include "ice_tc_lib.h"
/**
* ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
* ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
* @pf: pointer to PF struct
* @vf: pointer to VF struct
* @mac: VF's MAC address
*
* This function adds advanced rule that forwards packets with
* VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
* VF's VSI index to the corresponding switchdev ctrl VSI queue.
*/
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
static int
ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
struct ice_adv_rule_info rule_info = { 0 };
......@@ -32,76 +31,41 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
if (!list)
return -ENOMEM;
list[0].type = ICE_MAC_OFOS;
ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
ice_rule_add_src_vsi_metadata(list);
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.flag = ICE_FLTR_TX;
rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
rule_info.rx = false;
rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
rule_info.src_vsi = vf->lan_vsi_idx;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
vf->repr->mac_rule);
&vf->repr->sp_rule);
if (err)
dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
vf->vf_id);
else
vf->repr->rule_added = true;
kfree(list);
return err;
}
/**
* ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
* @vf: pointer to VF struct
*
* This function replays VF's MAC rule after reset.
*/
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
int err;
if (!ice_is_switchdev_running(vf->pf))
return;
if (is_valid_ether_addr(vf->hw_lan_addr)) {
err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
vf->hw_lan_addr);
if (err) {
dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
vf->hw_lan_addr, vf->vf_id, err);
return;
}
vf->num_mac++;
ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
}
/**
* ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
* ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
* @vf: pointer to the VF struct
*
* Delete the advanced rule that was used to forward packets with the VF's MAC
* address (src MAC) to the corresponding switchdev ctrl VSI queue.
* Delete the advanced rule that was used to forward packets with the VF's VSI
* index to the corresponding switchdev ctrl VSI queue.
*/
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
{
if (!ice_is_switchdev_running(vf->pf))
return;
if (!vf->repr->rule_added)
if (!vf->repr)
return;
ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
vf->repr->rule_added = false;
ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
}
/**
......@@ -237,6 +201,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
ice_eswitch_del_vf_sp_rule(vf);
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
......@@ -264,25 +229,30 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!vf->repr->dst) {
ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr,
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr,
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
ice_eswitch_del_vf_sp_rule(vf);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
goto err;
}
if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr,
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
ice_eswitch_del_vf_sp_rule(vf);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
......
......@@ -20,11 +20,6 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
void ice_eswitch_update_repr(struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
const u8 *mac);
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
......@@ -34,15 +29,6 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
static inline void ice_eswitch_release(struct ice_pf *pf) { }
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }
static inline int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
const u8 *mac)
{
return -EOPNOTSUPP;
}
static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
......
......@@ -47,6 +47,7 @@ enum ice_protocol_type {
ICE_L2TPV3,
ICE_VLAN_EX,
ICE_VLAN_IN,
ICE_HW_METADATA,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
......@@ -115,17 +116,7 @@ enum ice_prot_id {
#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_VLAN_FLAG_MDID 20
#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
#define ICE_TUN_FLAG_FV_IND 2
......@@ -230,6 +221,191 @@ struct ice_nvgre_hdr {
__be32 tni_flow;
};
/* Metadata information
*
* Not all MDIDs can be used by switch block. It depends on package version.
*
* MDID 16 (Rx offset)
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | A | B | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* A = Source port where the transaction came from (3b).
*
* B = Destination TC of the packet. The TC is relative to a port (5b).
*
* MDID 17
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | PTYPE | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* PTYPE = Encodes the packet type (10b).
*
* MDID 18
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Packet length | R |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Packet length = Length of the packet in bytes
* (packet always carries CRC) (14b).
* R = Reserved (2b).
*
* MDID 19
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Source VSI | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Source VSI = Source VSI of packet loopbacked in switch (for egress) (10b).
*/
#define ICE_MDID_SOURCE_VSI_MASK GENMASK(9, 0)
/*
* MDID 20
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A|B|C|D|E|F|R|R|G|H|I|J|K|L|M|N|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* A = DSI - set for DSI RX pkts.
* B = ipsec_decrypted - invalid on NIC.
* C = marker - this is a marker packet.
* D = from_network - for TX sets to 0
* for RX:
* * 1 - packet is from external link
* * 0 - packet source is from internal
* E = source_interface_is_rx - reflect the physical interface from where the
* packet was received:
* * 1 - Rx
* * 0 - Tx
* F = from_mng - The bit signals that the packet's origin is the management.
* G = ucast - Outer L2 MAC address is unicast.
* H = mcast - Outer L2 MAC address is multicast.
* I = bcast - Outer L2 MAC address is broadcast.
* J = second_outer_mac_present - 2 outer MAC headers are present in the packet.
* K = STAG or BVLAN - Outer L2 header has STAG (ethernet type 0x88a8) or
* BVLAN (ethernet type 0x88a8).
* L = ITAG - Outer L2 header has ITAG (ethernet type 0x88e7)
* M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100)
* N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100)
*/
#define ICE_PKT_VLAN_STAG BIT(12)
#define ICE_PKT_VLAN_ITAG BIT(13)
#define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15))
#define ICE_PKT_VLAN_MASK (ICE_PKT_VLAN_STAG | ICE_PKT_VLAN_ITAG | \
ICE_PKT_VLAN_EVLAN)
/* MDID 21
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A|B|C|D|E|F|G|H|I|J|R|R|K|L|M|N|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* A = VLAN (0x8100) - Outer L2 header has VLAN (ethernet type 0x8100)
* B = NSHoE - Outer L2 header has NSH (ethernet type 0x894f)
* C = MPLS (0x8847) - There is at least 1 MPLS tag in the outer header
* (ethernet type 0x8847)
* D = MPLS (0x8848) - There is at least 1 MPLS tag in the outer header
* (ethernet type 0x8848)
* E = multi MPLS - There is more than a single MPLS tag in the outer header
* F = inner MPLS - There is inner MPLS tag in the packet
* G = tunneled MAC - Set if the packet includes a tunneled MAC
* H = tunneled VLAN - Same as VLAN, but for a tunneled header
* I = pkt_is_frag - Packet is fragmented (ipv4 or ipv6)
* J = ipv6_ext - The packet has routing or destination ipv6 extension in inner
* or outer ipv6 headers
* K = RoCE - UDP packet detected as RoCEv2
* L = UDP_XSUM_0 - Set to 1 if L4 checksum is 0 in a UDP packet
* M = ESP - This is a ESP packet
* N = NAT_ESP - This is a ESP packet encapsulated in UDP NAT
*/
#define ICE_PKT_TUNNEL_MAC BIT(6)
#define ICE_PKT_TUNNEL_VLAN BIT(7)
#define ICE_PKT_TUNNEL_MASK (ICE_PKT_TUNNEL_MAC | ICE_PKT_TUNNEL_VLAN)
/* MDID 22
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A|B|C|D|E|F| G |H|I|J| K |L|M|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* A = fin - fin flag in tcp header
* B = sync - sync flag in tcp header
* C = rst - rst flag in tcp header
* D = psh - psh flag in tcp header
* E = ack - ack flag in tcp header
* F = urg - urg flag in tcp header
* G = tunnel type (3b) - Flags used to decode tunnel type:
* * b000 - not a VXLAN/Geneve/GRE tunnel
* * b001 - VXLAN-GPE
* * b010 - VXLAN (non-GPE)
* * b011 - Geneve
* * b100 - GRE (no key, no xsum)
* * b101 - GREK (key, no xsum)
* * b110 - GREC (no key, xsum)
* * b111 - GREKC (key, xsum)
* H = UDP_GRE - Packet is UDP (VXLAN or VLAN_GPE or Geneve or MPLSoUDP or GRE)
* tunnel
* I = OAM - VXLAN/Geneve/tunneled NSH packet with the OAM bit set
* J = tunneled NSH - Packet has NSHoGRE or NSHoUDP
* K = switch (2b) - Direction on switch
* * b00 - normal
* * b01 - TX force only LAN
* * b10 - TX disable LAN
* * b11 - direct to VSI
* L = swpe - Represents SWPE bit in TX command
* M = sw_cmd - Switch command
*
* MDID 23
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A|B|C|D| R |E|F|R|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* A = MAC error - Produced by MAC according to L2 error conditions
* B = PPRS no offload - FIFO overflow in PPRS or any problematic condition in
* PPRS ANA
* C = abort - Set when malicious packet is detected
* D = partial analysis - ANA's analysing got cut in the middle
* (header > 504B etc.)
* E = FLM - Flow director hit indication
* F = FDLONG - Flow director long bucket indication
*
*/
#define ICE_MDID_SIZE 2
#define ICE_META_DATA_ID_HW 255
enum ice_hw_metadata_id {
ICE_SOURCE_PORT_MDID = 16,
ICE_PTYPE_MDID = 17,
ICE_PACKET_LENGTH_MDID = 18,
ICE_SOURCE_VSI_MDID = 19,
ICE_PKT_VLAN_MDID = 20,
ICE_PKT_TUNNEL_MDID = 21,
ICE_PKT_TCP_MDID = 22,
ICE_PKT_ERROR_MDID = 23,
};
enum ice_hw_metadata_offset {
ICE_SOURCE_PORT_MDID_OFFSET = ICE_MDID_SIZE * ICE_SOURCE_PORT_MDID,
ICE_PTYPE_MDID_OFFSET = ICE_MDID_SIZE * ICE_PTYPE_MDID,
ICE_PACKET_LENGTH_MDID_OFFSET = ICE_MDID_SIZE * ICE_PACKET_LENGTH_MDID,
ICE_SOURCE_VSI_MDID_OFFSET = ICE_MDID_SIZE * ICE_SOURCE_VSI_MDID,
ICE_PKT_VLAN_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_VLAN_MDID,
ICE_PKT_TUNNEL_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_TUNNEL_MDID,
ICE_PKT_TCP_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_TCP_MDID,
ICE_PKT_ERROR_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_ERROR_MDID,
};
enum ice_pkt_flags {
ICE_PKT_FLAGS_VLAN = 0,
ICE_PKT_FLAGS_TUNNEL = 1,
ICE_PKT_FLAGS_TCP = 2,
ICE_PKT_FLAGS_ERROR = 3,
};
struct ice_hw_metadata {
__be16 source_port;
__be16 ptype;
__be16 packet_length;
__be16 source_vsi;
__be16 flags[4];
};
union ice_prot_hdr {
struct ice_ether_hdr eth_hdr;
struct ice_ethtype_hdr ethertype;
......@@ -243,6 +419,7 @@ union ice_prot_hdr {
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
struct ice_hw_metadata metadata;
};
/* This is mapping table entry that maps every word within a given protocol
......
......@@ -298,14 +298,6 @@ static int ice_repr_add(struct ice_vf *vf)
if (!repr)
return -ENOMEM;
#ifdef CONFIG_ICE_SWITCHDEV
repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
if (!repr->mac_rule) {
err = -ENOMEM;
goto err_alloc_rule;
}
#endif
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
err = -ENOMEM;
......@@ -351,11 +343,6 @@ static int ice_repr_add(struct ice_vf *vf)
free_netdev(repr->netdev);
repr->netdev = NULL;
err_alloc:
#ifdef CONFIG_ICE_SWITCHDEV
kfree(repr->mac_rule);
repr->mac_rule = NULL;
err_alloc_rule:
#endif
kfree(repr);
vf->repr = NULL;
return err;
......@@ -376,10 +363,6 @@ static void ice_repr_rem(struct ice_vf *vf)
ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
kfree(vf->repr->mac_rule);
vf->repr->mac_rule = NULL;
#endif
kfree(vf->repr);
vf->repr = NULL;
......
......@@ -13,9 +13,8 @@ struct ice_repr {
struct net_device *netdev;
struct metadata_dst *dst;
#ifdef CONFIG_ICE_SWITCHDEV
/* info about slow path MAC rule */
struct ice_rule_query_data *mac_rule;
u8 rule_added;
/* info about slow path rule */
struct ice_rule_query_data sp_rule;
#endif
};
......
This diff is collapsed.
......@@ -10,7 +10,6 @@
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
......@@ -187,12 +186,13 @@ struct ice_adv_rule_flags_info {
};
struct ice_adv_rule_info {
/* Store metadata values in rule info */
enum ice_sw_tunnel_type tun_type;
struct ice_sw_act_ctrl sw_act;
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
u16 vlan_type;
u16 fltr_rule_id;
u32 priority;
u16 src_vsi;
struct ice_sw_act_ctrl sw_act;
struct ice_adv_rule_flags_info flags_info;
};
......@@ -342,6 +342,9 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup);
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
......
......@@ -54,6 +54,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
/* is VLAN TPID specified */
if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID)
lkups_cnt++;
/* is CVLAN specified? */
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
......@@ -80,6 +84,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
lkups_cnt++;
/* matching for tunneled packets in metadata */
if (fltr->tunnel_type != TNL_LAST)
lkups_cnt++;
return lkups_cnt;
}
......@@ -320,6 +328,10 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
/* always fill matching on tunneled packets in metadata */
ice_rule_add_tunnel_metadata(&list[i]);
i++;
return i;
}
......@@ -390,10 +402,6 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* copy VLAN info */
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
if (flags & ICE_TC_FLWR_FIELD_CVLAN)
list[i].type = ICE_VLAN_EX;
else
......@@ -418,6 +426,15 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
ice_rule_add_vlan_metadata(&list[i]);
i++;
}
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
list[i].type = ICE_VLAN_IN;
......@@ -698,12 +715,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else {
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.rx = false;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
}
......@@ -910,7 +925,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
tc_fltr->action.fwd.tc.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt);
......@@ -921,7 +935,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
......@@ -929,7 +942,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
case ICE_DROP_PACKET:
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
......@@ -1460,8 +1472,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
VLAN_PRIO_MASK);
}
if (match.mask->vlan_tpid)
if (match.mask->vlan_tpid) {
headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
......
......@@ -33,6 +33,7 @@
#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
......
......@@ -689,8 +689,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
*/
ice_vf_clear_all_promisc_modes(vf, vsi);
ice_eswitch_del_vf_mac_rule(vf);
ice_vf_fdir_exit(vf);
ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
......@@ -716,7 +714,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
}
ice_eswitch_update_repr(vsi);
ice_eswitch_replay_vf_mac_rule(vf);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
......
......@@ -3730,7 +3730,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
for (i = 0; i < al->num_elements; i++) {
u8 *mac_addr = al->list[i].addr;
int result;
if (!is_unicast_ether_addr(mac_addr) ||
ether_addr_equal(mac_addr, vf->hw_lan_addr))
......@@ -3742,13 +3741,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
goto handle_mac_exit;
}
result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
if (result) {
dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
mac_addr, vf->vf_id, result);
goto handle_mac_exit;
}
ice_vfhw_mac_add(vf, &al->list[i]);
vf->num_mac++;
break;
......
......@@ -219,7 +219,7 @@ static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
.rid = ICE_SW_LKUP_VLAN,
.fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX,
.ignore_valid = false,
.mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK,
.mask = ICE_PKT_VLAN_MASK,
.mask_valid = true,
.lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX,
},
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment