Commit 704bc986 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-10-28

This series contains updates to ice driver only.

Michal adds support for eswitch drop and redirect filters from and to
tunnel devices, where "from" means uplink to VF and "to" means VF to
uplink. This is accomplished by adding support for indirect TC tunnel
notifications and by adding the appropriate training packets and match
fields for UDP tunnel headers. He also adds returning virtchnl
responses for blocked operations, since a response is still needed.

Marcin sets the netdev min and max MTU values on port representors to
allow MTU changes beyond the default values.

Brett adds detection and reporting of PHY firmware load issues for
devices that support reporting them.

Nathan Chancellor fixes a clang warning for implicit fallthrough.

Wang Hai fixes the return value for a failed allocation.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0b3f8639 c8e51a01
@@ -34,6 +34,7 @@
 #include <linux/if_bridge.h>
 #include <linux/ctype.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/auxiliary_bus.h>
 #include <linux/avf/virtchnl.h>
 #include <linux/cpu_rmap.h>
@@ -479,6 +480,7 @@ enum ice_pf_flags {
 	ICE_FLAG_NO_MEDIA,
 	ICE_FLAG_FW_LLDP_AGENT,
 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
+	ICE_FLAG_PHY_FW_LOAD_FAILED,
 	ICE_FLAG_ETHTOOL_CTXT,	/* set when ethtool holds RTNL lock */
 	ICE_FLAG_LEGACY_RX,
 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
@@ -610,6 +612,13 @@ struct ice_pf {
 struct ice_netdev_priv {
 	struct ice_vsi *vsi;
 	struct ice_repr *repr;
+	/* indirect block callbacks on registered higher level devices
+	 * (e.g. tunnel devices)
+	 *
+	 * tc_indr_block_cb_priv_list is used to look up indirect callback
+	 * private data
+	 */
+	struct list_head tc_indr_block_priv_list;
 };

 /**
...
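
For context on the new tc_indr_block_priv_list member above, the following is a
minimal sketch of how a driver typically hooks into indirect TC block
notifications so that filters added on tunnel netdevs can be offloaded. It uses
the core flow_indr_dev_register() API and ice_is_tunnel_supported() (declared
later in this series); the ice_example_* names are illustrative, not the exact
helpers added by these patches.

#include <net/flow_offload.h>

/* Sketch only: the callback body is reduced to the decision logic; real code
 * would allocate a struct ice_indr_block_priv, add it to
 * np->tc_indr_block_priv_list and register a flow_block_cb here.
 */
static int
ice_example_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
			     void *cb_priv, enum tc_setup_type type,
			     void *type_data, void *data,
			     void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_netdev_priv *np = cb_priv;

	/* Only care about TC block binds from supported tunnel devices */
	if (type != TC_SETUP_BLOCK || !ice_is_tunnel_supported(netdev))
		return -EOPNOTSUPP;

	return 0;
}

static int ice_example_register_indr_blocks(struct ice_netdev_priv *np)
{
	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);

	/* Ask the core to forward TC block binds on other netdevs
	 * (e.g. tunnel devices) to our callback.
	 */
	return flow_indr_dev_register(ice_example_indr_setup_tc_cb, np);
}
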
@@ -1185,6 +1185,7 @@ struct ice_aqc_get_link_status_data {
 #define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA	BIT(7)
 	u8 link_cfg_err;
 #define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED	BIT(5)
+#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE	BIT(6)
 #define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT	BIT(7)
 	u8 link_info;
 #define ICE_AQ_LINK_UP		BIT(0)	/* Link Status */
@@ -1268,6 +1269,7 @@ struct ice_aqc_set_event_mask {
 #define ICE_AQ_LINK_EVENT_AN_COMPLETED		BIT(7)
 #define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL	BIT(8)
 #define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED	BIT(9)
+#define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL	BIT(12)
 	u8 reserved1[6];
 };
...
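
As a rough illustration of how these new bits are consumed, the sketch below
checks ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE from the get-link-status response
and latches the new PF flag so the error is reported only once. The helper name
ice_example_check_phy_fw_load() and the log text are illustrative assumptions,
not necessarily the exact code added by this series.

/* Sketch: report an external PHY FW load failure once, based on the
 * link_cfg_err byte returned by the get-link-status admin queue command.
 */
static void ice_example_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	/* Already reported; don't spam the log on every link event */
	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	dev_err(ice_pf_to_dev(pf),
		"Device failed to load the firmware for its external PHY; update the NVM and try again\n");
	set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
}
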
@@ -1565,6 +1565,30 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
 	return bld;
 }

+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+	u16 i;
+
+	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+		    fv->ew[i].off == ICE_VNI_OFFSET)
+			return ICE_PROF_TUN_UDP;
+
+		/* GRE tunnel will have GRE protocol */
+		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+			return ICE_PROF_TUN_GRE;
+	}
+
+	return ICE_PROF_NON_TUN;
+}
+
 /**
  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
  * @hw: pointer to hardware structure
@@ -1588,6 +1612,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
 	ice_seg = hw->seg;
 	do {
+		enum ice_prof_type prof_type;
 		u32 offset;

 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
@@ -1595,7 +1620,10 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 		ice_seg = NULL;
 		if (fv) {
-			if (req_profs & ICE_PROF_NON_TUN)
+			/* Determine field vector type */
+			prof_type = ice_get_sw_prof_type(hw, fv);
+
+			if (req_profs & prof_type)
 				set_bit((u16)offset, bm);
 		}
 	} while (fv);
...
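
The profile-type values act as a request mask here: a caller asks only for the
field vectors it cares about, and ice_get_sw_prof_type() classifies each one.
A minimal caller sketch follows; it assumes the bitmap output parameter shown
in the hunk above and treats any return value of ice_get_sw_fv_bitmap() as
ignorable, so it is an illustration of the masking, not verbatim driver code.

/* Sketch: request only tunnel (UDP + GRE) switch profiles. */
static void ice_example_request_tunnel_profiles(struct ice_hw *hw)
{
	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);

	/* ICE_PROF_TUN_ALL == ICE_PROF_TUN_UDP | ICE_PROF_TUN_GRE (0x2 | 0x4) */
	ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_ALL, fv_bitmap);

	/* fv_bitmap now has a bit set for each matching field vector offset */
}
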
@@ -373,6 +373,7 @@ struct ice_pkg_enum {
 enum ice_tunnel_type {
 	TNL_VXLAN = 0,
 	TNL_GENEVE,
+	TNL_GRETAP,
 	__TNL_TYPE_CNT,
 	TNL_LAST = 0xFF,
 	TNL_ALL = 0xFF,
@@ -614,6 +615,9 @@ struct ice_chs_chg {
 enum ice_prof_type {
 	ICE_PROF_NON_TUN = 0x1,
+	ICE_PROF_TUN_UDP = 0x2,
+	ICE_PROF_TUN_GRE = 0x4,
+	ICE_PROF_TUN_ALL = 0x6,
 	ICE_PROF_ALL = 0xFF,
 };

 #endif /* _ICE_FLEX_TYPE_H_ */
@@ -1983,6 +1983,7 @@ static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
 	case ICE_TX_CONTAINER:
 		if (rc->tx_ring)
 			return rc->tx_ring->q_vector;
+		break;
 	default:
 		break;
 	}
...
(collapsed file diff not shown)
@@ -37,10 +37,22 @@ enum ice_protocol_type {
 	ICE_TCP_IL,
 	ICE_UDP_OF,
 	ICE_UDP_ILOS,
+	ICE_VXLAN,
+	ICE_GENEVE,
+	ICE_NVGRE,
+	ICE_VXLAN_GPE,
 	ICE_SCTP_IL,
 	ICE_PROTOCOL_LAST
 };

+enum ice_sw_tunnel_type {
+	ICE_NON_TUN = 0,
+	ICE_SW_TUN_VXLAN,
+	ICE_SW_TUN_GENEVE,
+	ICE_SW_TUN_NVGRE,
+	ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
+};
+
 /* Decoders for ice_prot_id:
  * - F: First
  * - I: Inner
@@ -74,6 +86,8 @@ enum ice_prot_id {
 	ICE_PROT_INVALID = 255  /* when offset == ICE_FV_OFFSET_INVAL */
 };

+#define ICE_VNI_OFFSET		12 /* offset of VNI from ICE_PROT_UDP_OF */
+
 #define ICE_MAC_OFOS_HW		1
 #define ICE_MAC_IL_HW		4
 #define ICE_ETYPE_OL_HW		9
@@ -85,8 +99,15 @@ enum ice_prot_id {
 #define ICE_IPV6_IL_HW		41
 #define ICE_TCP_IL_HW		49
 #define ICE_UDP_ILOS_HW		53
+#define ICE_GRE_OF_HW		64

 #define ICE_UDP_OF_HW	52 /* UDP Tunnels */
+
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+
+#define ICE_MDID_SIZE 2
+#define ICE_TUN_FLAG_MDID 21
+#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
+#define ICE_TUN_FLAG_MASK 0xFF

 #define ICE_TUN_FLAG_FV_IND 2
@@ -152,6 +173,18 @@ struct ice_l4_hdr {
 	__be16 check;
 };

+struct ice_udp_tnl_hdr {
+	__be16 field;
+	__be16 proto_type;
+	__be32 vni;	/* only use lower 24-bits */
+};
+
+struct ice_nvgre_hdr {
+	__be16 flags;
+	__be16 protocol;
+	__be32 tni_flow;
+};
+
 union ice_prot_hdr {
 	struct ice_ether_hdr eth_hdr;
 	struct ice_ethtype_hdr ethertype;
@@ -160,6 +193,8 @@ union ice_prot_hdr {
 	struct ice_ipv6_hdr ipv6_hdr;
 	struct ice_l4_hdr l4_hdr;
 	struct ice_sctp_hdr sctp_hdr;
+	struct ice_udp_tnl_hdr tnl_hdr;
+	struct ice_nvgre_hdr nvgre_hdr;
 };

 /* This is mapping table entry that maps every word within a given protocol
...
@@ -267,6 +267,9 @@ static int ice_repr_add(struct ice_vf *vf)
 	if (err)
 		goto err_devlink;

+	repr->netdev->min_mtu = ETH_MIN_MTU;
+	repr->netdev->max_mtu = ICE_MAX_MTU;
+
 	err = ice_repr_reg_netdev(repr->netdev);
 	if (err)
 		goto err_netdev;
...
(collapsed file diff not shown)
@@ -171,6 +171,7 @@ struct ice_adv_rule_flags_info {
 };

 struct ice_adv_rule_info {
+	enum ice_sw_tunnel_type tun_type;
 	struct ice_sw_act_ctrl sw_act;
 	u32 priority;
 	u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
@@ -211,6 +212,8 @@ struct ice_sw_recipe {
 	/* Bit map specifying the IDs associated with this group of recipe */
 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);

+	enum ice_sw_tunnel_type tun_type;
+
 	/* List of type ice_fltr_mgmt_list_entry or adv_rule */
 	u8 adv_rule;
 	struct list_head filt_rules;
...
(collapsed file diff not shown)
@@ -23,6 +23,14 @@
 #define ICE_TC_FLWR_FIELD_ENC_DST_MAC		BIT(16)
 #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID		BIT(17)

+#define ICE_TC_FLOWER_MASK_32   0xFFFFFFFF
+
+struct ice_indr_block_priv {
+	struct net_device *netdev;
+	struct ice_netdev_priv *np;
+	struct list_head list;
+};
+
 struct ice_tc_flower_action {
 	u32 tc_class;
 	enum ice_sw_fwd_act_type fltr_act;
@@ -112,6 +120,7 @@ struct ice_tc_flower_fltr {
 	struct ice_vsi *src_vsi;
 	__be32 tenant_id;
 	u32 flags;
+	u8 tunnel_type;
 	struct ice_tc_flower_action action;

 	/* cache ptr which is used wherever needed to communicate netlink
@@ -148,5 +157,6 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
 int
 ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
 void ice_replay_tc_fltrs(struct ice_pf *pf);
+bool ice_is_tunnel_supported(struct net_device *dev);
 #endif /* _ICE_TC_LIB_H_ */
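
The new struct ice_indr_block_priv ties a tunnel netdev to the PF's netdev
private data so indirect callbacks can be matched back to their registration.
Below is a minimal sketch of how such entries are typically looked up, plus
what a tunnel-support check along the lines of ice_is_tunnel_supported() can
look like. The lookup helper name is illustrative, and the exact set of
netif_is_*() checks is an assumption, not the driver's verbatim code.

#include <linux/list.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/gre.h>

/* Sketch: find the indirect-block private entry previously registered for a
 * given tunnel netdev (helper name is illustrative).
 */
static struct ice_indr_block_priv *
ice_example_indr_block_priv_lookup(struct ice_netdev_priv *np,
				   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

/* Sketch: accept only tunnel device types the driver can offload */
static bool ice_example_is_tunnel_supported(struct net_device *dev)
{
	return netif_is_vxlan(dev) || netif_is_geneve(dev) ||
	       netif_is_gretap(dev);
}
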
@@ -4499,13 +4499,6 @@ void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
 	*ops = ice_vc_vf_dflt_ops;
 }

-static int
-ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf,
-			  u8 __always_unused *msg)
-{
-	return 0;
-}
-
 /**
  * ice_vc_repr_add_mac
  * @vf: pointer to VF
@@ -4581,20 +4574,62 @@ ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
 }

-static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf)
-{
-	return 0;
-}
+static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
+				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
+				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't enable VLAN stripping in switchdev mode for VF %d\n",
+		vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+				     NULL, 0);
+}
+
+static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't disable VLAN stripping in switchdev mode for VF %d\n",
+		vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+				     NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't config promiscuous mode in switchdev mode for VF %d\n",
+		vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+				     NULL, 0);
+}

 void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
 {
 	ops->add_mac_addr_msg = ice_vc_repr_add_mac;
 	ops->del_mac_addr_msg = ice_vc_repr_del_mac;
-	ops->add_vlan_msg = ice_vc_repr_no_action_msg;
-	ops->remove_vlan_msg = ice_vc_repr_no_action_msg;
-	ops->ena_vlan_stripping = ice_vc_repr_no_action;
-	ops->dis_vlan_stripping = ice_vc_repr_no_action;
-	ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg;
+	ops->add_vlan_msg = ice_vc_repr_add_vlan;
+	ops->remove_vlan_msg = ice_vc_repr_del_vlan;
+	ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
+	ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
+	ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
 }

 /**
...