Commit 735c9ee9 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'wangxun-netdev-features-support'

Mengyuan Lou says:

====================
Wangxun netdev features support

Implement tx_csum and rx_csum to support hardware checksum offload.
Implement ndo_vlan_rx_add_vid and ndo_vlan_rx_kill_vid.
Implement ndo_set_features.
Enable macros in netdev features which wangxun can support.
====================

Link: https://lore.kernel.org/r/20230530022632.17938-1-mengyuanlou@net-swift.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 6f4b9814 7df4af51
......@@ -1182,12 +1182,28 @@ static void wx_enable_sec_rx_path(struct wx *wx)
WX_WRITE_FLUSH(wx);
}
/* Enable or disable hardware VLAN tag stripping on every Rx ring. */
static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
	u32 vlan_bit = enable ? WX_PX_RR_CFG_VLAN : 0;
	int queue;

	for (queue = 0; queue < wx->num_rx_queues; queue++) {
		struct wx_ring *rx_ring = wx->rx_ring[queue];

		/* program the strip bit of the ring's HW register */
		wr32m(wx, WX_PX_RR_CFG(rx_ring->reg_idx),
		      WX_PX_RR_CFG_VLAN, vlan_bit);
	}
}
void wx_set_rx_mode(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
netdev_features_t features;
u32 fctrl, vmolr, vlnctrl;
int count;
features = netdev->features;
/* Check for Promiscuous and All Multicast modes */
fctrl = rd32(wx, WX_PSR_CTL);
fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
......@@ -1254,6 +1270,13 @@ void wx_set_rx_mode(struct net_device *netdev)
wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
wr32(wx, WX_PSR_CTL, fctrl);
wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_STAG_RX))
wx_vlan_strip_control(wx, true);
else
wx_vlan_strip_control(wx, false);
}
EXPORT_SYMBOL(wx_set_rx_mode);
......@@ -1462,6 +1485,16 @@ static void wx_configure_tx(struct wx *wx)
WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
}
/* Re-program the hardware VLAN filter after a reset: re-add VID 0 and
 * every VID previously recorded in wx->active_vlans.
 */
static void wx_restore_vlan(struct wx *wx)
{
u16 vid = 1;
/* VID 0 is always programmed so untagged traffic keeps flowing */
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0);
/* walk the saved bitmap starting at bit 1 and restore each VID */
for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
/**
* wx_configure_rx - Configure Receive Unit after Reset
* @wx: pointer to private structure
......@@ -1527,7 +1560,7 @@ void wx_configure(struct wx *wx)
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
wx_restore_vlan(wx);
wx_enable_sec_rx_path(wx);
wx_configure_tx(wx);
......@@ -1727,4 +1760,241 @@ int wx_sw_init(struct wx *wx)
}
EXPORT_SYMBOL(wx_sw_init);
/**
 * wx_find_vlvf_slot - find the vlanid or the first empty slot
 * @wx: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * Scans the VLVF entries for @vlan, remembering the first unused slot
 * seen along the way.
 *
 * Return: the VLVF index holding @vlan; otherwise the first empty index,
 * or -ENOMEM when the table is full. VLAN 0 always maps to slot 0.
 **/
static int wx_find_vlvf_slot(struct wx *wx, u32 vlan)
{
u32 bits = 0, first_empty_slot = 0;
int regindex;
/* short cut the special case */
if (vlan == 0)
return 0;
/* Search for the vlan id in the VLVF entries. Save off the first empty
 * slot found along the way
 */
for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
/* select the entry via the index register, then read it back */
wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
bits = rd32(wx, WX_PSR_VLAN_SWC);
/* 0 is a safe "none found" sentinel because the scan starts at 1 */
if (!bits && !(first_empty_slot))
first_empty_slot = regindex;
else if ((bits & 0x0FFF) == vlan)
break;
}
/* no match: fall back to the first empty slot, if one was seen */
if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) {
if (first_empty_slot)
regindex = first_empty_slot;
else
regindex = -ENOMEM;
}
return regindex;
}
/**
 * wx_set_vlvf - Set VLAN Pool Filter
 * @wx: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 * should be changed
 *
 * Turn on/off specified bit in VLVF table. Does nothing (and returns 0)
 * unless virtualization (VT mode) is enabled in the port control register.
 *
 * Return: 0 on success, or a negative errno when no VLVF slot is available.
 **/
static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
bool *vfta_changed)
{
int vlvf_index;
u32 vt, bits;
/* If VT Mode is set
 * Either vlan_on
 * make sure the vlan is in VLVF
 * set the vind bit in the matching VLVFB
 * Or !vlan_on
 * clear the pool bit and possibly the vind
 */
vt = rd32(wx, WX_CFG_PORT_CTL);
if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK))
return 0;
vlvf_index = wx_find_vlvf_slot(wx, vlan);
if (vlvf_index < 0)
return vlvf_index;
/* select the VLVF entry; subsequent SWC accesses address this slot */
wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index);
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
bits |= (1 << vind);
wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
} else {
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
bits |= (1 << (vind - 32));
wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
bits &= ~(1 << vind);
wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
/* fold in the other bitmap half so the check below sees
 * whether any pool still references this VLAN
 */
bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
} else {
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
bits &= ~(1 << (vind - 32));
wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
}
}
if (bits) {
/* at least one pool still uses the VLAN: keep the entry enabled */
wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan));
/* other pools still reference the VLAN, so the caller must not
 * clear its VFTA bit
 */
if (!vlan_on && vfta_changed)
*vfta_changed = false;
} else {
/* no pools left: release the slot */
wr32(wx, WX_PSR_VLAN_SWC, 0);
}
return 0;
}
/**
 * wx_set_vfta - Set VLAN filter table
 * @wx: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 *
 * Return: 0 on success, or the negative errno from wx_set_vlvf().
 **/
static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
u32 bitindex, vfta, targetbit;
bool vfta_changed = false;
int regindex, ret;
/* this is a 2 part operation - first the VFTA, then the
 * VLVF and VLVFB if VT Mode is set
 * We don't write the VFTA until we know the VLVF part succeeded.
 */
/* Part 1
 * The VFTA is a bitstring made up of 128 32-bit registers
 * that enable the particular VLAN id, much like the MTA:
 * bits[11-5]: which register
 * bits[4-0]: which bit in the register
 */
regindex = (vlan >> 5) & 0x7F;
bitindex = vlan & 0x1F;
targetbit = (1 << bitindex);
/* errata 5 */
/* use the driver's shadow copy instead of reading the register back
 * (hardware erratum workaround — TODO confirm erratum reference)
 */
vfta = wx->mac.vft_shadow[regindex];
if (vlan_on) {
if (!(vfta & targetbit)) {
vfta |= targetbit;
vfta_changed = true;
}
} else {
if ((vfta & targetbit)) {
vfta &= ~targetbit;
vfta_changed = true;
}
}
/* Part 2
 * Call wx_set_vlvf to set VLVFB and VLVF
 */
/* wx_set_vlvf() may veto the clear (vfta_changed forced false) when
 * other pools still reference the VLAN
 */
ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed);
if (ret != 0)
return ret;
if (vfta_changed)
wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
/* keep the shadow in sync with what was (or would have been) written */
wx->mac.vft_shadow[regindex] = vfta;
return 0;
}
/**
 * wx_clear_vfta - Clear VLAN filter table
 * @wx: pointer to hardware structure
 *
 * Zeroes every VLAN filter table register (and its shadow copy) and
 * releases all VLAN switch-control entries and their pool bitmaps.
 **/
static void wx_clear_vfta(struct wx *wx)
{
	u32 idx;

	/* wipe the VLAN filter table and the driver's shadow of it */
	for (idx = 0; idx < wx->mac.vft_size; idx++) {
		wr32(wx, WX_PSR_VLAN_TBL(idx), 0);
		wx->mac.vft_shadow[idx] = 0;
	}

	/* release every VLVF slot along with both pool bitmap halves */
	for (idx = 0; idx < WX_PSR_VLAN_SWC_ENTRIES; idx++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, idx);
		wr32(wx, WX_PSR_VLAN_SWC, 0);
		wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0);
		wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0);
	}
}
/* ndo_vlan_rx_add_vid handler: program @vid into the HW VLAN filter and
 * record it in wx->active_vlans so it can be restored after a reset.
 * NOTE(review): wx_set_vfta() can fail (e.g. -ENOMEM when no VLVF slot
 * is free) but its result is ignored here.
 */
int wx_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct wx *wx = netdev_priv(netdev);
/* add VID to filter table */
wx_set_vfta(wx, vid, VMDQ_P(0), true);
set_bit(vid, wx->active_vlans);
return 0;
}
EXPORT_SYMBOL(wx_vlan_rx_add_vid);
/* ndo_vlan_rx_kill_vid handler: drop @vid from the HW VLAN filter and the
 * active_vlans bitmap. VID 0 is deliberately never removed from hardware
 * so untagged traffic keeps passing the filter.
 */
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct wx *wx = netdev_priv(netdev);
/* remove VID from filter table */
if (vid)
wx_set_vfta(wx, vid, VMDQ_P(0), false);
clear_bit(vid, wx->active_vlans);
return 0;
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
/**
 * wx_start_hw - Prepare hardware for Tx/Rx
 * @wx: pointer to hardware structure
 *
 * Brings the device to a clean starting state: empties the VLAN filter
 * table and zeroes the per-queue transmit rate limiters.
 **/
void wx_start_hw(struct wx *wx)
{
	int queue;

	/* start from an empty VLAN filter table */
	wx_clear_vfta(wx);
	WX_WRITE_FLUSH(wx);

	/* disable the rate limiter on every Tx queue */
	for (queue = 0; queue < wx->mac.max_tx_queues; queue++) {
		wr32(wx, WX_TDM_RP_IDX, queue);
		wr32(wx, WX_TDM_RP_RATE, 0);
	}
}
EXPORT_SYMBOL(wx_start_hw);
MODULE_LICENSE("GPL");
......@@ -26,10 +26,13 @@ void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
void wx_reset_misc(struct wx *wx);
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
#endif /* _WX_HW_H_ */
......@@ -2,14 +2,157 @@
/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
#include <linux/pci.h>
#include <net/tcp.h>
#include <net/ip.h>
#include "wx_type.h"
#include "wx_lib.h"
#include "wx_hw.h"
/* Lookup table mapping the 8-bit hardware PTYPE from the Rx descriptor to
 * the decoded wx_dec_ptype bit fields (outer/inner L2-L4 layering and
 * payload hint). Indexes not listed decode as zero-initialized entries.
 */
static struct wx_dec_ptype wx_ptype_lookup[256] = {
/* L2: mac */
[0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
[0x12] = WX_PTT(L2, NONE, NONE, NONE, TS, PAY2),
[0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
[0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
[0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
[0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
[0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
/* L2: ethertype filter */
[0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
/* L3: ip non-tunnel */
[0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3),
[0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3),
[0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP, PAY4),
[0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4),
[0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4),
[0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3),
[0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3),
[0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP, PAY3),
[0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP, PAY4),
[0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4),
/* L2: fcoe */
[0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
[0x38 ... 0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
/* IPv4 --> IPv4/IPv6 */
[0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3),
[0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3),
[0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP, PAY4),
[0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP, PAY4),
[0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4),
[0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3),
[0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3),
[0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP, PAY4),
[0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP, PAY4),
[0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4),
/* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */
[0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3),
[0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3),
[0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3),
[0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP, PAY4),
[0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP, PAY4),
[0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4),
[0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3),
[0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3),
[0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP, PAY4),
[0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP, PAY4),
[0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4),
/* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
[0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3),
[0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3),
[0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3),
[0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP, PAY4),
[0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP, PAY4),
[0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4),
[0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3),
[0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3),
[0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP, PAY4),
[0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP, PAY4),
[0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4),
/* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
[0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3),
[0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3),
[0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3),
[0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP, PAY4),
[0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP, PAY4),
[0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4),
[0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3),
[0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3),
[0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP, PAY4),
[0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP, PAY4),
[0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4),
/* IPv6 --> IPv4/IPv6 */
[0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3),
[0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3),
[0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP, PAY4),
[0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP, PAY4),
[0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4),
[0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3),
[0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3),
[0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP, PAY4),
[0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP, PAY4),
[0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4),
/* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */
[0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3),
[0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3),
[0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3),
[0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP, PAY4),
[0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP, PAY4),
[0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4),
[0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3),
[0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3),
[0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP, PAY4),
[0xDC] = WX_PTT(IP, IPV6, IG, IPV6, TCP, PAY4),
[0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4),
/* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */
[0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3),
[0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3),
[0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3),
[0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP, PAY4),
[0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP, PAY4),
[0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4),
[0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3),
[0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3),
[0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP, PAY4),
[0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP, PAY4),
[0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4),
/* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */
[0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3),
[0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3),
[0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3),
[0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP, PAY4),
[0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP, PAY4),
[0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4),
[0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3),
[0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3),
[0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP, PAY4),
[0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP, PAY4),
[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
};
/* Return the decoded bit fields for hardware packet type @ptype. */
static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
{
return wx_ptype_lookup[ptype];
}
/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
const u32 stat_err_bits)
......@@ -419,6 +562,116 @@ static bool wx_cleanup_headers(struct wx_ring *rx_ring,
return false;
}
/* Copy the hardware RSS hash from the Rx descriptor into the skb,
 * tagging it as an L4 hash when the RSS type is one of the L4 types.
 */
static void wx_rx_hash(struct wx_ring *ring,
		       union wx_rx_desc *rx_desc,
		       struct sk_buff *skb)
{
	u16 rss;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
	      WX_RXD_RSSTYPE_MASK;

	/* RSS type 0 means no hash was computed for this packet */
	if (rss == 0)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     ((1ul << rss) & WX_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
/**
 * wx_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 *
 * Leaves skb->ip_summed as CHECKSUM_NONE unless hardware validated both
 * the IP and L4 checksums; updates the ring's csum_err/csum_good_cnt
 * statistics accordingly.
 **/
static void wx_rx_checksum(struct wx_ring *ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc));
skb_checksum_none_assert(skb);
/* Rx csum disabled */
if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* if IPv4 header checksum error */
/* either the inner or the outer IP checksum failed */
if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) &&
wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) ||
(wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) &&
wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) {
ring->rx_stats.csum_err++;
return;
}
/* L4 checksum offload flag must set for the below code to work */
if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS))
return;
/* Hardware can't guarantee csum if IPv6 Dest Header found */
if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
return;
/* if L4 checksum error */
if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) {
ring->rx_stats.csum_err++;
return;
}
/* It must be a TCP or UDP or SCTP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is an outer header present that might contain a checksum
 * we need to bump the checksum level by 1 to reflect the fact that
 * we are indicating we validated the inner checksum.
 */
if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG)
__skb_incr_checksum_unnecessary(skb);
ring->rx_stats.csum_good_cnt++;
}
/* If VLAN stripping is enabled and the descriptor flags a stripped tag
 * (WX_RXD_STAT_VP), recover the TPID from the per-port tpid[] table and
 * attach the tag to the skb.
 */
static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
u16 ethertype;
u8 idx = 0;
if ((ring->netdev->features &
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) &&
wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) {
/* bits 8:6 of pkt_info select the TPID table entry */
idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
0x1c0) >> 6;
ethertype = ring->q_vector->wx->tpid[idx];
__vlan_hwaccel_put_tag(skb, htons(ethertype),
le16_to_cpu(rx_desc->wb.upper.vlan));
}
}
/**
 * wx_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, protocol, and
 * other fields within the skb.
 **/
static void wx_process_skb_fields(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
wx_rx_hash(rx_ring, rx_desc, skb);
wx_rx_checksum(rx_ring, rx_desc, skb);
wx_rx_vlan(rx_ring, rx_desc, skb);
skb_record_rx_queue(skb, rx_ring->queue_index);
/* eth_type_trans() consumes the Ethernet header, so it runs last */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
* wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @q_vector: structure containing interrupt and ring information
......@@ -486,8 +739,8 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* populate checksum, timestamp, VLAN, and protocol */
wx_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&q_vector->napi, skb);
/* update budget accounting */
......@@ -707,11 +960,50 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
return 0;
}
/* Translate software tx_flags into the command-type field of an advanced
 * Tx data descriptor.
 */
static u32 wx_tx_cmd_type(u32 tx_flags)
{
	/* base: advanced data descriptor with frame checksum insertion */
	u32 cmd = WX_TXD_DTYP_DATA | WX_TXD_IFCS;

	/* hardware VLAN tag insertion */
	cmd |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE);
	/* TSO/FSO segmentation */
	cmd |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE);
	/* hardware transmit timestamping */
	cmd |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP);
	cmd |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC);

	return cmd;
}
/* Build the olinfo_status field of an advanced Tx data descriptor from the
 * payload length and the software tx_flags, and store it (little-endian)
 * into @tx_desc.
 */
static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
u32 tx_flags, unsigned int paylen)
{
u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT;
/* enable L4 checksum for TSO and TX checksum offload */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS);
/* enable IPv4 checksum for TSO */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS);
/* enable outer IPv4 checksum for TSO */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4,
WX_TXD_EIPCS);
/* Check Context must be set if Tx switch is enabled, which it
 * always is for case where virtual functions are running
 */
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC);
olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC,
WX_TXD_IPSEC);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
static void wx_tx_map(struct wx_ring *tx_ring,
struct wx_tx_buffer *first)
struct wx_tx_buffer *first,
const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct wx_tx_buffer *tx_buffer;
u32 tx_flags = first->tx_flags;
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
union wx_tx_desc *tx_desc;
......@@ -719,10 +1011,9 @@ static void wx_tx_map(struct wx_ring *tx_ring,
dma_addr_t dma;
u32 cmd_type;
cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
cmd_type = wx_tx_cmd_type(tx_flags);
tx_desc = WX_TX_DESC(tx_ring, i);
tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT);
wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
size = skb_headlen(skb);
data_len = skb->data_len;
......@@ -838,12 +1129,399 @@ static void wx_tx_map(struct wx_ring *tx_ring,
tx_ring->next_to_use = i;
}
/* Write an advanced context descriptor at the ring's next_to_use slot and
 * advance next_to_use, wrapping at tx_ring->count.
 */
static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
struct wx_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
context_desc = WX_TX_CTXTDESC(tx_ring, i);
/* advance the ring index with wrap-around */
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= WX_TXD_DTYP_CTXT;
/* descriptor fields are stored little-endian */
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
/* Walk the IPv6 extension-header chain starting at @offset and report the
 * first non-extension protocol number through @nexthdr. Stops early at
 * NEXTHDR_NONE, at a fragment header, or when a header cannot be read
 * from the skb.
 */
static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);

	*nexthdr = hdr->nexthdr;
	offset += sizeof(struct ipv6hdr);
	while (ipv6_ext_hdr(*nexthdr)) {
		struct ipv6_opt_hdr _hdr, *hp;

		if (*nexthdr == NEXTHDR_NONE)
			return;
		hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
		if (!hp)
			return;
		if (*nexthdr == NEXTHDR_FRAGMENT)
			break;
		/* Advance past this extension header before reading the
		 * next one. The previous code never moved @offset, so a
		 * packet with two or more extension headers re-read the
		 * same header forever. AH length is counted in 4-byte
		 * units, all other extension headers in 8-byte units
		 * (same rule as ipv6_skip_exthdr()).
		 */
		if (*nexthdr == NEXTHDR_AUTH)
			offset += ipv6_authlen(hp);
		else
			offset += ipv6_optlen(hp);
		*nexthdr = hp->nexthdr;
	}
}
/* Overlay for reading an L3 header that may be either IPv4 or IPv6. */
union network_header {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
void *raw;
};
/* Build the hardware WX_PTYPE_* encoding for the skb attached to @first.
 * For encapsulated packets the tunnel kind (IP-in-IP, GRE/NAT, with or
 * without an inner MAC header / MAC+VLAN) is encoded together with the
 * inner L3/L4 protocol; otherwise the outer L3/L4 protocol is used.
 * Unknown outer protocols fall back to a plain MAC ptype (or a partial
 * encoding when detection stops midway through a tunnel).
 */
static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
u8 tun_prot = 0, l4_prot = 0, ptype = 0;
struct sk_buff *skb = first->skb;
if (skb->encapsulation) {
union network_header hdr;
/* outer L3 determines the tunnel base encoding */
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
ptype = WX_PTYPE_TUN_IPV4;
break;
case htons(ETH_P_IPV6):
wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
ptype = WX_PTYPE_TUN_IPV6;
break;
default:
return ptype;
}
if (tun_prot == IPPROTO_IPIP) {
hdr.raw = (void *)inner_ip_hdr(skb);
ptype |= WX_PTYPE_PKT_IPIP;
} else if (tun_prot == IPPROTO_UDP) {
hdr.raw = (void *)inner_ip_hdr(skb);
/* UDP tunnel: distinguish plain IP payload (IG) from an
 * inner Ethernet frame (IGM), with or without VLAN (IGMV)
 */
if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB)) {
ptype |= WX_PTYPE_PKT_IG;
} else {
if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
== htons(ETH_P_8021Q))
ptype |= WX_PTYPE_PKT_IGMV;
else
ptype |= WX_PTYPE_PKT_IGM;
}
} else if (tun_prot == IPPROTO_GRE) {
hdr.raw = (void *)inner_ip_hdr(skb);
/* GRE: inner IP protocol means no inner MAC header */
if (skb->inner_protocol == htons(ETH_P_IP) ||
skb->inner_protocol == htons(ETH_P_IPV6)) {
ptype |= WX_PTYPE_PKT_IG;
} else {
if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
== htons(ETH_P_8021Q))
ptype |= WX_PTYPE_PKT_IGMV;
else
ptype |= WX_PTYPE_PKT_IGM;
}
} else {
/* unsupported tunnel protocol: outer encoding only */
return ptype;
}
/* inner L3 decides where to look for the L4 protocol */
switch (hdr.ipv4->version) {
case IPVERSION:
l4_prot = hdr.ipv4->protocol;
break;
case 6:
wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
ptype |= WX_PTYPE_PKT_IPV6;
break;
default:
return ptype;
}
} else {
/* non-encapsulated: outer L3/L4 only */
switch (first->protocol) {
case htons(ETH_P_IP):
l4_prot = ip_hdr(skb)->protocol;
ptype = WX_PTYPE_PKT_IP;
break;
case htons(ETH_P_IPV6):
wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
break;
default:
return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC;
}
}
/* finally, encode the L4 protocol */
switch (l4_prot) {
case IPPROTO_TCP:
ptype |= WX_PTYPE_TYP_TCP;
break;
case IPPROTO_UDP:
ptype |= WX_PTYPE_TYP_UDP;
break;
case IPPROTO_SCTP:
ptype |= WX_PTYPE_TYP_SCTP;
break;
default:
ptype |= WX_PTYPE_TYP_IP;
break;
}
return ptype;
}
/**
 * wx_tso - set up a TSO context descriptor if the skb needs segmentation
 * @tx_ring: ring the descriptors are placed on
 * @first: first wx_tx_buffer of the packet; tx_flags/gso_segs/bytecount
 *         are updated here for the caller
 * @hdr_len: output, total header length (L2 through L4) that the caller
 *           subtracts from skb->len to get the payload length
 * @ptype: hardware packet-type encoding for the context descriptor
 *
 * Zeroes the IP length/checksum fields and seeds the TCP pseudo-header
 * checksum so hardware can fill them per segment.
 *
 * NOTE(review): assumes GSO packets are TCP (reads tcp_hdr/inner_tcp_hdr)
 * — confirm the advertised GSO features exclude UDP segmentation.
 *
 * Return: 0 if no TSO is needed, 1 if a context descriptor was written,
 * or a negative errno if the skb header could not be made writable.
 **/
static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 *hdr_len, u8 ptype)
{
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct net_device *netdev = tx_ring->netdev;
u32 l4len, tunhdr_eiplen_tunlen = 0;
struct sk_buff *skb = first->skb;
bool enc = skb->encapsulation;
struct ipv6hdr *ipv6h;
struct tcphdr *tcph;
struct iphdr *iph;
u8 tun_prot = 0;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
if (!skb_is_gso(skb))
return 0;
/* headers are modified below, so they must be writable */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
/* indicates the inner headers in the skbuff are valid. */
iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
if (iph->version == 4) {
tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
/* zero length/checksum and seed the pseudo-header checksum so
 * hardware can finalize them per segment
 */
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, 0,
IPPROTO_TCP, 0);
first->tx_flags |= WX_TX_FLAGS_TSO |
WX_TX_FLAGS_CSUM |
WX_TX_FLAGS_IPV4 |
WX_TX_FLAGS_CC;
} else if (iph->version == 6 && skb_is_gso_v6(skb)) {
ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
&ipv6h->daddr, 0,
IPPROTO_TCP, 0);
first->tx_flags |= WX_TX_FLAGS_TSO |
WX_TX_FLAGS_CSUM |
WX_TX_FLAGS_CC;
}
/* compute header lengths */
l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
*hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
skb_transport_offset(skb);
*hdr_len += l4len;
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
if (enc) {
/* encapsulated: also encode the outer IP and tunnel header
 * lengths for hardware
 */
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
break;
case htons(ETH_P_IPV6):
tun_prot = ipv6_hdr(skb)->nexthdr;
break;
default:
break;
}
switch (tun_prot) {
case IPPROTO_UDP:
tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT) |
(((skb_inner_mac_header(skb) -
skb_transport_header(skb)) >> 1) <<
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_GRE:
tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT) |
(((skb_inner_mac_header(skb) -
skb_transport_header(skb)) >> 1) <<
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
break;
default:
break;
}
vlan_macip_lens = skb_inner_network_header_len(skb) >> 1;
} else {
vlan_macip_lens = skb_network_header_len(skb) >> 1;
}
vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;
type_tucmd = ptype << 24;
/* select the S-tag TPID when transmitting an 802.1ad-tagged frame */
if (skb->vlan_proto == htons(ETH_P_8021AD) &&
netdev->features & NETIF_F_HW_VLAN_STAG_TX)
type_tucmd |= WX_SET_FLAG(first->tx_flags,
WX_TX_FLAGS_HW_VLAN,
0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
type_tucmd, mss_l4len_idx);
return 1;
}
/**
 * wx_tx_csum - set up a checksum-offload context descriptor
 * @tx_ring: ring the descriptor is placed on
 * @first: first wx_tx_buffer of the packet; tx_flags are updated here
 * @ptype: hardware packet-type encoding for the context descriptor
 *
 * For CHECKSUM_PARTIAL skbs, encodes the (possibly inner) header layout
 * and L4 length so hardware can insert the checksum, and sets
 * WX_TX_FLAGS_CSUM. For other skbs a context descriptor is only emitted
 * when a VLAN tag or the CC flag requires one.
 **/
static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 ptype)
{
u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0;
struct net_device *netdev = tx_ring->netdev;
u32 mss_l4len_idx = 0, type_tucmd;
struct sk_buff *skb = first->skb;
u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
/* no checksum requested: descriptor only needed for VLAN/CC */
if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & WX_TX_FLAGS_CC))
return;
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
} else {
u8 l4_prot = 0;
union {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
union {
struct tcphdr *tcphdr;
u8 *raw;
} transport_hdr;
if (skb->encapsulation) {
/* checksum the inner headers; also encode the outer IP
 * and tunnel header lengths
 */
network_hdr.raw = skb_inner_network_header(skb);
transport_hdr.raw = skb_inner_transport_header(skb);
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
tun_prot = ipv6_hdr(skb)->nexthdr;
break;
default:
return;
}
switch (tun_prot) {
case IPPROTO_UDP:
tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
tunhdr_eiplen_tunlen |=
((skb_network_header_len(skb) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT) |
(((skb_inner_mac_header(skb) -
skb_transport_header(skb)) >> 1) <<
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_GRE:
tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT) |
(((skb_inner_mac_header(skb) -
skb_transport_header(skb)) >> 1) <<
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
break;
default:
break;
}
} else {
network_hdr.raw = skb_network_header(skb);
transport_hdr.raw = skb_transport_header(skb);
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
}
/* encode the (inner) IP header length and find the L4 protocol */
switch (network_hdr.ipv4->version) {
case IPVERSION:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
l4_prot = network_hdr.ipv4->protocol;
break;
case 6:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
/* NOTE(review): reads ipv6->nexthdr directly; extension
 * headers are not skipped here — confirm intended
 */
l4_prot = network_hdr.ipv6->nexthdr;
break;
default:
break;
}
/* encode the L4 header length for the checksum engine */
switch (l4_prot) {
case IPPROTO_TCP:
mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
WX_TXD_L4LEN_SHIFT;
break;
case IPPROTO_SCTP:
mss_l4len_idx = sizeof(struct sctphdr) <<
WX_TXD_L4LEN_SHIFT;
break;
case IPPROTO_UDP:
mss_l4len_idx = sizeof(struct udphdr) <<
WX_TXD_L4LEN_SHIFT;
break;
default:
break;
}
/* update TX checksum flag */
first->tx_flags |= WX_TX_FLAGS_CSUM;
}
first->tx_flags |= WX_TX_FLAGS_CC;
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;
type_tucmd = ptype << 24;
/* select the S-tag TPID when transmitting an 802.1ad-tagged frame */
if (skb->vlan_proto == htons(ETH_P_8021AD) &&
netdev->features & NETIF_F_HW_VLAN_STAG_TX)
type_tucmd |= WX_SET_FLAG(first->tx_flags,
WX_TX_FLAGS_HW_VLAN,
0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
type_tucmd, mss_l4len_idx);
}
static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
struct wx_ring *tx_ring)
{
u16 count = TXD_USE_COUNT(skb_headlen(skb));
struct wx_tx_buffer *first;
u8 hdr_len = 0, ptype;
unsigned short f;
u32 tx_flags = 0;
int tso;
/* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
......@@ -864,7 +1542,29 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
wx_tx_map(tx_ring, first);
/* if we have a HW VLAN tag being added default to the HW one */
if (skb_vlan_tag_present(skb)) {
tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT;
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
ptype = wx_encode_tx_desc_ptype(first);
tso = wx_tso(tx_ring, first, &hdr_len, ptype);
if (tso < 0)
goto out_drop;
else if (!tso)
wx_tx_csum(tx_ring, first, ptype);
wx_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK;
}
......@@ -2004,4 +2704,24 @@ void wx_get_stats64(struct net_device *netdev,
}
EXPORT_SYMBOL(wx_get_stats64);
/* ndo_set_features handler: apply hardware state for the requested
 * feature set (RSS enable bit, VLAN strip configuration).
 *
 * Return: 0 so that __netdev_update_features() commits @features to
 * netdev->features. Two fixes versus the original:
 *  - RSS was keyed off "changed & NETIF_F_RXHASH", which enabled RSS
 *    whenever the bit merely toggled (even when turning it off) and
 *    disabled RSS on any unrelated feature change; key off the requested
 *    state instead.
 *  - Returning 1 made the core skip the netdev->features assignment
 *    (only a zero return commits it), so toggles never stuck.
 */
int wx_set_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct wx *wx = netdev_priv(netdev);

	/* track the requested RXHASH state, not whether it changed */
	if (features & NETIF_F_RXHASH)
		wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
		      WX_RDB_RA_CTL_RSS_EN);
	else
		wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);

	/* reprogram VLAN stripping when either strip flag changes */
	if (changed &
	    (NETIF_F_HW_VLAN_CTAG_RX |
	     NETIF_F_HW_VLAN_STAG_RX))
		wx_set_rx_mode(netdev);

	return 0;
}
EXPORT_SYMBOL(wx_set_features);
MODULE_LICENSE("GPL");
......@@ -28,5 +28,6 @@ void wx_free_resources(struct wx *wx);
int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
#endif /* _NGBE_LIB_H_ */
......@@ -6,6 +6,8 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
#define WX_NCSI_MASK 0x8000
......@@ -64,6 +66,8 @@
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
/* GPIO Registers */
#define WX_GPIO_DR 0x14800
......@@ -87,6 +91,8 @@
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
#define WX_TDM_RP_IDX 0x1820C
#define WX_TDM_RP_RATE 0x18404
/***************************** RDB registers *********************************/
/* receive packet buffer */
......@@ -105,6 +111,8 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
#define WX_RDB_RA_CTL 0x194F4
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
/******************************* PSR Registers *******************************/
/* psr control */
......@@ -150,6 +158,9 @@
#define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16))
#define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16))
/* vlan tbl */
#define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4))
/* mac switcher */
#define WX_PSR_MAC_SWC_AD_L 0x16200
#define WX_PSR_MAC_SWC_AD_H 0x16204
......@@ -161,6 +172,15 @@
#define WX_PSR_MAC_SWC_IDX 0x16210
#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU
/* vlan switch */
#define WX_PSR_VLAN_SWC 0x16220
#define WX_PSR_VLAN_SWC_VM_L 0x16224
#define WX_PSR_VLAN_SWC_VM_H 0x16228
#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
#define WX_PSR_VLAN_SWC_ENTRIES 64
/********************************* RSEC **************************************/
/* general rsec */
#define WX_RSC_CTL 0x17000
......@@ -255,6 +275,7 @@
#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40))
#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
#define WX_PX_RR_CFG_RR_THER_SHIFT 16
#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
......@@ -296,6 +317,7 @@
#define WX_MAX_TXD 8192
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
......@@ -315,17 +337,64 @@
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Ether Types */
#define WX_ETH_P_CNM 0x22E7
#define WX_CFG_PORT_ST 0x14404
/******************* Receive Descriptor bit definitions **********************/
#define WX_RXD_STAT_DD BIT(0) /* Done */
#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
#define WX_RXD_STAT_VP BIT(5) /* IEEE VLAN Pkt */
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
#define WX_RXD_ERR_TCPE BIT(30) /* TCP/UDP Checksum Error */
#define WX_RXD_ERR_IPE BIT(31) /* IP Checksum Error */
/* RSS Hash results */
#define WX_RXD_RSSTYPE_MASK GENMASK(3, 0)
#define WX_RXD_RSSTYPE_IPV4_TCP 0x00000001U
#define WX_RXD_RSSTYPE_IPV6_TCP 0x00000003U
#define WX_RXD_RSSTYPE_IPV4_SCTP 0x00000004U
#define WX_RXD_RSSTYPE_IPV6_SCTP 0x00000006U
#define WX_RXD_RSSTYPE_IPV4_UDP 0x00000007U
#define WX_RXD_RSSTYPE_IPV6_UDP 0x00000008U
#define WX_RSS_L4_TYPES_MASK \
((1ul << WX_RXD_RSSTYPE_IPV4_TCP) | \
(1ul << WX_RXD_RSSTYPE_IPV4_UDP) | \
(1ul << WX_RXD_RSSTYPE_IPV4_SCTP) | \
(1ul << WX_RXD_RSSTYPE_IPV6_TCP) | \
(1ul << WX_RXD_RSSTYPE_IPV6_UDP) | \
(1ul << WX_RXD_RSSTYPE_IPV6_SCTP))
/* TUN */
#define WX_PTYPE_TUN_IPV4 0x80
#define WX_PTYPE_TUN_IPV6 0xC0
/* PKT for TUN */
#define WX_PTYPE_PKT_IPIP 0x00 /* IP+IP */
#define WX_PTYPE_PKT_IG 0x10 /* IP+GRE */
#define WX_PTYPE_PKT_IGM 0x20 /* IP+GRE+MAC */
#define WX_PTYPE_PKT_IGMV 0x30 /* IP+GRE+MAC+VLAN */
/* PKT for !TUN */
#define WX_PTYPE_PKT_MAC 0x10
#define WX_PTYPE_PKT_IP 0x20
/* TYP for PKT=mac */
#define WX_PTYPE_TYP_MAC 0x01
/* TYP for PKT=ip */
#define WX_PTYPE_PKT_IPV6 0x08
#define WX_PTYPE_TYP_IPFRAG 0x01
#define WX_PTYPE_TYP_IP 0x02
#define WX_PTYPE_TYP_UDP 0x03
#define WX_PTYPE_TYP_TCP 0x04
#define WX_PTYPE_TYP_SCTP 0x05
#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
#define WX_RXD_IPV6EX(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
/*********************** Transmit Descriptor Config Masks ****************/
#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
......@@ -334,6 +403,113 @@
#define WX_TXD_IFCS BIT(25) /* Insert FCS */
#define WX_TXD_RS BIT(27) /* Report Status */
/*********************** Adv Transmit Descriptor Config Masks ****************/
#define WX_TXD_MAC_TSTAMP BIT(19) /* IEEE1588 time stamp */
#define WX_TXD_DTYP_CTXT BIT(20) /* Adv Context Desc */
#define WX_TXD_LINKSEC BIT(26) /* enable linksec */
#define WX_TXD_VLE BIT(30) /* VLAN pkt enable */
#define WX_TXD_TSE BIT(31) /* TCP Seg enable */
#define WX_TXD_CC BIT(7) /* Check Context */
#define WX_TXD_IPSEC BIT(8) /* enable ipsec esp */
#define WX_TXD_L4CS BIT(9)
#define WX_TXD_IIPCS BIT(10)
#define WX_TXD_EIPCS BIT(11)
#define WX_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */
#define WX_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define WX_TXD_TAG_TPID_SEL_SHIFT 11
#define WX_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define WX_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
#define WX_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */
#define WX_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */
#define WX_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */
#define WX_TXD_TUNNEL_UDP FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 0)
#define WX_TXD_TUNNEL_GRE FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 1)
/* Per-skb transmit flags carried in wx_tx_buffer.tx_flags; translated into
 * descriptor cmd_type/olinfo bits (e.g. via WX_SET_FLAG()) at xmit time.
 */
enum wx_tx_flags {
	/* cmd_type flags */
	WX_TX_FLAGS_HW_VLAN	= 0x01,	/* insert the VLAN tag held in tx_flags */
	WX_TX_FLAGS_TSO		= 0x02,	/* TCP segmentation offload requested */
	WX_TX_FLAGS_TSTAMP	= 0x04,	/* hardware timestamp requested */
	/* olinfo flags */
	WX_TX_FLAGS_CC		= 0x08,	/* context descriptor check */
	WX_TX_FLAGS_IPV4	= 0x10,
	WX_TX_FLAGS_CSUM	= 0x20,	/* checksum offload requested */
	WX_TX_FLAGS_OUTER_IPV4	= 0x100,
	WX_TX_FLAGS_LINKSEC	= 0x200,
	WX_TX_FLAGS_IPSEC	= 0x400,
};
/* VLAN info */
#define WX_TX_FLAGS_VLAN_MASK GENMASK(31, 16)
#define WX_TX_FLAGS_VLAN_SHIFT 16
/* wx_dec_ptype.mac: outer mac */
/* Outer MAC classification field of a decoded packet type.
 * NOTE(review): value 1 is unused/reserved by the hardware encoding —
 * confirm against the datasheet.
 */
enum wx_dec_ptype_mac {
	WX_DEC_PTYPE_MAC_IP	= 0,	/* IP payload, no further L2 */
	WX_DEC_PTYPE_MAC_L2	= 2,	/* plain L2 (Ethernet) frame */
	WX_DEC_PTYPE_MAC_FCOE	= 3,	/* Fibre Channel over Ethernet */
};
/* wx_dec_ptype.[e]ip: outer&encaped ip */
/* Fragment flag OR-ed into the IP version below. */
#define WX_DEC_PTYPE_IP_FRAG	0x4
/* Outer/encapsulated IP classification of a decoded packet type. */
enum wx_dec_ptype_ip {
	WX_DEC_PTYPE_IP_NONE	= 0,	/* not an IP packet */
	WX_DEC_PTYPE_IP_IPV4	= 1,
	WX_DEC_PTYPE_IP_IPV6	= 2,
	WX_DEC_PTYPE_IP_FGV4	= WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV4,	/* fragmented IPv4 */
	WX_DEC_PTYPE_IP_FGV6	= WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV6,	/* fragmented IPv6 */
};
/* wx_dec_ptype.etype: encaped type */
/* Encapsulation type of a decoded packet (wx_dec_ptype.etype):
 * what wraps the inner packet, if anything.
 */
enum wx_dec_ptype_etype {
	WX_DEC_PTYPE_ETYPE_NONE	= 0,	/* no encapsulation */
	WX_DEC_PTYPE_ETYPE_IPIP	= 1,	/* IP+IP */
	WX_DEC_PTYPE_ETYPE_IG	= 2,	/* IP+GRE */
	WX_DEC_PTYPE_ETYPE_IGM	= 3,	/* IP+GRE+MAC */
	WX_DEC_PTYPE_ETYPE_IGMV	= 4,	/* IP+GRE+MAC+VLAN */
};
/* wx_dec_ptype.proto: payload proto */
/* Payload (L4) protocol of a decoded packet type. */
enum wx_dec_ptype_prot {
	WX_DEC_PTYPE_PROT_NONE	= 0,	/* no recognized L4 protocol */
	WX_DEC_PTYPE_PROT_UDP	= 1,
	WX_DEC_PTYPE_PROT_TCP	= 2,
	WX_DEC_PTYPE_PROT_SCTP	= 3,
	WX_DEC_PTYPE_PROT_ICMP	= 4,
	WX_DEC_PTYPE_PROT_TS	= 5,	/* time sync (PTP) */
};
/* wx_dec_ptype.layer: payload layer */
/* Payload layer of a decoded packet type — presumably the layer at which
 * the hardware parser stopped (PAY2 = L2, PAY3 = L3, PAY4 = L4); verify
 * against the datasheet.
 */
enum wx_dec_ptype_layer {
	WX_DEC_PTYPE_LAYER_NONE	= 0,
	WX_DEC_PTYPE_LAYER_PAY2	= 1,
	WX_DEC_PTYPE_LAYER_PAY3	= 2,
	WX_DEC_PTYPE_LAYER_PAY4	= 3,
};
/* Decoded form of the hardware 8-bit packet-type code, unpacked into
 * bit-fields; entries are built with the WX_PTT() macro.  'known' marks
 * table slots that hold a valid decode.
 */
struct wx_dec_ptype {
	u32 known:1;	/* entry is populated/valid */
	u32 mac:2;	/* outer mac */
	u32 ip:3;	/* outer ip*/
	u32 etype:3;	/* encaped type */
	u32 eip:3;	/* encaped ip */
	u32 prot:4;	/* payload proto */
	u32 layer:3;	/* payload layer */
};
/* macro to make the table lines short */
/* Build one wx_dec_ptype initializer from short token suffixes
 * (token-pasted onto the WX_DEC_PTYPE_* enum prefixes); 'known' is
 * always set to 1.
 */
#define WX_PTT(mac, ip, etype, eip, proto, layer)\
	{1, \
	 WX_DEC_PTYPE_MAC_##mac,	/* mac */\
	 WX_DEC_PTYPE_IP_##ip,		/* ip */ \
	 WX_DEC_PTYPE_ETYPE_##etype,	/* etype */\
	 WX_DEC_PTYPE_IP_##eip,		/* eip */\
	 WX_DEC_PTYPE_PROT_##proto,	/* proto */\
	 WX_DEC_PTYPE_LAYER_##layer	/* layer */}
/* Host Interface Command Structures */
struct wx_hic_hdr {
u8 cmd;
......@@ -412,6 +588,8 @@ struct wx_mac_info {
u32 mta_shadow[128];
s32 mc_filter_type;
u32 mcft_size;
u32 vft_shadow[128];
u32 vft_size;
u32 num_rar_entries;
u32 rx_pb_size;
u32 tx_pb_size;
......@@ -508,10 +686,25 @@ union wx_rx_desc {
} wb; /* writeback */
};
/* Hardware Tx context descriptor (little-endian).  Written by
 * wx_tx_ctxtdesc() ahead of data descriptors to pass offload metadata
 * (VLAN/MAC/IP lengths, checksum/TSO command bits, MSS and L4 length).
 */
struct wx_tx_context_desc {
	__le32 vlan_macip_lens;		/* VLAN tag plus MAC/IP header lengths */
	__le32 seqnum_seed;		/* also carries tunnel hdr/EIP lengths */
	__le32 type_tucmd_mlhl;		/* descriptor type and TU command bits */
	__le32 mss_l4len_idx;		/* MSS and L4 header length */
};
/* If _flag is set in _input, return _result, else 0.
 * Branch-free: scales the masked bit by the (power-of-two) ratio between
 * _result and _flag, choosing multiply or divide by whichever is larger.
 * Intended for single-bit _flag/_result values.
 */
#define WX_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
/* Typed accessors returning a pointer to the i-th descriptor in ring R's
 * descriptor memory, viewed as Rx, Tx, or Tx-context descriptors.
 */
#define WX_RX_DESC(R, i)     \
	(&(((union wx_rx_desc *)((R)->desc))[i]))
#define WX_TX_DESC(R, i)     \
	(&(((union wx_tx_desc *)((R)->desc))[i]))
#define WX_TX_CTXTDESC(R, i) \
	(&(((struct wx_tx_context_desc *)((R)->desc))[i]))
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
......@@ -523,6 +716,8 @@ struct wx_tx_buffer {
unsigned short gso_segs;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
__be16 protocol;
u32 tx_flags;
};
struct wx_rx_buffer {
......@@ -539,6 +734,11 @@ struct wx_queue_stats {
u64 bytes;
};
/* Per-Rx-queue checksum offload counters, embedded in struct wx_ring. */
struct wx_rx_queue_stats {
	u64 csum_good_cnt;	/* packets whose HW checksum verified good */
	u64 csum_err;		/* packets flagged with a HW checksum error */
};
/* iterator for handling rings in ring container */
#define wx_for_each_ring(posm, headm) \
for (posm = (headm).ring; posm; posm = posm->next)
......@@ -550,7 +750,6 @@ struct wx_ring_container {
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
struct wx_ring {
struct wx_ring *next; /* pointer to next ring in q_vector */
struct wx_q_vector *q_vector; /* backpointer to host q_vector */
......@@ -580,6 +779,9 @@ struct wx_ring {
struct wx_queue_stats stats;
struct u64_stats_sync syncp;
union {
struct wx_rx_queue_stats rx_stats;
};
} ____cacheline_internodealigned_in_smp;
struct wx_q_vector {
......@@ -610,6 +812,8 @@ enum wx_isb_idx {
};
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 __iomem *hw_addr;
struct pci_dev *pdev;
struct net_device *netdev;
......
......@@ -115,6 +115,7 @@ static int ngbe_sw_init(struct wx *wx)
wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
wx->mac.vft_size = NGBE_SP_VFT_TBL_SIZE;
wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;
......@@ -473,9 +474,12 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_change_mtu = wx_change_mtu,
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
};
/**
......@@ -551,12 +555,18 @@ static int ngbe_probe(struct pci_dev *pdev,
ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features = NETIF_F_SG;
netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXHASH | NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID;
netdev->vlan_features |= netdev->features;
netdev->features |= NETIF_F_IPV6_CSUM | NETIF_F_VLAN_FEATURES;
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features |
NETIF_F_RXALL;
netdev->hw_features |= netdev->features | NETIF_F_RXALL;
netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->hw_features |= NETIF_F_GRO;
netdev->features |= NETIF_F_GRO;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
......
......@@ -136,6 +136,7 @@ enum NGBE_MSCA_CMD_value {
#define NGBE_RAR_ENTRIES 32
#define NGBE_RX_PB_SIZE 42
#define NGBE_MC_TBL_SIZE 128
#define NGBE_SP_VFT_TBL_SIZE 128
#define NGBE_TDB_PB_SZ (20 * 1024) /* 160KB Packet Buffer */
/* TX/RX descriptor defines */
......
......@@ -258,6 +258,7 @@ static void txgbe_reset(struct wx *wx)
if (err != 0)
wx_err(wx, "Hardware Error: %d\n", err);
wx_start_hw(wx);
/* do not flush user set addresses */
memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
wx_flush_sw_mac_table(wx);
......@@ -330,6 +331,7 @@ static int txgbe_sw_init(struct wx *wx)
wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
......@@ -491,9 +493,12 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_change_mtu = wx_change_mtu,
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
};
/**
......@@ -596,11 +601,25 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_free_mac_table;
}
netdev->features |= NETIF_F_HIGHDMA;
netdev->features = NETIF_F_SG;
netdev->features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM;
netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL;
netdev->features |= netdev->gso_partial_features;
netdev->features |= NETIF_F_SCTP_CRC;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
netdev->features |= NETIF_F_VLAN_FEATURES;
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features | NETIF_F_RXALL;
netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->hw_features |= NETIF_F_GRO;
netdev->features |= NETIF_F_GRO;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
......
......@@ -77,6 +77,7 @@
#define TXGBE_SP_MAX_RX_QUEUES 128
#define TXGBE_SP_RAR_ENTRIES 128
#define TXGBE_SP_MC_TBL_SIZE 128
#define TXGBE_SP_VFT_TBL_SIZE 128
#define TXGBE_SP_RX_PB_SIZE 512
#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment