Commit 1cedb16b authored by Jakub Kicinski

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
net: intel: start The Great Code Dedup + Page Pool for iavf

Alexander Lobakin says:

Here's a two-shot: introduce the {,Intel} Ethernet common libraries (libeth
and libie) and switch iavf to Page Pool. Details are in the commit messages;
here's a summary:

It's no secret that there's a ton of code duplication between two or more
Intel Ethernet modules. Before introducing new changes that would need to be
copied over yet again, start decoupling the already existing duplicate
functionality into a new module, which will be shared between several
Intel Ethernet drivers. The first name that came to my mind was
"libie" -- "Intel Ethernet common library". Also, this sounds like
"lovelie" (-> one word, no "lib I E" pls) and can be expanded as
"lib Internet Explorer" :P
The "generic", pure-software part is placed separately, so that it can be
easily reused in any driver by any vendor without pulling in the Intel
pre-200G guts. In a few words, it's functionality that any modern driver
implements the same way, but nobody has moved it up a level (yet).
The series is only the beginning. From now on, every new feature added and
every good driver refactoring will remove far more lines than it adds, for
quite some time. There's already a basic roadmap with some deduplications
planned, not to mention that touching any line now prompts the question:
"can I share this?". The final destination is very ambitious: a single
unified driver for at least i40e, ice, iavf, and idpf, with a struct ops per
generation. That's never gonna happen, right? But we can at least try.
The PP conversion for iavf lands within the same series, as the two are
tied closely: libie will support the Page Pool model only, so a driver can't
use much of the lib until it's converted. iavf is only the first example;
the rest will be converted on a per-driver basis. That is
when it gets really interesting. Stay tuned.
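
To give a feel for the basic libeth Rx API before diving into the diffs
below: a driver describes its fill queue in a &libeth_fq and lets libeth
pick the Page Pool parameters and buffer sizes. A minimal, hypothetical
sketch follows -- "my_ring" and its helpers are illustrative only and are
not part of the series; see libeth_rx_fq_create()/libeth_rx_fq_destroy()
further down for the actual implementation:

  #include <net/libeth/rx.h>

  /* Illustrative only: a driver-side ring with just the libeth bits shown */
  struct my_ring {
          struct libeth_fq fq;
          struct napi_struct *napi;       /* already registered on the netdev */
          u32 desc_count;
  };

  static int my_ring_create_fq(struct my_ring *ring)
  {
          struct libeth_fq *fq = &ring->fq;

          fq->count = ring->desc_count;   /* one buffer per Rx descriptor */
          fq->nid = NUMA_NO_NODE;         /* no NUMA node preference */
          fq->buf_len = 0;                /* no extra HW limit, let libeth clamp it */

          /* Creates fq->pp (the page_pool) and the fq->fqes array, and fills
           * in the chosen fq->buf_len/fq->truesize, which the driver then
           * programs into its Rx queue context.
           */
          return libeth_rx_fq_create(fq, ring->napi);
  }

  static void my_ring_destroy_fq(struct my_ring *ring)
  {
          /* Frees fq->fqes and destroys the page_pool */
          libeth_rx_fq_destroy(&ring->fq);
  }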

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  MAINTAINERS: add entry for libeth and libie
  iavf: switch to Page Pool
  iavf: pack iavf_ring more efficiently
  libeth: add Rx buffer management
  page_pool: add DMA-sync-for-CPU inline helper
  page_pool: constify some read-only function arguments
  slab: introduce kvmalloc_array_node() and kvcalloc_node()
  iavf: drop page splitting and recycling
  iavf: kill "legacy-rx" for good
  net: intel: introduce {, Intel} Ethernet common library
====================

Link: https://lore.kernel.org/r/20240424203559.3420468-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3c4d7902 87a927ef
...@@ -12388,6 +12388,26 @@ F: drivers/ata/ ...@@ -12388,6 +12388,26 @@ F: drivers/ata/
F: include/linux/ata.h F: include/linux/ata.h
F: include/linux/libata.h F: include/linux/libata.h
LIBETH COMMON ETHERNET LIBRARY
M: Alexander Lobakin <aleksander.lobakin@intel.com>
L: netdev@vger.kernel.org
L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
S: Supported
T: git https://github.com/alobakin/linux.git
F: drivers/net/ethernet/intel/libeth/
F: include/net/libeth/
K: libeth
LIBIE COMMON INTEL ETHERNET LIBRARY
M: Alexander Lobakin <aleksander.lobakin@intel.com>
L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Supported
T: git https://github.com/alobakin/linux.git
F: drivers/net/ethernet/intel/libie/
F: include/linux/net/intel/libie/
K: libie
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com> M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com> M: Dan Williams <dan.j.williams@intel.com>
......
...@@ -16,6 +16,9 @@ config NET_VENDOR_INTEL ...@@ -16,6 +16,9 @@ config NET_VENDOR_INTEL
if NET_VENDOR_INTEL if NET_VENDOR_INTEL
source "drivers/net/ethernet/intel/libeth/Kconfig"
source "drivers/net/ethernet/intel/libie/Kconfig"
config E100 config E100
tristate "Intel(R) PRO/100+ support" tristate "Intel(R) PRO/100+ support"
depends on PCI depends on PCI
...@@ -225,6 +228,7 @@ config I40E ...@@ -225,6 +228,7 @@ config I40E
depends on PTP_1588_CLOCK_OPTIONAL depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI depends on PCI
select AUXILIARY_BUS select AUXILIARY_BUS
select LIBIE
select NET_DEVLINK select NET_DEVLINK
help help
This driver supports Intel(R) Ethernet Controller XL710 Family of This driver supports Intel(R) Ethernet Controller XL710 Family of
...@@ -253,6 +257,8 @@ config I40E_DCB ...@@ -253,6 +257,8 @@ config I40E_DCB
# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF # so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
config IAVF config IAVF
tristate tristate
select LIBIE
config I40EVF config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support" tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF select IAVF
...@@ -283,6 +289,7 @@ config ICE ...@@ -283,6 +289,7 @@ config ICE
depends on GNSS || GNSS = n depends on GNSS || GNSS = n
select AUXILIARY_BUS select AUXILIARY_BUS
select DIMLIB select DIMLIB
select LIBIE
select NET_DEVLINK select NET_DEVLINK
select PLDMFW select PLDMFW
select DPLL select DPLL
......
...@@ -3,6 +3,9 @@ ...@@ -3,6 +3,9 @@
# Makefile for the Intel network device drivers. # Makefile for the Intel network device drivers.
# #
obj-$(CONFIG_LIBETH) += libeth/
obj-$(CONFIG_LIBIE) += libie/
obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/ obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_E1000E) += e1000e/
......
...@@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX ...@@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq; static struct workqueue_struct *i40e_wq;
......
...@@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); ...@@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
int i40e_set_mac_type(struct i40e_hw *hw); int i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40e_ptype_lookup[ptype];
}
/** /**
* i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
* @link_speed: the speed to convert * @link_speed: the speed to convert
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h> #include <linux/bpf_trace.h>
#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h> #include <linux/prefetch.h>
#include <linux/sctp.h> #include <linux/sctp.h>
#include <net/mpls.h> #include <net/mpls.h>
...@@ -1741,38 +1742,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1741,38 +1742,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb, struct sk_buff *skb,
union i40e_rx_desc *rx_desc) union i40e_rx_desc *rx_desc)
{ {
struct i40e_rx_ptype_decoded decoded; struct libeth_rx_pt decoded;
u32 rx_error, rx_status; u32 rx_error, rx_status;
bool ipv4, ipv6; bool ipv4, ipv6;
u8 ptype; u8 ptype;
u64 qword; u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
skb_checksum_none_assert(skb); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
/* Rx csum enabled and ip headers found? */ decoded = libie_rx_pt_parse(ptype);
if (!(vsi->netdev->features & NETIF_F_RXCSUM)) if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return; return;
rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
/* did the hardware decode the packet and checksum? */ /* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return; return;
/* both known and outer_ip must be set for the below code to work */ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
if (!(decoded.known && decoded.outer_ip)) ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
return;
ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
if (ipv4 && if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
...@@ -1800,49 +1793,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1800,49 +1793,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* we need to bump the checksum level by 1 to reflect the fact that * we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum. * we are indicating we validated the inner checksum.
*/ */
if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1; skb->csum_level = 1;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */ skb->ip_summed = CHECKSUM_UNNECESSARY;
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
case I40E_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
fallthrough;
default:
break;
}
return; return;
checksum_fail: checksum_fail:
vsi->back->hw_csum_rx_error++; vsi->back->hw_csum_rx_error++;
} }
/**
* i40e_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
if (!decoded.known)
return PKT_HASH_TYPE_NONE;
if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
return PKT_HASH_TYPE_L4;
else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
return PKT_HASH_TYPE_L3;
else
return PKT_HASH_TYPE_L2;
}
/** /**
* i40e_rx_hash - set the hash value in the skb * i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring * @ring: descriptor ring
...@@ -1855,17 +1815,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, ...@@ -1855,17 +1815,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
struct sk_buff *skb, struct sk_buff *skb,
u8 rx_ptype) u8 rx_ptype)
{ {
struct libeth_rx_pt decoded;
u32 hash; u32 hash;
const __le64 rss_mask = const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
if (!(ring->netdev->features & NETIF_F_RXHASH)) decoded = libie_rx_pt_parse(rx_ptype);
if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return; return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); libeth_rx_pt_set_hash(skb, hash, decoded);
} }
} }
......
...@@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks { ...@@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
#define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
I40E_RX_PTYPE_L2_RESERVED = 0,
I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
I40E_RX_PTYPE_L2_ARP = 11,
I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:1;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:4;
u32 payload_layer:3;
};
enum i40e_rx_ptype_outer_ip {
I40E_RX_PTYPE_OUTER_L2 = 0,
I40E_RX_PTYPE_OUTER_IP = 1
};
enum i40e_rx_ptype_outer_ip_ver {
I40E_RX_PTYPE_OUTER_NONE = 0,
I40E_RX_PTYPE_OUTER_IPV4 = 0,
I40E_RX_PTYPE_OUTER_IPV6 = 1
};
enum i40e_rx_ptype_outer_fragmented {
I40E_RX_PTYPE_NOT_FRAG = 0,
I40E_RX_PTYPE_FRAG = 1
};
enum i40e_rx_ptype_tunnel_type {
I40E_RX_PTYPE_TUNNEL_NONE = 0,
I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};
enum i40e_rx_ptype_tunnel_end_prot {
I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};
enum i40e_rx_ptype_inner_prot {
I40E_RX_PTYPE_INNER_PROT_NONE = 0,
I40E_RX_PTYPE_INNER_PROT_UDP = 1,
I40E_RX_PTYPE_INNER_PROT_TCP = 2,
I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
};
enum i40e_rx_ptype_payload_layer {
I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
......
...@@ -287,7 +287,7 @@ struct iavf_adapter { ...@@ -287,7 +287,7 @@ struct iavf_adapter {
#define IAVF_FLAG_RESET_PENDING BIT(4) #define IAVF_FLAG_RESET_PENDING BIT(4)
#define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_RESET_NEEDED BIT(5)
#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
#define IAVF_FLAG_LEGACY_RX BIT(15) /* BIT(15) is free, was IAVF_FLAG_LEGACY_RX */
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
......
...@@ -240,29 +240,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = { ...@@ -240,29 +240,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = {
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
/* For now we have one and only one private flag and it is only defined
* when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
* of leaving all this code sitting around empty we will strip it unless
* our one private flag is actually available.
*/
struct iavf_priv_flags {
char flag_string[ETH_GSTRING_LEN];
u32 flag;
bool read_only;
};
#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
.flag_string = _name, \
.flag = _flag, \
.read_only = _read_only, \
}
static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
};
#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
/** /**
* iavf_get_link_ksettings - Get Link Speed and Duplex settings * iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -342,8 +319,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset) ...@@ -342,8 +319,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset)
return IAVF_STATS_LEN + return IAVF_STATS_LEN +
(IAVF_QUEUE_STATS_LEN * 2 * (IAVF_QUEUE_STATS_LEN * 2 *
netdev->real_num_tx_queues); netdev->real_num_tx_queues);
else if (sset == ETH_SS_PRIV_FLAGS)
return IAVF_PRIV_FLAGS_STR_LEN;
else else
return -EINVAL; return -EINVAL;
} }
...@@ -385,21 +360,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, ...@@ -385,21 +360,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
rcu_read_unlock(); rcu_read_unlock();
} }
/**
* iavf_get_priv_flag_strings - Get private flag strings
* @netdev: network interface device structure
* @data: buffer for string data
*
* Builds the private flags string table
**/
static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
{
unsigned int i;
for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string);
}
/** /**
* iavf_get_stat_strings - Get stat strings * iavf_get_stat_strings - Get stat strings
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -438,108 +398,11 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -438,108 +398,11 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
case ETH_SS_STATS: case ETH_SS_STATS:
iavf_get_stat_strings(netdev, data); iavf_get_stat_strings(netdev, data);
break; break;
case ETH_SS_PRIV_FLAGS:
iavf_get_priv_flag_strings(netdev, data);
break;
default: default:
break; break;
} }
} }
/**
* iavf_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
* The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
* array.
*
* Returns a u32 bitmap of flags.
**/
static u32 iavf_get_priv_flags(struct net_device *netdev)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 i, ret_flags = 0;
for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
const struct iavf_priv_flags *priv_flags;
priv_flags = &iavf_gstrings_priv_flags[i];
if (priv_flags->flag & adapter->flags)
ret_flags |= BIT(i);
}
return ret_flags;
}
/**
* iavf_set_priv_flags - set private flags
* @netdev: network interface device structure
* @flags: bit flags to be set
**/
static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 orig_flags, new_flags, changed_flags;
int ret = 0;
u32 i;
orig_flags = READ_ONCE(adapter->flags);
new_flags = orig_flags;
for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
const struct iavf_priv_flags *priv_flags;
priv_flags = &iavf_gstrings_priv_flags[i];
if (flags & BIT(i))
new_flags |= priv_flags->flag;
else
new_flags &= ~(priv_flags->flag);
if (priv_flags->read_only &&
((orig_flags ^ new_flags) & ~BIT(i)))
return -EOPNOTSUPP;
}
/* Before we finalize any flag changes, any checks which we need to
* perform to determine if the new flags will be supported should go
* here...
*/
/* Compare and exchange the new flags into place. If we failed, that
* is if cmpxchg returns anything but the old value, this means
* something else must have modified the flags variable since we
* copied it. We'll just punt with an error and log something in the
* message buffer.
*/
if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&adapter->pdev->dev,
"Unable to update adapter->flags as it was modified by another thread...\n");
return -EAGAIN;
}
changed_flags = orig_flags ^ new_flags;
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were changed
* in the code above.
*/
/* issue a reset to force legacy-rx change to take effect */
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
ret = iavf_wait_for_reset(adapter);
if (ret)
netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
}
}
return ret;
}
/** /**
* iavf_get_msglevel - Get debug message level * iavf_get_msglevel - Get debug message level
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -585,7 +448,6 @@ static void iavf_get_drvinfo(struct net_device *netdev, ...@@ -585,7 +448,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
strscpy(drvinfo->driver, iavf_driver_name, 32); strscpy(drvinfo->driver, iavf_driver_name, 32);
strscpy(drvinfo->fw_version, "N/A", 4); strscpy(drvinfo->fw_version, "N/A", 4);
strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
} }
/** /**
...@@ -1995,8 +1857,6 @@ static const struct ethtool_ops iavf_ethtool_ops = { ...@@ -1995,8 +1857,6 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_strings = iavf_get_strings, .get_strings = iavf_get_strings,
.get_ethtool_stats = iavf_get_ethtool_stats, .get_ethtool_stats = iavf_get_ethtool_stats,
.get_sset_count = iavf_get_sset_count, .get_sset_count = iavf_get_sset_count,
.get_priv_flags = iavf_get_priv_flags,
.set_priv_flags = iavf_set_priv_flags,
.get_msglevel = iavf_get_msglevel, .get_msglevel = iavf_get_msglevel,
.set_msglevel = iavf_set_msglevel, .set_msglevel = iavf_set_msglevel,
.get_coalesce = iavf_get_coalesce, .get_coalesce = iavf_get_coalesce,
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
#include "iavf.h" #include "iavf.h"
#include "iavf_prototype.h" #include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must /* All iavf tracepoints are defined by the include below, which must
...@@ -45,6 +47,8 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); ...@@ -45,6 +47,8 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf"); MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS(LIBETH);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops; static const struct net_device_ops iavf_netdev_ops;
...@@ -714,40 +718,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter) ...@@ -714,40 +718,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
**/ **/
static void iavf_configure_rx(struct iavf_adapter *adapter) static void iavf_configure_rx(struct iavf_adapter *adapter)
{ {
unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
struct iavf_hw *hw = &adapter->hw; struct iavf_hw *hw = &adapter->hw;
int i;
/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
struct net_device *netdev = adapter->netdev;
/* For jumbo frames on systems with 4K pages we have to use
* an order 1 page, so we might as well increase the size
* of our Rx buffer to make better use of the available space
*/
rx_buf_len = IAVF_RXBUFFER_3072;
/* We use a 1536 buffer size for configurations with
* standard Ethernet mtu. On x86 this gives us enough room
* for shared info and 192 bytes of padding.
*/
if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
(netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
}
#endif
for (i = 0; i < adapter->num_active_queues; i++) { for (u32 i = 0; i < adapter->num_active_queues; i++)
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
adapter->rx_rings[i].rx_buf_len = rx_buf_len;
if (adapter->flags & IAVF_FLAG_LEGACY_RX)
clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
else
set_ring_build_skb_enabled(&adapter->rx_rings[i]);
}
} }
/** /**
...@@ -1615,7 +1589,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) ...@@ -1615,7 +1589,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
rx_ring = &adapter->rx_rings[i]; rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i; rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev; rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count; rx_ring->count = adapter->rx_desc_count;
rx_ring->itr_setting = IAVF_ITR_RX_DEF; rx_ring->itr_setting = IAVF_ITR_RX_DEF;
} }
...@@ -2642,9 +2615,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) ...@@ -2642,9 +2615,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
iavf_set_ethtool_ops(netdev); iavf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ; netdev->watchdog_timeo = 5 * HZ;
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU; netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; netdev->max_mtu = LIBIE_MAX_MTU;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) { if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
......
...@@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, ...@@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key); struct iavf_aqc_get_set_rss_key_data *key);
extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return iavf_ptype_lookup[ptype];
}
void iavf_vf_parse_hw_config(struct iavf_hw *hw, void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg); struct virtchnl_vf_resource *msg);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
......
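(The iavf_txrx.c rewrite itself -- the bulk of the Page Pool conversion --
is collapsed in this view.) Its Rx ptype handling follows the same pattern
as the i40e hunks above and the ice hunks further below; a condensed,
driver-agnostic sketch of that shared flow, with my_rx_finalize_skb() and
its arguments being illustrative only:

  #include <linux/net/intel/libie/rx.h>

  static void my_rx_finalize_skb(struct net_device *netdev,
                                 struct sk_buff *skb,
                                 u32 rss_hash, u32 ptype)
  {
          struct libeth_rx_pt decoded = libie_rx_pt_parse(ptype);

          /* Subsumes the old NETIF_F_RXHASH check plus "does this ptype
           * produce a meaningful hash type at all?"
           */
          if (libeth_rx_pt_has_hash(netdev, decoded))
                  libeth_rx_pt_set_hash(skb, rss_hash, decoded);

          /* Likewise replaces the open-coded NETIF_F_RXCSUM + decoded.known +
           * outer_ip checks.
           */
          if (!libeth_rx_pt_has_checksum(netdev, decoded))
                  return;

          /* Real drivers additionally check the per-descriptor error bits
           * and bump skb->csum_level for GRE/NAT tunnels, as shown in
           * i40e_rx_checksum() above.
           */
          if (libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4 ||
              libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6)
                  skb->ip_summed = CHECKSUM_UNNECESSARY;
  }
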
...@@ -80,79 +80,8 @@ enum iavf_dyn_idx_t { ...@@ -80,79 +80,8 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256 256
#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048 2048
#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
* i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
* i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc #define iavf_rx_desc iavf_32byte_rx_desc
#define IAVF_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
* to deduct the space needed for the shared info and the padding needed
* to IP align the frame.
*
* Note: For cache line sizes 256 or larger this value is going to end
* up negative. In these cases we should fall back to the legacy
* receive path.
*/
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
static inline int iavf_compute_pad(int rx_buf_len)
{
int page_size, pad_size;
page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
return pad_size;
}
static inline int iavf_skb_pad(void)
{
int rx_buf_len;
/* If a 2K buffer cannot handle a standard Ethernet frame then
* optimize padding for a 3K buffer instead of a 1.5K buffer.
*
* For a 3K buffer we need to add enough padding to allow for
* tailroom due to NET_IP_ALIGN possibly shifting us out of
* cache-line alignment.
*/
if (IAVF_2K_TOO_SMALL_WITH_PADDING)
rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
else
rx_buf_len = IAVF_RXBUFFER_1536;
/* if needed make room for NET_IP_ALIGN */
rx_buf_len -= NET_IP_ALIGN;
return iavf_compute_pad(rx_buf_len);
}
#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
/** /**
* iavf_test_staterr - tests bits in Rx descriptor status and error fields * iavf_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format) * @rx_desc: pointer to receive descriptor (in le64 format)
...@@ -271,17 +200,6 @@ struct iavf_tx_buffer { ...@@ -271,17 +200,6 @@ struct iavf_tx_buffer {
u32 tx_flags; u32 tx_flags;
}; };
struct iavf_rx_buffer {
dma_addr_t dma;
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
#else
__u16 page_offset;
#endif
__u16 pagecnt_bias;
};
struct iavf_queue_stats { struct iavf_queue_stats {
u64 packets; u64 packets;
u64 bytes; u64 bytes;
...@@ -293,7 +211,6 @@ struct iavf_tx_queue_stats { ...@@ -293,7 +211,6 @@ struct iavf_tx_queue_stats {
u64 tx_done_old; u64 tx_done_old;
u64 tx_linearize; u64 tx_linearize;
u64 tx_force_wb; u64 tx_force_wb;
int prev_pkt_ctr;
u64 tx_lost_interrupt; u64 tx_lost_interrupt;
}; };
...@@ -301,14 +218,6 @@ struct iavf_rx_queue_stats { ...@@ -301,14 +218,6 @@ struct iavf_rx_queue_stats {
u64 non_eop_descs; u64 non_eop_descs;
u64 alloc_page_failed; u64 alloc_page_failed;
u64 alloc_buff_failed; u64 alloc_buff_failed;
u64 page_reuse_count;
u64 realloc_count;
};
enum iavf_ring_state_t {
__IAVF_TX_FDIR_INIT_DONE,
__IAVF_TX_XPS_INIT_DONE,
__IAVF_RING_STATE_NBITS /* must be last */
}; };
/* some useful defines for virtchannel interface, which /* some useful defines for virtchannel interface, which
...@@ -326,16 +235,19 @@ enum iavf_ring_state_t { ...@@ -326,16 +235,19 @@ enum iavf_ring_state_t {
struct iavf_ring { struct iavf_ring {
struct iavf_ring *next; /* pointer to next ring in q_vector */ struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */ void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */ union {
struct page_pool *pp; /* Used on Rx for buffer management */
struct device *dev; /* Used on Tx for DMA mapping */
};
struct net_device *netdev; /* netdev ring maps to */ struct net_device *netdev; /* netdev ring maps to */
union { union {
struct libeth_fqe *rx_fqes;
struct iavf_tx_buffer *tx_bi; struct iavf_tx_buffer *tx_bi;
struct iavf_rx_buffer *rx_bi;
}; };
DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail; u8 __iomem *tail;
u32 truesize;
u16 queue_index; /* Queue number of ring */
/* high bit set means dynamic, use accessors routines to read/write. /* high bit set means dynamic, use accessors routines to read/write.
* hardware only supports 2us resolution for the ITR registers. * hardware only supports 2us resolution for the ITR registers.
...@@ -345,23 +257,15 @@ struct iavf_ring { ...@@ -345,23 +257,15 @@ struct iavf_ring {
u16 itr_setting; u16 itr_setting;
u16 count; /* Number of descriptors */ u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
/* used in interrupt processing */ /* used in interrupt processing */
u16 next_to_use; u16 next_to_use;
u16 next_to_clean; u16 next_to_clean;
u8 atr_sample_rate;
u8 atr_count;
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
u16 flags; u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) #define IAVF_TXR_FLAGS_ARM_WB BIT(1)
/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3) #define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4) #define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5) #define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
...@@ -374,6 +278,7 @@ struct iavf_ring { ...@@ -374,6 +278,7 @@ struct iavf_ring {
struct iavf_rx_queue_stats rx_stats; struct iavf_rx_queue_stats rx_stats;
}; };
int prev_pkt_ctr; /* For Tx stall detection */
unsigned int size; /* length of descriptor ring in bytes */ unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */ dma_addr_t dma; /* physical address of ring */
...@@ -381,7 +286,6 @@ struct iavf_ring { ...@@ -381,7 +286,6 @@ struct iavf_ring {
struct iavf_q_vector *q_vector; /* Backreference to associated vector */ struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */ struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for * return before it sees the EOP for
* the current packet, we save that skb * the current packet, we save that skb
...@@ -390,22 +294,9 @@ struct iavf_ring { ...@@ -390,22 +294,9 @@ struct iavf_ring {
* iavf_clean_rx_ring_irq() is called * iavf_clean_rx_ring_irq() is called
* for this ring. * for this ring.
*/ */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}
static inline void set_ring_build_skb_enabled(struct iavf_ring *ring) u32 rx_buf_len;
{ } ____cacheline_internodealigned_in_smp;
ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}
static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}
#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 #define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002 #define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
...@@ -428,17 +319,6 @@ struct iavf_ring_container { ...@@ -428,17 +319,6 @@ struct iavf_ring_container {
#define iavf_for_each_ring(pos, head) \ #define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next) for (pos = (head).ring; pos != NULL; pos = pos->next)
static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
return 1;
#endif
return 0;
}
#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count); bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring); int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
......
...@@ -10,8 +10,6 @@ ...@@ -10,8 +10,6 @@
#include "iavf_adminq.h" #include "iavf_adminq.h"
#include "iavf_devids.h" #include "iavf_devids.h"
#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
/* IAVF_MASK is a macro used on 32 bit registers */ /* IAVF_MASK is a macro used on 32 bit registers */
#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift)) #define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
...@@ -327,94 +325,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks { ...@@ -327,94 +325,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
#define IAVF_RXD_QW1_PTYPE_SHIFT 30 #define IAVF_RXD_QW1_PTYPE_SHIFT 30
#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT) #define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
/* Packet type non-ip values */
enum iavf_rx_l2_ptype {
IAVF_RX_PTYPE_L2_RESERVED = 0,
IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
IAVF_RX_PTYPE_L2_ARP = 11,
IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct iavf_rx_ptype_decoded {
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:1;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:4;
u32 payload_layer:3;
};
enum iavf_rx_ptype_outer_ip {
IAVF_RX_PTYPE_OUTER_L2 = 0,
IAVF_RX_PTYPE_OUTER_IP = 1
};
enum iavf_rx_ptype_outer_ip_ver {
IAVF_RX_PTYPE_OUTER_NONE = 0,
IAVF_RX_PTYPE_OUTER_IPV4 = 0,
IAVF_RX_PTYPE_OUTER_IPV6 = 1
};
enum iavf_rx_ptype_outer_fragmented {
IAVF_RX_PTYPE_NOT_FRAG = 0,
IAVF_RX_PTYPE_FRAG = 1
};
enum iavf_rx_ptype_tunnel_type {
IAVF_RX_PTYPE_TUNNEL_NONE = 0,
IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};
enum iavf_rx_ptype_tunnel_end_prot {
IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};
enum iavf_rx_ptype_inner_prot {
IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
};
enum iavf_rx_ptype_payload_layer {
IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ #define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
#include "iavf.h" #include "iavf.h"
#include "iavf_prototype.h" #include "iavf_prototype.h"
...@@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) ...@@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter) void iavf_configure_queues(struct iavf_adapter *adapter)
{ {
struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_vsi_queue_config_info *vqci;
int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues; int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi; struct virtchnl_queue_pair_info *vqpi;
u32 i, max_frame;
size_t len; size_t len;
if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
max_frame = IAVF_MAX_RXBUFFER; max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
...@@ -288,11 +290,6 @@ void iavf_configure_queues(struct iavf_adapter *adapter) ...@@ -288,11 +290,6 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci) if (!vqci)
return; return;
/* Limit maximum frame size when jumbo frames is not enabled */
if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
(adapter->netdev->mtu <= ETH_DATA_LEN))
max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs; vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair; vqpi = vqci->qpair;
...@@ -309,9 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) ...@@ -309,9 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame; vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
ALIGN(adapter->rx_rings[i].rx_buf_len,
BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
if (CRC_OFFLOAD_ALLOWED(adapter)) if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features & vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS); NETIF_F_RXFCS);
......
...@@ -37,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; ...@@ -37,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE); MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */ /* Copyright (c) 2019, Intel Corporation. */
#include <linux/filter.h> #include <linux/filter.h>
#include <linux/net/intel/libie/rx.h>
#include "ice_txrx_lib.h" #include "ice_txrx_lib.h"
#include "ice_eswitch.h" #include "ice_eswitch.h"
...@@ -38,30 +39,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val) ...@@ -38,30 +39,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
} }
} }
/**
* ice_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
* skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
* Rx desc.
*/
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
if (!decoded.known)
return PKT_HASH_TYPE_NONE;
if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
return PKT_HASH_TYPE_L4;
if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
return PKT_HASH_TYPE_L3;
if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
return PKT_HASH_TYPE_L2;
return PKT_HASH_TYPE_NONE;
}
/** /**
* ice_get_rx_hash - get RX hash value from descriptor * ice_get_rx_hash - get RX hash value from descriptor
* @rx_desc: specific descriptor * @rx_desc: specific descriptor
...@@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, ...@@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
const union ice_32b_rx_flex_desc *rx_desc, const union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype) struct sk_buff *skb, u16 rx_ptype)
{ {
struct libeth_rx_pt decoded;
u32 hash; u32 hash;
if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) decoded = libie_rx_pt_parse(rx_ptype);
if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
return; return;
hash = ice_get_rx_hash(rx_desc); hash = ice_get_rx_hash(rx_desc);
if (likely(hash)) if (likely(hash))
skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); libeth_rx_pt_set_hash(skb, hash, decoded);
} }
/** /**
...@@ -114,34 +93,26 @@ static void ...@@ -114,34 +93,26 @@ static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype) union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{ {
struct ice_rx_ptype_decoded decoded; struct libeth_rx_pt decoded;
u16 rx_status0, rx_status1; u16 rx_status0, rx_status1;
bool ipv4, ipv6; bool ipv4, ipv6;
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
decoded = ice_decode_rx_desc_ptype(ptype);
/* Start with CHECKSUM_NONE and by default csum_level = 0 */ /* Start with CHECKSUM_NONE and by default csum_level = 0 */
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
skb_checksum_none_assert(skb);
/* check if Rx checksum is enabled */ decoded = libie_rx_pt_parse(ptype);
if (!(ring->netdev->features & NETIF_F_RXCSUM)) if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
return; return;
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
/* check if HW has decoded the packet and checksum */ /* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return; return;
if (!(decoded.known && decoded.outer_ip)) ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
return; ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) { if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
ring->vsi->back->hw_rx_eipe_error++; ring->vsi->back->hw_rx_eipe_error++;
...@@ -169,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, ...@@ -169,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
* we need to bump the checksum level by 1 to reflect the fact that * we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum. * we are indicating we validated the inner checksum.
*/ */
if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT) if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1; skb->csum_level = 1;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */ skb->ip_summed = CHECKSUM_UNNECESSARY;
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
case ICE_RX_PTYPE_INNER_PROT_UDP:
case ICE_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
default:
break;
}
return; return;
checksum_fail: checksum_fail:
...@@ -536,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) ...@@ -536,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
return 0; return 0;
} }
/* Define a ptype index -> XDP hash type lookup table.
* It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
* avoiding possible copy-paste errors.
*/
#undef ICE_PTT
#undef ICE_PTT_UNUSED_ENTRY
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
[PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
/* A few supplementary definitions for when XDP hash types do not coincide
* with what can be generated from ptype definitions
* by means of preprocessor concatenation.
*/
#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
static const enum xdp_rss_hash_type
ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
ICE_PTYPES
};
#undef XDP_RSS_L3_NONE
#undef XDP_RSS_L4_NONE
#undef XDP_RSS_TYPE_PAY2
#undef XDP_RSS_TYPE_PAY3
#undef XDP_RSS_TYPE_PAY4
#undef ICE_PTT
#undef ICE_PTT_UNUSED_ENTRY
/** /**
* ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
* @eop_desc: End of Packet descriptor * @eop_desc: End of Packet descriptor
...@@ -579,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { ...@@ -579,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
static enum xdp_rss_hash_type static enum xdp_rss_hash_type
ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
{ {
u16 ptype = ice_get_ptype(eop_desc); return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
return 0;
return ice_ptype_to_xdp_hash[ptype];
} }
/** /**
......
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2024 Intel Corporation
config LIBETH
tristate
select PAGE_POOL
help
libeth is a common library containing routines shared between several
drivers, but not yet promoted to the generic kernel API.
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2024 Intel Corporation
obj-$(CONFIG_LIBETH) += libeth.o
libeth-objs += rx.o
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */
#include <net/libeth/rx.h>
/* Rx buffer management */
/**
* libeth_rx_hw_len - get the actual buffer size to be passed to HW
* @pp: &page_pool_params of the netdev to calculate the size for
* @max_len: maximum buffer size for a single descriptor
*
* Return: HW-writeable length per one buffer to pass it to the HW accounting:
* MTU the @dev has, HW required alignment, minimum and maximum allowed values,
* and system's page size.
*/
static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
{
u32 len;
len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
pp->max_len);
return len;
}
/**
* libeth_rx_fq_create - create a PP with the default libeth settings
* @fq: buffer queue struct to fill
* @napi: &napi_struct covering this PP (no usage outside its poll loops)
*
* Return: %0 on success, -%errno on failure.
*/
int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
{
struct page_pool_params pp = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = LIBETH_RX_PAGE_ORDER,
.pool_size = fq->count,
.nid = fq->nid,
.dev = napi->dev->dev.parent,
.netdev = napi->dev,
.napi = napi,
.dma_dir = DMA_FROM_DEVICE,
.offset = LIBETH_SKB_HEADROOM,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
/* HW-writeable / syncable length per one page */
pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
/* HW-writeable length per buffer */
fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
/* Buffer size to allocate */
fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
fq->buf_len));
pool = page_pool_create(&pp);
if (IS_ERR(pool))
return PTR_ERR(pool);
fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
if (!fqes)
goto err_buf;
fq->fqes = fqes;
fq->pp = pool;
return 0;
err_buf:
page_pool_destroy(pool);
return -ENOMEM;
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
/**
* libeth_rx_fq_destroy - destroy a &page_pool created by libeth
* @fq: buffer queue to process
*/
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
kvfree(fq->fqes);
page_pool_destroy(fq->pp);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
/**
* libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
* @page: page to recycle
*
* To be used on exceptions or rare cases not requiring fast inline recycling.
*/
void libeth_rx_recycle_slow(struct page *page)
{
page_pool_recycle_direct(page->pp, page);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
/* Converting abstract packet type numbers into a software structure with
* the packet parameters to do O(1) lookup on Rx.
*/
static const u16 libeth_rx_pt_xdp_oip[] = {
[LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE,
[LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4,
[LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6,
};
static const u16 libeth_rx_pt_xdp_iprot[] = {
[LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE,
[LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP,
[LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP,
[LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP,
[LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP,
[LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE,
};
static const u16 libeth_rx_pt_xdp_pl[] = {
[LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE,
[LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE,
[LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE,
[LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4,
};
/**
* libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
* @pt: PT structure to evaluate
*
* Generates ```hash_type``` field with XDP RSS type values from the parsed
* packet parameters if they're obtained dynamically at runtime.
*/
void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
{
pt->hash_type = 0;
pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
/* Module */
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Common Ethernet library");
MODULE_LICENSE("GPL");
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2024 Intel Corporation
config LIBIE
tristate
select LIBETH
help
libie (Intel Ethernet library) is a common library built on top of
libeth and containing vendor-specific routines shared between several
Intel Ethernet drivers.
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2024 Intel Corporation
obj-$(CONFIG_LIBIE) += libie.o
libie-objs += rx.o
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */
#include <linux/net/intel/libie/rx.h>
/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
* bitfield struct.
*/
/* A few supplementary definitions for when XDP hash types do not coincide
* with what can be generated from ptype definitions by means of preprocessor
* concatenation.
*/
#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE
#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE
#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE
#define XDP_RSS_TYPE_L4 XDP_RSS_L4
#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \
.outer_ip = LIBETH_RX_PT_OUTER_##oip, \
.outer_frag = LIBETH_RX_PT_##ofrag, \
.tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \
.tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \
.tunnel_end_frag = LIBETH_RX_PT_##tefr, \
.inner_prot = LIBETH_RX_PT_INNER_##iprot, \
.payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \
.hash_type = XDP_RSS_L3_##oip | \
XDP_RSS_L4_##iprot | \
XDP_RSS_TYPE_##pl, \
}
#define LIBIE_RX_PT_UNUSED { }
#define __LIBIE_RX_PT_L2(iprot, pl) \
LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl)
#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2)
#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2)
#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3)
#define LIBIE_RX_PT_IP_FRAG(oip) \
LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3)
#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \
LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3)
#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \
LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4)
#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \
LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \
LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \
LIBIE_RX_PT_UNUSED, \
LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \
LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \
LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP)
/* IPv oip --> tun --> IPv ver */
#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \
LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \
LIBIE_RX_PT_IP_NOF(oip, tun, ver)
/* Non Tunneled IPv oip */
#define LIBIE_RX_PT_IP_RAW(oip) \
LIBIE_RX_PT_IP_FRAG(oip), \
LIBIE_RX_PT_IP_NOF(oip, NONE, NONE)
/* IPv oip --> tun --> { IPv4, IPv6 } */
#define LIBIE_RX_PT_IP_TUN(oip, tun) \
LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \
LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6)
/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */
#define LIBIE_RX_PT_IP_GRE(oip, tun) \
LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \
LIBIE_RX_PT_IP_TUN(oip, tun)
/* Non Tunneled IPv oip
* IPv oip --> { IPv4, IPv6 }
* IPv oip --> GRE/NAT --> { x, IPv4, IPv6 }
* IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 }
* IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 }
*/
#define LIBIE_RX_PT_IP(oip) \
LIBIE_RX_PT_IP_RAW(oip), \
LIBIE_RX_PT_IP_TUN(oip, IP), \
LIBIE_RX_PT_IP_GRE(oip, GRENAT), \
LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \
LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN)
/* Lookup table mapping for O(1) parsing */
const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
/* L2 packet types */
LIBIE_RX_PT_UNUSED,
LIBIE_RX_PT_L2,
LIBIE_RX_PT_TS,
LIBIE_RX_PT_L2,
LIBIE_RX_PT_UNUSED,
LIBIE_RX_PT_UNUSED,
LIBIE_RX_PT_L2,
LIBIE_RX_PT_L2,
LIBIE_RX_PT_UNUSED,
LIBIE_RX_PT_UNUSED,
LIBIE_RX_PT_L2,
LIBIE_RX_PT_UNUSED,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_L3,
LIBIE_RX_PT_IP(4),
LIBIE_RX_PT_IP(6),
};
EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");
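For reference, a minimal sketch of what a single slot of the table above expands to, here LIBIE_RX_PT_IP_L4(4, NONE, NONE, TCP) describing a non-tunneled IPv4/TCP ptype (the variable name is illustrative):

/* Illustrative expansion only -- the real entries are generated by the
 * macros above. XDP_RSS_TYPE_L4 resolves to XDP_RSS_L4 per the local define,
 * so the hash type carries both the L3 and L4 RSS bits.
 */
static const struct libeth_rx_pt example_ipv4_tcp = {
	.outer_ip	 = LIBETH_RX_PT_OUTER_IPV4,
	.outer_frag	 = LIBETH_RX_PT_NOT_FRAG,
	.tunnel_type	 = LIBETH_RX_PT_TUNNEL_IP_NONE,
	.tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_NONE,
	.tunnel_end_frag = LIBETH_RX_PT_NOT_FRAG,
	.inner_prot	 = LIBETH_RX_PT_INNER_TCP,
	.payload_layer	 = LIBETH_RX_PT_PAYLOAD_L4,
	.hash_type	 = XDP_RSS_L3_IPV4 | XDP_RSS_L4_TCP | XDP_RSS_L4,
};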
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2024 Intel Corporation */
#ifndef __LIBIE_RX_H
#define __LIBIE_RX_H
#include <net/libeth/rx.h>
/* Rx buffer management */
/* The largest size for a single descriptor as per HW */
#define LIBIE_MAX_RX_BUF_LEN 9728U
/* "True" HW-writeable space: minimum from SW and HW values */
#define LIBIE_RX_BUF_LEN(hr) min_t(u32, LIBETH_RX_PAGE_LEN(hr), \
LIBIE_MAX_RX_BUF_LEN)
/* The maximum frame size as per HW (S/G) */
#define __LIBIE_MAX_RX_FRM_LEN 16382U
/* Additionally, the HW can chain up to 5 Rx descriptors */
#define LIBIE_MAX_RX_FRM_LEN(hr) \
min_t(u32, __LIBIE_MAX_RX_FRM_LEN, LIBIE_RX_BUF_LEN(hr) * 5)
/* Maximum frame size minus LL overhead */
#define LIBIE_MAX_MTU \
(LIBIE_MAX_RX_FRM_LEN(LIBETH_MAX_HEADROOM) - LIBETH_RX_LL_LEN)
/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
* bitfield struct.
*/
#define LIBIE_RX_PT_NUM 154
extern const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM];
/**
* libie_rx_pt_parse - convert HW packet type to software bitfield structure
* @pt: 10-bit hardware packet type value from the descriptor
*
 * ``libie_rx_pt_lut`` must be accessed only using this wrapper.
*
* Return: parsed bitfield struct corresponding to the provided ptype.
*/
static inline struct libeth_rx_pt libie_rx_pt_parse(u32 pt)
{
if (unlikely(pt >= LIBIE_RX_PT_NUM))
pt = 0;
return libie_rx_pt_lut[pt];
}
#endif /* __LIBIE_RX_H */
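A hedged sketch of how a driver's Rx hot path might consume this header; my_rx_desc_get_ptype()/my_rx_desc_get_hash() are made-up descriptor accessors, and a real driver would also check the descriptor's checksum-status bits before trusting the ptype alone.

static void my_rx_fill_skb_meta(struct sk_buff *skb, const void *rx_desc,
				const struct net_device *dev)
{
	struct libeth_rx_pt pt = libie_rx_pt_parse(my_rx_desc_get_ptype(rx_desc));

	if (libeth_rx_pt_has_hash(dev, pt))
		libeth_rx_pt_set_hash(skb, my_rx_desc_get_hash(rx_desc), pt);

	/* Simplified: HW csum-verified bits must be checked in addition */
	if (libeth_rx_pt_has_checksum(dev, pt))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}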
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -774,14 +774,27 @@ static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
 	return kvmalloc(size, flags | __GFP_ZERO);
 }
 
-static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *
+kvmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 
-	return kvmalloc(bytes, flags);
+	return kvmalloc_node(bytes, flags, node);
+}
+
+static inline __alloc_size(1, 2) void *
+kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+	return kvmalloc_array_node(n, size, flags, NUMA_NO_NODE);
+}
+
+static inline __alloc_size(1, 2) void *
+kvcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+	return kvmalloc_array_node(n, size, flags | __GFP_ZERO, node);
 }
 
 static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
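A minimal usage sketch of the new NUMA-aware variants (struct my_rx_buf is a placeholder): a zeroed, node-local buffer array that transparently falls back to vmalloc for large counts and is freed with kvfree(), same as the existing kvmalloc() family.

static struct my_rx_buf *my_alloc_rx_bufs(u32 count, int nid)
{
	/* Equivalent to kvcalloc(), but placed on the given NUMA node */
	return kvcalloc_node(count, sizeof(struct my_rx_buf), GFP_KERNEL, nid);
}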
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2024 Intel Corporation */
#ifndef __LIBETH_RX_H
#define __LIBETH_RX_H
#include <linux/if_vlan.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
/* Rx buffer management */
/* Space reserved in front of each frame */
#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
/* Maximum headroom for worst-case calculations */
#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
/* Always use order-0 pages */
#define LIBETH_RX_PAGE_ORDER 0
/* Pick a sane buffer stride and align to a cacheline boundary */
#define LIBETH_RX_BUF_STRIDE SKB_DATA_ALIGN(128)
/* HW-writeable space in one buffer: truesize - headroom/tailroom, aligned */
#define LIBETH_RX_PAGE_LEN(hr) \
ALIGN_DOWN(SKB_MAX_ORDER(hr, LIBETH_RX_PAGE_ORDER), \
LIBETH_RX_BUF_STRIDE)
/**
* struct libeth_fqe - structure representing an Rx buffer (fill queue element)
* @page: page holding the buffer
* @offset: offset from the page start (to the headroom)
* @truesize: total space occupied by the buffer (w/ headroom and tailroom)
*
 * Depending on the MTU, the API switches between the one-page-per-frame and
 * shared-page models (to conserve memory on bigger-page platforms). In the
 * former case, @offset is always 0 and @truesize is always ``PAGE_SIZE``.
*/
struct libeth_fqe {
struct page *page;
u32 offset;
u32 truesize;
} __aligned_largest;
/**
* struct libeth_fq - structure representing a buffer (fill) queue
* @fp: hotpath part of the structure
* @pp: &page_pool for buffer management
* @fqes: array of Rx buffers
* @truesize: size to allocate per buffer, w/overhead
* @count: number of descriptors/buffers the queue has
 * @buf_len: HW-writeable length per buffer
* @nid: ID of the closest NUMA node with memory
*/
struct libeth_fq {
struct_group_tagged(libeth_fq_fp, fp,
struct page_pool *pp;
struct libeth_fqe *fqes;
u32 truesize;
u32 count;
);
/* Cold fields */
u32 buf_len;
int nid;
};
int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
void libeth_rx_fq_destroy(struct libeth_fq *fq);
/**
* libeth_rx_alloc - allocate a new Rx buffer
* @fq: fill queue to allocate for
* @i: index of the buffer within the queue
*
 * Return: DMA address to be passed to HW for Rx on successful allocation,
 * ``DMA_MAPPING_ERROR`` otherwise.
*/
static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
{
struct libeth_fqe *buf = &fq->fqes[i];
buf->truesize = fq->truesize;
buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);
if (unlikely(!buf->page))
return DMA_MAPPING_ERROR;
return page_pool_get_dma_addr(buf->page) + buf->offset +
fq->pp->p.offset;
}
void libeth_rx_recycle_slow(struct page *page);
/**
* libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA
* @fqe: buffer to process
* @len: frame length from the descriptor
*
 * Process the buffer after it's written by HW. The regular path is to
 * synchronize the DMA for CPU access; if the buffer contains no data, it is
 * immediately recycled back to its PP.
*
* Return: true when there's data to process, false otherwise.
*/
static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
u32 len)
{
struct page *page = fqe->page;
/* Very rare, but possible case. The most common reason:
* the last fragment contained FCS only, which was then
* stripped by the HW.
*/
if (unlikely(!len)) {
libeth_rx_recycle_slow(page);
return false;
}
page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
return true;
}
/* Converting abstract packet type numbers into a software structure with
* the packet parameters to do O(1) lookup on Rx.
*/
enum {
LIBETH_RX_PT_OUTER_L2 = 0U,
LIBETH_RX_PT_OUTER_IPV4,
LIBETH_RX_PT_OUTER_IPV6,
};
enum {
LIBETH_RX_PT_NOT_FRAG = 0U,
LIBETH_RX_PT_FRAG,
};
enum {
LIBETH_RX_PT_TUNNEL_IP_NONE = 0U,
LIBETH_RX_PT_TUNNEL_IP_IP,
LIBETH_RX_PT_TUNNEL_IP_GRENAT,
LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC,
LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC_VLAN,
};
enum {
LIBETH_RX_PT_TUNNEL_END_NONE = 0U,
LIBETH_RX_PT_TUNNEL_END_IPV4,
LIBETH_RX_PT_TUNNEL_END_IPV6,
};
enum {
LIBETH_RX_PT_INNER_NONE = 0U,
LIBETH_RX_PT_INNER_UDP,
LIBETH_RX_PT_INNER_TCP,
LIBETH_RX_PT_INNER_SCTP,
LIBETH_RX_PT_INNER_ICMP,
LIBETH_RX_PT_INNER_TIMESYNC,
};
#define LIBETH_RX_PT_PAYLOAD_NONE PKT_HASH_TYPE_NONE
#define LIBETH_RX_PT_PAYLOAD_L2 PKT_HASH_TYPE_L2
#define LIBETH_RX_PT_PAYLOAD_L3 PKT_HASH_TYPE_L3
#define LIBETH_RX_PT_PAYLOAD_L4 PKT_HASH_TYPE_L4
struct libeth_rx_pt {
u32 outer_ip:2;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:3;
enum pkt_hash_types payload_layer:2;
u32 pad:2;
enum xdp_rss_hash_type hash_type:16;
};
void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt);
/**
* libeth_rx_pt_get_ip_ver - get IP version from a packet type structure
* @pt: packet type params
*
* Wrapper to compile out the IPv6 code from the drivers when not supported
* by the kernel.
*
 * Return: @pt.outer_ip, or a stub value when IPv6 is not compiled in.
*/
static inline u32 libeth_rx_pt_get_ip_ver(struct libeth_rx_pt pt)
{
#if !IS_ENABLED(CONFIG_IPV6)
switch (pt.outer_ip) {
case LIBETH_RX_PT_OUTER_IPV4:
return LIBETH_RX_PT_OUTER_IPV4;
default:
return LIBETH_RX_PT_OUTER_L2;
}
#else
return pt.outer_ip;
#endif
}
/* libeth_has_*() can be used to quickly check whether the HW metadata is
* available to avoid further expensive processing such as descriptor reads.
* They already check for the corresponding netdev feature to be enabled,
* thus can be used as drop-in replacements.
*/
static inline bool libeth_rx_pt_has_checksum(const struct net_device *dev,
struct libeth_rx_pt pt)
{
	/* Non-zero _INNER* is only possible when _OUTER_IPV* is set, so it
	 * is enough to check only the L4 type.
	 */
return likely(pt.inner_prot > LIBETH_RX_PT_INNER_NONE &&
(dev->features & NETIF_F_RXCSUM));
}
static inline bool libeth_rx_pt_has_hash(const struct net_device *dev,
struct libeth_rx_pt pt)
{
return likely(pt.payload_layer > LIBETH_RX_PT_PAYLOAD_NONE &&
(dev->features & NETIF_F_RXHASH));
}
/**
 * libeth_rx_pt_set_hash - fill in the skb hash value based on the PT
* @skb: skb to fill the hash in
* @hash: 32-bit hash value from the descriptor
* @pt: packet type
*/
static inline void libeth_rx_pt_set_hash(struct sk_buff *skb, u32 hash,
struct libeth_rx_pt pt)
{
skb_set_hash(skb, hash, pt.payload_layer);
}
#endif /* __LIBETH_RX_H */
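To tie the pieces together, a hypothetical driver-side sketch of the fill-queue lifecycle built only from the declarations above; struct my_rx_queue, my_post_rx_desc(), and the buf_len choice are assumptions, not part of the series.

struct my_rx_queue {
	struct libeth_fq	fq;
	u32			desc_count;
};

static int my_queue_init(struct my_rx_queue *q, struct napi_struct *napi)
{
	struct libeth_fq fq = {
		.count		= q->desc_count,
		/* Assumption: request the full HW-writeable page space */
		.buf_len	= LIBETH_RX_PAGE_LEN(LIBETH_SKB_HEADROOM),
		.nid		= NUMA_NO_NODE,
	};
	int err;

	err = libeth_rx_fq_create(&fq, napi);
	if (err)
		return err;

	q->fq = fq;

	/* Post the initial buffers to the HW Rx ring */
	for (u32 i = 0; i < q->desc_count; i++) {
		dma_addr_t dma = libeth_rx_alloc(&q->fq.fp, i);

		if (dma == DMA_MAPPING_ERROR)
			break;

		my_post_rx_desc(q, i, dma);
	}

	return 0;
}

static bool my_queue_add_rx_frag(struct my_rx_queue *q, struct sk_buff *skb,
				 u32 i, u32 len)
{
	struct libeth_fqe *fqe = &q->fq.fqes[i];

	/* Zero-length buffers (e.g. FCS-only fragments) are recycled here */
	if (!libeth_rx_sync_for_cpu(fqe, len))
		return false;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, fqe->page,
			fqe->offset + fqe->page->pp->p.offset, len,
			fqe->truesize);

	return true;
}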
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -52,13 +52,15 @@
 #ifndef _NET_PAGE_POOL_HELPERS_H
 #define _NET_PAGE_POOL_HELPERS_H
 
+#include <linux/dma-mapping.h>
+
 #include <net/page_pool/types.h>
 
 #ifdef CONFIG_PAGE_POOL_STATS
 /* Deprecated driver-facing API, use netlink instead */
 int page_pool_ethtool_stats_get_count(void);
 u8 *page_pool_ethtool_stats_get_strings(u8 *data);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);
 
 bool page_pool_get_stats(const struct page_pool *pool,
 			 struct page_pool_stats *stats);
@@ -73,7 +75,7 @@ static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
 	return data;
 }
 
-static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
 {
 	return data;
 }
@@ -204,8 +206,8 @@ static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
  * Get the stored dma direction. A driver might decide to store this locally
  * and avoid the extra cache line from page_pool to determine the direction.
  */
-static
-inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+static inline enum dma_data_direction
+page_pool_get_dma_dir(const struct page_pool *pool)
 {
 	return pool->p.dma_dir;
 }
@@ -370,7 +372,7 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
  * Fetch the DMA address of the page. The page pool to which the page belongs
  * must had been created with PP_FLAG_DMA_MAP.
  */
-static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
 {
 	dma_addr_t ret = page->dma_addr;
 
@@ -395,6 +397,28 @@ static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 	return false;
 }
 
+/**
+ * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
+ * @pool: &page_pool the @page belongs to
+ * @page: page to sync
+ * @offset: offset from page start to "hard" start if using PP frags
+ * @dma_sync_size: size of the data written to the page
+ *
+ * Can be used as a shorthand to sync Rx pages before accessing them in the
+ * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
+ * Note that this version performs DMA sync unconditionally, even if the
+ * associated PP doesn't perform sync-for-device.
+ */
+static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+					      const struct page *page,
+					      u32 offset, u32 dma_sync_size)
+{
+	dma_sync_single_range_for_cpu(pool->p.dev,
+				      page_pool_get_dma_addr(page),
+				      offset + pool->p.offset, dma_sync_size,
+				      page_pool_get_dma_dir(pool));
+}
+
 static inline bool page_pool_put(struct page_pool *pool)
 {
 	return refcount_dec_and_test(&pool->user_cnt);
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -213,7 +213,7 @@ struct xdp_mem_info;
 #ifdef CONFIG_PAGE_POOL
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
-			   struct xdp_mem_info *mem);
+			   const struct xdp_mem_info *mem);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
 #else
@@ -223,7 +223,7 @@ static inline void page_pool_destroy(struct page_pool *pool)
 
 static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 					 void (*disconnect)(void *),
-					 struct xdp_mem_info *mem)
+					 const struct xdp_mem_info *mem)
 {
 }
 
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -123,9 +123,9 @@ int page_pool_ethtool_stats_get_count(void)
 }
 EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
 
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
 {
-	struct page_pool_stats *pool_stats = stats;
+	const struct page_pool_stats *pool_stats = stats;
 
 	*data++ = pool_stats->alloc_stats.fast;
 	*data++ = pool_stats->alloc_stats.slow;
@@ -383,8 +383,8 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 	return page;
 }
 
-static void page_pool_dma_sync_for_device(struct page_pool *pool,
-					   struct page *page,
+static void page_pool_dma_sync_for_device(const struct page_pool *pool,
+					   const struct page *page,
 					   unsigned int dma_sync_size)
 {
 	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
@@ -987,7 +987,7 @@ static void page_pool_release_retry(struct work_struct *wq)
 }
 
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
-			   struct xdp_mem_info *mem)
+			   const struct xdp_mem_info *mem)
 {
 	refcount_inc(&pool->user_cnt);
 	pool->disconnect = disconnect;
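Finally, a hedged sketch of the new sync-for-CPU helper from a driver's point of view (my_desc_len() is a made-up descriptor accessor); libeth_rx_sync_for_cpu() earlier in the series is essentially this plus the empty-buffer recycling shortcut.

static void *my_rx_data_start(const struct page_pool *pool, struct page *page,
			      u32 offset, const void *desc)
{
	u32 len = my_desc_len(desc);

	/* Make the HW-written bytes visible to the CPU before reading them */
	page_pool_dma_sync_for_cpu(pool, page, offset, len);

	/* Data starts after the PP headroom, same base the DMA sync used */
	return page_address(page) + offset + pool->p.offset;
}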