Commit 556b2710 authored by David S. Miller

Merge branch 'ENETC'

Claudiu Manoil says:

====================
Introduce ENETC ethernet drivers

ENETC is a multi-port virtualized Ethernet controller supporting GbE
designs and Time-Sensitive Networking (TSN) functionality.
ENETC operates as an SR-IOV multi-PF capable Root Complex Integrated
Endpoint (RCIE).  As such, it contains multiple physical (PF) and virtual
(VF) PCIe functions, discoverable by standard PCI Express.

The patch series adds basic enablement for these otherwise standard
buffer descriptor (BD) ring based ethernet devices (PCIe PFs and VFs),
currently integrated in the LS1028A SoC, which features dual 64-bit
ARMv8 cores.  The driver is portable to 32-bit designs and is
independent of CPU endianness.

Contributors:
Alex Marginean <alexandru.marginean@nxp.com>
Catalin Horghidan <catalin.horghidan@nxp.com>

TODO list:
* IEEE 1588 PTP support;
* TSN support;
* MDIO support and VF link management;
* power management support;
* flow control support;
* TC offloading with h/w MQPRIO;
* interrupt coalescing, configurable BD ring sizes, and other common
config options that are still missing.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5e5b9f62 d382563f
@@ -6023,6 +6023,12 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
FREESCALE ENETC ETHERNET DRIVERS
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/enetc/
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
@@ -97,5 +97,6 @@ config GIANFAR
source "drivers/net/ethernet/freescale/dpaa/Kconfig"
source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
source "drivers/net/ethernet/freescale/enetc/Kconfig"
endif # NET_VENDOR_FREESCALE
@@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/
obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
obj-$(CONFIG_FSL_ENETC) += enetc/
obj-$(CONFIG_FSL_ENETC_VF) += enetc/
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
physical function (PF) devices, managing ENETC Ports at a privileged
level.
If compiled as module (M), the module name is fsl-enetc.
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
virtual function (VF) devices enabled by the ENETC PF driver.
If compiled as module (M), the module name is fsl-enetc-vf.
# SPDX-License-Identifier: GPL-2.0
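# fsl-enetc (PF) links the common core, control BD ring and ethtool code;
# the PF-VF messaging support is built in only when SR-IOV (PCI_IOV) is on.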
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
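# When the PF and VF drivers are both built-in ('yy'), the common objects
# are already linked into the kernel via fsl-enetc, so fsl-enetc-vf needs
# only its own glue code; otherwise the VF module carries its own copy.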
ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
fsl-enetc-vf-objs := enetc_vf.o
else
fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
enetc_ethtool.o
fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
endif
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/of_mdio.h>
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS 13
#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
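/* worst case ring space: 13 frags + 1 head + ext BD + 1 BD gap = 16 BDs */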
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
int count;
tx_ring = priv->tx_ring[skb->queue_mapping];
if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_BUSY;
}
count = enetc_map_tx_buffs(tx_ring, skb);
if (unlikely(!count))
goto drop_packet_err;
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
drop_packet_err:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
{
int l3_start, l3_hsize;
u16 l3_flags, l4_flags;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
l4_flags = ENETC_TXBD_L4_TCP;
break;
case offsetof(struct udphdr, check):
l4_flags = ENETC_TXBD_L4_UDP;
break;
default:
skb_checksum_help(skb);
return false;
}
l3_start = skb_network_offset(skb);
l3_hsize = skb_network_header_len(skb);
l3_flags = 0;
if (skb->protocol == htons(ETH_P_IPV6))
l3_flags = ENETC_TXBD_L3_IPV6;
/* write BD fields */
txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
txbd->l4_csoff = l4_flags;
return true;
}
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
if (tx_swbd->is_dma_page)
dma_unmap_page(tx_ring->dev, tx_swbd->dma,
tx_swbd->len, DMA_TO_DEVICE);
else
dma_unmap_single(tx_ring->dev, tx_swbd->dma,
tx_swbd->len, DMA_TO_DEVICE);
tx_swbd->dma = 0;
}
static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
if (tx_swbd->dma)
enetc_unmap_tx_buff(tx_ring, tx_swbd);
if (tx_swbd->skb) {
dev_kfree_skb_any(tx_swbd->skb);
tx_swbd->skb = NULL;
}
}
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
struct enetc_tx_swbd *tx_swbd;
struct skb_frag_struct *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
union enetc_tx_bd *txbd;
bool do_vlan, do_tstamp;
int i, count = 0;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto dma_err;
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 0;
count++;
do_vlan = skb_vlan_tag_present(skb);
do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
if (do_vlan || do_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
if (enetc_tx_csum(skb, &temp_bd))
flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
*txbd = temp_bd;
enetc_clear_tx_bd(&temp_bd);
/* add extension BD for VLAN and/or timestamping */
flags = 0;
tx_swbd++;
txbd++;
i++;
if (unlikely(i == tx_ring->bd_count)) {
i = 0;
tx_swbd = tx_ring->tx_swbd;
txbd = ENETC_TXBD(*tx_ring, 0);
}
prefetchw(txbd);
if (do_vlan) {
temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
temp_bd.ext.tpid = 0; /* 0 = C-TAG */
e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
}
if (do_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
}
temp_bd.ext.e_flags = e_flags;
count++;
}
frag = &skb_shinfo(skb)->frags[0];
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
len = skb_frag_size(frag);
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_err;
*txbd = temp_bd;
enetc_clear_tx_bd(&temp_bd);
flags = 0;
tx_swbd++;
txbd++;
i++;
if (unlikely(i == tx_ring->bd_count)) {
i = 0;
tx_swbd = tx_ring->tx_swbd;
txbd = ENETC_TXBD(*tx_ring, 0);
}
prefetchw(txbd);
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 1;
count++;
}
/* last BD needs 'F' bit set */
flags |= ENETC_TXBD_FLAGS_F;
temp_bd.flags = flags;
*txbd = temp_bd;
tx_ring->tx_swbd[i].skb = skb;
enetc_bdr_idx_inc(tx_ring, &i);
tx_ring->next_to_use = i;
/* let H/W know BD ring has been updated */
enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
return count;
dma_err:
dev_err(tx_ring->dev, "DMA map error\n");
do {
tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_skb(tx_ring, tx_swbd);
if (i == 0)
i = tx_ring->bd_count;
i--;
} while (count--);
return 0;
}
static irqreturn_t enetc_msix(int irq, void *data)
{
struct enetc_int_vector *v = data;
int i;
/* disable interrupts */
enetc_wr_reg(v->rbier, 0);
for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
napi_schedule_irqoff(&v->napi);
return IRQ_HANDLED;
}
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit);
static int enetc_poll(struct napi_struct *napi, int budget)
{
struct enetc_int_vector
*v = container_of(napi, struct enetc_int_vector, napi);
bool complete = true;
int work_done;
int i;
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
if (work_done == budget)
complete = false;
if (!complete)
return budget;
napi_complete_done(napi, work_done);
/* enable interrupts */
enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
ENETC_TBIER_TXTIE);
return work_done;
}
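/* number of Tx BDs the device has completed: distance from the sw clean
 * index to the hw consumer index (tcir), wrap-around included
 */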
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
struct net_device *ndev = tx_ring->ndev;
int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
i = tx_ring->next_to_clean;
tx_swbd = &tx_ring->tx_swbd[i];
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
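/* only the BD that closes a frame has the skb attached,
 * see enetc_map_tx_buffs()
 */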
bool is_eof = !!tx_swbd->skb;
enetc_unmap_tx_buff(tx_ring, tx_swbd);
if (is_eof) {
napi_consume_skb(tx_swbd->skb, napi_budget);
tx_swbd->skb = NULL;
}
tx_byte_cnt += tx_swbd->len;
bds_to_clean--;
tx_swbd++;
i++;
if (unlikely(i == tx_ring->bd_count)) {
i = 0;
tx_swbd = tx_ring->tx_swbd;
}
/* BD iteration loop end */
if (is_eof) {
tx_frm_cnt++;
/* re-arm interrupt source */
enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
BIT(16 + tx_ring->index));
}
if (unlikely(!bds_to_clean))
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
}
tx_ring->next_to_clean = i;
tx_ring->stats.packets += tx_frm_cnt;
tx_ring->stats.bytes += tx_byte_cnt;
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
(enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}
static bool enetc_new_page(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
struct page *page;
dma_addr_t addr;
page = dev_alloc_page();
if (unlikely(!page))
return false;
addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
__free_page(page);
return false;
}
rx_swbd->dma = addr;
rx_swbd->page = page;
rx_swbd->page_offset = ENETC_RXB_PAD;
return true;
}
static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
struct enetc_rx_swbd *rx_swbd;
union enetc_rx_bd *rxbd;
int i, j;
i = rx_ring->next_to_use;
rx_swbd = &rx_ring->rx_swbd[i];
rxbd = ENETC_RXBD(*rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
/* try reuse page */
if (unlikely(!rx_swbd->page)) {
if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
rx_ring->stats.rx_alloc_errs++;
break;
}
}
/* update RxBD */
rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
rx_swbd->page_offset);
/* clear 'R" as well */
rxbd->r.lstatus = 0;
rx_swbd++;
rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rx_swbd = rx_ring->rx_swbd;
rxbd = ENETC_RXBD(*rx_ring, 0);
}
}
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track for page reuse */
rx_ring->next_to_use = i;
/* update ENETC's consumer index */
enetc_wr_reg(rx_ring->rcir, i);
}
return j;
}
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
/* TODO: add tstamp, hashing */
if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
skb->ip_summed = CHECKSUM_COMPLETE;
}
/* copy VLAN to skb, if one is extracted, for now we assume it's a
* standard TPID, but HW also supports custom values
*/
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxbd->r.vlan_opt));
}
static void enetc_process_skb(struct enetc_bdr *rx_ring,
struct sk_buff *skb)
{
skb_record_rx_queue(skb, rx_ring->index);
skb->protocol = eth_type_trans(skb, rx_ring->ndev);
}
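/* a page can be recycled if we hold its only reference and it was not
 * allocated from the pfmemalloc emergency reserves
 */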
static bool enetc_page_reusable(struct page *page)
{
return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}
static void enetc_reuse_page(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *old)
{
struct enetc_rx_swbd *new;
new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
/* next buf that may reuse a page */
enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
/* copy page reference */
*new = *old;
}
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
int i, u16 size)
{
struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
rx_swbd->page_offset,
size, DMA_FROM_DEVICE);
return rx_swbd;
}
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
if (likely(enetc_page_reusable(rx_swbd->page))) {
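/* flip to the other half of the page; each page is split into two
 * ENETC_RXB_TRUESIZE halves used in turn
 */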
rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
page_ref_inc(rx_swbd->page);
enetc_reuse_page(rx_ring, rx_swbd);
/* sync for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
rx_swbd->page_offset,
ENETC_RXB_DMA_SIZE,
DMA_FROM_DEVICE);
} else {
dma_unmap_page(rx_ring->dev, rx_swbd->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
}
rx_swbd->page = NULL;
}
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
int i, u16 size)
{
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
struct sk_buff *skb;
void *ba;
ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
if (unlikely(!skb)) {
rx_ring->stats.rx_alloc_errs++;
return NULL;
}
skb_reserve(skb, ENETC_RXB_PAD);
__skb_put(skb, size);
enetc_put_rx_buff(rx_ring, rx_swbd);
return skb;
}
static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
u16 size, struct sk_buff *skb)
{
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
enetc_put_rx_buff(rx_ring, rx_swbd);
}
#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit)
{
int rx_frm_cnt = 0, rx_byte_cnt = 0;
int cleaned_cnt, i;
cleaned_cnt = enetc_bd_unused(rx_ring);
/* next descriptor to process */
i = rx_ring->next_to_clean;
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
u32 bd_status;
u16 size;
if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
cleaned_cnt -= count;
}
rxbd = ENETC_RXBD(*rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
if (!bd_status)
break;
enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
size = le16_to_cpu(rxbd->r.buf_len);
skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
if (!skb)
break;
enetc_get_offloads(rx_ring, rxbd, skb);
cleaned_cnt++;
rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rxbd = ENETC_RXBD(*rx_ring, 0);
}
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
dev_kfree_skb(skb);
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
bd_status = le32_to_cpu(rxbd->r.lstatus);
rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rxbd = ENETC_RXBD(*rx_ring, 0);
}
}
rx_ring->ndev->stats.rx_dropped++;
rx_ring->ndev->stats.rx_errors++;
break;
}
/* not last BD in frame? */
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
bd_status = le32_to_cpu(rxbd->r.lstatus);
size = ENETC_RXB_DMA_SIZE;
if (bd_status & ENETC_RXBD_LSTATUS_F) {
dma_rmb();
size = le16_to_cpu(rxbd->r.buf_len);
}
enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
cleaned_cnt++;
rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rxbd = ENETC_RXBD(*rx_ring, 0);
}
}
rx_byte_cnt += skb->len;
enetc_process_skb(rx_ring, skb);
napi_gro_receive(napi, skb);
rx_frm_cnt++;
}
rx_ring->next_to_clean = i;
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
return rx_frm_cnt;
}
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
{
struct enetc_hw *hw = &si->hw;
u32 val;
/* find out how many of various resources we have to work with */
val = enetc_rd(hw, ENETC_SICAPR0);
si->num_rx_rings = (val >> 16) & 0xff;
si->num_tx_rings = val & 0xff;
val = enetc_rd(hw, ENETC_SIRFSCAPR);
si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
if (val & ENETC_SIPCAPR0_RSS) {
val = enetc_rd(hw, ENETC_SIRSSCAPR);
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
}
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
{
r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
&r->bd_dma_base, GFP_KERNEL);
if (!r->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
if (!IS_ALIGNED(r->bd_dma_base, 128)) {
dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
r->bd_dma_base);
return -EINVAL;
}
return 0;
}
static int enetc_alloc_txbdr(struct enetc_bdr *txr)
{
int err;
txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
if (!txr->tx_swbd)
return -ENOMEM;
err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
if (err) {
vfree(txr->tx_swbd);
return err;
}
txr->next_to_clean = 0;
txr->next_to_use = 0;
return 0;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
{
int size, i;
for (i = 0; i < txr->bd_count; i++)
enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
size = txr->bd_count * sizeof(union enetc_tx_bd);
dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
vfree(txr->tx_swbd);
txr->tx_swbd = NULL;
}
static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
int i, err;
for (i = 0; i < priv->num_tx_rings; i++) {
err = enetc_alloc_txbdr(priv->tx_ring[i]);
if (err)
goto fail;
}
return 0;
fail:
while (i-- > 0)
enetc_free_txbdr(priv->tx_ring[i]);
return err;
}
static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_free_txbdr(priv->tx_ring[i]);
}
static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
{
int err;
rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
if (!rxr->rx_swbd)
return -ENOMEM;
err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
if (err) {
vfree(rxr->rx_swbd);
return err;
}
rxr->next_to_clean = 0;
rxr->next_to_use = 0;
rxr->next_to_alloc = 0;
return 0;
}
static void enetc_free_rxbdr(struct enetc_bdr *rxr)
{
int size;
size = rxr->bd_count * sizeof(union enetc_rx_bd);
dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
rxr->bd_base = NULL;
vfree(rxr->rx_swbd);
rxr->rx_swbd = NULL;
}
static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
int i, err;
for (i = 0; i < priv->num_rx_rings; i++) {
err = enetc_alloc_rxbdr(priv->rx_ring[i]);
if (err)
goto fail;
}
return 0;
fail:
while (i-- > 0)
enetc_free_rxbdr(priv->rx_ring[i]);
return err;
}
static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->num_rx_rings; i++)
enetc_free_rxbdr(priv->rx_ring[i]);
}
static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
{
int i;
if (!tx_ring->tx_swbd)
return;
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_skb(tx_ring, tx_swbd);
}
tx_ring->next_to_clean = 0;
tx_ring->next_to_use = 0;
}
static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
int i;
if (!rx_ring->rx_swbd)
return;
for (i = 0; i < rx_ring->bd_count; i++) {
struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
if (!rx_swbd->page)
continue;
dma_unmap_page(rx_ring->dev, rx_swbd->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->next_to_alloc = 0;
}
static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->num_rx_rings; i++)
enetc_free_rx_ring(priv->rx_ring[i]);
for (i = 0; i < priv->num_tx_rings; i++)
enetc_free_tx_ring(priv->tx_ring[i]);
}
static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
int size = cbdr->bd_count * sizeof(struct enetc_cbd);
cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
GFP_KERNEL);
if (!cbdr->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
return -EINVAL;
}
cbdr->next_to_clean = 0;
cbdr->next_to_use = 0;
return 0;
}
static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
int size = cbdr->bd_count * sizeof(struct enetc_cbd);
dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
cbdr->bd_base = NULL;
}
static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
{
/* set CBDR cache attributes */
enetc_wr(hw, ENETC_SICAR2,
ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
enetc_wr(hw, ENETC_SICBDRPIR, 0);
enetc_wr(hw, ENETC_SICBDRCIR, 0);
/* enable ring */
enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
cbdr->pir = hw->reg + ENETC_SICBDRPIR;
cbdr->cir = hw->reg + ENETC_SICBDRCIR;
}
static void enetc_clear_cbdr(struct enetc_hw *hw)
{
enetc_wr(hw, ENETC_SICBDRMR, 0);
}
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
int *rss_table;
int i;
rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
if (!rss_table)
return -ENOMEM;
/* Set up RSS table defaults */
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
enetc_set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
return 0;
}
static int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
struct enetc_hw *hw = &si->hw;
int err;
enetc_setup_cbdr(hw, &si->cbd_ring);
/* set SI cache attributes */
enetc_wr(hw, ENETC_SICAR0,
ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
}
return 0;
}
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
int cpus = num_online_cpus();
priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
/* Enable all available TX rings in order to configure as many
* priorities as possible, when needed.
* TODO: Make # of TX rings run-time configurable
*/
priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
priv->num_tx_rings = si->num_tx_rings;
priv->bdr_int_num = cpus;
/* SI specific */
si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
int err;
err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
if (err)
return err;
priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
GFP_KERNEL);
if (!priv->cls_rules) {
err = -ENOMEM;
goto err_alloc_cls;
}
err = enetc_configure_si(priv);
if (err)
goto err_config_si;
return 0;
err_config_si:
kfree(priv->cls_rules);
err_alloc_cls:
enetc_clear_cbdr(&si->hw);
enetc_free_cbdr(priv->dev, &si->cbd_ring);
return err;
}
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
enetc_clear_cbdr(&si->hw);
enetc_free_cbdr(priv->dev, &si->cbd_ring);
kfree(priv->cls_rules);
}
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int idx = tx_ring->index;
u32 tbmr;
enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
lower_32_bits(tx_ring->bd_dma_base));
enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
upper_32_bits(tx_ring->bd_dma_base));
WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
ENETC_RTBLENR_LEN(tx_ring->bd_count));
/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
tbmr = ENETC_TBMR_EN;
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
/* enable ring */
enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
tx_ring->idr = hw->reg + ENETC_SITXIDR;
}
static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
u32 rbmr;
enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
lower_32_bits(rx_ring->bd_dma_base));
enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
upper_32_bits(rx_ring->bd_dma_base));
WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
ENETC_RTBLENR_LEN(rx_ring->bd_count));
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
rbmr = ENETC_RBMR_EN;
if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rbmr |= ENETC_RBMR_VTE;
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
/* disable EN bit on ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}
static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int delay = 8, timeout = 100;
int idx = tx_ring->index;
/* disable EN bit on ring */
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
/* wait for busy to clear */
while (delay < timeout &&
enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
msleep(delay);
delay *= 2;
}
if (delay >= timeout)
netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
idx);
}
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
udelay(1);
}
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
cpumask_t cpu_mask;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
err = request_irq(irq, enetc_msix, 0, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
for (j = 0; j < v->count_tx_rings; j++) {
int idx = v->tx_ring[j].index;
enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
}
cpumask_clear(&cpu_mask);
cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
irq_set_affinity_hint(irq, &cpu_mask);
}
return 0;
irq_err:
while (i--) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
irq_set_affinity_hint(irq, NULL);
free_irq(irq, priv->int_vector[i]);
}
return err;
}
static void enetc_free_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
int i;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
irq_set_affinity_hint(irq, NULL);
free_irq(irq, priv->int_vector[i]);
}
}
static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
{
int i;
/* enable Tx & Rx event indication */
for (i = 0; i < priv->num_rx_rings; i++) {
enetc_rxbdr_wr(&priv->si->hw, i,
ENETC_RBIER, ENETC_RBIER_RXTIE);
}
for (i = 0; i < priv->num_tx_rings; i++) {
enetc_txbdr_wr(&priv->si->hw, i,
ENETC_TBIER, ENETC_TBIER_TXTIE);
}
}
static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
}
static void adjust_link(struct net_device *ndev)
{
struct phy_device *phydev = ndev->phydev;
phy_print_status(phydev);
}
static int enetc_phy_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct phy_device *phydev;
if (!priv->phy_node)
return 0; /* phy-less mode */
phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
0, priv->if_mode);
if (!phydev) {
dev_err(&ndev->dev, "could not attach to PHY\n");
return -ENODEV;
}
phy_attached_info(phydev);
return 0;
}
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i, err;
err = enetc_setup_irqs(priv);
if (err)
return err;
err = enetc_phy_connect(ndev);
if (err)
goto err_phy_connect;
err = enetc_alloc_tx_resources(priv);
if (err)
goto err_alloc_tx;
err = enetc_alloc_rx_resources(priv);
if (err)
goto err_alloc_rx;
enetc_setup_bdrs(priv);
err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
if (err)
goto err_set_queues;
err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
if (err)
goto err_set_queues;
for (i = 0; i < priv->bdr_int_num; i++)
napi_enable(&priv->int_vector[i]->napi);
enetc_enable_interrupts(priv);
if (ndev->phydev)
phy_start(ndev->phydev);
else
netif_carrier_on(ndev);
netif_tx_start_all_queues(ndev);
return 0;
err_set_queues:
enetc_free_rx_resources(priv);
err_alloc_rx:
enetc_free_tx_resources(priv);
err_alloc_tx:
if (ndev->phydev)
phy_disconnect(ndev->phydev);
err_phy_connect:
enetc_free_irqs(priv);
return err;
}
int enetc_close(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;
netif_tx_stop_all_queues(ndev);
if (ndev->phydev) {
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
} else {
netif_carrier_off(ndev);
}
for (i = 0; i < priv->bdr_int_num; i++) {
napi_synchronize(&priv->int_vector[i]->napi);
napi_disable(&priv->int_vector[i]->napi);
}
enetc_disable_interrupts(priv);
enetc_clear_bdrs(priv);
enetc_free_rxtx_rings(priv);
enetc_free_rx_resources(priv);
enetc_free_tx_resources(priv);
enetc_free_irqs(priv);
return 0;
}
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned long packets = 0, bytes = 0;
int i;
for (i = 0; i < priv->num_rx_rings; i++) {
packets += priv->rx_ring[i]->stats.packets;
bytes += priv->rx_ring[i]->stats.bytes;
}
stats->rx_packets = packets;
stats->rx_bytes = bytes;
bytes = 0;
packets = 0;
for (i = 0; i < priv->num_tx_rings; i++) {
packets += priv->tx_ring[i]->stats.packets;
bytes += priv->tx_ring[i]->stats.bytes;
}
stats->tx_packets = packets;
stats->tx_bytes = bytes;
return stats;
}
static int enetc_set_rss(struct net_device *ndev, int en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
u32 reg;
enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
reg = enetc_rd(hw, ENETC_SIMR);
reg &= ~ENETC_SIMR_RSSE;
reg |= (en) ? ENETC_SIMR_RSSE : 0;
enetc_wr(hw, ENETC_SIMR, reg);
return 0;
}
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
return 0;
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
int size, v_tx_rings;
int i, n, err, nvec;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (n < 0)
return n;
if (n != nvec)
return -EPERM;
/* # of tx rings per int vector */
v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
size = sizeof(struct enetc_int_vector) +
sizeof(struct enetc_bdr) * v_tx_rings;
for (i = 0; i < priv->bdr_int_num; i++) {
struct enetc_int_vector *v;
struct enetc_bdr *bdr;
int j;
v = kzalloc(size, GFP_KERNEL);
if (!v) {
err = -ENOMEM;
goto fail;
}
priv->int_vector[i] = v;
netif_napi_add(priv->ndev, &v->napi, enetc_poll,
NAPI_POLL_WEIGHT);
v->count_tx_rings = v_tx_rings;
for (j = 0; j < v_tx_rings; j++) {
int idx;
/* default tx ring mapping policy */
if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
idx = 2 * j + i; /* 2 CPUs */
else
idx = j + i * v_tx_rings; /* default */
__set_bit(idx, &v->tx_rings_map);
bdr = &v->tx_ring[j];
bdr->index = idx;
bdr->ndev = priv->ndev;
bdr->dev = priv->dev;
bdr->bd_count = priv->tx_bd_count;
priv->tx_ring[idx] = bdr;
}
bdr = &v->rx_ring;
bdr->index = i;
bdr->ndev = priv->ndev;
bdr->dev = priv->dev;
bdr->bd_count = priv->rx_bd_count;
priv->rx_ring[i] = bdr;
}
return 0;
fail:
while (i--) {
netif_napi_del(&priv->int_vector[i]->napi);
kfree(priv->int_vector[i]);
}
pci_free_irq_vectors(pdev);
return err;
}
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
int i;
for (i = 0; i < priv->bdr_int_num; i++) {
struct enetc_int_vector *v = priv->int_vector[i];
netif_napi_del(&v->napi);
}
for (i = 0; i < priv->num_rx_rings; i++)
priv->rx_ring[i] = NULL;
for (i = 0; i < priv->num_tx_rings; i++)
priv->tx_ring[i] = NULL;
for (i = 0; i < priv->bdr_int_num; i++) {
kfree(priv->int_vector[i]);
priv->int_vector[i] = NULL;
}
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
}
static void enetc_kfree_si(struct enetc_si *si)
{
char *p = (char *)si - si->pad;
kfree(p);
}
static void enetc_detect_errata(struct enetc_si *si)
{
if (si->pdev->revision == ENETC_REV1)
si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
ENETC_ERR_UCMCSWP;
}
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
{
struct enetc_si *si, *p;
struct enetc_hw *hw;
size_t alloc_size;
int err, len;
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev, "device enable failed\n");
return err;
}
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
"DMA configuration failed: 0x%x\n", err);
goto err_dma;
}
}
err = pci_request_mem_regions(pdev, name);
if (err) {
dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
goto err_pci_mem_reg;
}
pci_set_master(pdev);
alloc_size = sizeof(struct enetc_si);
if (sizeof_priv) {
/* align priv to 32B */
alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
alloc_size += sizeof_priv;
}
/* force 32B alignment for enetc_si */
alloc_size += ENETC_SI_ALIGN - 1;
p = kzalloc(alloc_size, GFP_KERNEL);
if (!p) {
err = -ENOMEM;
goto err_alloc_si;
}
si = PTR_ALIGN(p, ENETC_SI_ALIGN);
si->pad = (char *)si - (char *)p;
pci_set_drvdata(pdev, si);
si->pdev = pdev;
hw = &si->hw;
len = pci_resource_len(pdev, ENETC_BAR_REGS);
hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
if (!hw->reg) {
err = -ENXIO;
dev_err(&pdev->dev, "ioremap() failed\n");
goto err_ioremap;
}
if (len > ENETC_PORT_BASE)
hw->port = hw->reg + ENETC_PORT_BASE;
if (len > ENETC_GLOBAL_BASE)
hw->global = hw->reg + ENETC_GLOBAL_BASE;
enetc_detect_errata(si);
return 0;
err_ioremap:
enetc_kfree_si(si);
err_alloc_si:
pci_release_mem_regions(pdev);
err_pci_mem_reg:
err_dma:
pci_disable_device(pdev);
return err;
}
void enetc_pci_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
struct enetc_hw *hw = &si->hw;
iounmap(hw->reg);
enetc_kfree_si(si);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2017-2019 NXP */
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "enetc_hw.h"
#define ENETC_MAC_MAXFRM_SIZE 9600
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
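/* 9600 byte max frame minus FCS (4B), Ethernet header (14B), VLAN tag (4B) */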
struct enetc_tx_swbd {
struct sk_buff *skb;
dma_addr_t dma;
u16 len;
u16 is_dma_page;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
struct enetc_rx_swbd {
dma_addr_t dma;
struct page *page;
u16 page_offset;
};
struct enetc_ring_stats {
unsigned int packets;
unsigned int bytes;
unsigned int rx_alloc_errs;
};
#define ENETC_BDR_DEFAULT_SIZE 1024
#define ENETC_DEFAULT_TX_WORK 256
struct enetc_bdr {
struct device *dev; /* for DMA mapping */
struct net_device *ndev;
void *bd_base; /* points to Rx or Tx BD ring */
union {
void __iomem *tpir;
void __iomem *rcir;
};
u16 index;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
union {
struct enetc_tx_swbd *tx_swbd;
struct enetc_rx_swbd *rx_swbd;
};
union {
void __iomem *tcir; /* Tx */
int next_to_alloc; /* Rx */
};
void __iomem *idr; /* Interrupt Detect Register pointer */
struct enetc_ring_stats stats;
dma_addr_t bd_dma_base;
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
{
if (unlikely(++*i == bdr->bd_count))
*i = 0;
}
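/* free BDs in the ring; one BD is always kept as a gap so that a full
 * ring can be distinguished from an empty one
 */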
static inline int enetc_bd_unused(struct enetc_bdr *bdr)
{
if (bdr->next_to_clean > bdr->next_to_use)
return bdr->next_to_clean - bdr->next_to_use - 1;
return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}
/* Control BD ring */
#define ENETC_CBDR_DEFAULT_SIZE 64
struct enetc_cbdr {
void *bd_base; /* points to the control BD ring */
void __iomem *pir;
void __iomem *cir;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
dma_addr_t bd_dma_base;
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
struct enetc_msg_swbd {
void *vaddr;
dma_addr_t dma;
int size;
};
#define ENETC_REV1 0x1
enum enetc_errata {
ENETC_ERR_TXCSUM = BIT(0),
ENETC_ERR_VLAN_ISOL = BIT(1),
ENETC_ERR_UCMCSWP = BIT(2),
};
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
struct enetc_hw hw;
enum enetc_errata errata;
struct net_device *ndev; /* back ref. */
struct enetc_cbdr cbd_ring;
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
int num_fs_entries;
int num_rss; /* number of RSS buckets */
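/* alignment padding in front of this struct; lets enetc_kfree_si()
 * recover the original allocation, see enetc_pci_probe()
 */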
unsigned short pad;
};
#define ENETC_SI_ALIGN 32
static inline void *enetc_si_priv(const struct enetc_si *si)
{
return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
}
static inline bool enetc_si_is_pf(struct enetc_si *si)
{
return !!(si->hw.port);
}
#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
struct enetc_int_vector {
void __iomem *rbier;
void __iomem *tbier_base;
unsigned long tx_rings_map;
int count_tx_rings;
struct napi_struct napi;
char name[ENETC_INT_NAME_MAX];
struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
struct enetc_bdr tx_ring[0];
};
struct enetc_cls_rule {
struct ethtool_rx_flow_spec fs;
int used;
};
#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
struct enetc_ndev_priv {
struct net_device *ndev;
struct device *dev; /* dma-mapping device */
struct enetc_si *si;
int bdr_int_num; /* number of Rx/Tx ring interrupts */
struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
u16 num_rx_rings, num_tx_rings;
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
struct enetc_cls_rule *cls_rules;
struct device_node *phy_node;
phy_interface_t if_mode;
};
/* Messaging */
/* VF-PF set primary MAC address message format */
struct enetc_msg_cmd_set_primary_mac {
struct enetc_msg_cmd_header header;
struct sockaddr mac;
};
#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i]))
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
/* SI common */
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
void enetc_free_msix(struct enetc_ndev_priv *priv);
void enetc_get_si_caps(struct enetc_si *si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
int enetc_set_features(struct net_device *ndev,
netdev_features_t features);
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
/* control buffer descriptor ring (CBDR) */
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include "enetc.h"
static void enetc_clean_cbdr(struct enetc_si *si)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd *dest_cbd;
int i, status;
i = ring->next_to_clean;
while (enetc_rd_reg(ring->cir) != i) {
dest_cbd = ENETC_CBD(*ring, i);
status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
if (status)
dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
status, dest_cbd->cmd);
memset(dest_cbd, 0, sizeof(*dest_cbd));
i = (i + 1) % ring->bd_count;
}
ring->next_to_clean = i;
}
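/* free slots in the control BD ring, keeping the usual one-BD gap */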
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
r->bd_count;
}
static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
struct enetc_cbdr *ring = &si->cbd_ring;
int timeout = ENETC_CBDR_TIMEOUT;
struct enetc_cbd *dest_cbd;
int i;
if (unlikely(!ring->bd_base))
return -EIO;
if (unlikely(!enetc_cbd_unused(ring)))
enetc_clean_cbdr(si);
i = ring->next_to_use;
dest_cbd = ENETC_CBD(*ring, i);
/* copy command to the ring */
*dest_cbd = *cbd;
i = (i + 1) % ring->bd_count;
ring->next_to_use = i;
/* let H/W know BD ring has been updated */
enetc_wr_reg(ring->pir, i);
do {
if (enetc_rd_reg(ring->cir) == i)
break;
udelay(10); /* cannot sleep, rtnl_lock() */
timeout -= 10;
} while (timeout);
if (!timeout)
return -EBUSY;
enetc_clean_cbdr(si);
return 0;
}
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
struct enetc_cbd cbd;
memset(&cbd, 0, sizeof(cbd));
cbd.cls = 1;
cbd.status_flags = ENETC_CBD_FLAGS_SF;
cbd.index = cpu_to_le16(index);
return enetc_send_cmd(si, &cbd);
}
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map)
{
struct enetc_cbd cbd;
u32 upper;
u16 lower;
memset(&cbd, 0, sizeof(cbd));
/* fill up the "set" descriptor */
cbd.cls = 1;
cbd.status_flags = ENETC_CBD_FLAGS_SF;
cbd.index = cpu_to_le16(index);
cbd.opt[3] = cpu_to_le32(si_map);
/* enable entry */
cbd.opt[0] = cpu_to_le32(BIT(31));
upper = *(const u32 *)mac_addr;
lower = *(const u16 *)(mac_addr + 4);
cbd.addr[0] = cpu_to_le32(upper);
cbd.addr[1] = cpu_to_le32(lower);
return enetc_send_cmd(si, &cbd);
}
#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
int err;
/* fill up the "set" descriptor */
cbd.cmd = 0;
cbd.cls = 4;
cbd.index = cpu_to_le16(index);
cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
return -ENOMEM;
}
dma_align = ALIGN(dma, RFSE_ALIGN);
tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
memcpy(tmp_align, rfse, sizeof(*rfse));
cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
tmp, dma);
return err;
}
#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
int err, i;
if (count < RSSE_ALIGN)
/* HW only takes in a full 64 entry table */
return -EINVAL;
tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
return -ENOMEM;
}
dma_align = ALIGN(dma, RSSE_ALIGN);
tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
if (!read)
for (i = 0; i < count; i++)
tmp_align[i] = (u8)(table[i]);
/* fill up the descriptor */
cbd.cmd = read ? 2 : 1;
cbd.cls = 3;
cbd.length = cpu_to_le16(count);
cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
if (read)
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
return err;
}
/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
return enetc_cmd_rss_table(si, table, count, true);
}
/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include <linux/net_tstamp.h>
#include <linux/module.h>
#include "enetc.h"
static const u32 enetc_si_regs[] = {
ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
ENETC_SIUEFDCR
};
static const u32 enetc_txbdr_regs[] = {
ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER
};
static const u32 enetc_rxbdr_regs[] = {
ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER
};
static const u32 enetc_port_regs[] = {
ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0),
ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
};
static int enetc_get_reglen(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int len;
len = ARRAY_SIZE(enetc_si_regs);
len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;
if (hw->port)
len += ARRAY_SIZE(enetc_port_regs);
len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
return len;
}
static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
void *regbuf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
u32 *buf = (u32 *)regbuf;
int i, j;
u32 addr;
for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
*buf++ = enetc_si_regs[i];
*buf++ = enetc_rd(hw, enetc_si_regs[i]);
}
for (i = 0; i < priv->num_tx_rings; i++) {
for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);
*buf++ = addr;
*buf++ = enetc_rd(hw, addr);
}
}
for (i = 0; i < priv->num_rx_rings; i++) {
for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);
*buf++ = addr;
*buf++ = enetc_rd(hw, addr);
}
}
if (!hw->port)
return;
for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
addr = ENETC_PORT_BASE + enetc_port_regs[i];
*buf++ = addr;
*buf++ = enetc_rd(hw, addr);
}
}
static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_si_counters[] = {
{ ENETC_SIROCT, "SI rx octets" },
{ ENETC_SIRFRM, "SI rx frames" },
{ ENETC_SIRUCA, "SI rx u-cast frames" },
{ ENETC_SIRMCA, "SI rx m-cast frames" },
{ ENETC_SITOCT, "SI tx octets" },
{ ENETC_SITFRM, "SI tx frames" },
{ ENETC_SITUCA, "SI tx u-cast frames" },
{ ENETC_SITMCA, "SI tx m-cast frames" },
{ ENETC_RBDCR(0), "Rx ring 0 discarded frames" },
{ ENETC_RBDCR(1), "Rx ring 1 discarded frames" },
{ ENETC_RBDCR(2), "Rx ring 2 discarded frames" },
{ ENETC_RBDCR(3), "Rx ring 3 discarded frames" },
{ ENETC_RBDCR(4), "Rx ring 4 discarded frames" },
{ ENETC_RBDCR(5), "Rx ring 5 discarded frames" },
{ ENETC_RBDCR(6), "Rx ring 6 discarded frames" },
{ ENETC_RBDCR(7), "Rx ring 7 discarded frames" },
{ ENETC_RBDCR(8), "Rx ring 8 discarded frames" },
{ ENETC_RBDCR(9), "Rx ring 9 discarded frames" },
{ ENETC_RBDCR(10), "Rx ring 10 discarded frames" },
{ ENETC_RBDCR(11), "Rx ring 11 discarded frames" },
{ ENETC_RBDCR(12), "Rx ring 12 discarded frames" },
{ ENETC_RBDCR(13), "Rx ring 13 discarded frames" },
{ ENETC_RBDCR(14), "Rx ring 14 discarded frames" },
{ ENETC_RBDCR(15), "Rx ring 15 discarded frames" },
};
static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
{ ENETC_PM0_REOCT, "MAC rx ethernet octets" },
{ ENETC_PM0_RALN, "MAC rx alignment errors" },
{ ENETC_PM0_RXPF, "MAC rx valid pause frames" },
{ ENETC_PM0_RFRM, "MAC rx valid frames" },
{ ENETC_PM0_RFCS, "MAC rx fcs errors" },
{ ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
{ ENETC_PM0_RERR, "MAC rx frame errors" },
{ ENETC_PM0_RUCA, "MAC rx unicast frames" },
{ ENETC_PM0_RMCA, "MAC rx multicast frames" },
{ ENETC_PM0_RBCA, "MAC rx broadcast frames" },
{ ENETC_PM0_RDRP, "MAC rx dropped packets" },
{ ENETC_PM0_RPKT, "MAC rx packets" },
{ ENETC_PM0_RUND, "MAC rx undersized packets" },
{ ENETC_PM0_R64, "MAC rx 64 byte packets" },
{ ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
{ ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
{ ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
{ ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
{ ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
{ ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
{ ENETC_PM0_ROVR, "MAC rx oversized packets" },
{ ENETC_PM0_RJBR, "MAC rx jabber packets" },
{ ENETC_PM0_RFRG, "MAC rx fragment packets" },
{ ENETC_PM0_RCNP, "MAC rx control packets" },
{ ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
{ ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
{ ENETC_PM0_TOCT, "MAC tx octets" },
{ ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
{ ENETC_PM0_TXPF, "MAC tx valid pause frames" },
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
{ ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
{ ENETC_PM0_TPKT, "MAC tx packets" },
{ ENETC_PM0_TUND, "MAC tx undersized packets" },
{ ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
{ ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
{ ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
{ ENETC_PM0_TCNP, "MAC tx control packets" },
{ ENETC_PM0_TDFR, "MAC tx deferred packets" },
{ ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
{ ENETC_PM0_TSCOL, "MAC tx single collisions" },
{ ENETC_PM0_TLCOL, "MAC tx late collisions" },
{ ENETC_PM0_TECOL, "MAC tx excessive collisions" },
{ ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
{ ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
{ ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
{ ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
{ ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
{ ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
{ ENETC_PFDMSAPR, "SI pruning discarded frames" },
{ ENETC_PICDR(0), "ICM DR0 discarded frames" },
{ ENETC_PICDR(1), "ICM DR1 discarded frames" },
{ ENETC_PICDR(2), "ICM DR2 discarded frames" },
{ ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d frames",
"Rx ring %2d alloc errors",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
"Tx ring %2d frames",
};
static int enetc_get_sset_count(struct net_device *ndev, int sset)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
if (sset == ETH_SS_STATS)
return ARRAY_SIZE(enetc_si_counters) +
ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
(enetc_si_is_pf(priv->si) ?
ARRAY_SIZE(enetc_port_counters) : 0);
return -EOPNOTSUPP;
}
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
u8 *p = data;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < priv->num_tx_rings; i++) {
for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
i);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < priv->num_rx_rings; i++) {
for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
i);
p += ETH_GSTRING_LEN;
}
}
if (!enetc_si_is_pf(priv->si))
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
strlcpy(p, enetc_port_counters[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
}
}
static void enetc_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int i, o = 0;
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
for (i = 0; i < priv->num_tx_rings; i++)
data[o++] = priv->tx_ring[i]->stats.packets;
for (i = 0; i < priv->num_rx_rings; i++) {
data[o++] = priv->rx_ring[i]->stats.packets;
data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
}
if (!enetc_si_is_pf(priv->si))
return;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
[UDP_V4_FLOW] = ENETC_RSSHASH_L4,
[SCTP_V4_FLOW] = ENETC_RSSHASH_L4,
[AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3,
[IPV4_FLOW] = ENETC_RSSHASH_L3,
[TCP_V6_FLOW] = ENETC_RSSHASH_L4,
[UDP_V6_FLOW] = ENETC_RSSHASH_L4,
[SCTP_V6_FLOW] = ENETC_RSSHASH_L4,
[AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3,
[IPV6_FLOW] = ENETC_RSSHASH_L3,
[ETHER_FLOW] = 0,
};
if (rxnfc->flow_type >= ARRAY_SIZE(rsshash))
return -EINVAL;
rxnfc->data = rsshash[rxnfc->flow_type];
return 0;
}
/* current HW spec does byte reversal on everything including MAC addresses */
static void ether_addr_copy_swap(u8 *dst, const u8 *src)
{
int i;
for (i = 0; i < ETH_ALEN; i++)
dst[i] = src[ETH_ALEN - i - 1];
}
static int enetc_set_cls_entry(struct enetc_si *si,
struct ethtool_rx_flow_spec *fs, bool en)
{
struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m;
struct ethhdr *eth_h, *eth_m;
struct enetc_cmd_rfse rfse = { {0} };
if (!en)
goto done;
switch (fs->flow_type & 0xff) {
case TCP_V4_FLOW:
l4ip4_h = &fs->h_u.tcp_ip4_spec;
l4ip4_m = &fs->m_u.tcp_ip4_spec;
goto l4ip4;
case UDP_V4_FLOW:
l4ip4_h = &fs->h_u.udp_ip4_spec;
l4ip4_m = &fs->m_u.udp_ip4_spec;
goto l4ip4;
case SCTP_V4_FLOW:
l4ip4_h = &fs->h_u.sctp_ip4_spec;
l4ip4_m = &fs->m_u.sctp_ip4_spec;
l4ip4:
rfse.sip_h[0] = l4ip4_h->ip4src;
rfse.sip_m[0] = l4ip4_m->ip4src;
rfse.dip_h[0] = l4ip4_h->ip4dst;
rfse.dip_m[0] = l4ip4_m->ip4dst;
rfse.sport_h = ntohs(l4ip4_h->psrc);
rfse.sport_m = ntohs(l4ip4_m->psrc);
rfse.dport_h = ntohs(l4ip4_h->pdst);
rfse.dport_m = ntohs(l4ip4_m->pdst);
if (l4ip4_m->tos)
netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
rfse.ethtype_h = ETH_P_IP; /* IPv4 */
rfse.ethtype_m = 0xffff;
break;
case IP_USER_FLOW:
l3ip4_h = &fs->h_u.usr_ip4_spec;
l3ip4_m = &fs->m_u.usr_ip4_spec;
rfse.sip_h[0] = l3ip4_h->ip4src;
rfse.sip_m[0] = l3ip4_m->ip4src;
rfse.dip_h[0] = l3ip4_h->ip4dst;
rfse.dip_m[0] = l3ip4_m->ip4dst;
if (l3ip4_m->tos)
netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
rfse.ethtype_h = ETH_P_IP; /* IPv4 */
rfse.ethtype_m = 0xffff;
break;
case ETHER_FLOW:
eth_h = &fs->h_u.ether_spec;
eth_m = &fs->m_u.ether_spec;
ether_addr_copy_swap(rfse.smac_h, eth_h->h_source);
ether_addr_copy_swap(rfse.smac_m, eth_m->h_source);
ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest);
ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest);
rfse.ethtype_h = ntohs(eth_h->h_proto);
rfse.ethtype_m = ntohs(eth_m->h_proto);
break;
default:
return -EOPNOTSUPP;
}
rfse.mode |= ENETC_RFSE_EN;
if (fs->ring_cookie != RX_CLS_FLOW_DISC) {
rfse.mode |= ENETC_RFSE_MODE_BD;
rfse.result = fs->ring_cookie;
}
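	/* for en == false we fall through with an all-zero RFSE, which
	 * disables the entry at fs->location
	 */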
done:
return enetc_set_fs_entry(si, &rfse, fs->location);
}
static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i, j;
switch (rxnfc->cmd) {
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
case ETHTOOL_GRXFH:
/* get RSS hash config */
return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
/* number of entries in use */
rxnfc->rule_cnt = 0;
for (i = 0; i < priv->si->num_fs_entries; i++)
if (priv->cls_rules[i].used)
rxnfc->rule_cnt++;
break;
case ETHTOOL_GRXCLSRULE:
if (rxnfc->fs.location >= priv->si->num_fs_entries)
return -EINVAL;
/* get entry x */
rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
break;
case ETHTOOL_GRXCLSRLALL:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
/* array of indexes of used entries */
j = 0;
for (i = 0; i < priv->si->num_fs_entries; i++) {
if (!priv->cls_rules[i].used)
continue;
if (j == rxnfc->rule_cnt)
return -EMSGSIZE;
rule_locs[j++] = i;
}
/* number of entries in use */
rxnfc->rule_cnt = j;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err;
switch (rxnfc->cmd) {
case ETHTOOL_SRXCLSRLINS:
if (rxnfc->fs.location >= priv->si->num_fs_entries)
return -EINVAL;
if (rxnfc->fs.ring_cookie >= priv->num_rx_rings &&
rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true);
if (err)
return err;
priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs;
priv->cls_rules[rxnfc->fs.location].used = 1;
break;
case ETHTOOL_SRXCLSRLDEL:
if (rxnfc->fs.location >= priv->si->num_fs_entries)
return -EINVAL;
err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false);
if (err)
return err;
priv->cls_rules[rxnfc->fs.location].used = 0;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static u32 enetc_get_rxfh_key_size(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
/* return the size of the RX flow hash key. PF only */
return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0;
}
static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
/* return the size of the RX flow hash indirection table */
return priv->si->num_rss;
}
static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0, i;
/* return hash function */
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
if (key && hw->port)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
/* return RSS table */
if (indir)
err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
return err;
}
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
{
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
}
static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0;
/* set hash key, if PF */
if (key && hw->port)
enetc_set_rss_key(hw, key);
/* set RSS table */
if (indir)
err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
return err;
}
static void enetc_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
ring->rx_pending = priv->rx_bd_count;
ring->tx_pending = priv->tx_bd_count;
/* do some h/w sanity checks for BDR length */
if (netif_running(ndev)) {
struct enetc_hw *hw = &priv->si->hw;
u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);
if (val != priv->rx_bd_count)
netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);
val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);
if (val != priv->tx_bd_count)
netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
}
}
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
.get_ringparam = enetc_get_ringparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
.get_ringparam = enetc_get_ringparam,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
if (enetc_si_is_pf(priv->si))
ndev->ethtool_ops = &enetc_pf_ethtool_ops;
else
ndev->ethtool_ops = &enetc_vf_ethtool_ops;
}
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2017-2019 NXP */
#include <linux/bitops.h>
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
/* ENETC register block BAR */
#define ENETC_BAR_REGS 0
/** SI regs, offset: 0h */
#define ENETC_SIMR 0
#define ENETC_SIMR_EN BIT(31)
#define ENETC_SIMR_RSSE BIT(0)
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
/* cache attribute registers for transactions initiated by ENETC */
#define ENETC_SICAR0 0x40
#define ENETC_SICAR1 0x44
#define ENETC_SICAR2 0x48
/* rd snoop, no alloc
* wr snoop, no alloc, partial cache line update for BDs and full cache line
* update for data
*/
#define ENETC_SICAR_RD_COHERENT 0x2b2b0000
#define ENETC_SICAR_WR_COHERENT 0x00006727
#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
/* msg size encoding: the default (and max) size of 1024B is encoded as 0 */
static inline u32 enetc_vsi_set_msize(u32 size)
{
return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0;
}
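/* e.g. (per the encoding above): 32B -> 1, 512B -> 16, 1024B -> 0 */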
#define ENETC_PSIMSGRR 0x204
#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1)
#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */
#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */
#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8)
#define ENETC_VSIMSGSR 0x204 /* RO */
#define ENETC_VSIMSGSR_MB BIT(0)
#define ENETC_VSIMSGSR_MS BIT(1)
#define ENETC_VSIMSGSNDAR0 0x210
#define ENETC_VSIMSGSNDAR1 0x214
#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16)
#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16)
/* SI statistics */
#define ENETC_SIROCT 0x300
#define ENETC_SIRFRM 0x308
#define ENETC_SIRUCA 0x310
#define ENETC_SIRMCA 0x318
#define ENETC_SITOCT 0x320
#define ENETC_SITFRM 0x328
#define ENETC_SITUCA 0x330
#define ENETC_SITMCA 0x338
#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200)
/* Control BDR regs */
#define ENETC_SICBDRMR 0x800
#define ENETC_SICBDRSR 0x804 /* RO */
#define ENETC_SICBDRBAR0 0x810
#define ENETC_SICBDRBAR1 0x814
#define ENETC_SICBDRPIR 0x818
#define ENETC_SICBDRCIR 0x81c
#define ENETC_SICBDRLENR 0x820
#define ENETC_SICAPR0 0x900
#define ENETC_SICAPR1 0x904
#define ENETC_PSIIER 0xa00
#define ENETC_PSIIER_MR_MASK GENMASK(2, 1)
#define ENETC_PSIIDR 0xa08
#define ENETC_SITXIDR 0xa18
#define ENETC_SIRXIDR 0xa28
#define ENETC_SIMSIVR 0xa30
#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
#define ENETC_SIUEFDCR 0xe28
#define ENETC_SIRFSCAPR 0x1200
#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f)
#define ENETC_SIRSSCAPR 0x1600
#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
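/* e.g. a capability field value of 1 decodes to BIT(1) * 32 = 64 RSS table entries */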
/** SI BDR sub-blocks, n = 0..7 */
enum enetc_bdr_type {TX, RX};
#define ENETC_BDR_OFF(i) ((i) * 0x200)
#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
/* RX BDR reg offsets */
#define ENETC_RBMR 0
#define ENETC_RBMR_BDS BIT(2)
#define ENETC_RBMR_VTE BIT(5)
#define ENETC_RBMR_EN BIT(31)
#define ENETC_RBSR 0x4
#define ENETC_RBBSR 0x8
#define ENETC_RBCIR 0xc
#define ENETC_RBBAR0 0x10
#define ENETC_RBBAR1 0x14
#define ENETC_RBPIR 0x18
#define ENETC_RBLENR 0x20
#define ENETC_RBIER 0xa0
#define ENETC_RBIER_RXTIE BIT(0)
#define ENETC_RBIDR 0xa4
#define ENETC_RBICIR0 0xa8
#define ENETC_RBICIR0_ICEN BIT(31)
/* TX BDR reg offsets */
#define ENETC_TBMR 0
#define ENETC_TBSR_BUSY BIT(0)
#define ENETC_TBMR_VIH BIT(9)
#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
#define ENETC_TBMR_PRIO_SET(val)	(val)
#define ENETC_TBMR_EN BIT(31)
#define ENETC_TBSR 0x4
#define ENETC_TBBAR0 0x10
#define ENETC_TBBAR1 0x14
#define ENETC_TBPIR 0x18
#define ENETC_TBCIR 0x1c
#define ENETC_TBCIR_IDX_MASK 0xffff
#define ENETC_TBLENR 0x20
#define ENETC_TBIER 0xa0
#define ENETC_TBIER_TXTIE BIT(0)
#define ENETC_TBIDR 0xa4
#define ENETC_TBICIR0 0xa8
#define ENETC_TBICIR0_ICEN BIT(31)
#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7)
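/* BD ring lengths are programmed in multiples of 8 BDs (low 3 bits masked off) */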
/* Port regs, offset: 1_0000h */
#define ENETC_PORT_BASE 0x10000
#define ENETC_PMR 0x0000
#define ENETC_PMR_EN GENMASK(18, 16)
#define ENETC_PSR 0x0004 /* RO */
#define ENETC_PSIPMR 0x0018
#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16)
#define ENETC_PSIPVMR 0x001c
#define ENETC_VLAN_PROMISC_MAP_ALL 0x7
#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7)
#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16)
#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */
#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8)
#define ENETC_PVCLCTR 0x0208
#define ENETC_VLAN_TYPE_C BIT(0)
#define ENETC_VLAN_TYPE_S BIT(1)
#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */
#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
#define ENETC_PSIVLAN_EN BIT(31)
#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12)
#define ENETC_PTXMBAR 0x0608
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
#define ENETC_PCAPR1 0x0904
#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16)
#define ENETC_PSICFGR0_VTE BIT(12)
#define ENETC_PSICFGR0_SIVIE BIT(14)
#define ENETC_PSICFGR0_ASE BIT(15)
#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
#define ENETC_PRFSMR 0x1800
#define ENETC_PRFSMR_RFSE BIT(31)
#define ENETC_PRFSCAPR 0x1804
#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16)
#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
#define ENETC_PFPMR_MWLM BIT(0)
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10)
#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
#define ENETC_MMCSR 0x1f00
#define ENETC_MMCSR_ME BIT(16)
#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
#define ENETC_PM0_CMD_CFG 0x8008
#define ENETC_PM1_CMD_CFG 0x9008
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
#define ENETC_PM0_CMD_XGLP BIT(10)
#define ENETC_PM0_CMD_TXP BIT(11)
#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
#define ENETC_PM0_CMD_SFD BIT(21)
#define ENETC_PM0_MAXFRM 0x8014
#define ENETC_SET_TX_MTU(val) ((val) << 16)
#define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
#define ENETC_PM0_IF_MODE 0x8300
#define ENETC_PMO_IFM_RG BIT(2)
#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
#define ENETC_PM0_IFM_XGMII BIT(12)
/* MAC counters */
#define ENETC_PM0_REOCT 0x8100
#define ENETC_PM0_RALN 0x8110
#define ENETC_PM0_RXPF 0x8118
#define ENETC_PM0_RFRM 0x8120
#define ENETC_PM0_RFCS 0x8128
#define ENETC_PM0_RVLAN 0x8130
#define ENETC_PM0_RERR 0x8138
#define ENETC_PM0_RUCA 0x8140
#define ENETC_PM0_RMCA 0x8148
#define ENETC_PM0_RBCA 0x8150
#define ENETC_PM0_RDRP 0x8158
#define ENETC_PM0_RPKT 0x8160
#define ENETC_PM0_RUND 0x8168
#define ENETC_PM0_R64 0x8170
#define ENETC_PM0_R127 0x8178
#define ENETC_PM0_R255 0x8180
#define ENETC_PM0_R511 0x8188
#define ENETC_PM0_R1023 0x8190
#define ENETC_PM0_R1518 0x8198
#define ENETC_PM0_R1519X 0x81A0
#define ENETC_PM0_ROVR 0x81A8
#define ENETC_PM0_RJBR 0x81B0
#define ENETC_PM0_RFRG 0x81B8
#define ENETC_PM0_RCNP 0x81C0
#define ENETC_PM0_RDRNTP 0x81C8
#define ENETC_PM0_TEOCT 0x8200
#define ENETC_PM0_TOCT 0x8208
#define ENETC_PM0_TCRSE 0x8210
#define ENETC_PM0_TXPF 0x8218
#define ENETC_PM0_TFRM 0x8220
#define ENETC_PM0_TFCS 0x8228
#define ENETC_PM0_TVLAN 0x8230
#define ENETC_PM0_TERR 0x8238
#define ENETC_PM0_TUCA 0x8240
#define ENETC_PM0_TMCA 0x8248
#define ENETC_PM0_TBCA 0x8250
#define ENETC_PM0_TPKT 0x8260
#define ENETC_PM0_TUND 0x8268
#define ENETC_PM0_T127 0x8278
#define ENETC_PM0_T1023 0x8290
#define ENETC_PM0_T1518 0x8298
#define ENETC_PM0_TCNP 0x82C0
#define ENETC_PM0_TDFR 0x82D0
#define ENETC_PM0_TMCOL 0x82D8
#define ENETC_PM0_TSCOL 0x82E0
#define ENETC_PM0_TLCOL 0x82E8
#define ENETC_PM0_TECOL 0x82F0
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
#define ENETC_PBFDSIR 0x0810
#define ENETC_PFDMSAPR 0x0814
#define ENETC_UFDMF 0x1680
#define ENETC_MFDMF 0x1684
#define ENETC_PUFDVFR 0x1780
#define ENETC_PMFDVFR 0x1784
#define ENETC_PBFDVFR 0x1788
/** Global regs, offset: 2_0000h */
#define ENETC_GLOBAL_BASE 0x20000
#define ENETC_G_EIPBRR0 0x0bf8
#define ENETC_G_EIPBRR1 0x0bfc
#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
#define ENETC_G_EPFBLPR1_XGMII 0x80000000
/* PCI device info */
struct enetc_hw {
/* SI registers, used by all PCI functions */
void __iomem *reg;
/* Port registers, PF only */
void __iomem *port;
/* IP global registers, PF only */
void __iomem *global;
};
/* general register accessors */
#define enetc_rd_reg(reg) ioread32((reg))
#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
#ifdef ioread64
#define enetc_rd_reg64(reg) ioread64((reg))
#else
/* used to read out 64-bit stats counters on 32-bit systems */
static inline u64 enetc_rd_reg64(void __iomem *reg)
{
u32 low, high, tmp;
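	/* a 64-bit counter can't be read atomically over a 32-bit bus;
	 * read high, then low, then high again, and retry if the high
	 * word changed (i.e. the low word wrapped between reads)
	 */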
do {
high = ioread32(reg + 4);
low = ioread32(reg);
tmp = ioread32(reg + 4);
} while (high != tmp);
	/* ioread32() already returns CPU-endian values, no conversion needed */
	return (u64)high << 32 | low;
}
#endif
#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
/* global register accessors - PF only */
#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
/* BDR register accessors, see ENETC_BDR() */
#define enetc_bdr_rd(hw, t, n, off) \
enetc_rd(hw, ENETC_BDR(t, n, off))
#define enetc_bdr_wr(hw, t, n, off, val) \
enetc_wr(hw, ENETC_BDR(t, n, off), val)
#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
#define enetc_txbdr_wr(hw, n, off, val) \
enetc_bdr_wr(hw, TX, n, off, val)
#define enetc_rxbdr_wr(hw, n, off, val) \
enetc_bdr_wr(hw, RX, n, off, val)
/* Buffer Descriptors (BD) */
union enetc_tx_bd {
struct {
__le64 addr;
__le16 buf_len;
__le16 frm_len;
union {
struct {
__le16 l3_csoff;
u8 l4_csoff;
u8 flags;
}; /* default layout */
__le32 lstatus;
};
};
struct {
__le32 tstamp;
__le16 tpid;
__le16 vid;
u8 reserved[6];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
};
#define ENETC_TXBD_FLAGS_L4CS BIT(0)
#define ENETC_TXBD_FLAGS_W BIT(2)
#define ENETC_TXBD_FLAGS_CSUM BIT(3)
#define ENETC_TXBD_FLAGS_EX BIT(6)
#define ENETC_TXBD_FLAGS_F BIT(7)
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
}
/* L3 csum flags */
#define ENETC_TXBD_L3_IPCS BIT(7)
#define ENETC_TXBD_L3_IPV6 BIT(15)
#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0)
#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8)
/* Extension flags */
#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
{
return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
(start & ENETC_TXBD_L3_START_MASK));
}
/* L4 csum flags */
#define ENETC_TXBD_L4_UDP BIT(5)
#define ENETC_TXBD_L4_TCP BIT(6)
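/* Illustrative sketch (not part of the driver) of how the checksum
 * helpers above compose for a plain TCP/IPv4 frame:
 *
 *	txbd->l3_csoff = enetc_txbd_l3_csoff(ETH_HLEN, 20, ENETC_TXBD_L3_IPCS);
 *	txbd->l4_csoff = ENETC_TXBD_L4_TCP;
 *	txbd->flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
 */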
union enetc_rx_bd {
struct {
__le64 addr;
u8 reserved[8];
} w;
struct {
__le16 inet_csum;
__le16 parse_summary;
__le32 rss_hash;
__le16 buf_len;
__le16 vlan_opt;
union {
struct {
__le16 flags;
__le16 error;
};
__le32 lstatus;
};
} r;
};
#define ENETC_RXBD_LSTATUS_R BIT(30)
#define ENETC_RXBD_LSTATUS_F BIT(31)
#define ENETC_RXBD_ERR_MASK 0xff
#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16)
#define ENETC_RXBD_FLAG_VLAN BIT(9)
#define ENETC_RXBD_FLAG_TSTMP BIT(10)
#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */
#define ENETC_MAC_ADDR_FILT_RES	3 /* # of reserved entries at the beginning */
#define ENETC_MAX_NUM_VFS 2
struct enetc_cbd {
union {
struct {
__le32 addr[2];
__le32 opt[4];
};
__le32 data[6];
};
__le16 index;
__le16 length;
u8 cmd;
u8 cls;
u8 _res;
u8 status_flags;
};
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
u8 dmac_h[6];
u8 dmac_m[6];
u32 sip_h[4];
u32 sip_m[4];
u32 dip_h[4];
u32 dip_m[4];
u16 ethtype_h;
u16 ethtype_m;
u16 ethtype4_h;
u16 ethtype4_m;
u16 sport_h;
u16 sport_m;
u16 dport_h;
u16 dport_m;
u16 vlan_h;
u16 vlan_m;
u8 proto_h;
u8 proto_m;
u16 flags;
u16 result;
u16 mode;
};
#define ENETC_RFSE_EN BIT(15)
#define ENETC_RFSE_MODE_BD 2
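/* each _h member of struct enetc_cmd_rfse holds a match value and the
 * corresponding _m member its mask, mirroring ethtool's h_u/m_u layout
 */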
static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
{
*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
}
#define ENETC_SI_INT_IDX 0
/* base index for Rx/Tx interrupts */
#define ENETC_BDR_INT_BASE_IDX 1
/* Messaging */
/* Command completion status */
enum enetc_msg_cmd_status {
ENETC_MSG_CMD_STATUS_OK,
ENETC_MSG_CMD_STATUS_FAIL
};
/* VSI-PSI command message types */
enum enetc_msg_cmd_type {
ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */
ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */
ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */
};
/* VSI-PSI command action types */
enum enetc_msg_cmd_action_type {
ENETC_MSG_CMD_MNG_ADD = 1,
ENETC_MSG_CMD_MNG_REMOVE
};
/* PSI-VSI command header format */
struct enetc_msg_cmd_header {
u16 type; /* command class type */
u16 id; /* denotes the specific required action */
};
/* Common H/W utility functions */
static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
bool en)
{
u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
}
static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
bool en)
{
u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
}
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include "enetc_pf.h"
static void enetc_msg_disable_mr_int(struct enetc_hw *hw)
{
u32 psiier = enetc_rd(hw, ENETC_PSIIER);
/* disable MR int source(s) */
enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK);
}
static void enetc_msg_enable_mr_int(struct enetc_hw *hw)
{
u32 psiier = enetc_rd(hw, ENETC_PSIIER);
enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK);
}
static irqreturn_t enetc_msg_psi_msix(int irq, void *data)
{
struct enetc_si *si = (struct enetc_si *)data;
struct enetc_pf *pf = enetc_si_priv(si);
enetc_msg_disable_mr_int(&si->hw);
schedule_work(&pf->msg_task);
return IRQ_HANDLED;
}
static void enetc_msg_task(struct work_struct *work)
{
struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task);
struct enetc_hw *hw = &pf->si->hw;
unsigned long mr_mask;
int i;
for (;;) {
mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK;
if (!mr_mask) {
/* re-arm MR interrupts, w1c the IDR reg */
enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK);
enetc_msg_enable_mr_int(hw);
return;
}
for (i = 0; i < pf->num_vfs; i++) {
u32 psimsgrr;
u16 msg_code;
if (!(ENETC_PSIMSGRR_MR(i) & mr_mask))
continue;
enetc_msg_handle_rxmsg(pf, i, &msg_code);
psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code);
psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */
enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr);
}
}
}
/* Init */
static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx)
{
struct enetc_pf *pf = enetc_si_priv(si);
struct device *dev = &si->pdev->dev;
struct enetc_hw *hw = &si->hw;
struct enetc_msg_swbd *msg;
u32 val;
msg = &pf->rxmsg[idx];
/* allocate and set receive buffer */
msg->size = ENETC_DEFAULT_MSG_SIZE;
msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma,
GFP_KERNEL);
if (!msg->vaddr) {
dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n",
msg->size);
return -ENOMEM;
}
	/* the receive buffer address must be 32-byte aligned;
	 * dma_alloc_coherent() guarantees at least that
	 */
val = lower_32_bits(msg->dma);
enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val);
val = upper_32_bits(msg->dma);
enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val);
return 0;
}
static void enetc_msg_free_mbx(struct enetc_si *si, int idx)
{
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_hw *hw = &si->hw;
struct enetc_msg_swbd *msg;
msg = &pf->rxmsg[idx];
dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma);
memset(msg, 0, sizeof(*msg));
enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0);
enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0);
}
int enetc_msg_psi_init(struct enetc_pf *pf)
{
struct enetc_si *si = pf->si;
int vector, i, err;
/* register message passing interrupt handler */
snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg",
si->ndev->name);
vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX);
err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si);
if (err) {
dev_err(&si->pdev->dev,
"PSI messaging: request_irq() failed!\n");
return err;
}
/* set one IRQ entry for PSI message receive notification (SI int) */
enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX);
/* initialize PSI mailbox */
INIT_WORK(&pf->msg_task, enetc_msg_task);
for (i = 0; i < pf->num_vfs; i++) {
err = enetc_msg_alloc_mbx(si, i);
if (err)
goto err_init_mbx;
}
/* enable MR interrupts */
enetc_msg_enable_mr_int(&si->hw);
return 0;
err_init_mbx:
for (i--; i >= 0; i--)
enetc_msg_free_mbx(si, i);
free_irq(vector, si);
return err;
}
void enetc_msg_psi_free(struct enetc_pf *pf)
{
struct enetc_si *si = pf->si;
int i;
cancel_work_sync(&pf->msg_task);
/* disable MR interrupts */
enetc_msg_disable_mr_int(&si->hw);
for (i = 0; i < pf->num_vfs; i++)
enetc_msg_free_mbx(si, i);
/* de-register message passing interrupt handler */
free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si);
}
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "enetc_pf.h"
#define ENETC_DRV_VER_MAJ 1
#define ENETC_DRV_VER_MIN 0
#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
__stringify(ENETC_DRV_VER_MIN)
static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC PF driver"
static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
{
u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
*(u32 *)addr = upper;
*(u16 *)(addr + 4) = lower;
}
static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
const u8 *addr)
{
u32 upper = *(const u32 *)addr;
u16 lower = *(const u16 *)(addr + 4);
__raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
}
static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
return 0;
}
static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
{
u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);
val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
}
static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
{
return pf->vlan_promisc_simap & BIT(si_idx);
}
static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
{
	return !bitmap_empty(pf->active_vlans, VLAN_N_VID);
}
static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
pf->vlan_promisc_simap |= BIT(si_idx);
enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
}
static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
pf->vlan_promisc_simap &= ~BIT(si_idx);
enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
}
static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
{
u32 val = 0;
if (vlan)
val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
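/* fold the byte-reversed 48-bit MAC into a 6-bit hash index: bit i of
 * the result is the parity (XOR) of every 6th address bit starting at i
 */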
static int enetc_mac_addr_hash_idx(const u8 *addr)
{
u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
u64 mask = 0;
int res = 0;
int i;
for (i = 0; i < 8; i++)
mask |= BIT_ULL(i * 6);
for (i = 0; i < 6; i++)
res |= (hweight64(fold & (mask << i)) & 0x1) << i;
return res;
}
static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
{
filter->mac_addr_cnt = 0;
bitmap_zero(filter->mac_hash_table,
ENETC_MADDR_HASH_TBL_SZ);
}
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
/* add exact match addr */
ether_addr_copy(filter->mac_addr, addr);
filter->mac_addr_cnt++;
}
static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
int idx = enetc_mac_addr_hash_idx(addr);
/* add hash table entry */
__set_bit(idx, filter->mac_hash_table);
filter->mac_addr_cnt++;
}
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
if (type == UC) {
enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
} else { /* MC */
enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
}
}
static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
u32 *hash)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
if (type == UC) {
enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
} else { /* MC */
enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
}
}
static void enetc_sync_mac_filters(struct enetc_pf *pf)
{
struct enetc_mac_filter *f = pf->mac_filter;
struct enetc_si *si = pf->si;
int i, pos;
pos = ENETC_MAC_ADDR_FILT_RES;
for (i = 0; i < MADDR_TYPE; i++, f++) {
bool em = (f->mac_addr_cnt == 1) && (i == UC);
bool clear = !f->mac_addr_cnt;
if (clear) {
if (i == UC)
enetc_clear_mac_flt_entry(si, pos);
enetc_clear_mac_ht_flt(si, 0, i);
continue;
}
/* exact match filter */
if (em) {
int err;
enetc_clear_mac_ht_flt(si, 0, UC);
err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
BIT(0));
if (!err)
continue;
/* fallback to HT filtering */
dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
err);
}
/* hash table filter, clear EM filter for UC entries */
if (i == UC)
enetc_clear_mac_flt_entry(si, pos);
enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
}
}
static void enetc_pf_set_rx_mode(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
struct enetc_hw *hw = &priv->si->hw;
bool uprom = false, mprom = false;
struct enetc_mac_filter *filter;
struct netdev_hw_addr *ha;
u32 psipmr = 0;
bool em;
if (ndev->flags & IFF_PROMISC) {
/* enable promisc mode for SI0 (PF) */
psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
uprom = true;
mprom = true;
/* enable VLAN promisc mode for SI0 */
if (!enetc_si_vlan_promisc_is_on(pf, 0))
enetc_enable_si_vlan_promisc(pf, 0);
} else if (ndev->flags & IFF_ALLMULTI) {
/* enable multicast promisc mode for SI0 (PF) */
psipmr = ENETC_PSIPMR_SET_MP(0);
mprom = true;
}
/* first 2 filter entries belong to PF */
if (!uprom) {
/* Update unicast filters */
filter = &pf->mac_filter[UC];
enetc_reset_mac_addr_filter(filter);
em = (netdev_uc_count(ndev) == 1);
netdev_for_each_uc_addr(ha, ndev) {
if (em) {
enetc_add_mac_addr_em_filter(filter, ha->addr);
break;
}
enetc_add_mac_addr_ht_filter(filter, ha->addr);
}
}
if (!mprom) {
/* Update multicast filters */
filter = &pf->mac_filter[MC];
enetc_reset_mac_addr_filter(filter);
netdev_for_each_mc_addr(ha, ndev) {
if (!is_multicast_ether_addr(ha->addr))
continue;
enetc_add_mac_addr_ht_filter(filter, ha->addr);
}
}
if (!uprom || !mprom)
/* update PF entries */
enetc_sync_mac_filters(pf);
psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
u32 *hash)
{
enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
}
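/* fold the 12-bit VID in half: bit i of the hash index is the XOR of
 * VID bits i and i + 6
 */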
static int enetc_vid_hash_idx(unsigned int vid)
{
int res = 0;
int i;
for (i = 0; i < 6; i++)
res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
return res;
}
static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
{
int i;
if (rehash) {
bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
int hidx = enetc_vid_hash_idx(i);
__set_bit(hidx, pf->vlan_ht_filter);
}
}
enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
}
static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
int idx;
if (enetc_si_vlan_promisc_is_on(pf, 0))
enetc_disable_si_vlan_promisc(pf, 0);
__set_bit(vid, pf->active_vlans);
idx = enetc_vid_hash_idx(vid);
if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
enetc_sync_vlan_ht_filter(pf, false);
return 0;
}
static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
__clear_bit(vid, pf->active_vlans);
enetc_sync_vlan_ht_filter(pf, true);
if (!enetc_vlan_filter_is_on(pf))
enetc_enable_si_vlan_promisc(pf, 0);
return 0;
}
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
u32 reg;
reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
if (reg & ENETC_PMO_IFM_RG) {
/* RGMII mode */
reg = (reg & ~ENETC_PM0_IFM_RLP) |
(en ? ENETC_PM0_IFM_RLP : 0);
enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
} else {
/* assume SGMII mode */
reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
reg = (reg & ~ENETC_PM0_CMD_XGLP) |
(en ? ENETC_PM0_CMD_XGLP : 0);
reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
(en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
}
}
static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
struct enetc_vf_state *vf_state;
if (vf >= pf->total_vfs)
return -EINVAL;
if (!is_valid_ether_addr(mac))
return -EADDRNOTAVAIL;
vf_state = &pf->vf_state[vf];
vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
return 0;
}
static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
u8 qos, __be16 proto)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
return -EOPNOTSUPP;
if (vf >= pf->total_vfs)
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
/* only C-tags supported for now */
return -EPROTONOSUPPORT;
enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
return 0;
}
static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
u32 cfgr;
if (vf >= pf->total_vfs)
return -EINVAL;
cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
return 0;
}
static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
{
unsigned char mac_addr[MAX_ADDR_LEN];
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_hw *hw = &si->hw;
int i;
/* check MAC addresses for PF and all VFs, if any is zero set it to a random one */
for (i = 0; i < pf->total_vfs + 1; i++) {
enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
if (!is_zero_ether_addr(mac_addr))
continue;
eth_random_addr(mac_addr);
dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
i, mac_addr);
enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
}
}
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_hw *hw = &si->hw;
int num_entries, vf_entries, i;
u32 val;
/* split RFS entries between functions */
val = enetc_port_rd(hw, ENETC_PRFSCAPR);
num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
vf_entries = num_entries / (pf->total_vfs + 1);
for (i = 0; i < pf->total_vfs; i++)
enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
num_entries - vf_entries * pf->total_vfs);
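	/* e.g. with 64 RFS entries and 2 VFs: each VF gets 21 entries and
	 * the PF keeps the remaining 22
	 */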
/* enable RFS on port */
enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}
static void enetc_port_si_configure(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_hw *hw = &si->hw;
int num_rings, i;
u32 val;
val = enetc_port_rd(hw, ENETC_PCAPR0);
num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
num_rings, ENETC_PF_NUM_RINGS);
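		/* under-provisioned: give all available rings to the PF
		 * and leave none for the VFs (num_rings is zeroed below)
		 */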
num_rings = 0;
}
/* Add default one-time settings for SI0 (PF) */
val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
enetc_port_wr(hw, ENETC_PSICFGR0(0), val);
if (num_rings)
num_rings -= ENETC_PF_NUM_RINGS;
/* Configure the SIs for each available VF */
val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
if (num_rings) {
num_rings /= pf->total_vfs;
val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
}
for (i = 0; i < pf->total_vfs; i++)
enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);
/* Port level VLAN settings */
val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
enetc_port_wr(hw, ENETC_PVCLCTR, val);
/* use outer tag for VLAN filtering */
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
static void enetc_configure_port_mac(struct enetc_hw *hw)
{
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
/* set auto-speed for RGMII */
if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG)
enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII)
enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
}
static void enetc_configure_port_pmac(struct enetc_hw *hw)
{
u32 temp;
/* Set pMAC step lock */
temp = enetc_port_rd(hw, ENETC_PFPMR);
enetc_port_wr(hw, ENETC_PFPMR,
temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
temp = enetc_port_rd(hw, ENETC_MMCSR);
enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
}
static void enetc_configure_port(struct enetc_pf *pf)
{
u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
enetc_configure_port_pmac(hw);
enetc_configure_port_mac(hw);
enetc_port_si_configure(pf->si);
/* set up hash key */
get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
enetc_set_rss_key(hw, hash_key);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
/* fix-up primary MAC addresses, if not set already */
enetc_port_setup_primary_mac_address(pf->si);
/* enforce VLAN promisc mode for all SIs */
pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
enetc_port_wr(hw, ENETC_PSIPMR, 0);
/* enable port */
enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
}
/* Messaging */
static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf,
int vf_id)
{
struct enetc_vf_state *vf_state = &pf->vf_state[vf_id];
struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
struct enetc_msg_cmd_set_primary_mac *cmd;
struct device *dev = &pf->si->pdev->dev;
u16 cmd_id;
char *addr;
cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
cmd_id = cmd->header.id;
if (cmd_id != ENETC_MSG_CMD_MNG_ADD)
return ENETC_MSG_CMD_STATUS_FAIL;
addr = cmd->mac.sa_data;
if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC)
dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n",
vf_id);
else
enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr);
return ENETC_MSG_CMD_STATUS_OK;
}
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status)
{
struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
struct device *dev = &pf->si->pdev->dev;
struct enetc_msg_cmd_header *cmd_hdr;
u16 cmd_type;
*status = ENETC_MSG_CMD_STATUS_OK;
cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
cmd_type = cmd_hdr->type;
switch (cmd_type) {
case ENETC_MSG_CMD_MNG_MAC:
*status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id);
break;
default:
dev_err(dev, "command not supported (cmd_type: 0x%x)\n",
cmd_type);
}
}
#ifdef CONFIG_PCI_IOV
static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct enetc_si *si = pci_get_drvdata(pdev);
struct enetc_pf *pf = enetc_si_priv(si);
int err;
if (!num_vfs) {
enetc_msg_psi_free(pf);
kfree(pf->vf_state);
pf->num_vfs = 0;
pci_disable_sriov(pdev);
} else {
pf->num_vfs = num_vfs;
pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
GFP_KERNEL);
if (!pf->vf_state) {
pf->num_vfs = 0;
return -ENOMEM;
}
err = enetc_msg_psi_init(pf);
if (err) {
dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
goto err_msg_psi;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
goto err_en_sriov;
}
}
return num_vfs;
err_en_sriov:
enetc_msg_psi_free(pf);
err_msg_psi:
kfree(pf->vf_state);
pf->num_vfs = 0;
return err;
}
#else
#define enetc_sriov_configure(pdev, num_vfs) (void)0
#endif
static int enetc_pf_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
enetc_enable_rxvlan(&priv->si->hw, 0,
!!(features & NETIF_F_HW_VLAN_CTAG_RX));
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(&priv->si->hw, 0,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
return enetc_set_features(ndev, features);
}
static const struct net_device_ops enetc_ndev_ops = {
.ndo_open = enetc_open,
.ndo_stop = enetc_close,
.ndo_start_xmit = enetc_xmit,
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_pf_set_mac_addr,
.ndo_set_rx_mode = enetc_pf_set_rx_mode,
.ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
.ndo_set_vf_mac = enetc_pf_set_vf_mac,
.ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
};
static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
const struct net_device_ops *ndev_ops)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &si->pdev->dev);
priv->ndev = ndev;
priv->si = si;
priv->dev = &si->pdev->dev;
si->ndev = ndev;
priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_LOOPBACK;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
if (si->errata & ENETC_ERR_TXCSUM) {
ndev->hw_features &= ~NETIF_F_HW_CSUM;
ndev->features &= ~NETIF_F_HW_CSUM;
}
ndev->priv_flags |= IFF_UNICAST_FLT;
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct device_node *np = priv->dev->of_node;
int err;
if (!np) {
dev_err(priv->dev, "missing ENETC port node\n");
return -ENODEV;
}
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node) {
if (!of_phy_is_fixed_link(np)) {
dev_err(priv->dev, "PHY not specified\n");
return -ENODEV;
}
err = of_phy_register_fixed_link(np);
if (err < 0) {
dev_err(priv->dev, "fixed link registration failed\n");
return err;
}
priv->phy_node = of_node_get(np);
}
priv->if_mode = of_get_phy_mode(np);
if (priv->if_mode < 0) {
dev_err(priv->dev, "missing phy type\n");
of_node_put(priv->phy_node);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
return -EINVAL;
}
return 0;
}
static void enetc_of_put_phy(struct enetc_ndev_priv *priv)
{
struct device_node *np = priv->dev->of_node;
if (np && of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
if (priv->phy_node)
of_node_put(priv->phy_node);
}
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct enetc_ndev_priv *priv;
struct net_device *ndev;
struct enetc_si *si;
struct enetc_pf *pf;
int err;
if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
dev_info(&pdev->dev, "device is disabled, skipping\n");
return -ENODEV;
}
err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
if (err) {
dev_err(&pdev->dev, "PCI probing failed\n");
return err;
}
si = pci_get_drvdata(pdev);
if (!si->hw.port || !si->hw.global) {
err = -ENODEV;
dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
goto err_map_pf_space;
}
pf = enetc_si_priv(si);
pf->si = si;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
enetc_configure_port(pf);
enetc_get_si_caps(si);
ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
if (!ndev) {
err = -ENOMEM;
dev_err(&pdev->dev, "netdev creation failed\n");
goto err_alloc_netdev;
}
enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
priv = netdev_priv(ndev);
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
if (err) {
dev_err(&pdev->dev, "SI resource alloc failed\n");
goto err_alloc_si_res;
}
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
goto err_alloc_msix;
}
err = enetc_of_get_phy(priv);
if (err)
dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
netif_carrier_off(ndev);
netif_info(priv, probe, ndev, "%s v%s\n",
enetc_drv_name, enetc_drv_ver);
return 0;
err_reg_netdev:
enetc_of_put_phy(priv);
enetc_free_msix(priv);
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
err_map_pf_space:
enetc_pci_remove(pdev);
return err;
}
static void enetc_pf_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_ndev_priv *priv;
if (pf->num_vfs)
enetc_sriov_configure(pdev, 0);
priv = netdev_priv(si->ndev);
netif_info(priv, drv, si->ndev, "%s v%s remove\n",
enetc_drv_name, enetc_drv_ver);
unregister_netdev(si->ndev);
enetc_of_put_phy(priv);
enetc_free_msix(priv);
enetc_free_si_resources(priv);
free_netdev(si->ndev);
enetc_pci_remove(pdev);
}
static const struct pci_device_id enetc_pf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);
static struct pci_driver enetc_pf_driver = {
.name = KBUILD_MODNAME,
.id_table = enetc_pf_id_table,
.probe = enetc_pf_probe,
.remove = enetc_pf_remove,
#ifdef CONFIG_PCI_IOV
.sriov_configure = enetc_sriov_configure,
#endif
};
module_pci_driver(enetc_pf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ENETC_DRV_VER_STR);
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2017-2019 NXP */
#include "enetc.h"
#define ENETC_PF_NUM_RINGS 8
enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
#define ENETC_MADDR_HASH_TBL_SZ 64
struct enetc_mac_filter {
union {
char mac_addr[ETH_ALEN];
DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
};
int mac_addr_cnt;
};
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
ENETC_VF_FLAG_PF_SET_MAC = BIT(0),
};
struct enetc_vf_state {
enum enetc_vf_flags flags;
};
struct enetc_pf {
struct enetc_si *si;
int num_vfs; /* number of active VFs, after sriov_init */
int total_vfs; /* max number of VFs, set for PF at probe */
struct enetc_vf_state *vf_state;
struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT];
struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS];
struct work_struct msg_task;
char msg_int_name[ENETC_INT_NAME_MAX];
char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */
DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE);
DECLARE_BITMAP(active_vlans, VLAN_N_VID);
};
int enetc_msg_psi_init(struct enetc_pf *pf);
void enetc_msg_psi_free(struct enetc_pf *pf);
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include <linux/module.h>
#include "enetc.h"
#define ENETC_DRV_VER_MAJ 1
#define ENETC_DRV_VER_MIN 0
#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
__stringify(ENETC_DRV_VER_MIN)
static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC VF driver"
static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
/* Messaging */
static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
struct enetc_msg_swbd *msg)
{
u32 val;
val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma);
enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma));
enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
}
static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
{
int timeout = 100;
u32 vsimsgsr;
enetc_msg_vsi_write_msg(&si->hw, msg);
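	/* poll the 'message busy' bit; the PSI clears it once it has
	 * consumed the message, or we time out after ~100-200 ms
	 */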
do {
vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
if (!(vsimsgsr & ENETC_VSIMSGSR_MB))
break;
usleep_range(1000, 2000);
} while (--timeout);
if (!timeout)
return -ETIMEDOUT;
/* check for message delivery error */
if (vsimsgsr & ENETC_VSIMSGSR_MS) {
dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
ENETC_SIMSGSR_GET_MC(vsimsgsr));
return -EIO;
}
return 0;
}
static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
struct sockaddr *saddr)
{
struct enetc_msg_cmd_set_primary_mac *cmd;
struct enetc_msg_swbd msg;
int err;
msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
GFP_KERNEL);
if (!msg.vaddr) {
dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n",
msg.size);
return -ENOMEM;
}
cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
cmd->header.type = ENETC_MSG_CMD_MNG_MAC;
cmd->header.id = ENETC_MSG_CMD_MNG_ADD;
memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
/* send the command and wait */
err = enetc_msg_vsi_send(priv->si, &msg);
dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
return err;
}
static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
if (err)
return err;
return 0;
}
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
return enetc_set_features(ndev, features);
}
/* Probing / Init */
static const struct net_device_ops enetc_ndev_ops = {
.ndo_open = enetc_open,
.ndo_stop = enetc_close,
.ndo_start_xmit = enetc_xmit,
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
const struct net_device_ops *ndev_ops)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &si->pdev->dev);
priv->ndev = ndev;
priv->si = si;
priv->dev = &si->pdev->dev;
si->ndev = ndev;
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
if (si->errata & ENETC_ERR_TXCSUM) {
ndev->hw_features &= ~NETIF_F_HW_CSUM;
ndev->features &= ~NETIF_F_HW_CSUM;
}
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct enetc_ndev_priv *priv;
struct net_device *ndev;
struct enetc_si *si;
int err;
err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
if (err) {
dev_err(&pdev->dev, "PCI probing failed\n");
return err;
}
si = pci_get_drvdata(pdev);
enetc_get_si_caps(si);
ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
if (!ndev) {
err = -ENOMEM;
dev_err(&pdev->dev, "netdev creation failed\n");
goto err_alloc_netdev;
}
enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
priv = netdev_priv(ndev);
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
if (err) {
dev_err(&pdev->dev, "SI resource alloc failed\n");
goto err_alloc_si_res;
}
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
goto err_alloc_msix;
}
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
netif_carrier_off(ndev);
netif_info(priv, probe, ndev, "%s v%s\n",
enetc_drv_name, enetc_drv_ver);
return 0;
err_reg_netdev:
enetc_free_msix(priv);
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
enetc_pci_remove(pdev);
return err;
}
static void enetc_vf_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
netif_info(priv, drv, si->ndev, "%s v%s remove\n",
enetc_drv_name, enetc_drv_ver);
unregister_netdev(si->ndev);
enetc_free_msix(priv);
enetc_free_si_resources(priv);
free_netdev(si->ndev);
enetc_pci_remove(pdev);
}
static const struct pci_device_id enetc_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
static struct pci_driver enetc_vf_driver = {
.name = KBUILD_MODNAME,
.id_table = enetc_vf_id_table,
.probe = enetc_vf_probe,
.remove = enetc_vf_remove,
};
module_pci_driver(enetc_vf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ENETC_DRV_VER_STR);