Commit 2c99cd91 authored by David S. Miller

Merge branch 'amd-xgbe-next'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2014-11-04

The following series of patches includes functional updates to the
driver as well as some trivial changes for function renaming and
spelling fixes.

- Move channel and ring structure allocation into the device open path
- Rename the pre_xmit function to dev_xmit
- Explicitly use the u32 data type for the device descriptors
- Use page allocation for the receive buffers
- Add support for split header/payload receive
- Add support for per DMA channel interrupts
- Add support for receive side scaling (RSS)
- Add support for ethtool receive side scaling commands
- Fix the spelling of descriptors
- After a PCS reset, sync the PCS and PHY modes
- Add dependency on HAS_IOMEM to both the amd-xgbe and amd-xgbe-phy
  drivers

This patch series is based on net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 25de4668 5cdec679
......@@ -7,7 +7,10 @@ Required properties:
- PCS registers
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- interrupts: Should contain the amd-xgbe interrupt
- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
listed is required and is the general device interrupt. If the optional
amd,per-channel-interrupt property is specified, then one additional
interrupt for each DMA channel supported by the device should be specified
- clocks:
- DMA clock for the amd-xgbe device (used for calculating the
correct Rx interrupt watchdog timer value on a DMA channel
......@@ -23,6 +26,9 @@ Optional properties:
- mac-address: mac address to be assigned to the device. Can be overridden
by UEFI.
- dma-coherent: Present if dma operations are coherent
- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
a unique interrupt for each DMA channel - this requires an additional
interrupt be configured for each DMA channel
Example:
xgbe@e0700000 {
......@@ -30,7 +36,9 @@ Example:
reg = <0 0xe0700000 0 0x80000>,
<0 0xe0780000 0 0x80000>;
interrupt-parent = <&gic>;
interrupts = <0 325 4>;
interrupts = <0 325 4>,
<0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
amd,per-channel-interrupt;
clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
clock-names = "dma_clk", "ptp_clk";
phy-handle = <&phy>;
......
......@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
depends on OF_NET
depends on OF_NET && HAS_IOMEM
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
......
......@@ -207,6 +207,8 @@
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
#define DMA_CH_CR_SPH_INDEX 24
#define DMA_CH_CR_SPH_WIDTH 1
#define DMA_CH_IER_AIE_INDEX 15
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
......@@ -306,6 +308,9 @@
#define MAC_MACA0LR 0x0304
#define MAC_MACA1HR 0x0308
#define MAC_MACA1LR 0x030c
#define MAC_RSSCR 0x0c80
#define MAC_RSSAR 0x0c88
#define MAC_RSSDR 0x0c8c
#define MAC_TSCR 0x0d00
#define MAC_SSIR 0x0d04
#define MAC_STSR 0x0d08
......@@ -429,6 +434,8 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
#define MAC_RCR_IPC_WIDTH 1
#define MAC_RCR_JE_INDEX 8
......@@ -445,6 +452,24 @@
#define MAC_RFCR_UP_WIDTH 1
#define MAC_RQC0R_RXQ0EN_INDEX 0
#define MAC_RQC0R_RXQ0EN_WIDTH 2
#define MAC_RSSAR_ADDRT_INDEX 2
#define MAC_RSSAR_ADDRT_WIDTH 1
#define MAC_RSSAR_CT_INDEX 1
#define MAC_RSSAR_CT_WIDTH 1
#define MAC_RSSAR_OB_INDEX 0
#define MAC_RSSAR_OB_WIDTH 1
#define MAC_RSSAR_RSSIA_INDEX 8
#define MAC_RSSAR_RSSIA_WIDTH 8
#define MAC_RSSCR_IP2TE_INDEX 1
#define MAC_RSSCR_IP2TE_WIDTH 1
#define MAC_RSSCR_RSSE_INDEX 0
#define MAC_RSSCR_RSSE_WIDTH 1
#define MAC_RSSCR_TCP4TE_INDEX 2
#define MAC_RSSCR_TCP4TE_WIDTH 1
#define MAC_RSSCR_UDP4TE_INDEX 3
#define MAC_RSSCR_UDP4TE_WIDTH 1
#define MAC_RSSDR_DMCH_INDEX 0
#define MAC_RSSDR_DMCH_WIDTH 4
#define MAC_SSIR_SNSINC_INDEX 8
#define MAC_SSIR_SNSINC_WIDTH 8
#define MAC_SSIR_SSINC_INDEX 16
......@@ -844,9 +869,13 @@
#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
#define RX_NORMAL_DESC2_HL_INDEX 0
#define RX_NORMAL_DESC2_HL_WIDTH 10
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
......@@ -855,14 +884,27 @@
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
#define RX_NORMAL_DESC3_ETLT_WIDTH 4
#define RX_NORMAL_DESC3_FD_INDEX 29
#define RX_NORMAL_DESC3_FD_WIDTH 1
#define RX_NORMAL_DESC3_INTE_INDEX 30
#define RX_NORMAL_DESC3_INTE_WIDTH 1
#define RX_NORMAL_DESC3_L34T_INDEX 20
#define RX_NORMAL_DESC3_L34T_WIDTH 4
#define RX_NORMAL_DESC3_LD_INDEX 28
#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_NORMAL_DESC3_OWN_INDEX 31
#define RX_NORMAL_DESC3_OWN_WIDTH 1
#define RX_NORMAL_DESC3_PL_INDEX 0
#define RX_NORMAL_DESC3_PL_WIDTH 14
#define RX_NORMAL_DESC3_RSV_INDEX 26
#define RX_NORMAL_DESC3_RSV_WIDTH 1
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
#define RX_DESC3_L34T_IPV4_ICMP 3
#define RX_DESC3_L34T_IPV6_TCP 9
#define RX_DESC3_L34T_IPV6_UDP 10
#define RX_DESC3_L34T_IPV6_ICMP 11
#define RX_CONTEXT_DESC3_TSA_INDEX 4
#define RX_CONTEXT_DESC3_TSA_WIDTH 1
......
......@@ -117,7 +117,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring)
......@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
xgbe_unmap_skb(pdata, rdata);
xgbe_unmap_rdata(pdata, rdata);
}
kfree(ring->rdata);
ring->rdata = NULL;
}
if (ring->rx_hdr_pa.pages) {
dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
put_page(ring->rx_hdr_pa.pages);
ring->rx_hdr_pa.pages = NULL;
ring->rx_hdr_pa.pages_len = 0;
ring->rx_hdr_pa.pages_offset = 0;
ring->rx_hdr_pa.pages_dma = 0;
}
if (ring->rx_buf_pa.pages) {
dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
put_page(ring->rx_buf_pa.pages);
ring->rx_buf_pa.pages = NULL;
ring->rx_buf_pa.pages_len = 0;
ring->rx_buf_pa.pages_offset = 0;
ring->rx_buf_pa.pages_dma = 0;
}
if (ring->rdesc) {
dma_free_coherent(pdata->dev,
(sizeof(struct xgbe_ring_desc) *
......@@ -233,6 +255,96 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
return ret;
}
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
struct page *pages = NULL;
dma_addr_t pages_dma;
int ret;
/* Try to obtain pages, decreasing order if necessary */
gfp |= __GFP_COLD | __GFP_COMP;
while (order >= 0) {
pages = alloc_pages(gfp, order);
if (pages)
break;
order--;
}
if (!pages)
return -ENOMEM;
/* Map the pages */
pages_dma = dma_map_page(pdata->dev, pages, 0,
PAGE_SIZE << order, DMA_FROM_DEVICE);
ret = dma_mapping_error(pdata->dev, pages_dma);
if (ret) {
put_page(pages);
return ret;
}
pa->pages = pages;
pa->pages_len = PAGE_SIZE << order;
pa->pages_offset = 0;
pa->pages_dma = pages_dma;
return 0;
}
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
struct xgbe_page_alloc *pa,
unsigned int len)
{
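/* Hand this descriptor a 'len'-sized slice of the current page
 * allocation, taking a page reference for it, and advance the offset
 * so the next descriptor receives the following slice.
 */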
get_page(pa->pages);
bd->pa = *pa;
bd->dma = pa->pages_dma + pa->pages_offset;
bd->dma_len = len;
pa->pages_offset += len;
if ((pa->pages_offset + len) > pa->pages_len) {
/* This data descriptor is responsible for unmapping page(s) */
bd->pa_unmap = *pa;
/* Get a new allocation next time */
pa->pages = NULL;
pa->pages_len = 0;
pa->pages_offset = 0;
pa->pages_dma = 0;
}
}
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring,
struct xgbe_ring_data *rdata)
{
int order, ret;
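/* Rx buffers are carved out of two page pools shared by the ring: a
 * small pool for packet headers and a larger, higher-order pool for
 * payload data. Replenish whichever pool is empty before assigning
 * this descriptor's buffers.
 */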
if (!ring->rx_hdr_pa.pages) {
ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
if (ret)
return ret;
}
if (!ring->rx_buf_pa.pages) {
order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
order);
if (ret)
return ret;
}
/* Set up the header page info */
xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
XGBE_SKB_ALLOC_SIZE);
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
pdata->rx_buf_size);
return 0;
}
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
......@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
struct xgbe_ring *ring;
struct xgbe_ring_desc *rdesc;
struct xgbe_ring_data *rdata;
dma_addr_t rdesc_dma, skb_dma;
struct sk_buff *skb = NULL;
dma_addr_t rdesc_dma;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
......@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
rdata->rdesc = rdesc;
rdata->rdesc_dma = rdesc_dma;
/* Allocate skb & assign to each rdesc */
skb = dev_alloc_skb(pdata->rx_buf_size);
if (skb == NULL)
if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
skb_dma = dma_map_single(pdata->dev, skb->data,
pdata->rx_buf_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(pdata->dev, skb_dma)) {
netdev_alert(pdata->netdev,
"failed to do the dma map\n");
dev_kfree_skb_any(skb);
break;
}
rdata->skb = skb;
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = pdata->rx_buf_size;
rdesc++;
rdesc_dma += sizeof(struct xgbe_ring_desc);
......@@ -334,7 +431,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
struct xgbe_ring_data *rdata)
{
if (rdata->skb_dma) {
......@@ -354,6 +451,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
rdata->skb = NULL;
}
if (rdata->rx_hdr.pa.pages)
put_page(rdata->rx_hdr.pa.pages);
if (rdata->rx_hdr.pa_unmap.pages) {
dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
rdata->rx_hdr.pa_unmap.pages_len,
DMA_FROM_DEVICE);
put_page(rdata->rx_hdr.pa_unmap.pages);
}
if (rdata->rx_buf.pa.pages)
put_page(rdata->rx_buf.pa.pages);
if (rdata->rx_buf.pa_unmap.pages) {
dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
rdata->rx_buf.pa_unmap.pages_len,
DMA_FROM_DEVICE);
put_page(rdata->rx_buf.pa_unmap.pages);
}
memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
rdata->tso_header = 0;
rdata->len = 0;
rdata->interrupt = 0;
......@@ -494,7 +614,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
err_out:
while (start_index < cur_index) {
rdata = XGBE_GET_DESC_DATA(ring, start_index++);
xgbe_unmap_skb(pdata, rdata);
xgbe_unmap_rdata(pdata, rdata);
}
DBGPR("<--xgbe_map_tx_skb: count=0\n");
......@@ -502,40 +622,25 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
return 0;
}
static void xgbe_realloc_skb(struct xgbe_channel *channel)
static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct sk_buff *skb = NULL;
dma_addr_t skb_dma;
int i;
DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
ring->rx.realloc_index);
for (i = 0; i < ring->dirty; i++) {
rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
/* Reset rdata values */
xgbe_unmap_skb(pdata, rdata);
xgbe_unmap_rdata(pdata, rdata);
/* Allocate skb & assign to each rdesc */
skb = dev_alloc_skb(pdata->rx_buf_size);
if (skb == NULL)
break;
skb_dma = dma_map_single(pdata->dev, skb->data,
pdata->rx_buf_size, DMA_FROM_DEVICE);
if (dma_mapping_error(pdata->dev, skb_dma)) {
netdev_alert(pdata->netdev,
"failed to do the dma map\n");
dev_kfree_skb_any(skb);
if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
}
rdata->skb = skb;
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = pdata->rx_buf_size;
hw_if->rx_desc_reset(rdata);
......@@ -543,7 +648,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
}
ring->dirty = 0;
DBGPR("<--xgbe_realloc_skb\n");
DBGPR("<--xgbe_realloc_rx_buffer\n");
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
......@@ -553,8 +658,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
desc_if->free_ring_resources = xgbe_free_ring_resources;
desc_if->map_tx_skb = xgbe_map_tx_skb;
desc_if->realloc_skb = xgbe_realloc_skb;
desc_if->unmap_skb = xgbe_unmap_skb;
desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
desc_if->unmap_rdata = xgbe_unmap_rdata;
desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
......
......@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
}
}
static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
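/* Enable split header processing on each DMA channel that has an Rx
 * ring, then program the split header size field in the MAC receive
 * configuration register.
 */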
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->rx_ring)
break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
unsigned int index, unsigned int val)
{
unsigned int wait;
int ret = 0;
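/* RSS hash key and lookup table entries are written indirectly: the
 * data is placed in MAC_RSSDR, the entry index and table type are set
 * in MAC_RSSAR, and setting the OB (operation busy) bit starts the
 * write, which is then polled for completion.
 */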
mutex_lock(&pdata->rss_mutex);
if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
ret = -EBUSY;
goto unlock;
}
XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
wait = 1000;
while (wait--) {
if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
goto unlock;
usleep_range(1000, 1500);
}
ret = -EBUSY;
unlock:
mutex_unlock(&pdata->rss_mutex);
return ret;
}
static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
unsigned int *key = (unsigned int *)&pdata->rss_key;
int ret;
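/* Write the hash key one 32-bit register at a time, from the highest
 * register index down to zero.
 */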
while (key_regs--) {
ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
key_regs, *key++);
if (ret)
return ret;
}
return 0;
}
static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
ret = xgbe_write_rss_reg(pdata,
XGBE_RSS_LOOKUP_TABLE_TYPE, i,
pdata->rss_table[i]);
if (ret)
return ret;
}
return 0;
}
static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
return xgbe_write_rss_hash_key(pdata);
}
static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
const u32 *table)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
return xgbe_write_rss_lookup_table(pdata);
}
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
int ret;
if (!pdata->hw_feat.rss)
return -EOPNOTSUPP;
/* Program the hash key */
ret = xgbe_write_rss_hash_key(pdata);
if (ret)
return ret;
/* Program the lookup table */
ret = xgbe_write_rss_lookup_table(pdata);
if (ret)
return ret;
/* Set the RSS options */
XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
/* Enable RSS */
XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
return 0;
}
static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
if (!pdata->hw_feat.rss)
return -EOPNOTSUPP;
XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
return 0;
}
static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
int ret;
if (!pdata->hw_feat.rss)
return;
if (pdata->netdev->features & NETIF_F_RXHASH)
ret = xgbe_enable_rss(pdata);
else
ret = xgbe_disable_rss(pdata);
if (ret)
netdev_err(pdata->netdev,
"error configuring RSS, RSS disabled\n");
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
......@@ -465,16 +620,20 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
if (channel->tx_ring) {
/* Enable the following Tx interrupts
* TIE - Transmit Interrupt Enable (unless polling)
* TIE - Transmit Interrupt Enable (unless using
* per channel interrupts)
*/
if (!pdata->per_channel_irq)
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
* RIE - Receive Interrupt Enable
* RIE - Receive Interrupt Enable (unless using
* per channel interrupts)
*/
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
if (!pdata->per_channel_irq)
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
}
......@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
/* Make sure ownership is written to the descriptor */
wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
int i;
int start_index = ring->cur;
......@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
rdesc = rdata->rdesc;
/* Initialize Tx descriptor
* Set buffer 1 (lo) address to zero
* Set buffer 1 (hi) address to zero
* Reset all other control bits (IC, TTSE, B2L & B1L)
* Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
* etc)
*/
rdesc->desc0 = 0;
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
/* Initialize Tx descriptor */
xgbe_tx_desc_reset(rdata);
}
/* Make sure everything is written to the descriptor(s) before
* telling the device about them
*/
wmb();
/* Update the total number of Tx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
......@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
struct xgbe_ring_desc *rdesc = rdata->rdesc;
/* Reset the Rx descriptor
* Set buffer 1 (lo) address to dma address (lo)
* Set buffer 1 (hi) address to dma address (hi)
* Set buffer 2 (lo) address to zero
* Set buffer 2 (hi) address to zero and set control bits
* OWN and INTE
* Set buffer 1 (lo) address to header dma address (lo)
* Set buffer 1 (hi) address to header dma address (hi)
* Set buffer 2 (lo) address to buffer dma address (lo)
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
rdesc->desc2 = 0;
rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));
rdesc->desc3 = 0;
if (rdata->interrupt)
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
rdata->interrupt ? 1 : 0);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
......@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
unsigned int start_index = ring->cur;
unsigned int rx_coalesce, rx_frames;
unsigned int i;
......@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
rdesc = rdata->rdesc;
/* Initialize Rx descriptor
* Set buffer 1 (lo) address to dma address (lo)
* Set buffer 1 (hi) address to dma address (hi)
* Set buffer 2 (lo) address to zero
* Set buffer 2 (hi) address to zero and set control
* bits OWN and INTE appropriateley
*/
rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
rdesc->desc2 = 0;
rdesc->desc3 = 0;
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
rdata->interrupt = 1;
if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
/* Clear interrupt on completion bit */
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
0);
/* Set interrupt on completion bit as appropriate */
if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
rdata->interrupt = 0;
}
}
else
rdata->interrupt = 1;
/* Make sure everything is written to the descriptors before
* telling the device about them
*/
wmb();
/* Initialize Rx descriptor */
xgbe_rx_desc_reset(rdata);
}
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
......@@ -1198,7 +1325,7 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
xgbe_config_flow_control(pdata);
}
static void xgbe_pre_xmit(struct xgbe_channel *channel)
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
......@@ -1211,7 +1338,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
int start_index = ring->cur;
int i;
DBGPR("-->xgbe_pre_xmit\n");
DBGPR("-->xgbe_dev_xmit\n");
csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE);
......@@ -1410,7 +1537,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
channel->name, start_index & (ring->rdesc_count - 1),
(ring->cur - 1) & (ring->rdesc_count - 1));
DBGPR("<--xgbe_pre_xmit\n");
DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
......@@ -1420,7 +1547,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
struct net_device *netdev = channel->pdata->netdev;
unsigned int err, etlt;
unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
......@@ -1454,6 +1581,31 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
/* Get the header length */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
/* Get the RSS hash */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
RSS_HASH, 1);
packet->rss_hash = le32_to_cpu(rdesc->desc1);
l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
switch (l34t) {
case RX_DESC3_L34T_IPV4_TCP:
case RX_DESC3_L34T_IPV4_UDP:
case RX_DESC3_L34T_IPV6_TCP:
case RX_DESC3_L34T_IPV6_UDP:
packet->rss_hash_type = PKT_HASH_TYPE_L4;
break;
default:
packet->rss_hash_type = PKT_HASH_TYPE_L3;
}
}
/* Get the packet length */
rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
......@@ -2485,6 +2637,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
xgbe_config_sph_mode(pdata);
xgbe_config_rss(pdata);
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
......@@ -2561,7 +2715,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->powerup_rx = xgbe_powerup_rx;
hw_if->powerdown_rx = xgbe_powerdown_rx;
hw_if->pre_xmit = xgbe_pre_xmit;
hw_if->dev_xmit = xgbe_dev_xmit;
hw_if->dev_read = xgbe_dev_read;
hw_if->enable_int = xgbe_enable_int;
hw_if->disable_int = xgbe_disable_int;
......@@ -2620,5 +2774,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
/* For Receive Side Scaling */
hw_if->enable_rss = xgbe_enable_rss;
hw_if->disable_rss = xgbe_disable_rss;
hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
DBGPR("<--xgbe_init_function_ptrs\n");
}
......@@ -114,6 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
......@@ -126,9 +127,99 @@
#include "xgbe.h"
#include "xgbe-common.h"
static int xgbe_poll(struct napi_struct *, int);
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel_mem, *channel;
struct xgbe_ring *tx_ring, *rx_ring;
unsigned int count, i;
int ret = -ENOMEM;
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
if (!channel_mem)
goto err_channel;
tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
GFP_KERNEL);
if (!tx_ring)
goto err_tx_ring;
rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
GFP_KERNEL);
if (!rx_ring)
goto err_rx_ring;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
if (pdata->per_channel_irq) {
/* Get the DMA interrupt (offset 1) */
ret = platform_get_irq(pdata->pdev, i + 1);
if (ret < 0) {
netdev_err(pdata->netdev,
"platform_get_irq %u failed\n",
i + 1);
goto err_irq;
}
channel->dma_irq = ret;
}
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
channel->tx_ring = tx_ring++;
}
if (i < pdata->rx_ring_count) {
spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
channel->name, channel->queue_index, channel->dma_regs,
channel->dma_irq, channel->tx_ring, channel->rx_ring);
}
pdata->channel = channel_mem;
pdata->channel_count = count;
return 0;
err_irq:
kfree(rx_ring);
err_rx_ring:
kfree(tx_ring);
err_tx_ring:
kfree(channel_mem);
err_channel:
return ret;
}
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
if (!pdata->channel)
return;
kfree(pdata->channel->rx_ring);
kfree(pdata->channel->tx_ring);
kfree(pdata->channel);
pdata->channel = NULL;
pdata->channel_count = 0;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
return (ring->rdesc_count - (ring->cur - ring->dirty));
......@@ -144,8 +235,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
~(XGBE_RX_BUF_ALIGN - 1);
......@@ -213,11 +304,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!dma_isr)
goto isr_done;
DBGPR("-->xgbe_isr\n");
DBGPR(" DMA_ISR = %08x\n", dma_isr);
DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
......@@ -228,6 +315,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
/* If we get a TI or RI interrupt that means per channel DMA
* interrupts are not enabled, so we use the private data napi
* structure, not the per channel napi structure
*/
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
if (napi_schedule_prep(&pdata->napi)) {
......@@ -270,12 +361,28 @@ static irqreturn_t xgbe_isr(int irq, void *data)
DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
DBGPR("<--xgbe_isr\n");
isr_done:
return IRQ_HANDLED;
}
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
struct xgbe_channel *channel = data;
/* Per channel DMA interrupts are enabled, so we use the per
* channel napi structure and not the private data napi structure
*/
if (napi_schedule_prep(&channel->napi)) {
/* Disable Tx and Rx interrupts */
disable_irq(channel->dma_irq);
/* Turn on polling */
__napi_schedule(&channel->napi);
}
return IRQ_HANDLED;
}
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
struct xgbe_channel *channel = container_of(timer,
......@@ -283,18 +390,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
tx_timer);
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
unsigned long flags;
DBGPR("-->xgbe_tx_timer\n");
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
spin_lock_irqsave(&ring->lock, flags);
if (napi_schedule_prep(&pdata->napi)) {
if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
if (pdata->per_channel_irq)
disable_irq(channel->dma_irq);
else
xgbe_disable_rx_tx_ints(pdata);
/* Turn on polling */
__napi_schedule(&pdata->napi);
__napi_schedule(napi);
}
channel->tx_timer_active = 0;
......@@ -430,18 +543,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
struct xgbe_channel *channel;
unsigned int i;
if (pdata->per_channel_irq) {
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
NAPI_POLL_WEIGHT);
netif_napi_add(pdata->netdev, &channel->napi,
xgbe_one_poll, NAPI_POLL_WEIGHT);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
xgbe_all_poll, NAPI_POLL_WEIGHT);
napi_enable(&pdata->napi);
}
}
static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
struct xgbe_channel *channel;
unsigned int i;
if (pdata->per_channel_irq) {
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
napi_disable(&channel->napi);
if (del)
netif_napi_del(&channel->napi);
}
} else {
napi_disable(&pdata->napi);
if (del)
netif_napi_del(&pdata->napi);
}
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
......@@ -472,7 +613,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
......@@ -480,7 +621,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
struct xgbe_ring_data *rdata;
unsigned int i, j;
DBGPR("-->xgbe_free_tx_skbuff\n");
DBGPR("-->xgbe_free_tx_data\n");
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
......@@ -490,14 +631,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
desc_if->unmap_skb(pdata, rdata);
desc_if->unmap_rdata(pdata, rdata);
}
}
DBGPR("<--xgbe_free_tx_skbuff\n");
DBGPR("<--xgbe_free_tx_data\n");
}
static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
......@@ -505,7 +646,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
struct xgbe_ring_data *rdata;
unsigned int i, j;
DBGPR("-->xgbe_free_rx_skbuff\n");
DBGPR("-->xgbe_free_rx_data\n");
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
......@@ -515,11 +656,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
desc_if->unmap_skb(pdata, rdata);
desc_if->unmap_rdata(pdata, rdata);
}
}
DBGPR("<--xgbe_free_rx_skbuff\n");
DBGPR("<--xgbe_free_rx_data\n");
}
static void xgbe_adjust_link(struct net_device *netdev)
......@@ -754,7 +895,9 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
struct xgbe_channel *channel;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int i;
DBGPR("-->xgbe_restart_dev\n");
......@@ -763,10 +906,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
return;
xgbe_stop(pdata);
synchronize_irq(pdata->irq_number);
synchronize_irq(pdata->dev_irq);
if (pdata->per_channel_irq) {
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++)
synchronize_irq(channel->dma_irq);
}
xgbe_free_tx_skbuff(pdata);
xgbe_free_rx_skbuff(pdata);
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
/* Issue software reset to device if requested */
if (reset)
......@@ -1037,13 +1185,13 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
packet->rdesc_count = 0;
if (xgbe_is_tso(skb)) {
/* TSO requires an extra desriptor if mss is different */
/* TSO requires an extra descriptor if mss is different */
if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
context_desc = 1;
packet->rdesc_count++;
}
/* TSO requires an extra desriptor for TSO header */
/* TSO requires an extra descriptor for TSO header */
packet->rdesc_count++;
XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
......@@ -1091,6 +1239,9 @@ static int xgbe_open(struct net_device *netdev)
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel = NULL;
char dma_irq_name[IFNAMSIZ + 32];
unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
......@@ -1119,24 +1270,47 @@ static int xgbe_open(struct net_device *netdev)
goto err_ptpclk;
pdata->rx_buf_size = ret;
/* Allocate the channel and ring structures */
ret = xgbe_alloc_channels(pdata);
if (ret)
goto err_ptpclk;
/* Allocate the ring descriptors and buffers */
ret = desc_if->alloc_ring_resources(pdata);
if (ret)
goto err_ptpclk;
goto err_channels;
/* Initialize the device restart and Tx timestamp work struct */
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
/* Request interrupts */
ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev->name, pdata);
if (ret) {
netdev_alert(netdev, "error requesting irq %d\n",
pdata->irq_number);
pdata->dev_irq);
goto err_rings;
}
if (pdata->per_channel_irq) {
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
snprintf(dma_irq_name, sizeof(dma_irq_name) - 1,
"%s-TxRx-%u", netdev_name(netdev),
channel->queue_index);
ret = devm_request_irq(pdata->dev, channel->dma_irq,
xgbe_dma_isr, 0, dma_irq_name,
channel);
if (ret) {
netdev_alert(netdev,
"error requesting irq %d\n",
channel->dma_irq);
goto err_irq;
}
pdata->irq_number = netdev->irq;
}
}
ret = xgbe_start(pdata);
if (ret)
......@@ -1149,12 +1323,21 @@ static int xgbe_open(struct net_device *netdev)
err_start:
hw_if->exit(pdata);
devm_free_irq(pdata->dev, pdata->irq_number, pdata);
pdata->irq_number = 0;
err_irq:
if (pdata->per_channel_irq) {
/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
for (i--, channel--; i < pdata->channel_count; i--, channel--)
devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
err_rings:
desc_if->free_ring_resources(pdata);
err_channels:
xgbe_free_channels(pdata);
err_ptpclk:
clk_disable_unprepare(pdata->ptpclk);
......@@ -1172,6 +1355,8 @@ static int xgbe_close(struct net_device *netdev)
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
unsigned int i;
DBGPR("-->xgbe_close\n");
......@@ -1181,13 +1366,18 @@ static int xgbe_close(struct net_device *netdev)
/* Issue software reset to device */
hw_if->exit(pdata);
/* Free all the ring data */
/* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
/* Release the interrupt */
if (pdata->irq_number != 0) {
devm_free_irq(pdata->dev, pdata->irq_number, pdata);
pdata->irq_number = 0;
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
/* Release the interrupts */
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
if (pdata->per_channel_irq) {
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++)
devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
/* Disable the clocks */
......@@ -1258,7 +1448,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_prep_tx_tstamp(pdata, skb, packet);
/* Configure required descriptor fields for transmission */
hw_if->pre_xmit(channel);
hw_if->dev_xmit(channel);
#ifdef XGMAC_ENABLE_TX_PKT_DUMP
xgbe_print_pkt(netdev, skb, true);
......@@ -1420,14 +1610,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
static void xgbe_poll_controller(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_channel *channel;
unsigned int i;
DBGPR("-->xgbe_poll_controller\n");
disable_irq(pdata->irq_number);
xgbe_isr(pdata->irq_number, pdata);
enable_irq(pdata->irq_number);
if (pdata->per_channel_irq) {
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++)
xgbe_dma_isr(channel->dma_irq, channel);
} else {
disable_irq(pdata->dev_irq);
xgbe_isr(pdata->dev_irq, pdata);
enable_irq(pdata->dev_irq);
}
DBGPR("<--xgbe_poll_controller\n");
}
......@@ -1465,12 +1661,21 @@ static int xgbe_set_features(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
netdev_features_t rxcsum, rxvlan, rxvlan_filter;
netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
int ret = 0;
rxhash = pdata->netdev_features & NETIF_F_RXHASH;
rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
if ((features & NETIF_F_RXHASH) && !rxhash)
ret = hw_if->enable_rss(pdata);
else if (!(features & NETIF_F_RXHASH) && rxhash)
ret = hw_if->disable_rss(pdata);
if (ret)
return ret;
if ((features & NETIF_F_RXCSUM) && !rxcsum)
hw_if->enable_rx_csum(pdata);
else if (!(features & NETIF_F_RXCSUM) && rxcsum)
......@@ -1524,7 +1729,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
desc_if->realloc_skb(channel);
desc_if->realloc_rx_buffer(channel);
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
......@@ -1533,6 +1738,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
struct xgbe_ring_data *rdata,
unsigned int *len)
{
struct net_device *netdev = pdata->netdev;
struct sk_buff *skb;
u8 *packet;
unsigned int copy_len;
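/* Allocate a small skb and copy the received header (or the whole
 * packet when the hardware did not split the header) out of the header
 * buffer page; any remaining payload is attached to the skb later as a
 * page fragment.
 */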
skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
if (!skb)
return NULL;
packet = page_address(rdata->rx_hdr.pa.pages) +
rdata->rx_hdr.pa.pages_offset;
copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
copy_len = min(rdata->rx_hdr.dma_len, copy_len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
*len -= copy_len;
return skb;
}
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
......@@ -1566,7 +1796,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
#endif
/* Free the SKB and reset the descriptor for re-use */
desc_if->unmap_skb(pdata, rdata);
desc_if->unmap_rdata(pdata, rdata);
hw_if->tx_desc_reset(rdata);
processed++;
......@@ -1594,6 +1824,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
struct net_device *netdev = pdata->netdev;
struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
......@@ -1607,6 +1838,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (packet_count < budget) {
......@@ -1641,10 +1874,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
ring->cur++;
ring->dirty++;
dma_unmap_single(pdata->dev, rdata->skb_dma,
rdata->skb_dma_len, DMA_FROM_DEVICE);
rdata->skb_dma = 0;
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
INCOMPLETE);
......@@ -1668,26 +1897,33 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!context) {
put_len = rdata->len - len;
if (skb) {
if (pskb_expand_head(skb, 0, put_len,
GFP_ATOMIC)) {
DBGPR("pskb_expand_head error\n");
if (incomplete) {
len += put_len;
if (!skb) {
dma_sync_single_for_cpu(pdata->dev,
rdata->rx_hdr.dma,
rdata->rx_hdr.dma_len,
DMA_FROM_DEVICE);
skb = xgbe_create_skb(pdata, rdata, &put_len);
if (!skb) {
error = 1;
goto read_again;
}
dev_kfree_skb(skb);
goto next_packet;
}
memcpy(skb_tail_pointer(skb), rdata->skb->data,
put_len);
} else {
skb = rdata->skb;
rdata->skb = NULL;
if (put_len) {
dma_sync_single_for_cpu(pdata->dev,
rdata->rx_buf.dma,
rdata->rx_buf.dma_len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx_buf.pa.pages,
rdata->rx_buf.pa.pages_offset,
put_len, rdata->rx_buf.dma_len);
rdata->rx_buf.pa.pages = NULL;
}
skb_put(skb, put_len);
len += put_len;
}
if (incomplete || context_next)
......@@ -1733,13 +1969,18 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
hwtstamps->hwtstamp = ns_to_ktime(nsec);
}
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RSS_HASH))
skb_set_hash(skb, packet->rss_hash,
packet->rss_hash_type);
skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, channel->queue_index);
skb_mark_napi_id(skb, &pdata->napi);
skb_mark_napi_id(skb, napi);
netdev->last_rx = jiffies;
napi_gro_receive(&pdata->napi, skb);
napi_gro_receive(napi, skb);
next_packet:
packet_count++;
......@@ -1761,7 +2002,35 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
return packet_count;
}
static int xgbe_poll(struct napi_struct *napi, int budget)
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
napi);
int processed = 0;
DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
/* Cleanup Tx ring first */
xgbe_tx_poll(channel);
/* Process Rx ring next */
processed = xgbe_rx_poll(channel, budget);
/* If we processed everything, we are done */
if (processed < budget) {
/* Turn off polling */
napi_complete(napi);
/* Enable Tx and Rx interrupts */
enable_irq(channel->dma_irq);
}
DBGPR("<--xgbe_one_poll: received = %d\n", processed);
return processed;
}
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
napi);
......@@ -1770,7 +2039,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
int processed, last_processed;
unsigned int i;
DBGPR("-->xgbe_poll: budget=%d\n", budget);
DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
processed = 0;
ring_budget = budget / pdata->rx_ring_count;
......@@ -1798,7 +2067,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
xgbe_enable_rx_tx_ints(pdata);
}
DBGPR("<--xgbe_poll: received = %d\n", processed);
DBGPR("<--xgbe_all_poll: received = %d\n", processed);
return processed;
}
......@@ -1812,7 +2081,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
(flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
......@@ -1823,7 +2092,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
unsigned int idx)
{
DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}
......
......@@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
rx_usecs);
return -EINVAL;
}
if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
if (rx_frames > pdata->rx_desc_count) {
netdev_alert(netdev, "rx-frames is limited to %d frames\n",
pdata->channel->rx_ring->rdesc_count);
pdata->rx_desc_count);
return -EINVAL;
}
......@@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
if (tx_frames > pdata->tx_desc_count) {
netdev_alert(netdev, "tx-frames is limited to %d frames\n",
pdata->channel->tx_ring->rdesc_count);
pdata->tx_desc_count);
return -EINVAL;
}
......@@ -481,6 +481,75 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
static int xgbe_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
switch (rxnfc->cmd) {
case ETHTOOL_GRXRINGS:
rxnfc->data = pdata->rx_ring_count;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
return sizeof(pdata->rss_key);
}
static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
return ARRAY_SIZE(pdata->rss_table);
}
static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
if (indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
MAC_RSSDR, DMCH);
}
if (key)
memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
return 0;
}
static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
if (indir) {
ret = hw_if->set_rss_lookup_table(pdata, indir);
if (ret)
return ret;
}
if (key) {
ret = hw_if->set_rss_hash_key(pdata, key);
if (ret)
return ret;
}
return 0;
}
static int xgbe_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *ts_info)
{
......@@ -526,6 +595,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
.get_rxnfc = xgbe_get_rxnfc,
.get_rxfh_key_size = xgbe_get_rxfh_key_size,
.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
.get_rxfh = xgbe_get_rxfh,
.set_rxfh = xgbe_set_rxfh,
.get_ts_info = xgbe_get_ts_info,
};
......
......@@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel_mem, *channel;
struct xgbe_ring *tx_ring, *rx_ring;
unsigned int count, i;
DBGPR("-->xgbe_alloc_rings\n");
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
channel_mem = devm_kcalloc(pdata->dev, count,
sizeof(struct xgbe_channel), GFP_KERNEL);
if (!channel_mem)
return NULL;
tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
sizeof(struct xgbe_ring), GFP_KERNEL);
if (!tx_ring)
return NULL;
rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
sizeof(struct xgbe_ring), GFP_KERNEL);
if (!rx_ring)
return NULL;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
channel->tx_ring = tx_ring++;
}
if (i < pdata->rx_ring_count) {
spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
channel->name, channel->queue_index, channel->dma_regs,
channel->tx_ring, channel->rx_ring);
}
pdata->channel_count = count;
DBGPR("<--xgbe_alloc_rings\n");
return channel_mem;
}
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
......@@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
const u8 *mac_addr;
unsigned int i;
int ret;
DBGPR("--> xgbe_probe\n");
......@@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
mutex_init(&pdata->xpcs_mutex);
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
/* Set and validate the number of descriptors for a ring */
......@@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
/* Check for per channel interrupt support */
if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
pdata->per_channel_irq = 1;
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "platform_get_irq failed\n");
dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
netdev->irq = ret;
pdata->dev_irq = ret;
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
/* Set all the function pointers */
......@@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
}
/* Allocate the rings for the DMA channels */
pdata->channel = xgbe_alloc_rings(pdata);
if (!pdata->channel) {
dev_err(dev, "ring allocation failed\n");
ret = -ENOMEM;
goto err_io;
}
/* Initialize RSS hash key and lookup table */
get_random_bytes(pdata->rss_key, sizeof(pdata->rss_key));
for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
i % pdata->rx_ring_count);
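/* Default RSS options: enable 2-tuple hashing for IP packets and
 * 4-tuple hashing for TCP and UDP packets.
 */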
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Prepare to register with MDIO */
pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
......@@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_FILTER;
if (pdata->hw_feat.rss)
netdev->hw_features |= NETIF_F_RXHASH;
netdev->vlan_features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
......
......@@ -142,6 +142,8 @@
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
......@@ -171,6 +173,7 @@
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
#define XGBE_PTP_CLOCK "ptp_clk"
#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
......@@ -212,6 +215,12 @@
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
/* Receive Side Scaling */
#define XGBE_RSS_HASH_KEY_SIZE 40
#define XGBE_RSS_MAX_TABLE_SIZE 256
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1
struct xgbe_prv_data;
struct xgbe_packet_data {
......@@ -230,14 +239,35 @@ struct xgbe_packet_data {
unsigned short vlan_ctag;
u64 rx_tstamp;
u32 rss_hash;
enum pkt_hash_types rss_hash_type;
};
/* Common Rx and Tx descriptor mapping */
struct xgbe_ring_desc {
unsigned int desc0;
unsigned int desc1;
unsigned int desc2;
unsigned int desc3;
u32 desc0;
u32 desc1;
u32 desc2;
u32 desc3;
};
/* Page allocation related values */
struct xgbe_page_alloc {
struct page *pages;
unsigned int pages_len;
unsigned int pages_offset;
dma_addr_t pages_dma;
};
/* Ring entry buffer data */
struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
dma_addr_t dma;
unsigned int dma_len;
};
/* Structure used to hold information related to the descriptor
......@@ -253,6 +283,10 @@ struct xgbe_ring_data {
unsigned int skb_dma_len; /* Length of SKB DMA area */
unsigned int tso_header; /* TSO header indicator */
struct xgbe_buffer_data rx_hdr; /* Header locations */
struct xgbe_buffer_data rx_buf; /* Payload locations */
unsigned short hdr_len; /* Length of received header */
unsigned short len; /* Length of received Rx packet */
unsigned int interrupt; /* Interrupt indicator */
......@@ -291,6 +325,10 @@ struct xgbe_ring {
*/
struct xgbe_ring_data *rdata;
/* Page allocation for RX buffers */
struct xgbe_page_alloc rx_hdr_pa;
struct xgbe_page_alloc rx_buf_pa;
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
......@@ -331,6 +369,12 @@ struct xgbe_channel {
unsigned int queue_index;
void __iomem *dma_regs;
/* Per channel interrupt irq number */
int dma_irq;
/* Netdev related settings */
struct napi_struct napi;
unsigned int saved_ier;
unsigned int tx_timer_active;
......@@ -456,7 +500,7 @@ struct xgbe_hw_if {
int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
void (*pre_xmit)(struct xgbe_channel *);
void (*dev_xmit)(struct xgbe_channel *);
int (*dev_read)(struct xgbe_channel *);
void (*tx_desc_init)(struct xgbe_channel *);
void (*rx_desc_init)(struct xgbe_channel *);
......@@ -509,14 +553,20 @@ struct xgbe_hw_if {
/* For Data Center Bridging config */
void (*config_dcb_tc)(struct xgbe_prv_data *);
void (*config_dcb_pfc)(struct xgbe_prv_data *);
/* For Receive Side Scaling */
int (*enable_rss)(struct xgbe_prv_data *);
int (*disable_rss)(struct xgbe_prv_data *);
int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
void (*realloc_skb)(struct xgbe_channel *);
void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
void (*realloc_rx_buffer)(struct xgbe_channel *);
void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
};
......@@ -581,7 +631,11 @@ struct xgbe_prv_data {
/* XPCS indirect addressing mutex */
struct mutex xpcs_mutex;
int irq_number;
/* RSS addressing mutex */
struct mutex rss_mutex;
int dev_irq;
unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
struct xgbe_desc_if desc_if;
......@@ -624,7 +678,7 @@ struct xgbe_prv_data {
unsigned int rx_riwt;
unsigned int rx_frames;
/* Current MTU */
/* Current Rx buffer size */
unsigned int rx_buf_size;
/* Flow control settings */
......@@ -632,6 +686,11 @@ struct xgbe_prv_data {
unsigned int tx_pause;
unsigned int rx_pause;
/* Receive Side Scaling settings */
u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;
/* MDIO settings */
struct module *phy_module;
char *mii_bus_id;
......
......@@ -26,7 +26,7 @@ config AMD_PHY
config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
depends on OF
depends on OF && HAS_IOMEM
---help---
Currently supports the AMD 10GbE PHY
......
......@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
if (ret & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
return 0;
/* Make sure the XPCS and SerDes are in compatible states */
return amd_xgbe_phy_xgmii_mode(phydev);
}
static int amd_xgbe_phy_config_init(struct phy_device *phydev)
......