Commit 2c99cd91 authored by David S. Miller

Merge branch 'amd-xgbe-next'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2014-11-04

The following series of patches includes functional updates to the
driver as well as some trivial changes for function renaming and
spelling fixes.

- Move channel and ring structure allocation into the device open path
- Rename the pre_xmit function to dev_xmit
- Explicitly use the u32 data type for the device descriptors
- Use page allocation for the receive buffers
- Add support for split header/payload receive
- Add support for per DMA channel interrupts
- Add support for receive side scaling (RSS)
- Add support for ethtool receive side scaling commands
- Fix the spelling of descriptors
- After a PCS reset, sync the PCS and PHY modes
- Add dependency on HAS_IOMEM to both the amd-xgbe and amd-xgbe-phy
  drivers

This patch series is based on net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 25de4668 5cdec679
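
For readers new to receive side scaling: the hardware hashes each packet's IP/TCP/UDP header fields and uses the hash to index an indirection table whose entries name a DMA channel, spreading flows across Rx queues (and thus CPUs). A minimal sketch of the idea only, not code from this series (all names here are hypothetical):

	/* Hypothetical illustration of RSS queue selection: hash the
	 * flow, then let the hash pick an entry in a lookup table
	 * that names the target Rx queue.  A common default fill is
	 * lookup_table[i] = i % num_rx_queues (round-robin).
	 */
	unsigned int rss_pick_queue(unsigned int flow_hash,
				    const unsigned int *lookup_table,
				    unsigned int table_entries)
	{
		return lookup_table[flow_hash % table_entries];
	}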
@@ -7,7 +7,10 @@ Required properties:
   - PCS registers
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
-- interrupts: Should contain the amd-xgbe interrupt
+- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
+  listed is required and is the general device interrupt. If the optional
+  amd,per-channel-interrupt property is specified, then one additional
+  interrupt for each DMA channel supported by the device should be specified
 - clocks:
   - DMA clock for the amd-xgbe device (used for calculating the
     correct Rx interrupt watchdog timer value on a DMA channel
@@ -23,6 +26,9 @@ Optional properties:
 - mac-address: mac address to be assigned to the device. Can be overridden
   by UEFI.
 - dma-coherent: Present if dma operations are coherent
+- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
+  a unique interrupt for each DMA channel - this requires an additional
+  interrupt be configured for each DMA channel
 
 Example:
 	xgbe@e0700000 {
@@ -30,7 +36,9 @@ Example:
 		reg = <0 0xe0700000 0 0x80000>,
 		      <0 0xe0780000 0 0x80000>;
 		interrupt-parent = <&gic>;
-		interrupts = <0 325 4>;
+		interrupts = <0 325 4>,
+			     <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+		amd,per-channel-interrupt;
 		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
 		clock-names = "dma_clk", "ptp_clk";
 		phy-handle = <&phy>;
...
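
The new property is a plain boolean flag, so the driver side presumably reduces to a presence test at probe time. A hedged sketch (of_property_read_bool() is the standard OF API; the surrounding names are taken from the diffs below):

	/* Sketch: latch the optional DT flag into the private data so
	 * the rest of the driver can branch on pdata->per_channel_irq.
	 */
	pdata->per_channel_irq = of_property_read_bool(dev->of_node,
						       "amd,per-channel-interrupt");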
@@ -179,7 +179,7 @@ config SUNLANCE
 
 config AMD_XGBE
 	tristate "AMD 10GbE Ethernet driver"
-	depends on OF_NET
+	depends on OF_NET && HAS_IOMEM
 	select PHYLIB
 	select AMD_XGBE_PHY
 	select BITREVERSE
...
@@ -207,6 +207,8 @@
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
+#define DMA_CH_CR_SPH_INDEX		24
+#define DMA_CH_CR_SPH_WIDTH		1
 #define DMA_CH_IER_AIE_INDEX		15
 #define DMA_CH_IER_AIE_WIDTH		1
 #define DMA_CH_IER_FBEE_INDEX		12
@@ -306,6 +308,9 @@
 #define MAC_MACA0LR			0x0304
 #define MAC_MACA1HR			0x0308
 #define MAC_MACA1LR			0x030c
+#define MAC_RSSCR			0x0c80
+#define MAC_RSSAR			0x0c88
+#define MAC_RSSDR			0x0c8c
 #define MAC_TSCR			0x0d00
 #define MAC_SSIR			0x0d04
 #define MAC_STSR			0x0d08
@@ -429,6 +434,8 @@
 #define MAC_RCR_CST_WIDTH		1
 #define MAC_RCR_DCRCC_INDEX		3
 #define MAC_RCR_DCRCC_WIDTH		1
+#define MAC_RCR_HDSMS_INDEX		12
+#define MAC_RCR_HDSMS_WIDTH		3
 #define MAC_RCR_IPC_INDEX		9
 #define MAC_RCR_IPC_WIDTH		1
 #define MAC_RCR_JE_INDEX		8
@@ -445,6 +452,24 @@
 #define MAC_RFCR_UP_WIDTH		1
 #define MAC_RQC0R_RXQ0EN_INDEX		0
 #define MAC_RQC0R_RXQ0EN_WIDTH		2
+#define MAC_RSSAR_ADDRT_INDEX		2
+#define MAC_RSSAR_ADDRT_WIDTH		1
+#define MAC_RSSAR_CT_INDEX		1
+#define MAC_RSSAR_CT_WIDTH		1
+#define MAC_RSSAR_OB_INDEX		0
+#define MAC_RSSAR_OB_WIDTH		1
+#define MAC_RSSAR_RSSIA_INDEX		8
+#define MAC_RSSAR_RSSIA_WIDTH		8
+#define MAC_RSSCR_IP2TE_INDEX		1
+#define MAC_RSSCR_IP2TE_WIDTH		1
+#define MAC_RSSCR_RSSE_INDEX		0
+#define MAC_RSSCR_RSSE_WIDTH		1
+#define MAC_RSSCR_TCP4TE_INDEX		2
+#define MAC_RSSCR_TCP4TE_WIDTH		1
+#define MAC_RSSCR_UDP4TE_INDEX		3
+#define MAC_RSSCR_UDP4TE_WIDTH		1
+#define MAC_RSSDR_DMCH_INDEX		0
+#define MAC_RSSDR_DMCH_WIDTH		4
 #define MAC_SSIR_SNSINC_INDEX		8
 #define MAC_SSIR_SNSINC_WIDTH		8
 #define MAC_SSIR_SSINC_INDEX		16
@@ -844,9 +869,13 @@
 #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX	5
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX	6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16
+#define RX_NORMAL_DESC2_HL_INDEX		0
+#define RX_NORMAL_DESC2_HL_WIDTH		10
 #define RX_NORMAL_DESC3_CDA_INDEX		27
 #define RX_NORMAL_DESC3_CDA_WIDTH		1
 #define RX_NORMAL_DESC3_CTXT_INDEX		30
@@ -855,14 +884,27 @@
 #define RX_NORMAL_DESC3_ES_WIDTH		1
 #define RX_NORMAL_DESC3_ETLT_INDEX		16
 #define RX_NORMAL_DESC3_ETLT_WIDTH		4
+#define RX_NORMAL_DESC3_FD_INDEX		29
+#define RX_NORMAL_DESC3_FD_WIDTH		1
 #define RX_NORMAL_DESC3_INTE_INDEX		30
 #define RX_NORMAL_DESC3_INTE_WIDTH		1
+#define RX_NORMAL_DESC3_L34T_INDEX		20
+#define RX_NORMAL_DESC3_L34T_WIDTH		4
 #define RX_NORMAL_DESC3_LD_INDEX		28
 #define RX_NORMAL_DESC3_LD_WIDTH		1
 #define RX_NORMAL_DESC3_OWN_INDEX		31
 #define RX_NORMAL_DESC3_OWN_WIDTH		1
 #define RX_NORMAL_DESC3_PL_INDEX		0
 #define RX_NORMAL_DESC3_PL_WIDTH		14
+#define RX_NORMAL_DESC3_RSV_INDEX		26
+#define RX_NORMAL_DESC3_RSV_WIDTH		1
+
+#define RX_DESC3_L34T_IPV4_TCP			1
+#define RX_DESC3_L34T_IPV4_UDP			2
+#define RX_DESC3_L34T_IPV4_ICMP			3
+#define RX_DESC3_L34T_IPV6_TCP			9
+#define RX_DESC3_L34T_IPV6_UDP			10
+#define RX_DESC3_L34T_IPV6_ICMP			11
 
 #define RX_CONTEXT_DESC3_TSA_INDEX		4
 #define RX_CONTEXT_DESC3_TSA_WIDTH		1
...
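
All of these _INDEX/_WIDTH pairs feed the driver's generic bit-field accessors. The companion xgbe.h defines helpers along these lines (a sketch of the pattern, not a verbatim quote):

	/* Extract a _width-bit field starting at _index */
	#define GET_BITS(_var, _index, _width)				\
		(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

	/* Replace that field, leaving the other bits untouched */
	#define SET_BITS(_var, _index, _width, _val)			\
	do {								\
		(_var) &= ~(((0x1 << (_width)) - 1) << (_index));	\
		(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
	} while (0)

So XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1) below expands to a read-modify-write of MAC_RSSCR using MAC_RSSCR_RSSE_INDEX and MAC_RSSCR_RSSE_WIDTH.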
@@ -117,7 +117,7 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
 
 static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 			   struct xgbe_ring *ring)
@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 	if (ring->rdata) {
 		for (i = 0; i < ring->rdesc_count; i++) {
 			rdata = XGBE_GET_DESC_DATA(ring, i);
-			xgbe_unmap_skb(pdata, rdata);
+			xgbe_unmap_rdata(pdata, rdata);
 		}
 
 		kfree(ring->rdata);
 		ring->rdata = NULL;
 	}
 
+	if (ring->rx_hdr_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_hdr_pa.pages);
+
+		ring->rx_hdr_pa.pages = NULL;
+		ring->rx_hdr_pa.pages_len = 0;
+		ring->rx_hdr_pa.pages_offset = 0;
+		ring->rx_hdr_pa.pages_dma = 0;
+	}
+
+	if (ring->rx_buf_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_buf_pa.pages);
+
+		ring->rx_buf_pa.pages = NULL;
+		ring->rx_buf_pa.pages_len = 0;
+		ring->rx_buf_pa.pages_offset = 0;
+		ring->rx_buf_pa.pages_dma = 0;
+	}
+
 	if (ring->rdesc) {
 		dma_free_coherent(pdata->dev,
 				  (sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
 	return ret;
 }
 
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+	struct page *pages = NULL;
+	dma_addr_t pages_dma;
+	int ret;
+
+	/* Try to obtain pages, decreasing order if necessary */
+	gfp |= __GFP_COLD | __GFP_COMP;
+	while (order >= 0) {
+		pages = alloc_pages(gfp, order);
+		if (pages)
+			break;
+
+		order--;
+	}
+	if (!pages)
+		return -ENOMEM;
+
+	/* Map the pages */
+	pages_dma = dma_map_page(pdata->dev, pages, 0,
+				 PAGE_SIZE << order, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(pdata->dev, pages_dma);
+	if (ret) {
+		put_page(pages);
+		return ret;
+	}
+
+	pa->pages = pages;
+	pa->pages_len = PAGE_SIZE << order;
+	pa->pages_offset = 0;
+	pa->pages_dma = pages_dma;
+
+	return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+				 struct xgbe_page_alloc *pa,
+				 unsigned int len)
+{
+	get_page(pa->pages);
+	bd->pa = *pa;
+
+	bd->dma = pa->pages_dma + pa->pages_offset;
+	bd->dma_len = len;
+
+	pa->pages_offset += len;
+	if ((pa->pages_offset + len) > pa->pages_len) {
+		/* This data descriptor is responsible for unmapping page(s) */
+		bd->pa_unmap = *pa;
+
+		/* Get a new allocation next time */
+		pa->pages = NULL;
+		pa->pages_len = 0;
+		pa->pages_offset = 0;
+		pa->pages_dma = 0;
+	}
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+			      struct xgbe_ring *ring,
+			      struct xgbe_ring_data *rdata)
+{
+	int order, ret;
+
+	if (!ring->rx_hdr_pa.pages) {
+		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+		if (ret)
+			return ret;
+	}
+
+	if (!ring->rx_buf_pa.pages) {
+		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+				       order);
+		if (ret)
+			return ret;
+	}
+
+	/* Set up the header page info */
+	xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
+			     XGBE_SKB_ALLOC_SIZE);
+
+	/* Set up the buffer page info */
+	xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
+			     pdata->rx_buf_size);
+
+	return 0;
+}
+
 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	struct xgbe_ring *ring;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_ring_data *rdata;
-	dma_addr_t rdesc_dma, skb_dma;
-	struct sk_buff *skb = NULL;
+	dma_addr_t rdesc_dma;
 	unsigned int i, j;
 
 	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 			rdata->rdesc = rdesc;
 			rdata->rdesc_dma = rdesc_dma;
 
-			/* Allocate skb & assign to each rdesc */
-			skb = dev_alloc_skb(pdata->rx_buf_size);
-			if (skb == NULL)
-				break;
-			skb_dma = dma_map_single(pdata->dev, skb->data,
-						 pdata->rx_buf_size,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(pdata->dev, skb_dma)) {
-				netdev_alert(pdata->netdev,
-					     "failed to do the dma map\n");
-				dev_kfree_skb_any(skb);
+			if (xgbe_map_rx_buffer(pdata, ring, rdata))
 				break;
-			}
-			rdata->skb = skb;
-			rdata->skb_dma = skb_dma;
-			rdata->skb_dma_len = pdata->rx_buf_size;
 
 			rdesc++;
 			rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,8 +431,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
 }
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
-			   struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+			     struct xgbe_ring_data *rdata)
 {
 	if (rdata->skb_dma) {
 		if (rdata->mapped_as_page) {
@@ -354,6 +451,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
 		rdata->skb = NULL;
 	}
 
+	if (rdata->rx_hdr.pa.pages)
+		put_page(rdata->rx_hdr.pa.pages);
+
+	if (rdata->rx_hdr.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
+			       rdata->rx_hdr.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_hdr.pa_unmap.pages);
+	}
+
+	if (rdata->rx_buf.pa.pages)
+		put_page(rdata->rx_buf.pa.pages);
+
+	if (rdata->rx_buf.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
+			       rdata->rx_buf.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_buf.pa_unmap.pages);
+	}
+
+	memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
+	memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
+
 	rdata->tso_header = 0;
 	rdata->len = 0;
 	rdata->interrupt = 0;
@@ -494,7 +614,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 err_out:
 	while (start_index < cur_index) {
 		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 	}
 
 	DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +622,25 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 	return 0;
 }
 
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct sk_buff *skb = NULL;
-	dma_addr_t skb_dma;
 	int i;
 
-	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+	DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
 	      ring->rx.realloc_index);
 
 	for (i = 0; i < ring->dirty; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
 		/* Reset rdata values */
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 
-		/* Allocate skb & assign to each rdesc */
-		skb = dev_alloc_skb(pdata->rx_buf_size);
-		if (skb == NULL)
+		if (xgbe_map_rx_buffer(pdata, ring, rdata))
 			break;
-		skb_dma = dma_map_single(pdata->dev, skb->data,
-					 pdata->rx_buf_size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev,
-				     "failed to do the dma map\n");
-			dev_kfree_skb_any(skb);
-			break;
-		}
-		rdata->skb = skb;
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = pdata->rx_buf_size;
 
 		hw_if->rx_desc_reset(rdata);
 
@@ -543,7 +648,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
 	}
 	ring->dirty = 0;
 
-	DBGPR("<--xgbe_realloc_skb\n");
+	DBGPR("<--xgbe_realloc_rx_buffer\n");
 }
 
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +658,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
 	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
 	desc_if->free_ring_resources = xgbe_free_ring_resources;
 	desc_if->map_tx_skb = xgbe_map_tx_skb;
-	desc_if->realloc_skb = xgbe_realloc_skb;
-	desc_if->unmap_skb = xgbe_unmap_skb;
+	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+	desc_if->unmap_rdata = xgbe_unmap_rdata;
 	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
 	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
...
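
The scheme above replaces one-skb-per-descriptor allocation with a shared page block: xgbe_alloc_pages() grabs a multi-page allocation once, and xgbe_set_buffer_data() hands successive fixed-size slices of it to each descriptor, taking a get_page() reference per slice; whichever descriptor receives a slice that leaves no room for another becomes responsible for the eventual dma_unmap_page(). A standalone userspace sketch of just the carving arithmetic (all names hypothetical):

	#include <stdio.h>

	struct block { unsigned int offset, len; };

	/* Carve fixed-size slices out of one block the way
	 * xgbe_set_buffer_data() does.  Returns the slice offset and
	 * flags when this consumer must release the spent block.
	 */
	static unsigned int carve(struct block *b, unsigned int slice,
				  int *spent)
	{
		unsigned int off = b->offset;

		b->offset += slice;
		/* spent when the NEXT slice would overrun the block */
		*spent = (b->offset + slice) > b->len;
		return off;
	}

	int main(void)
	{
		struct block b = { 0, 16384 };	/* order-2, 4KB pages */
		int spent = 0;
		unsigned int n = 0;

		while (!spent) {
			carve(&b, 2048, &spent);	/* 2KB rx_buf_size */
			n++;
		}
		printf("%u slices per allocation\n", n);	/* prints 8 */
		return 0;
	}

With PAGE_SIZE of 4KB and PAGE_ALLOC_COSTLY_ORDER of 3, the Rx data pool is allocated at order 2 (16KB), so a 2KB rx_buf_size yields eight Rx buffers from one allocation and one DMA mapping instead of eight separate skb allocations and mappings.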
@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
 	}
 }
 
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+	}
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+			      unsigned int index, unsigned int val)
+{
+	unsigned int wait;
+	int ret = 0;
+
+	mutex_lock(&pdata->rss_mutex);
+
+	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+	wait = 1000;
+	while (wait--) {
+		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+			goto unlock;
+
+		usleep_range(1000, 1500);
+	}
+
+	ret = -EBUSY;
+
+unlock:
+	mutex_unlock(&pdata->rss_mutex);
+
+	return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+	unsigned int *key = (unsigned int *)&pdata->rss_key;
+	int ret;
+
+	while (key_regs--) {
+		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+					 key_regs, *key++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+		ret = xgbe_write_rss_reg(pdata,
+					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+					 pdata->rss_table[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+	return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+				     const u32 *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+	return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+	/* Program the hash key */
+	ret = xgbe_write_rss_hash_key(pdata);
+	if (ret)
+		return ret;
+
+	/* Program the lookup table */
+	ret = xgbe_write_rss_lookup_table(pdata);
+	if (ret)
+		return ret;
+
+	/* Set the RSS options */
+	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+	/* Enable RSS */
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+	return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->hw_feat.rss)
+		return -EOPNOTSUPP;
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+	return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+	int ret;
+
+	if (!pdata->hw_feat.rss)
+		return;
+
+	if (pdata->netdev->features & NETIF_F_RXHASH)
+		ret = xgbe_enable_rss(pdata);
+	else
+		ret = xgbe_disable_rss(pdata);
+
+	if (ret)
+		netdev_err(pdata->netdev,
+			   "error configuring RSS, RSS disabled\n");
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	unsigned int max_q_count, q_count;
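
xgbe_write_rss_reg() above encodes the MAC's indirect-access protocol: the RSS hash key and lookup table are not plain memory-mapped arrays but are written one 32-bit word at a time through the MAC_RSSDR (data) and MAC_RSSAR (address/control) pair, with the OB ("operation busy") bit as the handshake. Per word, the sequence is: write the value to MAC_RSSDR; set RSSIA to the word index and ADDRT to the target table (hash key vs. lookup table); clear CT; set OB to start the transaction; then poll until the hardware clears OB (bounded here at roughly a second). Programming the whole hash key is simply that transaction repeated once per u32 of rss_key.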
@@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
 		if (channel->tx_ring) {
 			/* Enable the following Tx interrupts
-			 *   TIE  - Transmit Interrupt Enable (unless polling)
+			 *   TIE  - Transmit Interrupt Enable (unless using
+			 *          per channel interrupts)
 			 */
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+			if (!pdata->per_channel_irq)
+				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
 		}
 		if (channel->rx_ring) {
 			/* Enable following Rx interrupts
 			 *   RBUE - Receive Buffer Unavailable Enable
-			 *   RIE  - Receive Interrupt Enable
+			 *   RIE  - Receive Interrupt Enable (unless using
+			 *          per channel interrupts)
 			 */
 			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+			if (!pdata->per_channel_irq)
+				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		}
 
 		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 	rdesc->desc1 = 0;
 	rdesc->desc2 = 0;
 	rdesc->desc3 = 0;
+
+	/* Make sure ownership is written to the descriptor */
+	wmb();
 }
 
 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 {
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	int i;
 	int start_index = ring->cur;
@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 	/* Initialze all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Tx descriptor
-		 *   Set buffer 1 (lo) address to zero
-		 *   Set buffer 1 (hi) address to zero
-		 *   Reset all other control bits (IC, TTSE, B2L & B1L)
-		 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
-		 *     etc)
-		 */
-		rdesc->desc0 = 0;
-		rdesc->desc1 = 0;
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
+		/* Initialize Tx descriptor */
+		xgbe_tx_desc_reset(rdata);
 	}
 
-	/* Make sure everything is written to the descriptor(s) before
-	 * telling the device about them
-	 */
-	wmb();
-
 	/* Update the total number of Tx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
 
 	/* Reset the Rx descriptor
-	 *   Set buffer 1 (lo) address to dma address (lo)
-	 *   Set buffer 1 (hi) address to dma address (hi)
-	 *   Set buffer 2 (lo) address to zero
-	 *   Set buffer 2 (hi) address to zero and set control bits
-	 *     OWN and INTE
+	 *   Set buffer 1 (lo) address to header dma address (lo)
+	 *   Set buffer 1 (hi) address to header dma address (hi)
+	 *   Set buffer 2 (lo) address to buffer dma address (lo)
+	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
+	 *     set control bits OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-	rdesc->desc2 = 0;
-	rdesc->desc3 = 0;
-	if (rdata->interrupt)
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
+	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));
+
+	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+			  rdata->interrupt ? 1 : 0);
 
 	/* Since the Rx DMA engine is likely running, make sure everything
 	 * is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	unsigned int start_index = ring->cur;
 	unsigned int rx_coalesce, rx_frames;
 	unsigned int i;
@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Rx descriptor
-		 *   Set buffer 1 (lo) address to dma address (lo)
-		 *   Set buffer 1 (hi) address to dma address (hi)
-		 *   Set buffer 2 (lo) address to zero
-		 *   Set buffer 2 (hi) address to zero and set control
-		 *     bits OWN and INTE appropriateley
-		 */
-		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
-		rdata->interrupt = 1;
-		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
-			/* Clear interrupt on completion bit */
-			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-					  0);
+		/* Set interrupt on completion bit as appropriate */
+		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
 			rdata->interrupt = 0;
-		}
-	}
+		else
+			rdata->interrupt = 1;
 
-	/* Make sure everything is written to the descriptors before
-	 * telling the device about them
-	 */
-	wmb();
+		/* Initialize Rx descriptor */
+		xgbe_rx_desc_reset(rdata);
+	}
 
 	/* Update the total number of Rx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,7 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 	xgbe_config_flow_control(pdata);
 }
 
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->tx_ring;
@@ -1211,7 +1338,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	int start_index = ring->cur;
 	int i;
 
-	DBGPR("-->xgbe_pre_xmit\n");
+	DBGPR("-->xgbe_dev_xmit\n");
 
 	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			      CSUM_ENABLE);
@@ -1410,7 +1537,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 	      channel->name, start_index & (ring->rdesc_count - 1),
 	      (ring->cur - 1) & (ring->rdesc_count - 1));
 
-	DBGPR("<--xgbe_pre_xmit\n");
+	DBGPR("<--xgbe_dev_xmit\n");
 }
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
@@ -1420,7 +1547,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
 	struct net_device *netdev = channel->pdata->netdev;
-	unsigned int err, etlt;
+	unsigned int err, etlt, l34t;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1454,6 +1581,31 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CONTEXT_NEXT, 1);
 
+	/* Get the header length */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+		rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+						   RX_NORMAL_DESC2, HL);
+
+	/* Get the RSS hash */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       RSS_HASH, 1);
+
+		packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+		switch (l34t) {
+		case RX_DESC3_L34T_IPV4_TCP:
+		case RX_DESC3_L34T_IPV4_UDP:
+		case RX_DESC3_L34T_IPV6_TCP:
+		case RX_DESC3_L34T_IPV6_UDP:
+			packet->rss_hash_type = PKT_HASH_TYPE_L4;
+			break;
+		default:
+			packet->rss_hash_type = PKT_HASH_TYPE_L3;
+		}
+	}
+
 	/* Get the packet length */
 	rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
@@ -2485,6 +2637,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	xgbe_config_tx_coalesce(pdata);
 	xgbe_config_rx_buffer_size(pdata);
 	xgbe_config_tso_mode(pdata);
+	xgbe_config_sph_mode(pdata);
+	xgbe_config_rss(pdata);
 	desc_if->wrapper_tx_desc_init(pdata);
 	desc_if->wrapper_rx_desc_init(pdata);
 	xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2715,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->powerup_rx = xgbe_powerup_rx;
 	hw_if->powerdown_rx = xgbe_powerdown_rx;
 
-	hw_if->pre_xmit = xgbe_pre_xmit;
+	hw_if->dev_xmit = xgbe_dev_xmit;
 	hw_if->dev_read = xgbe_dev_read;
 	hw_if->enable_int = xgbe_enable_int;
 	hw_if->disable_int = xgbe_disable_int;
@@ -2620,5 +2774,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
 	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
 
+	/* For Receive Side Scaling */
+	hw_if->enable_rss = xgbe_enable_rss;
+	hw_if->disable_rss = xgbe_disable_rss;
+	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
 	DBGPR("<--xgbe_init_function_ptrs\n");
 }
@@ -114,6 +114,7 @@
  *     THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
@@ -126,9 +127,99 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static int xgbe_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
 static void xgbe_set_rx_mode(struct net_device *);
 
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel_mem, *channel;
+	struct xgbe_ring *tx_ring, *rx_ring;
+	unsigned int count, i;
+	int ret = -ENOMEM;
+
+	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+	if (!channel_mem)
+		goto err_channel;
+
+	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!tx_ring)
+		goto err_tx_ring;
+
+	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!rx_ring)
+		goto err_rx_ring;
+
+	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+		channel->pdata = pdata;
+		channel->queue_index = i;
+		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+				    (DMA_CH_INC * i);
+
+		if (pdata->per_channel_irq) {
+			/* Get the DMA interrupt (offset 1) */
+			ret = platform_get_irq(pdata->pdev, i + 1);
+			if (ret < 0) {
+				netdev_err(pdata->netdev,
+					   "platform_get_irq %u failed\n",
+					   i + 1);
+				goto err_irq;
+			}
+
+			channel->dma_irq = ret;
+		}
+
+		if (i < pdata->tx_ring_count) {
+			spin_lock_init(&tx_ring->lock);
+			channel->tx_ring = tx_ring++;
+		}
+
+		if (i < pdata->rx_ring_count) {
+			spin_lock_init(&rx_ring->lock);
+			channel->rx_ring = rx_ring++;
+		}
+
+		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+		      channel->name, channel->queue_index, channel->dma_regs,
+		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
+	}
+
+	pdata->channel = channel_mem;
+	pdata->channel_count = count;
+
+	return 0;
+
+err_irq:
+	kfree(rx_ring);
+
+err_rx_ring:
+	kfree(tx_ring);
+
+err_tx_ring:
+	kfree(channel_mem);
+
+err_channel:
+	return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->channel)
+		return;
+
+	kfree(pdata->channel->rx_ring);
+	kfree(pdata->channel->tx_ring);
+	kfree(pdata->channel);
+
+	pdata->channel = NULL;
+	pdata->channel_count = 0;
+}
+
 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
 {
 	return (ring->rdesc_count - (ring->cur - ring->dirty));
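
Note the IRQ indexing convention assumed throughout: platform resource 0 is the general device interrupt and resources 1..N are the per-channel DMA interrupts, matching the DT binding change above. A hedged probe-time sketch (the probe function itself is outside this excerpt):

	/* Sketch: index 0 = general device interrupt (pdata->dev_irq);
	 * xgbe_alloc_channels() then fetches index i + 1 per channel.
	 */
	ret = platform_get_irq(pdata->pdev, 0);
	if (ret < 0)
		return ret;
	pdata->dev_irq = ret;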
@@ -144,8 +235,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 	}
 
 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
-		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
 		      ~(XGBE_RX_BUF_ALIGN - 1);
 
@@ -213,11 +304,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 	if (!dma_isr)
 		goto isr_done;
 
-	DBGPR("-->xgbe_isr\n");
-
 	DBGPR("  DMA_ISR = %08x\n", dma_isr);
-	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
-	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
 
 	for (i = 0; i < pdata->channel_count; i++) {
 		if (!(dma_isr & (1 << i)))
@@ -228,6 +315,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
 		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
 
+		/* If we get a TI or RI interrupt that means per channel DMA
+		 * interrupts are not enabled, so we use the private data napi
+		 * structure, not the per channel napi structure
+		 */
 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
 		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
 			if (napi_schedule_prep(&pdata->napi)) {
@@ -270,12 +361,28 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
 
-	DBGPR("<--xgbe_isr\n");
-
 isr_done:
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+	struct xgbe_channel *channel = data;
+
+	/* Per channel DMA interrupts are enabled, so we use the per
+	 * channel napi structure and not the private data napi structure
+	 */
+	if (napi_schedule_prep(&channel->napi)) {
+		/* Disable Tx and Rx interrupts */
+		disable_irq(channel->dma_irq);
+
+		/* Turn on polling */
+		__napi_schedule(&channel->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 {
 	struct xgbe_channel *channel = container_of(timer,
@@ -283,18 +390,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 						tx_timer);
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_prv_data *pdata = channel->pdata;
+	struct napi_struct *napi;
 	unsigned long flags;
 
 	DBGPR("-->xgbe_tx_timer\n");
 
+	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
 	spin_lock_irqsave(&ring->lock, flags);
 
-	if (napi_schedule_prep(&pdata->napi)) {
+	if (napi_schedule_prep(napi)) {
 		/* Disable Tx and Rx interrupts */
-		xgbe_disable_rx_tx_ints(pdata);
+		if (pdata->per_channel_irq)
+			disable_irq(channel->dma_irq);
+		else
+			xgbe_disable_rx_tx_ints(pdata);
 
 		/* Turn on polling */
-		__napi_schedule(&pdata->napi);
+		__napi_schedule(napi);
 	}
 
 	channel->tx_timer_active = 0;
@@ -430,18 +543,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 
 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
 {
-	if (add)
-		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
-			       NAPI_POLL_WEIGHT);
-	napi_enable(&pdata->napi);
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			if (add)
+				netif_napi_add(pdata->netdev, &channel->napi,
+					       xgbe_one_poll,
+					       NAPI_POLL_WEIGHT);
+
+			napi_enable(&channel->napi);
+		}
+	} else {
+		if (add)
+			netif_napi_add(pdata->netdev, &pdata->napi,
+				       xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+		napi_enable(&pdata->napi);
+	}
 }
 
 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 {
-	napi_disable(&pdata->napi);
+	struct xgbe_channel *channel;
+	unsigned int i;
 
-	if (del)
-		netif_napi_del(&pdata->napi);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			napi_disable(&channel->napi);
+
+			if (del)
+				netif_napi_del(&channel->napi);
+		}
+	} else {
+		napi_disable(&pdata->napi);
+
+		if (del)
+			netif_napi_del(&pdata->napi);
+	}
 }
 
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
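
The two poll routines referenced above are not shown in this excerpt. By symmetry with xgbe_dma_isr(), the per-channel variant presumably completes NAPI and re-enables its dedicated interrupt line, along these lines (a sketch only, assuming xgbe_rx_poll() and xgbe_tx_poll() keep the roles they have later in this patch):

	static int xgbe_one_poll(struct napi_struct *napi, int budget)
	{
		struct xgbe_channel *channel = container_of(napi,
						struct xgbe_channel, napi);
		int processed;

		/* Clean the Tx ring, then process Rx within budget */
		xgbe_tx_poll(channel);
		processed = xgbe_rx_poll(channel, budget);

		/* If under budget, turn polling off and re-enable
		 * this channel's dedicated interrupt line
		 */
		if (processed < budget) {
			napi_complete(napi);
			enable_irq(channel->dma_irq);
		}

		return processed;
	}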
@@ -472,7 +613,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_init_rx_coalesce\n");
 }
 
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -480,7 +621,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_tx_skbuff\n");
+	DBGPR("-->xgbe_free_tx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -490,14 +631,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_tx_skbuff\n");
+	DBGPR("<--xgbe_free_tx_data\n");
 }
 
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -505,7 +646,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_rx_skbuff\n");
+	DBGPR("-->xgbe_free_rx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -515,11 +656,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_rx_skbuff\n");
+	DBGPR("<--xgbe_free_rx_data\n");
 }
 
 static void xgbe_adjust_link(struct net_device *netdev)
@@ -754,7 +895,9 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 {
+	struct xgbe_channel *channel;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	unsigned int i;
 
 	DBGPR("-->xgbe_restart_dev\n");
 
@@ -763,10 +906,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->irq_number);
+	synchronize_irq(pdata->dev_irq);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			synchronize_irq(channel->dma_irq);
+	}
 
-	xgbe_free_tx_skbuff(pdata);
-	xgbe_free_rx_skbuff(pdata);
+	xgbe_free_tx_data(pdata);
+	xgbe_free_rx_data(pdata);
 
 	/* Issue software reset to device if requested */
 	if (reset)
@@ -1037,13 +1185,13 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 	packet->rdesc_count = 0;
 
 	if (xgbe_is_tso(skb)) {
-		/* TSO requires an extra desriptor if mss is different */
+		/* TSO requires an extra descriptor if mss is different */
 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
 			context_desc = 1;
 			packet->rdesc_count++;
 		}
 
-		/* TSO requires an extra desriptor for TSO header */
+		/* TSO requires an extra descriptor for TSO header */
 		packet->rdesc_count++;
 
 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
@@ -1091,6 +1239,9 @@ static int xgbe_open(struct net_device *netdev)
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct xgbe_channel *channel = NULL;
+	char dma_irq_name[IFNAMSIZ + 32];
+	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1119,24 +1270,47 @@ static int xgbe_open(struct net_device *netdev)
 		goto err_ptpclk;
 	pdata->rx_buf_size = ret;
 
+	/* Allocate the channel and ring structures */
+	ret = xgbe_alloc_channels(pdata);
+	if (ret)
+		goto err_ptpclk;
+
 	/* Allocate the ring descriptors and buffers */
 	ret = desc_if->alloc_ring_resources(pdata);
 	if (ret)
-		goto err_ptpclk;
+		goto err_channels;
 
 	/* Initialize the device restart and Tx timestamp work struct */
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
 	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
 			       netdev->name, pdata);
 	if (ret) {
 		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->irq_number);
-		goto err_irq;
+			     pdata->dev_irq);
+		goto err_rings;
+	}
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			snprintf(dma_irq_name, sizeof(dma_irq_name) - 1,
+				 "%s-TxRx-%u", netdev_name(netdev),
+				 channel->queue_index);
+
+			ret = devm_request_irq(pdata->dev, channel->dma_irq,
+					       xgbe_dma_isr, 0, dma_irq_name,
+					       channel);
+			if (ret) {
+				netdev_alert(netdev,
+					     "error requesting irq %d\n",
+					     channel->dma_irq);
+				goto err_irq;
+			}
+		}
 	}
-	pdata->irq_number = netdev->irq;
 
 	ret = xgbe_start(pdata);
 	if (ret)
@@ -1149,12 +1323,21 @@ static int xgbe_open(struct net_device *netdev)
 err_start:
 	hw_if->exit(pdata);
 
-	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
-	pdata->irq_number = 0;
-
 err_irq:
+	if (pdata->per_channel_irq) {
+		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+		for (i--, channel--; i < pdata->channel_count; i--, channel--)
+			devm_free_irq(pdata->dev, channel->dma_irq, channel);
+	}
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+err_rings:
 	desc_if->free_ring_resources(pdata);
 
+err_channels:
+	xgbe_free_channels(pdata);
+
 err_ptpclk:
 	clk_disable_unprepare(pdata->ptpclk);
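
One subtlety in the error path above: the backwards cleanup loop relies on unsigned wraparound as its terminator. When i reaches 0 and is decremented again it wraps to UINT_MAX, which fails the i < pdata->channel_count test, so the loop frees exactly the IRQs requested so far without a signed index. A self-contained illustration (userspace, hypothetical names):

	/* Free items [0, n) in reverse with an unsigned index: after
	 * freeing item 0, i-- wraps to UINT_MAX and (i < n) fails.
	 * free() here is a stand-in for devm_free_irq().
	 */
	static void unwind(void **item, unsigned int n)
	{
		unsigned int i;

		for (i = n - 1; i < n; i--)
			free(item[i]);
	}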
@@ -1172,6 +1355,8 @@ static int xgbe_close(struct net_device *netdev)
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct xgbe_channel *channel;
+	unsigned int i;
 
 	DBGPR("-->xgbe_close\n");
 
@@ -1181,13 +1366,18 @@ static int xgbe_close(struct net_device *netdev)
 	/* Issue software reset to device */
 	hw_if->exit(pdata);
 
-	/* Free all the ring data */
+	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Release the interrupt */
-	if (pdata->irq_number != 0) {
-		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
-		pdata->irq_number = 0;
+	/* Free the channel and ring structures */
+	xgbe_free_channels(pdata);
+
+	/* Release the interrupts */
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			devm_free_irq(pdata->dev, channel->dma_irq, channel);
 	}
 
 	/* Disable the clocks */
@@ -1258,7 +1448,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 	xgbe_prep_tx_tstamp(pdata, skb, packet);
 
 	/* Configure required descriptor fields for transmission */
-	hw_if->pre_xmit(channel);
+	hw_if->dev_xmit(channel);
 
 #ifdef XGMAC_ENABLE_TX_PKT_DUMP
 	xgbe_print_pkt(netdev, skb, true);
@@ -1420,14 +1610,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
 static void xgbe_poll_controller(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_channel *channel;
+	unsigned int i;
 
 	DBGPR("-->xgbe_poll_controller\n");
 
-	disable_irq(pdata->irq_number);
-
-	xgbe_isr(pdata->irq_number, pdata);
-
-	enable_irq(pdata->irq_number);
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++)
+			xgbe_dma_isr(channel->dma_irq, channel);
+	} else {
+		disable_irq(pdata->dev_irq);
+		xgbe_isr(pdata->dev_irq, pdata);
+		enable_irq(pdata->dev_irq);
+	}
 
 	DBGPR("<--xgbe_poll_controller\n");
 }
@@ -1465,12 +1661,21 @@ static int xgbe_set_features(struct net_device *netdev,
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	netdev_features_t rxcsum, rxvlan, rxvlan_filter;
+	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+	int ret = 0;
 
+	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
+	if ((features & NETIF_F_RXHASH) && !rxhash)
+		ret = hw_if->enable_rss(pdata);
+	else if (!(features & NETIF_F_RXHASH) && rxhash)
+		ret = hw_if->disable_rss(pdata);
+	if (ret)
+		return ret;
+
 	if ((features & NETIF_F_RXCSUM) && !rxcsum)
 		hw_if->enable_rx_csum(pdata);
 	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
@@ -1524,7 +1729,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 
-	desc_if->realloc_skb(channel);
+	desc_if->realloc_rx_buffer(channel);
 
 	/* Update the Rx Tail Pointer Register with address of
 	 * the last cleaned entry */
@@ -1533,6 +1738,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct xgbe_ring_data *rdata,
+				       unsigned int *len)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct sk_buff *skb;
+	u8 *packet;
+	unsigned int copy_len;
+
+	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
+	if (!skb)
+		return NULL;
+
+	packet = page_address(rdata->rx_hdr.pa.pages) +
+		 rdata->rx_hdr.pa.pages_offset;
+	copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
+	copy_len = min(rdata->rx_hdr.dma_len, copy_len);
+	skb_copy_to_linear_data(skb, packet, copy_len);
+	skb_put(skb, copy_len);
+
+	*len -= copy_len;
+
+	return skb;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
...@@ -1566,7 +1796,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) ...@@ -1566,7 +1796,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
#endif #endif
/* Free the SKB and reset the descriptor for re-use */ /* Free the SKB and reset the descriptor for re-use */
desc_if->unmap_skb(pdata, rdata); desc_if->unmap_rdata(pdata, rdata);
hw_if->tx_desc_reset(rdata); hw_if->tx_desc_reset(rdata);
processed++; processed++;
...@@ -1594,6 +1824,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -1594,6 +1824,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
struct net_device *netdev = pdata->netdev;
struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
...@@ -1607,6 +1838,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;

napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (packet_count < budget) {
...@@ -1641,10 +1874,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
ring->cur++;
ring->dirty++;
dma_unmap_single(pdata->dev, rdata->skb_dma,
rdata->skb_dma_len, DMA_FROM_DEVICE);
rdata->skb_dma = 0;
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
INCOMPLETE);
...@@ -1668,26 +1897,33 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!context) {
put_len = rdata->len - len;
len += put_len;

if (!skb) {
dma_sync_single_for_cpu(pdata->dev,
rdata->rx_hdr.dma,
rdata->rx_hdr.dma_len,
DMA_FROM_DEVICE);

skb = xgbe_create_skb(pdata, rdata, &put_len);
if (!skb) {
error = 1;
goto read_again;
}
}

if (put_len) {
dma_sync_single_for_cpu(pdata->dev,
rdata->rx_buf.dma,
rdata->rx_buf.dma_len,
DMA_FROM_DEVICE);

skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx_buf.pa.pages,
rdata->rx_buf.pa.pages_offset,
put_len, rdata->rx_buf.dma_len);
rdata->rx_buf.pa.pages = NULL;
}
}
if (incomplete || context_next)
...@@ -1733,13 +1969,18 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
hwtstamps->hwtstamp = ns_to_ktime(nsec);
}
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RSS_HASH))
skb_set_hash(skb, packet->rss_hash,
packet->rss_hash_type);
skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, channel->queue_index);
skb_mark_napi_id(skb, napi);

netdev->last_rx = jiffies;
napi_gro_receive(napi, skb);

next_packet:
packet_count++;
...@@ -1761,7 +2002,35 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
return packet_count;
}
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
napi);
int processed = 0;
DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
/* Cleanup Tx ring first */
xgbe_tx_poll(channel);
/* Process Rx ring next */
processed = xgbe_rx_poll(channel, budget);
/* If we processed everything, we are done */
if (processed < budget) {
/* Turn off polling */
napi_complete(napi);
/* Enable Tx and Rx interrupts */
enable_irq(channel->dma_irq);
}
DBGPR("<--xgbe_one_poll: received = %d\n", processed);
return processed;
}
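Note: the enable_irq() above pairs with the per-channel interrupt handler masking the line before NAPI is scheduled, so the channel's DMA interrupt stays off for the whole polling episode. A toy model of that contract, with invented names, assuming the ISR-side disable:

#include <stdbool.h>
#include <stdio.h>

/* Model of the NAPI contract used by xgbe_one_poll: the channel
 * interrupt stays disabled from the moment polling is scheduled
 * until a poll pass completes under budget. Names are illustrative. */
struct channel_model {
	bool irq_enabled;
	int backlog;		/* packets waiting in the Rx ring */
};

static int poll_once(struct channel_model *ch, int budget)
{
	int processed = ch->backlog < budget ? ch->backlog : budget;

	ch->backlog -= processed;
	if (processed < budget)
		ch->irq_enabled = true;	/* mirrors enable_irq(channel->dma_irq) */
	return processed;
}

int main(void)
{
	struct channel_model ch = { .irq_enabled = false, .backlog = 150 };

	while (!ch.irq_enabled) {
		int n = poll_once(&ch, 64);

		printf("processed %d, backlog %d\n", n, ch.backlog);
	}
	return 0;
}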
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
napi);
...@@ -1770,7 +2039,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
int processed, last_processed;
unsigned int i;

DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

processed = 0;
ring_budget = budget / pdata->rx_ring_count;
...@@ -1798,7 +2067,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
xgbe_enable_rx_tx_ints(pdata);
}

DBGPR("<--xgbe_all_poll: received = %d\n", processed);

return processed;
}
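Note: with one shared interrupt, the NAPI budget is split evenly across the Rx rings and the loop repeats while any ring still makes progress. A small stand-alone model of the visible loop structure (the backlog numbers are made up):

#include <stdio.h>

/* Model of xgbe_all_poll budget handling: each pass gives every ring
 * budget / ring_count descriptors; polling stops when a full pass
 * processes nothing new or the total budget is consumed. */
int main(void)
{
	int backlog[4] = { 100, 3, 0, 40 };	/* per-ring pending packets */
	int ring_count = 4, budget = 64;
	int ring_budget = budget / ring_count;
	int processed = 0, last_processed;

	do {
		last_processed = processed;
		for (int i = 0; i < ring_count; i++) {
			int n = backlog[i] < ring_budget ?
				backlog[i] : ring_budget;

			backlog[i] -= n;
			processed += n;
		}
	} while ((processed < budget) && (processed != last_processed));

	printf("processed %d of budget %d\n", processed, budget);
	return 0;
}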
...@@ -1812,10 +2081,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
(flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
idx++;
}
}
...@@ -1823,9 +2092,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
unsigned int idx)
{
pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
......
...@@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
rx_usecs);
return -EINVAL;
}

if (rx_frames > pdata->rx_desc_count) {
netdev_alert(netdev, "rx-frames is limited to %d frames\n",
pdata->rx_desc_count);
return -EINVAL;
}
...@@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
tx_frames = ec->tx_max_coalesced_frames;

/* Check the bounds of values for Tx */
if (tx_frames > pdata->tx_desc_count) {
netdev_alert(netdev, "tx-frames is limited to %d frames\n",
pdata->tx_desc_count);
return -EINVAL;
}
...@@ -481,6 +481,75 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
static int xgbe_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
switch (rxnfc->cmd) {
case ETHTOOL_GRXRINGS:
rxnfc->data = pdata->rx_ring_count;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
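Note: ETHTOOL_GRXRINGS is what lets userspace learn how many Rx rings an RSS indirection table can target. A sketch of the corresponding SIOCETHTOOL query from userspace; "eth0" and the minimal error handling are illustrative only:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Query the Rx ring count that xgbe_get_rxnfc reports for
 * ETHTOOL_GRXRINGS. The interface name is an example. */
int main(void)
{
	struct ethtool_rxnfc rxnfc = { .cmd = ETHTOOL_GRXRINGS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&rxnfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("Rx rings: %llu\n", (unsigned long long)rxnfc.data);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}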
static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
return sizeof(pdata->rss_key);
}
static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
return ARRAY_SIZE(pdata->rss_table);
}
static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
if (indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
MAC_RSSDR, DMCH);
}
if (key)
memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
return 0;
}
static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
if (indir) {
ret = hw_if->set_rss_lookup_table(pdata, indir);
if (ret)
return ret;
}
if (key) {
ret = hw_if->set_rss_hash_key(pdata, key);
if (ret)
return ret;
}
return 0;
}
static int xgbe_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *ts_info)
{
...@@ -526,6 +595,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
.get_rxnfc = xgbe_get_rxnfc,
.get_rxfh_key_size = xgbe_get_rxfh_key_size,
.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
.get_rxfh = xgbe_get_rxfh,
.set_rxfh = xgbe_set_rxfh,
.get_ts_info = xgbe_get_ts_info,
};
......
...@@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel_mem, *channel;
struct xgbe_ring *tx_ring, *rx_ring;
unsigned int count, i;
DBGPR("-->xgbe_alloc_rings\n");
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
channel_mem = devm_kcalloc(pdata->dev, count,
sizeof(struct xgbe_channel), GFP_KERNEL);
if (!channel_mem)
return NULL;
tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
sizeof(struct xgbe_ring), GFP_KERNEL);
if (!tx_ring)
return NULL;
rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
sizeof(struct xgbe_ring), GFP_KERNEL);
if (!rx_ring)
return NULL;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
channel->tx_ring = tx_ring++;
}
if (i < pdata->rx_ring_count) {
spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
channel->name, channel->queue_index, channel->dma_regs,
channel->tx_ring, channel->rx_ring);
}
pdata->channel_count = count;
DBGPR("<--xgbe_alloc_rings\n");
return channel_mem;
}
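Note: the removed allocator above paired each channel with at most one Tx and one Rx ring carved from two shared arrays. A compact user-space model of that wiring, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct ring    { int id; };
struct channel { struct ring *tx_ring, *rx_ring; };

int main(void)
{
	unsigned int tx_count = 2, rx_count = 4;
	unsigned int count = tx_count > rx_count ? tx_count : rx_count;
	struct channel *channels = calloc(count, sizeof(*channels));
	struct ring *tx = calloc(tx_count, sizeof(*tx));
	struct ring *rx = calloc(rx_count, sizeof(*rx));

	if (!channels || !tx || !rx)
		return 1;

	/* Mirrors the loop in xgbe_alloc_rings: channels beyond the Tx
	 * ring count carry only an Rx ring, and vice versa. */
	for (unsigned int i = 0; i < count; i++) {
		channels[i].tx_ring = (i < tx_count) ? &tx[i] : NULL;
		channels[i].rx_ring = (i < rx_count) ? &rx[i] : NULL;
		printf("channel-%u: tx=%s rx=%s\n", i,
		       channels[i].tx_ring ? "yes" : "no",
		       channels[i].rx_ring ? "yes" : "no");
	}

	free(channels); free(tx); free(rx);
	return 0;
}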
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
...@@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
const u8 *mac_addr;
unsigned int i;
int ret;

DBGPR("--> xgbe_probe\n");
...@@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
mutex_init(&pdata->xpcs_mutex);
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);

/* Set and validate the number of descriptors for a ring */
...@@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
/* Check for per channel interrupt support */
if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
pdata->per_channel_irq = 1;
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
pdata->dev_irq = ret;

netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;

/* Set all the function pointers */
...@@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
}

/* Initialize RSS hash key and lookup table */
get_random_bytes(pdata->rss_key, sizeof(pdata->rss_key));

for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
i % pdata->rx_ring_count);

XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
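Note: the defaults above spread the indirection-table entries round-robin over the Rx rings; the hardware then uses the packet hash to pick an entry. A stand-alone model of the resulting hash-to-channel mapping, assuming the usual table-modulo indexing:

#include <stdio.h>

#define RSS_TABLE_SIZE 256

/* Model of the default RSS setup: entry i of the indirection table
 * points at DMA channel i % rx_ring_count, and a packet's 32-bit
 * hash selects a channel via table[hash % table size]. */
int main(void)
{
	unsigned int rss_table[RSS_TABLE_SIZE];
	unsigned int rx_ring_count = 4;

	for (unsigned int i = 0; i < RSS_TABLE_SIZE; i++)
		rss_table[i] = i % rx_ring_count;

	unsigned int hash = 0x9e3779b9;	/* example hash value */
	printf("hash 0x%08x -> channel %u\n", hash,
	       rss_table[hash % RSS_TABLE_SIZE]);
	return 0;
}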
/* Prepare to register with MDIO */
pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
...@@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_FILTER;
if (pdata->hw_feat.rss)
netdev->hw_features |= NETIF_F_RXHASH;
netdev->vlan_features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
......
...@@ -142,6 +142,8 @@
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
...@@ -171,6 +173,7 @@
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
#define XGBE_PTP_CLOCK "ptp_clk"
#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
...@@ -212,6 +215,12 @@
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
/* Receive Side Scaling */
#define XGBE_RSS_HASH_KEY_SIZE 40
#define XGBE_RSS_MAX_TABLE_SIZE 256
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1
struct xgbe_prv_data;

struct xgbe_packet_data {
...@@ -230,14 +239,35 @@ struct xgbe_packet_data {
unsigned short vlan_ctag;

u64 rx_tstamp;
u32 rss_hash;
enum pkt_hash_types rss_hash_type;
};

/* Common Rx and Tx descriptor mapping */
struct xgbe_ring_desc {
u32 desc0;
u32 desc1;
u32 desc2;
u32 desc3;
};
/* Page allocation related values */
struct xgbe_page_alloc {
struct page *pages;
unsigned int pages_len;
unsigned int pages_offset;
dma_addr_t pages_dma;
};
/* Ring entry buffer data */
struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
dma_addr_t dma;
unsigned int dma_len;
};
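Note: xgbe_page_alloc is a simple carving allocator: pages_offset advances through one large allocation until it is exhausted, at which point a fresh allocation replaces it. A user-space model of that bookkeeping, with made-up sizes:

#include <stdio.h>

/* Model of the xgbe_page_alloc bookkeeping: one large allocation is
 * carved into fixed-size receive buffers by advancing pages_offset. */
struct page_alloc_model {
	unsigned int pages_len;		/* total bytes in the allocation */
	unsigned int pages_offset;	/* next unused byte */
};

static int take_buffer(struct page_alloc_model *pa, unsigned int len,
		       unsigned int *offset)
{
	if (pa->pages_offset + len > pa->pages_len)
		return -1;	/* exhausted: a new allocation is needed */
	*offset = pa->pages_offset;
	pa->pages_offset += len;
	return 0;
}

int main(void)
{
	struct page_alloc_model pa = { .pages_len = 4096, .pages_offset = 0 };
	unsigned int off;

	while (take_buffer(&pa, 2048, &off) == 0)
		printf("buffer at offset %u\n", off);
	printf("allocation exhausted at offset %u\n", pa.pages_offset);
	return 0;
}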
/* Structure used to hold information related to the descriptor
...@@ -253,6 +283,10 @@ struct xgbe_ring_data {
unsigned int skb_dma_len; /* Length of SKB DMA area */
unsigned int tso_header; /* TSO header indicator */
struct xgbe_buffer_data rx_hdr; /* Header locations */
struct xgbe_buffer_data rx_buf; /* Payload locations */
unsigned short hdr_len; /* Length of received header */
unsigned short len; /* Length of received Rx packet */

unsigned int interrupt; /* Interrupt indicator */
...@@ -291,6 +325,10 @@ struct xgbe_ring {
*/
struct xgbe_ring_data *rdata;
/* Page allocation for RX buffers */
struct xgbe_page_alloc rx_hdr_pa;
struct xgbe_page_alloc rx_buf_pa;
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
...@@ -331,6 +369,12 @@ struct xgbe_channel {
unsigned int queue_index;
void __iomem *dma_regs;
/* Per channel interrupt irq number */
int dma_irq;
/* Netdev related settings */
struct napi_struct napi;
unsigned int saved_ier;

unsigned int tx_timer_active;
...@@ -456,7 +500,7 @@ struct xgbe_hw_if {
int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
void (*dev_xmit)(struct xgbe_channel *);
int (*dev_read)(struct xgbe_channel *);
void (*tx_desc_init)(struct xgbe_channel *);
void (*rx_desc_init)(struct xgbe_channel *);
...@@ -509,14 +553,20 @@ struct xgbe_hw_if {
/* For Data Center Bridging config */
void (*config_dcb_tc)(struct xgbe_prv_data *);
void (*config_dcb_pfc)(struct xgbe_prv_data *);
/* For Receive Side Scaling */
int (*enable_rss)(struct xgbe_prv_data *);
int (*disable_rss)(struct xgbe_prv_data *);
int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};

struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
void (*realloc_rx_buffer)(struct xgbe_channel *);
void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
};
...@@ -581,7 +631,11 @@ struct xgbe_prv_data {
/* XPCS indirect addressing mutex */
struct mutex xpcs_mutex;

/* RSS addressing mutex */
struct mutex rss_mutex;

int dev_irq;
unsigned int per_channel_irq;

struct xgbe_hw_if hw_if;
struct xgbe_desc_if desc_if;
...@@ -624,7 +678,7 @@ struct xgbe_prv_data {
unsigned int rx_riwt;
unsigned int rx_frames;

/* Current Rx buffer size */
unsigned int rx_buf_size;

/* Flow control settings */
...@@ -632,6 +686,11 @@ struct xgbe_prv_data {
unsigned int tx_pause;
unsigned int rx_pause;
/* Receive Side Scaling settings */
u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;
/* MDIO settings */
struct module *phy_module;
char *mii_bus_id;
......
...@@ -26,7 +26,7 @@ config AMD_PHY
config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
depends on OF && HAS_IOMEM
---help---
Currently supports the AMD 10GbE PHY
......
...@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
if (ret & MDIO_CTRL1_RESET)
return -ETIMEDOUT;

/* Make sure the XPCS and SerDes are in compatible states */
return amd_xgbe_phy_xgmii_mode(phydev);
}

static int amd_xgbe_phy_config_init(struct phy_device *phydev)
......