Commit 2c99cd91 authored by David S. Miller

Merge branch 'amd-xgbe-next'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2014-11-04

The following series of patches includes functional updates to the
driver as well as some trivial changes, such as function renames and
spelling fixes.

- Move channel and ring structure allocation into the device open path
- Rename the pre_xmit function to dev_xmit
- Explicitly use the u32 data type for the device descriptors
- Use page allocation for the receive buffers
- Add support for split header/payload receive
- Add support for per DMA channel interrupts
- Add support for receive side scaling (RSS)
- Add support for ethtool receive side scaling commands
- Fix the spelling of descriptors
- After a PCS reset, sync the PCS and PHY modes
- Add dependency on HAS_IOMEM to both the amd-xgbe and amd-xgbe-phy
  drivers

This patch series is based on net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 25de4668 5cdec679
@@ -7,7 +7,10 @@ Required properties:
   - PCS registers
- interrupt-parent: Should be the phandle for the interrupt controller
  that services interrupts for this device
- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
  listed is required and is the general device interrupt. If the optional
  amd,per-channel-interrupt property is specified, then one additional
  interrupt for each DMA channel supported by the device should be specified
- clocks:
  - DMA clock for the amd-xgbe device (used for calculating the
    correct Rx interrupt watchdog timer value on a DMA channel
@@ -23,6 +26,9 @@ Optional properties:
- mac-address: mac address to be assigned to the device. Can be overridden
  by UEFI.
- dma-coherent: Present if dma operations are coherent
- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
a unique interrupt for each DMA channel - this requires an additional
interrupt be configured for each DMA channel
Example:
	xgbe@e0700000 {
@@ -30,7 +36,9 @@ Example:
		reg = <0 0xe0700000 0 0x80000>,
		      <0 0xe0780000 0 0x80000>;
		interrupt-parent = <&gic>;
		interrupts = <0 325 4>,
			     <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
		amd,per-channel-interrupt;
		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
		clock-names = "dma_clk", "ptp_clk";
		phy-handle = <&phy>;
...
@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
	tristate "AMD 10GbE Ethernet driver"
	depends on OF_NET && HAS_IOMEM
	select PHYLIB
	select AMD_XGBE_PHY
	select BITREVERSE
...
@@ -207,6 +207,8 @@
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
#define DMA_CH_CR_SPH_INDEX 24
#define DMA_CH_CR_SPH_WIDTH 1
#define DMA_CH_IER_AIE_INDEX 15
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
@@ -306,6 +308,9 @@
#define MAC_MACA0LR 0x0304
#define MAC_MACA1HR 0x0308
#define MAC_MACA1LR 0x030c
#define MAC_RSSCR 0x0c80
#define MAC_RSSAR 0x0c88
#define MAC_RSSDR 0x0c8c
#define MAC_TSCR 0x0d00
#define MAC_SSIR 0x0d04
#define MAC_STSR 0x0d08
@@ -429,6 +434,8 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
#define MAC_RCR_IPC_WIDTH 1
#define MAC_RCR_JE_INDEX 8
@@ -445,6 +452,24 @@
#define MAC_RFCR_UP_WIDTH 1
#define MAC_RQC0R_RXQ0EN_INDEX 0
#define MAC_RQC0R_RXQ0EN_WIDTH 2
#define MAC_RSSAR_ADDRT_INDEX 2
#define MAC_RSSAR_ADDRT_WIDTH 1
#define MAC_RSSAR_CT_INDEX 1
#define MAC_RSSAR_CT_WIDTH 1
#define MAC_RSSAR_OB_INDEX 0
#define MAC_RSSAR_OB_WIDTH 1
#define MAC_RSSAR_RSSIA_INDEX 8
#define MAC_RSSAR_RSSIA_WIDTH 8
#define MAC_RSSCR_IP2TE_INDEX 1
#define MAC_RSSCR_IP2TE_WIDTH 1
#define MAC_RSSCR_RSSE_INDEX 0
#define MAC_RSSCR_RSSE_WIDTH 1
#define MAC_RSSCR_TCP4TE_INDEX 2
#define MAC_RSSCR_TCP4TE_WIDTH 1
#define MAC_RSSCR_UDP4TE_INDEX 3
#define MAC_RSSCR_UDP4TE_WIDTH 1
#define MAC_RSSDR_DMCH_INDEX 0
#define MAC_RSSDR_DMCH_WIDTH 4
#define MAC_SSIR_SNSINC_INDEX 8
#define MAC_SSIR_SNSINC_WIDTH 8
#define MAC_SSIR_SSINC_INDEX 16
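The RSS key and indirection table are not memory mapped directly; they are
programmed through the MAC_RSSAR/MAC_RSSDR indirect-access pair defined
above. The helper below is a sketch of that sequence (the real
implementation is in the xgbe-dev.c diff, which is collapsed in this view):
the data word is staged in RSSDR, RSSAR selects key vs. table (ADDRT) and
the entry index (RSSIA), and setting OB starts the write, with OB polled
until the hardware clears it.

	/* Sketch of the indirect RSS register write: stage the value,
	 * describe it, start the operation, then poll the OB (operation
	 * busy) bit until the hardware has consumed the data.
	 */
	static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata,
				      unsigned int type, unsigned int index,
				      unsigned int val)
	{
		unsigned int wait;
		int ret = 0;

		mutex_lock(&pdata->rss_mutex);

		if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
			ret = -EBUSY;
			goto unlock;
		}

		XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

		XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
		XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
		XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
		XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

		wait = 1000;
		while (wait--) {
			if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
				goto unlock;

			usleep_range(1000, 1500);
		}

		ret = -EBUSY;

	unlock:
		mutex_unlock(&pdata->rss_mutex);

		return ret;
	}

Here type selects the hash key or the lookup table (see the
XGBE_RSS_*_TYPE values added to xgbe.h below), and the rss_mutex added to
xgbe_prv_data in this series serializes these accesses.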
@@ -844,9 +869,13 @@
#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
#define RX_NORMAL_DESC2_HL_INDEX 0
#define RX_NORMAL_DESC2_HL_WIDTH 10
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
@@ -855,14 +884,27 @@
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
#define RX_NORMAL_DESC3_ETLT_WIDTH 4
#define RX_NORMAL_DESC3_FD_INDEX 29
#define RX_NORMAL_DESC3_FD_WIDTH 1
#define RX_NORMAL_DESC3_INTE_INDEX 30
#define RX_NORMAL_DESC3_INTE_WIDTH 1
#define RX_NORMAL_DESC3_L34T_INDEX 20
#define RX_NORMAL_DESC3_L34T_WIDTH 4
#define RX_NORMAL_DESC3_LD_INDEX 28
#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_NORMAL_DESC3_OWN_INDEX 31
#define RX_NORMAL_DESC3_OWN_WIDTH 1
#define RX_NORMAL_DESC3_PL_INDEX 0
#define RX_NORMAL_DESC3_PL_WIDTH 14
#define RX_NORMAL_DESC3_RSV_INDEX 26
#define RX_NORMAL_DESC3_RSV_WIDTH 1
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
#define RX_DESC3_L34T_IPV4_ICMP 3
#define RX_DESC3_L34T_IPV6_TCP 9
#define RX_DESC3_L34T_IPV6_UDP 10
#define RX_DESC3_L34T_IPV6_ICMP 11
#define RX_CONTEXT_DESC3_TSA_INDEX 4
#define RX_CONTEXT_DESC3_TSA_WIDTH 1
...
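The RX_DESC3_L34T_* values above decode the descriptor's layer 3/layer 4
type field; the Rx path (in the collapsed xgbe-dev.c diff) uses them to
pick the hash-type hint passed to skb_set_hash(). A minimal sketch of that
mapping, with the helper name being illustrative:

	/* Illustrative helper: TCP/UDP types report an L4 hash to the
	 * stack, any other recognized type falls back to an L3 hash.
	 */
	static enum pkt_hash_types xgbe_rss_hash_type(unsigned int l34t)
	{
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			return PKT_HASH_TYPE_L4;
		default:
			return PKT_HASH_TYPE_L3;
		}
	}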
@@ -117,7 +117,7 @@
#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}
if (ring->rx_hdr_pa.pages) {
dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
put_page(ring->rx_hdr_pa.pages);
ring->rx_hdr_pa.pages = NULL;
ring->rx_hdr_pa.pages_len = 0;
ring->rx_hdr_pa.pages_offset = 0;
ring->rx_hdr_pa.pages_dma = 0;
}
if (ring->rx_buf_pa.pages) {
dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
put_page(ring->rx_buf_pa.pages);
ring->rx_buf_pa.pages = NULL;
ring->rx_buf_pa.pages_len = 0;
ring->rx_buf_pa.pages_offset = 0;
ring->rx_buf_pa.pages_dma = 0;
}
	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
	return ret;
}
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
struct page *pages = NULL;
dma_addr_t pages_dma;
int ret;
/* Try to obtain pages, decreasing order if necessary */
gfp |= __GFP_COLD | __GFP_COMP;
while (order >= 0) {
pages = alloc_pages(gfp, order);
if (pages)
break;
order--;
}
if (!pages)
return -ENOMEM;
/* Map the pages */
pages_dma = dma_map_page(pdata->dev, pages, 0,
PAGE_SIZE << order, DMA_FROM_DEVICE);
ret = dma_mapping_error(pdata->dev, pages_dma);
if (ret) {
put_page(pages);
return ret;
}
pa->pages = pages;
pa->pages_len = PAGE_SIZE << order;
pa->pages_offset = 0;
pa->pages_dma = pages_dma;
return 0;
}
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
struct xgbe_page_alloc *pa,
unsigned int len)
{
get_page(pa->pages);
bd->pa = *pa;
bd->dma = pa->pages_dma + pa->pages_offset;
bd->dma_len = len;
pa->pages_offset += len;
if ((pa->pages_offset + len) > pa->pages_len) {
/* This data descriptor is responsible for unmapping page(s) */
bd->pa_unmap = *pa;
/* Get a new allocation next time */
pa->pages = NULL;
pa->pages_len = 0;
pa->pages_offset = 0;
pa->pages_dma = 0;
}
}
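Each xgbe_set_buffer_data() call hands out one slice of the shared page
allocation and takes an extra page reference for it. The descriptor data
that consumes the final slice inherits the allocation in pa_unmap and so
performs the eventual dma_unmap_page()/put_page() (see xgbe_unmap_rdata()
below), while the ring-level xgbe_page_alloc is zeroed so the next
xgbe_map_rx_buffer() call obtains a fresh allocation. As a worked example
with hypothetical sizes: an order-2 allocation on a 4 KiB page system
gives pages_len = 16384, so with 2048-byte Rx buffers the eighth slice
pushes pages_offset + len past pages_len and that descriptor carries the
unmap duty.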
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring,
struct xgbe_ring_data *rdata)
{
int order, ret;
if (!ring->rx_hdr_pa.pages) {
ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
if (ret)
return ret;
}
if (!ring->rx_buf_pa.pages) {
order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
order);
if (ret)
return ret;
}
/* Set up the header page info */
xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
XGBE_SKB_ALLOC_SIZE);
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
pdata->rx_buf_size);
return 0;
}
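xgbe_map_rx_buffer() gives each descriptor two DMA areas: a small slice for
the protocol headers (XGBE_SKB_ALLOC_SIZE bytes) and a large slice for the
payload. On receive, the Rx path (in the collapsed xgbe-drv.c diff) copies
only the header portion into the skb's linear area and attaches the
payload page as a fragment, roughly as in this sketch (the function shape
here is illustrative):

	/* Sketch of split-header skb construction: the header slice is
	 * copied into the linear area; the payload slice is attached
	 * zero-copy as a page fragment by the caller.
	 */
	static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
					       struct xgbe_ring_data *rdata,
					       unsigned int *len)
	{
		struct sk_buff *skb;
		u8 *packet;
		unsigned int copy_len;

		skb = netdev_alloc_skb_ip_align(pdata->netdev,
						XGBE_SKB_ALLOC_SIZE);
		if (!skb)
			return NULL;

		/* Use the header length reported by the hardware if the
		 * packet was actually split, otherwise copy up to the
		 * size of the header slice.
		 */
		packet = page_address(rdata->rx_hdr.pa.pages) +
			 rdata->rx_hdr.pa.pages_offset;
		copy_len = rdata->hdr_len ? rdata->hdr_len : *len;
		copy_len = min(rdata->rx_hdr.dma_len, copy_len);
		skb_copy_to_linear_data(skb, packet, copy_len);
		skb_put(skb, copy_len);

		*len -= copy_len;

		return skb;
	}

Any remaining payload length would then be attached with
skb_add_rx_frag() from rdata->rx_buf.pa.pages at pages_offset, which is
what makes the page refcounting in xgbe_set_buffer_data() necessary.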
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
-	dma_addr_t rdesc_dma, skb_dma;
-	struct sk_buff *skb = NULL;
+	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

-			/* Allocate skb & assign to each rdesc */
-			skb = dev_alloc_skb(pdata->rx_buf_size);
-			if (skb == NULL)
+			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;
-			skb_dma = dma_map_single(pdata->dev, skb->data,
-						 pdata->rx_buf_size,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(pdata->dev, skb_dma)) {
-				netdev_alert(pdata->netdev,
-					     "failed to do the dma map\n");
-				dev_kfree_skb_any(skb);
-				break;
-			}
-			rdata->skb = skb;
-			rdata->skb_dma = skb_dma;
-			rdata->skb_dma_len = pdata->rx_buf_size;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,7 +431,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
@@ -354,6 +451,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
		rdata->skb = NULL;
	}
if (rdata->rx_hdr.pa.pages)
put_page(rdata->rx_hdr.pa.pages);
if (rdata->rx_hdr.pa_unmap.pages) {
dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
rdata->rx_hdr.pa_unmap.pages_len,
DMA_FROM_DEVICE);
put_page(rdata->rx_hdr.pa_unmap.pages);
}
if (rdata->rx_buf.pa.pages)
put_page(rdata->rx_buf.pa.pages);
if (rdata->rx_buf.pa_unmap.pages) {
dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
rdata->rx_buf.pa_unmap.pages_len,
DMA_FROM_DEVICE);
put_page(rdata->rx_buf.pa_unmap.pages);
}
memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
	rdata->tso_header = 0;
	rdata->len = 0;
	rdata->interrupt = 0;
@@ -494,7 +614,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +622,25 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
	return 0;
}
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
-	struct sk_buff *skb = NULL;
-	dma_addr_t skb_dma;
	int i;

-	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+	DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
	      ring->rx.realloc_index);

	for (i = 0; i < ring->dirty; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);

		/* Reset rdata values */
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);

-		/* Allocate skb & assign to each rdesc */
-		skb = dev_alloc_skb(pdata->rx_buf_size);
-		if (skb == NULL)
+		if (xgbe_map_rx_buffer(pdata, ring, rdata))
			break;
-		skb_dma = dma_map_single(pdata->dev, skb->data,
-					 pdata->rx_buf_size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev,
-				     "failed to do the dma map\n");
-			dev_kfree_skb_any(skb);
-			break;
-		}
-		rdata->skb = skb;
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = pdata->rx_buf_size;

		hw_if->rx_desc_reset(rdata);

@@ -543,7 +648,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
	}
	ring->dirty = 0;

-	DBGPR("<--xgbe_realloc_skb\n");
+	DBGPR("<--xgbe_realloc_rx_buffer\n");
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +658,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
...
[Two file diffs are collapsed in this view: xgbe-dev.c and xgbe-drv.c.]
@@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
			     rx_usecs);
		return -EINVAL;
	}

	if (rx_frames > pdata->rx_desc_count) {
		netdev_alert(netdev, "rx-frames is limited to %d frames\n",
			     pdata->rx_desc_count);
		return -EINVAL;
	}
@@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
	tx_frames = ec->tx_max_coalesced_frames;

	/* Check the bounds of values for Tx */
	if (tx_frames > pdata->tx_desc_count) {
		netdev_alert(netdev, "tx-frames is limited to %d frames\n",
			     pdata->tx_desc_count);
		return -EINVAL;
	}
@@ -481,6 +481,75 @@ static int xgbe_set_coalesce(struct net_device *netdev,
	return 0;
}
static int xgbe_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
switch (rxnfc->cmd) {
case ETHTOOL_GRXRINGS:
rxnfc->data = pdata->rx_ring_count;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
return sizeof(pdata->rss_key);
}
static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
return ARRAY_SIZE(pdata->rss_table);
}
static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
if (indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
MAC_RSSDR, DMCH);
}
if (key)
memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
return 0;
}
static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret;
if (indir) {
ret = hw_if->set_rss_lookup_table(pdata, indir);
if (ret)
return ret;
}
if (key) {
ret = hw_if->set_rss_hash_key(pdata, key);
if (ret)
return ret;
}
return 0;
}
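With these ops registered, the 40-byte hash key and the 256-entry
DMA-channel indirection table are exposed through the standard ethtool RSS
interface, e.g. ethtool -x <dev> to display them and ethtool -X <dev> to
reprogram them.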
static int xgbe_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *ts_info)
{
@@ -526,6 +595,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
	.get_strings = xgbe_get_strings,
	.get_ethtool_stats = xgbe_get_ethtool_stats,
	.get_sset_count = xgbe_get_sset_count,
.get_rxnfc = xgbe_get_rxnfc,
.get_rxfh_key_size = xgbe_get_rxfh_key_size,
.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
.get_rxfh = xgbe_get_rxfh,
.set_rxfh = xgbe_set_rxfh,
	.get_ts_info = xgbe_get_ts_info,
};
...
@@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel_mem, *channel;
struct xgbe_ring *tx_ring, *rx_ring;
unsigned int count, i;
DBGPR("-->xgbe_alloc_rings\n");
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
channel_mem = devm_kcalloc(pdata->dev, count,
sizeof(struct xgbe_channel), GFP_KERNEL);
if (!channel_mem)
return NULL;
tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
sizeof(struct xgbe_ring), GFP_KERNEL);
if (!tx_ring)
return NULL;
rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
sizeof(struct xgbe_ring), GFP_KERNEL);
if (!rx_ring)
return NULL;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
channel->tx_ring = tx_ring++;
}
if (i < pdata->rx_ring_count) {
spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
channel->name, channel->queue_index, channel->dma_regs,
channel->tx_ring, channel->rx_ring);
}
pdata->channel_count = count;
DBGPR("<--xgbe_alloc_rings\n");
return channel_mem;
}
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");
@@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct resource *res;
	const u8 *mac_addr;
unsigned int i;
	int ret;

	DBGPR("--> xgbe_probe\n");
@@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev)
	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	/* Set and validate the number of descriptors for a ring */
@@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}
/* Check for per channel interrupt support */
if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
pdata->per_channel_irq = 1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;

	/* Set all the function pointers */
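When amd,per-channel-interrupt is set, only the general interrupt (index 0)
is claimed here. The additional per-channel interrupts are fetched when the
channels are created in xgbe-drv.c (collapsed above), roughly as in this
sketch; the i + 1 offset skips the general device interrupt at the head of
the device-tree interrupt list:

	if (pdata->per_channel_irq) {
		/* Get the per-channel DMA interrupt, offset by one to
		 * skip the general device interrupt at index 0.
		 */
		ret = platform_get_irq(pdata->pdev, i + 1);
		if (ret < 0)
			goto err_irq;

		channel->dma_irq = ret;
	}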
@@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev)
		goto err_io;
	}
-	/* Allocate the rings for the DMA channels */
-	pdata->channel = xgbe_alloc_rings(pdata);
-	if (!pdata->channel) {
-		dev_err(dev, "ring allocation failed\n");
-		ret = -ENOMEM;
-		goto err_io;
-	}
+	/* Initialize RSS hash key and lookup table */
+	get_random_bytes(pdata->rss_key, sizeof(pdata->rss_key));
+
+	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+			       i % pdata->rx_ring_count);
+
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
	/* Prepare to register with MDIO */
	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
@@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev)
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_FILTER;
if (pdata->hw_feat.rss)
netdev->hw_features |= NETIF_F_RXHASH;
	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
...
@@ -142,6 +142,8 @@
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
@@ -171,6 +173,7 @@
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
#define XGBE_PTP_CLOCK "ptp_clk"
#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
/* Timestamp support - values based on 50MHz PTP clock
 * 50MHz => 20 nsec
@@ -212,6 +215,12 @@
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
/* Receive Side Scaling */
#define XGBE_RSS_HASH_KEY_SIZE 40
#define XGBE_RSS_MAX_TABLE_SIZE 256
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1
struct xgbe_prv_data;

struct xgbe_packet_data {
@@ -230,14 +239,35 @@ struct xgbe_packet_data {
	unsigned short vlan_ctag;

	u64 rx_tstamp;
u32 rss_hash;
enum pkt_hash_types rss_hash_type;
};

/* Common Rx and Tx descriptor mapping */
struct xgbe_ring_desc {
	u32 desc0;
	u32 desc1;
	u32 desc2;
	u32 desc3;
};
/* Page allocation related values */
struct xgbe_page_alloc {
struct page *pages;
unsigned int pages_len;
unsigned int pages_offset;
dma_addr_t pages_dma;
};
/* Ring entry buffer data */
struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
dma_addr_t dma;
unsigned int dma_len;
};
/* Structure used to hold information related to the descriptor
@@ -253,6 +283,10 @@ struct xgbe_ring_data {
	unsigned int skb_dma_len; /* Length of SKB DMA area */
	unsigned int tso_header; /* TSO header indicator */
struct xgbe_buffer_data rx_hdr; /* Header locations */
struct xgbe_buffer_data rx_buf; /* Payload locations */
unsigned short hdr_len; /* Length of received header */
	unsigned short len; /* Length of received Rx packet */

	unsigned int interrupt; /* Interrupt indicator */
@@ -291,6 +325,10 @@ struct xgbe_ring {
	 */
	struct xgbe_ring_data *rdata;
/* Page allocation for RX buffers */
struct xgbe_page_alloc rx_hdr_pa;
struct xgbe_page_alloc rx_buf_pa;
	/* Ring index values
	 * cur - Tx: index of descriptor to be used for current transfer
	 *       Rx: index of descriptor to check for packet availability
@@ -331,6 +369,12 @@ struct xgbe_channel {
	unsigned int queue_index;
	void __iomem *dma_regs;
/* Per channel interrupt irq number */
int dma_irq;
/* Netdev related settings */
struct napi_struct napi;
	unsigned int saved_ier;

	unsigned int tx_timer_active;
@@ -456,7 +500,7 @@ struct xgbe_hw_if {
	int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
	int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
	void (*dev_xmit)(struct xgbe_channel *);
	int (*dev_read)(struct xgbe_channel *);
	void (*tx_desc_init)(struct xgbe_channel *);
	void (*rx_desc_init)(struct xgbe_channel *);
@@ -509,14 +553,20 @@ struct xgbe_hw_if {
	/* For Data Center Bridging config */
	void (*config_dcb_tc)(struct xgbe_prv_data *);
	void (*config_dcb_pfc)(struct xgbe_prv_data *);
/* For Receive Side Scaling */
int (*enable_rss)(struct xgbe_prv_data *);
int (*disable_rss)(struct xgbe_prv_data *);
int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};

struct xgbe_desc_if {
	int (*alloc_ring_resources)(struct xgbe_prv_data *);
	void (*free_ring_resources)(struct xgbe_prv_data *);
	int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
	void (*realloc_rx_buffer)(struct xgbe_channel *);
	void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
	void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
	void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
};
@@ -581,7 +631,11 @@ struct xgbe_prv_data {
	/* XPCS indirect addressing mutex */
	struct mutex xpcs_mutex;

-	int irq_number;
+	/* RSS addressing mutex */
+	struct mutex rss_mutex;
+
+	int dev_irq;
+	unsigned int per_channel_irq;
	struct xgbe_hw_if hw_if;
	struct xgbe_desc_if desc_if;
@@ -624,7 +678,7 @@ struct xgbe_prv_data {
	unsigned int rx_riwt;
	unsigned int rx_frames;
	/* Current Rx buffer size */
	unsigned int rx_buf_size;

	/* Flow control settings */
@@ -632,6 +686,11 @@ struct xgbe_prv_data {
	unsigned int tx_pause;
	unsigned int rx_pause;
/* Receive Side Scaling settings */
u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;
	/* MDIO settings */
	struct module *phy_module;
	char *mii_bus_id;
...
@@ -26,7 +26,7 @@ config AMD_PHY
config AMD_XGBE_PHY
	tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
	depends on OF && HAS_IOMEM
	---help---
	  Currently supports the AMD 10GbE PHY
...
@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
	if (ret & MDIO_CTRL1_RESET)
		return -ETIMEDOUT;

-	return 0;
+	/* Make sure the XPCS and SerDes are in compatible states */
+	return amd_xgbe_phy_xgmii_mode(phydev);
}

static int amd_xgbe_phy_config_init(struct phy_device *phydev)
...