Commit 4a956bd2 authored by David S. Miller

Merge branch 'DPAA-Ethernet-changes'

Madalin Bucur says:

====================
DPAA Ethernet changes

v2: remove excess braces

Here are some more changes for the DPAA 1.x area.
In summary, these changes use pages for the receive buffers and
for the scatter-gather table fed to the HW on the Tx path, clean up
some convoluted parts of the code, add minor fixes related to DMA
(un)mapping sequencing for a less common scenario, and add a device
link that removes the interfaces when the QMan portal they use is
removed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2bd7c3e1 e06eea55
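
As background for the diff below, the Rx buffer scheme the series moves to boils down to allocating one full page per buffer and DMA-mapping the whole page towards the device. A minimal sketch, assuming a generic DMA device and PAGE_SIZE in place of the driver's DPAA_BP_RAW_SIZE; the helper name is hypothetical and this is not the driver code itself:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Seed one Rx buffer: one order-0 page, DMA-mapped for reception.
 * "dev" stands in for the Rx port's DMA device; on success the bus
 * address in *busaddr is what would be released to the buffer pool.
 */
static int example_seed_rx_buffer(struct device *dev, dma_addr_t *busaddr)
{
        struct page *p;
        dma_addr_t addr;

        p = dev_alloc_pages(0);         /* one full page per buffer */
        if (unlikely(!p))
                return -ENOMEM;

        addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, addr))) {
                __free_pages(p, 0);     /* undo the allocation on error */
                return -EIO;
        }

        *busaddr = addr;
        return 0;
}
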
@@ -178,31 +178,9 @@ struct fm_port_fqs {
 /* All the dpa bps in use at any moment */
 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
-/* The raw buffer size must be cacheline aligned */
 #define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
-        size_t res = DPAA_BP_RAW_SIZE / 4;
-        u8 i;
-
-        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
-                res *= 2;
-        return res;
-}
 
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 
 static int dpaa_max_frm;
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
         /* Allow the Fman (Tx) port to process in-flight frames before we
          * try switching it off.
          */
-        usleep_range(5000, 10000);
+        msleep(200);
 
         err = mac_dev->stop(mac_dev);
         if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
         phy_disconnect(net_dev->phydev);
         net_dev->phydev = NULL;
 
+        msleep(200);
+
         return err;
 }
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
 
 static void dpaa_bps_free(struct dpaa_priv *priv)
 {
-        int i;
-
-        for (i = 0; i < DPAA_BPS_NUM; i++)
-                dpaa_bp_free(priv->dpaa_bps[i]);
+        dpaa_bp_free(priv->dpaa_bp);
 }
 
 /* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
         qman_release_pool(rx_pool_channel);
 }
 
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 {
         u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
         const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
         for_each_cpu_and(cpu, cpus, cpu_online_mask) {
                 portal = qman_get_affine_portal(cpu);
                 qman_p_static_dequeue_add(portal, pool);
+                qman_start_using_portal(portal, dev);
         }
 }
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
         return err;
 }
 
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-                                 size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+                                 struct dpaa_fq *errq,
                                  struct dpaa_fq *defq, struct dpaa_fq *pcdq,
                                  struct dpaa_buffer_layout *buf_layout)
 {
         struct fman_buffer_prefix_content buf_prefix_content;
         struct fman_port_rx_params *rx_p;
         struct fman_port_params params;
-        int i, err;
+        int err;
 
         memset(&params, 0, sizeof(params));
         memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
                 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
         }
 
-        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
-        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
-        for (i = 0; i < count; i++) {
-                rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
-                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
-        }
+        rx_p->ext_buf_pools.num_of_pools_used = 1;
+        rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+        rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
 
         err = fman_port_config(port, &params);
         if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 }
 
 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
-                               struct dpaa_bp **bps, size_t count,
+                               struct dpaa_bp *bp,
                                struct fm_port_fqs *port_fqs,
                                struct dpaa_buffer_layout *buf_layout,
                                struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
         if (err)
                 return err;
 
-        err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+        err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
                                     port_fqs->rx_defq, port_fqs->rx_pcdq,
                                     &buf_layout[RX]);
@@ -1335,13 +1310,14 @@ static void dpaa_fd_release(const struct net_device *net_dev,
                 vaddr = phys_to_virt(qm_fd_addr(fd));
                 sgt = vaddr + qm_fd_get_offset(fd);
 
-                dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
-                                 dpaa_bp->size, DMA_FROM_DEVICE);
+                dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 
                 dpaa_release_sgt_members(sgt);
 
-                addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
-                                      dpaa_bp->size, DMA_FROM_DEVICE);
+                addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+                                    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+                                    DMA_FROM_DEVICE);
                 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
                         netdev_err(net_dev, "DMA mapping failed\n");
                         return;
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
                                struct sk_buff *skb,
                                struct qm_fd *fd,
-                               char *parse_results)
+                               void *parse_results)
 {
         struct fman_prs_result *parse_result;
         u16 ethertype = ntohs(skb->protocol);
@@ -1491,21 +1467,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
         struct net_device *net_dev = dpaa_bp->priv->net_dev;
         struct bm_buffer bmb[8];
         dma_addr_t addr;
-        void *new_buf;
+        struct page *p;
         u8 i;
 
         for (i = 0; i < 8; i++) {
-                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-                if (unlikely(!new_buf)) {
-                        netdev_err(net_dev,
-                                   "netdev_alloc_frag() failed, size %zu\n",
-                                   dpaa_bp->raw_size);
+                p = dev_alloc_pages(0);
+                if (unlikely(!p)) {
+                        netdev_err(net_dev, "dev_alloc_pages() failed\n");
                         goto release_previous_buffs;
                 }
-                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-                addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
-                                      dpaa_bp->size, DMA_FROM_DEVICE);
+                addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+                                    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
                                                addr))) {
                         netdev_err(net_dev, "DMA map failed\n");
@@ -1583,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
 {
         struct dpaa_bp *dpaa_bp;
         int *countptr;
-        int res, i;
-
-        for (i = 0; i < DPAA_BPS_NUM; i++) {
-                dpaa_bp = priv->dpaa_bps[i];
-                if (!dpaa_bp)
-                        return -EINVAL;
-                countptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
-                if (res)
-                        return res;
-        }
+        int res;
+
+        dpaa_bp = priv->dpaa_bp;
+        if (!dpaa_bp)
+                return -EINVAL;
+        countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+        res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+        if (res)
+                return res;
 
         return 0;
 }
@@ -1602,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
  * Skb freeing is not handled here.
  *
  * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
  *
  * Return the skb backpointer, since for S/G frames the buffer containing it
  * gets freed here.
  */
 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-                                          const struct qm_fd *fd)
+                                          const struct qm_fd *fd, bool ts)
 {
         const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
         struct device *dev = priv->net_dev->dev.parent;
         struct skb_shared_hwtstamps shhwtstamps;
         dma_addr_t addr = qm_fd_addr(fd);
+        void *vaddr = phys_to_virt(addr);
         const struct qm_sg_entry *sgt;
-        struct sk_buff **skbh, *skb;
-        int nr_frags, i;
+        struct sk_buff *skb;
         u64 ns;
+        int i;
 
-        skbh = (struct sk_buff **)phys_to_virt(addr);
-        skb = *skbh;
-
-        if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
-                if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
-                                          &ns)) {
-                        shhwtstamps.hwtstamp = ns_to_ktime(ns);
-                        skb_tstamp_tx(skb, &shhwtstamps);
-                } else {
-                        dev_warn(dev, "fman_port_get_tstamp failed!\n");
-                }
-        }
-
         if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-                nr_frags = skb_shinfo(skb)->nr_frags;
-                dma_unmap_single(priv->tx_dma_dev, addr,
-                                 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-                                 dma_dir);
+                dma_unmap_page(priv->tx_dma_dev, addr,
+                               qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+                               dma_dir);
 
                 /* The sgt buffer has been allocated with netdev_alloc_frag(),
                  * it's from lowmem.
                  */
-                sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+                sgt = vaddr + qm_fd_get_offset(fd);
 
                 /* sgt[0] is from lowmem, was dma_map_single()-ed */
                 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
                                  qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
                 /* remaining pages were mapped with skb_frag_dma_map() */
-                for (i = 1; i <= nr_frags; i++) {
+                for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+                     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
                         WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
                         dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
                                        qm_sg_entry_get_len(&sgt[i]), dma_dir);
                 }
-
-                /* Free the page frag that we allocated on Tx */
-                skb_free_frag(phys_to_virt(addr));
         } else {
                 dma_unmap_single(priv->tx_dma_dev, addr,
-                                 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+                                 priv->tx_headroom + qm_fd_get_length(fd),
+                                 dma_dir);
+        }
+
+        skb = *(struct sk_buff **)vaddr;
+
+        /* DMA unmapping is required before accessing the HW provided info */
+        if (ts && priv->tx_tstamp &&
+            skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+                if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+                                          &ns)) {
+                        shhwtstamps.hwtstamp = ns_to_ktime(ns);
+                        skb_tstamp_tx(skb, &shhwtstamps);
+                } else {
+                        dev_warn(dev, "fman_port_get_tstamp failed!\n");
+                }
         }
 
+        if (qm_fd_get_format(fd) == qm_fd_sg)
+                /* Free the page that we allocated on Tx for the SGT */
+                free_pages((unsigned long)vaddr, 0);
+
         return skb;
 }
@@ -1717,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
         return skb;
 
 free_buffer:
-        skb_free_frag(vaddr);
+        free_pages((unsigned long)vaddr, 0);
         return NULL;
 }
@@ -1764,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                         goto free_buffers;
 
                 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
-                                 dpaa_bp->size, DMA_FROM_DEVICE);
+                dma_unmap_page(priv->rx_dma_dev, sg_addr,
+                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                 if (!skb) {
                         sz = dpaa_bp->size +
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1817,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
         WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
 
         /* free the SG table buffer */
-        skb_free_frag(vaddr);
+        free_pages((unsigned long)vaddr, 0);
 
         return skb;
 
@@ -1834,7 +1812,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
         for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
                 sg_addr = qm_sg_addr(&sgt[i]);
                 sg_vaddr = phys_to_virt(sg_addr);
-                skb_free_frag(sg_vaddr);
+                free_pages((unsigned long)sg_vaddr, 0);
                 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                 if (dpaa_bp) {
                         count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1845,7 +1823,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                         break;
         }
 
         /* free the SGT fragment */
-        skb_free_frag(vaddr);
+        free_pages((unsigned long)vaddr, 0);
 
         return NULL;
 }
@@ -1856,7 +1834,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 {
         struct net_device *net_dev = priv->net_dev;
         enum dma_data_direction dma_dir;
-        unsigned char *buffer_start;
+        unsigned char *buff_start;
         struct sk_buff **skbh;
         dma_addr_t addr;
         int err;
@@ -1865,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
          * available, so just use that for offset.
          */
         fd->bpid = FSL_DPAA_BPID_INV;
-        buffer_start = skb->data - priv->tx_headroom;
+        buff_start = skb->data - priv->tx_headroom;
         dma_dir = DMA_TO_DEVICE;
 
-        skbh = (struct sk_buff **)buffer_start;
+        skbh = (struct sk_buff **)buff_start;
         *skbh = skb;
 
         /* Enable L3/L4 hardware checksum computation.
@@ -1877,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
          * need to write into the skb.
          */
         err = dpaa_enable_tx_csum(priv, skb, fd,
-                                  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+                                  buff_start + DPAA_TX_PRIV_DATA_SIZE);
         if (unlikely(err < 0)) {
                 if (net_ratelimit())
                         netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1890,8 +1868,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
         fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
         /* Map the entire buffer size that may be seen by FMan, but no more */
-        addr = dma_map_single(priv->tx_dma_dev, skbh,
-                              skb_tail_pointer(skb) - buffer_start, dma_dir);
+        addr = dma_map_single(priv->tx_dma_dev, buff_start,
+                              priv->tx_headroom + skb->len, dma_dir);
         if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
                 if (net_ratelimit())
                         netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
@@ -1910,21 +1888,20 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         struct net_device *net_dev = priv->net_dev;
         struct qm_sg_entry *sgt;
         struct sk_buff **skbh;
-        int i, j, err, sz;
-        void *buffer_start;
+        void *buff_start;
         skb_frag_t *frag;
         dma_addr_t addr;
         size_t frag_len;
-        void *sgt_buf;
-
-        /* get a page frag to store the SGTable */
-        sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-        sgt_buf = netdev_alloc_frag(sz);
-        if (unlikely(!sgt_buf)) {
-                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-                           sz);
+        struct page *p;
+        int i, j, err;
+
+        /* get a page to store the SGTable */
+        p = dev_alloc_pages(0);
+        if (unlikely(!p)) {
+                netdev_err(net_dev, "dev_alloc_pages() failed\n");
                 return -ENOMEM;
         }
+        buff_start = page_address(p);
 
         /* Enable L3/L4 hardware checksum computation.
          *
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
          * need to write into the skb.
          */
         err = dpaa_enable_tx_csum(priv, skb, fd,
-                                  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+                                  buff_start + DPAA_TX_PRIV_DATA_SIZE);
         if (unlikely(err < 0)) {
                 if (net_ratelimit())
                         netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,7 +1918,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         }
 
         /* SGT[0] is used by the linear part */
-        sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+        sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
         frag_len = skb_headlen(skb);
         qm_sg_entry_set_len(&sgt[0], frag_len);
         sgt[0].bpid = FSL_DPAA_BPID_INV;
@@ -1979,15 +1956,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         /* Set the final bit in the last used entry of the SGT */
         qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
 
-        /* set fd offset to priv->tx_headroom */
         qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
 
         /* DMA map the SGT page */
-        buffer_start = (void *)sgt - priv->tx_headroom;
-        skbh = (struct sk_buff **)buffer_start;
+        skbh = (struct sk_buff **)buff_start;
         *skbh = skb;
 
-        addr = dma_map_single(priv->tx_dma_dev, buffer_start,
-                              priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+        addr = dma_map_page(priv->tx_dma_dev, p, 0,
+                            priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
         if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
                 netdev_err(priv->net_dev, "DMA mapping failed\n");
                 err = -EINVAL;
@@ -2007,7 +1984,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
                          qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
-        skb_free_frag(sgt_buf);
+        free_pages((unsigned long)buff_start, 0);
         return err;
 }
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
         if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
                 return NETDEV_TX_OK;
 
-        dpaa_cleanup_tx_fd(priv, &fd);
+        dpaa_cleanup_tx_fd(priv, &fd, false);
 skb_to_fd_failed:
 enomem:
         percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
 
         percpu_priv->stats.tx_errors++;
 
-        skb = dpaa_cleanup_tx_fd(priv, fd);
+        skb = dpaa_cleanup_tx_fd(priv, fd, false);
         dev_kfree_skb(skb);
 }
 
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
 
         percpu_priv->tx_confirm++;
 
-        skb = dpaa_cleanup_tx_fd(priv, fd);
+        skb = dpaa_cleanup_tx_fd(priv, fd, true);
         consume_skb(skb);
 }
@@ -2304,8 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
                 return qman_cb_dqrr_consume;
         }
 
-        dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
-                         DMA_FROM_DEVICE);
+        dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+                       DMA_FROM_DEVICE);
 
         /* prefetch the first 64 bytes of the frame or the SGT start */
         vaddr = phys_to_virt(addr);
@@ -2427,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
         percpu_priv->stats.tx_fifo_errors++;
         count_ern(percpu_priv, msg);
 
-        skb = dpaa_cleanup_tx_fd(priv, fd);
+        skb = dpaa_cleanup_tx_fd(priv, fd, false);
         dev_kfree_skb_any(skb);
 }
@@ -2660,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
         dma_addr_t addr = bm_buf_addr(bmb);
 
-        dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
+        dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+                       DMA_FROM_DEVICE);
 
         skb_free_frag(phys_to_virt(addr));
 }
@@ -2761,13 +2739,13 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 
 static int dpaa_eth_probe(struct platform_device *pdev)
 {
-        struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
         struct net_device *net_dev = NULL;
+        struct dpaa_bp *dpaa_bp = NULL;
         struct dpaa_fq *dpaa_fq, *tmp;
         struct dpaa_priv *priv = NULL;
         struct fm_port_fqs port_fqs;
         struct mac_device *mac_dev;
-        int err = 0, i, channel;
+        int err = 0, channel;
         struct device *dev;
 
         dev = &pdev->dev;
@@ -2856,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
         /* bp init */
-        for (i = 0; i < DPAA_BPS_NUM; i++) {
-                dpaa_bps[i] = dpaa_bp_alloc(dev);
-                if (IS_ERR(dpaa_bps[i])) {
-                        err = PTR_ERR(dpaa_bps[i]);
-                        goto free_dpaa_bps;
-                }
-                /* the raw size of the buffers used for reception */
-                dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
-                /* avoid runtime computations by keeping the usable size here */
-                dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-                dpaa_bps[i]->priv = priv;
-
-                err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-                if (err < 0)
-                        goto free_dpaa_bps;
-                priv->dpaa_bps[i] = dpaa_bps[i];
+        dpaa_bp = dpaa_bp_alloc(dev);
+        if (IS_ERR(dpaa_bp)) {
+                err = PTR_ERR(dpaa_bp);
+                goto free_dpaa_bps;
         }
+        /* the raw size of the buffers used for reception */
+        dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+        /* avoid runtime computations by keeping the usable size here */
+        dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+        dpaa_bp->priv = priv;
+
+        err = dpaa_bp_alloc_pool(dpaa_bp);
+        if (err < 0)
+                goto free_dpaa_bps;
+        priv->dpaa_bp = dpaa_bp;
 
         INIT_LIST_HEAD(&priv->dpaa_fq_list);
@@ -2898,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         /* Walk the CPUs with affine portals
          * and add this pool channel to each's dequeue mask.
          */
-        dpaa_eth_add_channel(priv->channel);
+        dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
         dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
@@ -2930,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
         /* All real interfaces need their ports initialized */
-        err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+        err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
                                   &priv->buf_layout[0], dev);
         if (err)
                 goto free_dpaa_fqs;
......
@@ -47,8 +47,6 @@
 /* Total number of Tx queues */
 #define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
 /* More detailed FQ types - used for fine-grained WQ assignments */
 enum dpaa_fq_type {
         FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -148,7 +146,7 @@ struct dpaa_buffer_layout {
 
 struct dpaa_priv {
         struct dpaa_percpu_priv __percpu *percpu_priv;
-        struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+        struct dpaa_bp *dpaa_bp;
         /* Store here the needed Tx headroom for convenience and speed
          * (even though it can be computed based on the fields of buf_layout)
          */
......
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
 {
         struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
         ssize_t bytes = 0;
-        int i = 0;
 
-        for (i = 0; i < DPAA_BPS_NUM; i++)
-                bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
-                                  priv->dpaa_bps[i]->bpid);
+        bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+                          priv->dpaa_bp->bpid);
 
         return bytes;
 }
......
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
         "tx S/G",
         "tx error",
         "rx error",
+        "rx dropped",
+        "tx dropped",
 };
 
 static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -78,10 +80,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
 static int dpaa_get_link_ksettings(struct net_device *net_dev,
                                    struct ethtool_link_ksettings *cmd)
 {
-        if (!net_dev->phydev) {
-                netdev_dbg(net_dev, "phy device not initialized\n");
+        if (!net_dev->phydev)
                 return 0;
-        }
 
         phy_ethtool_ksettings_get(net_dev->phydev, cmd);
@@ -93,10 +93,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
 {
         int err;
 
-        if (!net_dev->phydev) {
-                netdev_err(net_dev, "phy device not initialized\n");
+        if (!net_dev->phydev)
                 return -ENODEV;
-        }
 
         err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
         if (err < 0)
@@ -140,10 +138,8 @@ static int dpaa_nway_reset(struct net_device *net_dev)
 {
         int err;
 
-        if (!net_dev->phydev) {
-                netdev_err(net_dev, "phy device not initialized\n");
+        if (!net_dev->phydev)
                 return -ENODEV;
-        }
 
         err = 0;
         if (net_dev->phydev->autoneg) {
@@ -165,10 +161,8 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
         priv = netdev_priv(net_dev);
         mac_dev = priv->mac_dev;
 
-        if (!net_dev->phydev) {
-                netdev_err(net_dev, "phy device not initialized\n");
+        if (!net_dev->phydev)
                 return;
-        }
 
         epause->autoneg = mac_dev->autoneg_pause;
         epause->rx_pause = mac_dev->rx_pause_active;
@@ -223,7 +217,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
         unsigned int total_stats, num_stats;
 
         num_stats = num_online_cpus() + 1;
-        total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+        total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
                       DPAA_STATS_GLOBAL_LEN;
 
         switch (type) {
@@ -235,10 +229,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
 }
 
 static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
-                       int crr_cpu, u64 *bp_count, u64 *data)
+                       int crr_cpu, u64 bp_count, u64 *data)
 {
         int num_values = num_cpus + 1;
-        int crr = 0, j;
+        int crr = 0;
 
         /* update current CPU's stats and also add them to the total values */
         data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +256,27 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
         data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
         data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
 
-        for (j = 0; j < DPAA_BPS_NUM; j++) {
-                data[crr * num_values + crr_cpu] = bp_count[j];
-                data[crr++ * num_values + num_cpus] += bp_count[j];
-        }
+        data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+        data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+        data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+        data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+        data[crr * num_values + crr_cpu] = bp_count;
+        data[crr++ * num_values + num_cpus] += bp_count;
 }
 
 static void dpaa_get_ethtool_stats(struct net_device *net_dev,
                                    struct ethtool_stats *stats, u64 *data)
 {
-        u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
         struct dpaa_percpu_priv *percpu_priv;
         struct dpaa_rx_errors rx_errors;
         unsigned int num_cpus, offset;
+        u64 bp_count, cg_time, cg_num;
         struct dpaa_ern_cnt ern_cnt;
         struct dpaa_bp *dpaa_bp;
         struct dpaa_priv *priv;
-        int total_stats, i, j;
+        int total_stats, i;
         bool cg_status;
 
         total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
 
         for_each_online_cpu(i) {
                 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-                for (j = 0; j < DPAA_BPS_NUM; j++) {
-                        dpaa_bp = priv->dpaa_bps[j];
-                        if (!dpaa_bp->percpu_count)
-                                continue;
-                        bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
-                }
+                dpaa_bp = priv->dpaa_bp;
+                if (!dpaa_bp->percpu_count)
+                        continue;
+                bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
                 rx_errors.dme += percpu_priv->rx_errors.dme;
                 rx_errors.fpe += percpu_priv->rx_errors.fpe;
                 rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
                 copy_stats(percpu_priv, num_cpus, i, bp_count, data);
         }
 
-        offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+        offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
         memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
 
         offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
                 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                 strings += ETH_GSTRING_LEN;
         }
-        for (i = 0; i < DPAA_BPS_NUM; i++) {
-                for (j = 0; j < num_cpus; j++) {
-                        snprintf(string_cpu, ETH_GSTRING_LEN,
-                                 "bpool %c [CPU %d]", 'a' + i, j);
-                        memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-                        strings += ETH_GSTRING_LEN;
-                }
-                snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
-                         'a' + i);
+        for (j = 0; j < num_cpus; j++) {
+                snprintf(string_cpu, ETH_GSTRING_LEN,
+                         "bpool [CPU %d]", j);
                 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                 strings += ETH_GSTRING_LEN;
         }
+        snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+        memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+        strings += ETH_GSTRING_LEN;
 
         memcpy(strings, dpaa_stats_global, size);
 }
......
@@ -1749,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu)
 }
 EXPORT_SYMBOL(qman_get_affine_portal);
 
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+        return (!device_link_add(dev, p->config->dev,
+                                 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
 {
         return __poll_portal_fast(p, limit);
......
@@ -32,6 +32,7 @@
 #define __FSL_QMAN_H
 
 #include <linux/bitops.h>
+#include <linux/device.h>
 
 /* Hardware constants */
 #define QM_CHANNEL_SWPORTAL0 0
@@ -914,6 +915,16 @@ u16 qman_affine_channel(int cpu);
  */
 struct qman_portal *qman_get_affine_portal(int cpu);
 
+/**
+ * qman_start_using_portal - register a device link for the portal user
+ * @p: the portal that will be in use
+ * @dev: the device that will use the portal
+ *
+ * Makes sure that the devices that use the portal are unbound when the
+ * portal is unbound
+ */
+int qman_start_using_portal(struct qman_portal *p, struct device *dev);
+
 /**
  * qman_p_poll_dqrr - process DQRR (fast-path) entries
  * @limit: the maximum number of DQRR entries to process
......
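
The new qman_start_using_portal() helper added above is a thin wrapper around the driver-core device-link API. The consumer/supplier pattern it relies on looks roughly like the sketch below; the helper name and device pointers are illustrative, not the QMan code itself:

#include <linux/device.h>

/* Tie "consumer" (e.g. a DPAA interface) to "supplier" (e.g. the device
 * behind the QMan portal): with DL_FLAG_AUTOREMOVE_CONSUMER, the driver
 * core unbinds the consumer when the supplier goes away, so the interface
 * cannot outlive the portal it uses.
 */
static int example_bind_to_portal(struct device *consumer,
                                  struct device *supplier)
{
        struct device_link *link;

        link = device_link_add(consumer, supplier,
                               DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return -EINVAL;

        return 0;
}
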