Commit a8c9baf2 authored by David S. Miller

Merge branch 'hsr-hsr-code-refactoring'

Taehee Yoo says:

====================
hsr: hsr code refactoring

There are some unnecessary routines in the hsr module.
This patch series removes these routines.

The first patch removes incorrect comment.
The second patch removes unnecessary WARN_ONCE() macro.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7596ac9d 38c440b2
...@@ -244,6 +244,35 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv, ...@@ -244,6 +244,35 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
ch->xdp.drop_cnt = 0; ch->xdp.drop_cnt = 0;
} }
/* Flush the frame descriptors accumulated in @xdp_fds onto hardware queue
 * @fq.  The enqueue is retried while the portal reports busy (-EBUSY), up to
 * DPAA2_ETH_ENQUEUE_RETRIES attempts per descriptor.  @xdp_fds->num is reset
 * to zero so the caller can start accumulating a fresh batch.
 *
 * Returns the number of frame descriptors actually handed to hardware;
 * descriptors beyond that count were dropped and must be cleaned up by the
 * caller.
 */
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	struct dpaa2_eth_drv_stats *percpu_extras;
	int sent = 0, busy_retries = 0, done;
	int batch_size, budget, err;
	struct dpaa2_fd *descs;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	descs = xdp_fds->fds;
	batch_size = xdp_fds->num;
	/* overall retry budget scales with the batch size */
	budget = batch_size * DPAA2_ETH_ENQUEUE_RETRIES;

	/* keep pushing the remainder of the batch until everything is
	 * enqueued or the retry budget is exhausted
	 */
	while (sent < batch_size && busy_retries < budget) {
		err = priv->enqueue(priv, fq, &descs[sent],
				    0, batch_size - sent, &done);
		if (err == -EBUSY) {
			/* portal congested: account the retry and try again */
			percpu_extras->tx_portal_busy += ++busy_retries;
			continue;
		}
		sent += done;
	}
	xdp_fds->num = 0;

	return sent;
}
static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
void *buf_start, u16 queue_id) void *buf_start, u16 queue_id)
{ {
...@@ -1934,12 +1963,11 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, ...@@ -1934,12 +1963,11 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags) struct xdp_frame **frames, u32 flags)
{ {
struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int total_enqueued = 0, retries = 0, enqueued; struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
struct dpaa2_eth_drv_stats *percpu_extras;
struct rtnl_link_stats64 *percpu_stats; struct rtnl_link_stats64 *percpu_stats;
int num_fds, i, err, max_retries;
struct dpaa2_eth_fq *fq; struct dpaa2_eth_fq *fq;
struct dpaa2_fd *fds; struct dpaa2_fd *fds;
int enqueued, i, err;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL; return -EINVAL;
...@@ -1948,10 +1976,10 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, ...@@ -1948,10 +1976,10 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
return -ENETDOWN; return -ENETDOWN;
fq = &priv->fq[smp_processor_id()]; fq = &priv->fq[smp_processor_id()];
fds = fq->xdp_fds; xdp_redirect_fds = &fq->xdp_redirect_fds;
fds = xdp_redirect_fds->fds;
percpu_stats = this_cpu_ptr(priv->percpu_stats); percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
/* create a FD for each xdp_frame in the list received */ /* create a FD for each xdp_frame in the list received */
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
...@@ -1959,28 +1987,19 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, ...@@ -1959,28 +1987,19 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
if (err) if (err)
break; break;
} }
num_fds = i; xdp_redirect_fds->num = i;
/* try to enqueue all the FDs until the max number of retries is hit */ /* enqueue all the frame descriptors */
max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES; enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
while (total_enqueued < num_fds && retries < max_retries) {
err = priv->enqueue(priv, fq, &fds[total_enqueued],
0, num_fds - total_enqueued, &enqueued);
if (err == -EBUSY) {
percpu_extras->tx_portal_busy += ++retries;
continue;
}
total_enqueued += enqueued;
}
/* update statistics */ /* update statistics */
percpu_stats->tx_packets += total_enqueued; percpu_stats->tx_packets += enqueued;
for (i = 0; i < total_enqueued; i++) for (i = 0; i < enqueued; i++)
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
for (i = total_enqueued; i < n; i++) for (i = enqueued; i < n; i++)
xdp_return_frame_rx_napi(frames[i]); xdp_return_frame_rx_napi(frames[i]);
return total_enqueued; return enqueued;
} }
static int update_xps(struct dpaa2_eth_priv *priv) static int update_xps(struct dpaa2_eth_priv *priv)
......
...@@ -310,6 +310,11 @@ enum dpaa2_eth_fq_type { ...@@ -310,6 +310,11 @@ enum dpaa2_eth_fq_type {
struct dpaa2_eth_priv; struct dpaa2_eth_priv;
/* Batch of frame descriptors accumulated for one bulk XDP enqueue
 * (sized for the XDP_REDIRECT device-map bulk limit).
 */
struct dpaa2_eth_xdp_fds {
	struct dpaa2_fd fds[DEV_MAP_BULK_SIZE]; /* pending frame descriptors */
	ssize_t num;				/* count of valid entries in fds[] */
};
struct dpaa2_eth_fq { struct dpaa2_eth_fq {
u32 fqid; u32 fqid;
u32 tx_qdbin; u32 tx_qdbin;
...@@ -328,7 +333,7 @@ struct dpaa2_eth_fq { ...@@ -328,7 +333,7 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_fq *fq); struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats; struct dpaa2_eth_fq_stats stats;
struct dpaa2_fd xdp_fds[DEV_MAP_BULK_SIZE]; struct dpaa2_eth_xdp_fds xdp_redirect_fds;
}; };
struct dpaa2_eth_ch_xdp { struct dpaa2_eth_ch_xdp {
......
...@@ -321,7 +321,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame, ...@@ -321,7 +321,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
if (ethhdr->h_proto == htons(ETH_P_8021Q)) { if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
frame->is_vlan = true; frame->is_vlan = true;
/* FIXME: */ /* FIXME: */
WARN_ONCE(1, "HSR: VLAN not yet supported"); netdev_warn_once(skb->dev, "VLAN not yet supported");
} }
if (ethhdr->h_proto == htons(ETH_P_PRP) || if (ethhdr->h_proto == htons(ETH_P_PRP) ||
ethhdr->h_proto == htons(ETH_P_HSR)) { ethhdr->h_proto == htons(ETH_P_HSR)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment