Commit 44fb6fbb authored by Eric Dumazet, committed by David S. Miller

mlx5: support napi_complete_done()

A NAPI poll handler should return the number of RX packets processed,
instead of 0 / budget.

This allows proper busy poll accounting through the LINUX_MIB_BUSYPOLLRXPACKETS
SNMP counter.
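
For context, a minimal sketch of the convention being adopted here (generic code,
not taken from the mlx5 driver; example_clean_rx() is a hypothetical helper standing
in for the driver's RX completion processing):

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        /* Process at most "budget" RX packets and report how many were handled. */
        int work_done = example_clean_rx(napi, budget);

        if (work_done < budget)         /* ring drained: stop polling, report the count */
                napi_complete_done(napi, work_done);

        return work_done;               /* RX packets processed, not 0 or budget */
}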

napi_complete_done() allows /sys/class/net/ethX/gro_flush_timeout
to be used for finer GRO aggregation control.
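
For example, a conservative setting (interface name and value are illustrative only;
the value is in nanoseconds) that lets GRO keep aggregating for up to 20 us after a
NAPI cycle completes:

echo 20000 >/sys/class/net/eth0/gro_flush_timeout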

Tested:

Enabled busy polling, and checked that the TcpExtBusyPollRxPackets counter is increasing.

echo 70 >/proc/sys/net/core/busy_read
nstat >/dev/null
netperf -H target -t TCP_RR >/dev/null
nstat | grep TcpExtBusyPollRxPackets
TcpExtBusyPollRxPackets         490958             0.0
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7ae92ae5
@@ -564,7 +564,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
-bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

@@ -216,16 +216,16 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                       be16_to_cpu(cqe->vlan_info));
 }

-bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-       int i;
+       int work_done;

        /* avoid accessing cq (dma coherent memory) if not needed */
        if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-               return false;
+               return 0;

-       for (i = 0; i < budget; i++) {
+       for (work_done = 0; work_done < budget; work_done++) {
                struct mlx5e_rx_wqe *wqe;
                struct mlx5_cqe64 *cqe;
                struct sk_buff *skb;

@@ -271,10 +271,8 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        /* ensure cq space is freed before enabling more cqes */
        wmb();

-       if (i == budget) {
+       if (work_done == budget)
                set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-               return true;
-       }

-       return false;
+       return work_done;
 }

@@ -54,6 +54,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                               napi);
        bool busy = false;
+       int work_done;
        int i;

        clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);

@@ -61,26 +62,26 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        for (i = 0; i < c->num_tc; i++)
                busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);

-       busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
-
+       work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+       busy |= work_done == budget;
        busy |= mlx5e_post_rx_wqes(&c->rq);

        if (busy)
                return budget;

-       napi_complete(napi);
+       napi_complete_done(napi, work_done);

        /* avoid losing completion event during/after polling cqs */
        if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
                napi_schedule(napi);
-               return 0;
+               return work_done;
        }

        for (i = 0; i < c->num_tc; i++)
                mlx5e_cq_arm(&c->sq[i].cq);
        mlx5e_cq_arm(&c->rq.cq);

-       return 0;
+       return work_done;
 }

 void mlx5e_completion_event(struct mlx5_core_cq *mcq)