Commit 4d922ce9 authored by Karthikeyan Periyasamy, committed by Kalle Valo

wifi: ath12k: avoid explicit HW conversion argument in Rxdma replenish

Currently the Rxdma replenish call takes a HW cookie conversion
argument, which is unnecessary since the ath12k driver configures
Rxdma only with HW cookie conversion. To optimize the rx data path
on a per-packet basis, drop this explicit argument and the
corresponding condition check from the rx replenish routine.

Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00125-QCAHKSWPL_SILICONZ-1
Signed-off-by: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>
Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://lore.kernel.org/r/20231111043934.20485-4-quic_periyasa@quicinc.com
parent d457f9fe
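
For orientation before the hunks below, a minimal sketch of the interface change: since ath12k sets up Rxdma only with HW cookie conversion, the remaining callers effectively always passed hw_cc as true, so the flag (and the IDR-based fallback it guarded) can be dropped. The prototypes and the call are copied from the diff itself; the surrounding kernel context is assumed and this is not a standalone, buildable example.

    /* Old prototype (removed by this patch): the caller stated whether
     * HW cookie conversion was in use.
     */
    int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
                                    struct dp_rxdma_ring *rx_ring,
                                    int req_entries,
                                    bool hw_cc);

    /* New prototype: the flag is gone; only the HW cookie-conversion
     * descriptor free-list path remains.
     */
    int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
                                    struct dp_rxdma_ring *rx_ring,
                                    int req_entries);

    /* Representative call site after the change, as in
     * ath12k_dp_service_srng():
     */
    ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);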

drivers/net/wireless/ath/ath12k/dp.c
@@ -961,8 +961,7 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
                 struct ath12k_dp *dp = &ab->dp;
                 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 
-                ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0,
-                                            true);
+                ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);
         }
 
         /* TODO: Implement handler for other interrupts */

drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -258,15 +258,13 @@ static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
 /* Returns number of Rx buffers replenished */
 int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
                                 struct dp_rxdma_ring *rx_ring,
-                                int req_entries,
-                                bool hw_cc)
+                                int req_entries)
 {
         struct ath12k_buffer_addr *desc;
         struct hal_srng *srng;
         struct sk_buff *skb;
         int num_free;
         int num_remain;
-        int buf_id;
         u32 cookie;
         dma_addr_t paddr;
         struct ath12k_dp *dp = &ab->dp;

@@ -307,40 +305,29 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
                 if (dma_mapping_error(ab->dev, paddr))
                         goto fail_free_skb;
 
-                if (hw_cc) {
-                        spin_lock_bh(&dp->rx_desc_lock);
-
-                        /* Get desc from free list and store in used list
-                         * for cleanup purposes
-                         *
-                         * TODO: pass the removed descs rather than
-                         * add/read to optimize
-                         */
-                        rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
-                                                           struct ath12k_rx_desc_info,
-                                                           list);
-                        if (!rx_desc) {
-                                spin_unlock_bh(&dp->rx_desc_lock);
-                                goto fail_dma_unmap;
-                        }
-
-                        rx_desc->skb = skb;
-                        cookie = rx_desc->cookie;
-                        list_del(&rx_desc->list);
-                        list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
-
-                        spin_unlock_bh(&dp->rx_desc_lock);
-                } else {
-                        spin_lock_bh(&rx_ring->idr_lock);
-                        buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
-                                           rx_ring->bufs_max * 3, GFP_ATOMIC);
-                        spin_unlock_bh(&rx_ring->idr_lock);
-                        if (buf_id < 0)
-                                goto fail_dma_unmap;
-                        cookie = u32_encode_bits(buf_id,
-                                                 DP_RXDMA_BUF_COOKIE_BUF_ID);
-                }
+                spin_lock_bh(&dp->rx_desc_lock);
+
+                /* Get desc from free list and store in used list
+                 * for cleanup purposes
+                 *
+                 * TODO: pass the removed descs rather than
+                 * add/read to optimize
+                 */
+                rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+                                                   struct ath12k_rx_desc_info,
+                                                   list);
+                if (!rx_desc) {
+                        spin_unlock_bh(&dp->rx_desc_lock);
+                        goto fail_dma_unmap;
+                }
+
+                rx_desc->skb = skb;
+                cookie = rx_desc->cookie;
+                list_del(&rx_desc->list);
+                list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
+
+                spin_unlock_bh(&dp->rx_desc_lock);
 
                 desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
                 if (!desc)
                         goto fail_buf_unassign;

@@ -359,17 +346,11 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
         return req_entries - num_remain;
 
 fail_buf_unassign:
-        if (hw_cc) {
-                spin_lock_bh(&dp->rx_desc_lock);
-                list_del(&rx_desc->list);
-                list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
-                rx_desc->skb = NULL;
-                spin_unlock_bh(&dp->rx_desc_lock);
-        } else {
-                spin_lock_bh(&rx_ring->idr_lock);
-                idr_remove(&rx_ring->bufs_idr, buf_id);
-                spin_unlock_bh(&rx_ring->idr_lock);
-        }
+        spin_lock_bh(&dp->rx_desc_lock);
+        list_del(&rx_desc->list);
+        list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
+        rx_desc->skb = NULL;
+        spin_unlock_bh(&dp->rx_desc_lock);
 fail_dma_unmap:
         dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
                          DMA_FROM_DEVICE);

@@ -435,8 +416,7 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
         if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
                 ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
         else
-                ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries,
-                                            ringtype == HAL_RXDMA_BUF);
+                ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
 
         return 0;
 }

@@ -2708,7 +2688,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
         if (!total_msdu_reaped)
                 goto exit;
 
-        ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped, true);
+        ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
 
         ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
                                               ring_id);

@@ -3486,7 +3466,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
 
         rx_ring = &dp->rx_refill_buf_ring;
 
-        ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped, true);
+        ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
 
         return tot_n_bufs_reaped;
 }

@@ -3799,7 +3779,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
         if (!num_buffs_reaped)
                 goto done;
 
-        ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped, true);
+        ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
 
         rcu_read_lock();
         for (i = 0; i < ab->num_radios; i++) {

drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -118,8 +118,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
                          int budget);
 int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
                                 struct dp_rxdma_ring *rx_ring,
-                                int req_entries,
-                                bool hw_cc);
+                                int req_entries);
 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);