Commit aaf24414 authored by Zhenghao Gu, committed by Kalle Valo

wifi: ath11k: fix IOMMU errors on buffer rings

virt_to_phys() doesn't work on systems with IOMMU enabled, which have
non-identity physical-to-IOVA mappings.  It leads to IO_PAGE_FAULTs like this:

[IO_PAGE_FAULT domain=0x0023 address=0x1cce00000 flags=0x0020]

As a result, no association with the AP can be established.

This patch changes that to dma_map_single(), which sets up a proper DMA
mapping. Even the virt_to_phys() documentation says device drivers should
not use it:

    This function does not give bus mappings for DMA transfers. In
    almost all conceivable cases a device driver should not be using
    this function
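
For reference, the streaming DMA mapping pattern that replaces virt_to_phys() looks roughly like the sketch below. This is illustrative only and not part of the patch; the helper names (example_ring_alloc, example_ring_free) and the bare struct device pointer are placeholders:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    /* kzalloc() returns a CPU virtual address; dma_map_single() returns the
     * device-visible DMA address (an IOVA when an IOMMU is active).
     * virt_to_phys() would give the CPU physical address instead, which the
     * device cannot use behind an IOMMU.
     */
    static void *example_ring_alloc(struct device *dev, size_t size,
                                    dma_addr_t *dma_addr)
    {
        void *vaddr = kzalloc(size, GFP_KERNEL);

        if (!vaddr)
            return NULL;

        *dma_addr = dma_map_single(dev, vaddr, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma_addr)) {
            kfree(vaddr);
            return NULL;
        }

        return vaddr;
    }

    static void example_ring_free(struct device *dev, void *vaddr,
                                  dma_addr_t dma_addr, size_t size)
    {
        dma_unmap_single(dev, dma_addr, size, DMA_FROM_DEVICE);
        kfree(vaddr);
    }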

Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.7.0.1-01744-QCAHKSWPL_SILICONZ-1
Signed-off-by: Zhenghao Gu <imguzh@gmail.com>
Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://msgid.link/20231212031914.47339-1-imguzh@gmail.com
parent e7ab40b7
@@ -104,11 +104,14 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
     if (!ring->vaddr_unaligned)
         return;
 
-    if (ring->cached)
+    if (ring->cached) {
+        dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
+                         DMA_FROM_DEVICE);
         kfree(ring->vaddr_unaligned);
-    else
+    } else {
         dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
                           ring->paddr_unaligned);
+    }
 
     ring->vaddr_unaligned = NULL;
 }
@@ -249,7 +252,18 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 
         if (cached) {
             ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
-            ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+            if (!ring->vaddr_unaligned)
+                return -ENOMEM;
+
+            ring->paddr_unaligned = dma_map_single(ab->dev,
+                                                   ring->vaddr_unaligned,
+                                                   ring->size,
+                                                   DMA_FROM_DEVICE);
+            if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
+                kfree(ring->vaddr_unaligned);
+                ring->vaddr_unaligned = NULL;
+                return -ENOMEM;
+            }
         }
     }
 
@@ -626,15 +626,30 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
     return NULL;
 }
 
+static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
+                                              struct hal_srng *srng, dma_addr_t *paddr)
+{
+    lockdep_assert_held(&srng->lock);
+
+    if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+        *paddr = srng->ring_base_paddr +
+                 sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
+        return srng->ring_base_vaddr + srng->u.dst_ring.tp;
+    }
+
+    return NULL;
+}
+
 static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
                                           struct hal_srng *srng)
 {
+    dma_addr_t desc_paddr;
     u32 *desc;
 
     /* prefetch only if desc is available */
-    desc = ath11k_hal_srng_dst_peek(ab, srng);
+    desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
     if (likely(desc)) {
-        dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+        dma_sync_single_for_cpu(ab->dev, desc_paddr,
                                 (srng->entry_size * sizeof(u32)),
                                 DMA_FROM_DEVICE);
         prefetch(desc);
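
The hal.c hunk applies the same principle on the descriptor prefetch path: rather than calling virt_to_phys() on the descriptor pointer, the descriptor's DMA address is computed from the ring's base DMA address plus the entry offset, and that address is used for the CPU sync. Below is a minimal sketch of that calculation, using placeholder names (example_ring, example_ring_entry_for_cpu) rather than the real ath11k structures:

    #include <linux/types.h>
    #include <linux/dma-mapping.h>
    #include <linux/prefetch.h>

    /* Placeholder ring state (not the ath11k hal_srng layout). */
    struct example_ring {
        u32 *base_vaddr;       /* CPU address from kzalloc() */
        dma_addr_t base_paddr; /* DMA address from dma_map_single() */
        u32 entry_size;        /* entry size in 32-bit words */
    };

    /* Return a CPU pointer to entry 'idx' after syncing it for CPU access.
     * The entry's DMA address is base_paddr plus the same offset used for
     * the CPU pointer, so no virt_to_phys() is needed.
     */
    static u32 *example_ring_entry_for_cpu(struct device *dev,
                                           struct example_ring *ring, u32 idx)
    {
        u32 offset = idx * ring->entry_size;
        dma_addr_t entry_paddr = ring->base_paddr + offset * sizeof(u32);
        u32 *entry = ring->base_vaddr + offset;

        /* Hand ownership of this entry back to the CPU before reading
         * what the device wrote into it.
         */
        dma_sync_single_for_cpu(dev, entry_paddr,
                                ring->entry_size * sizeof(u32),
                                DMA_FROM_DEVICE);
        prefetch(entry);

        return entry;
    }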