Commit 90db5075 authored by Benjamin Berg's avatar Benjamin Berg Committed by Johannes Berg

wifi: iwlwifi: use already mapped data when TXing an AMSDU

The previous commits added mappings for the SKB and TSO page. This
switches the code to use these mappings instead of creating new ones.
Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Link: https://patch.msgid.link/20240703125541.35d89c5e4ae8.I4feb8d34e7b30768d21365ec22c944bacc274d0b@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent adc902ce
...@@ -61,7 +61,8 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, ...@@ -61,7 +61,8 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
struct sk_buff *skb, struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, struct iwl_tfh_tfd *tfd,
dma_addr_t phys, void *virt, dma_addr_t phys, void *virt,
u16 len, struct iwl_cmd_meta *meta) u16 len, struct iwl_cmd_meta *meta,
bool unmap)
{ {
dma_addr_t oldphys = phys; dma_addr_t oldphys = phys;
struct page *page; struct page *page;
...@@ -105,10 +106,27 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, ...@@ -105,10 +106,27 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
memcpy(page_address(page), virt, len); memcpy(page_address(page), virt, len);
phys = dma_map_single(trans->dev, page_address(page), len, /*
DMA_TO_DEVICE); * This is a bit odd, but performance does not matter here, what
if (unlikely(dma_mapping_error(trans->dev, phys))) * matters are the expectations of the calling code and TB cleanup
return -ENOMEM; * function.
*
* As such, if unmap is set, then create another mapping for the TB
* entry as it will be unmapped later. On the other hand, if it is not
* set, then the TB entry will not be unmapped and instead we simply
* reference and sync the mapping that get_workaround_page() created.
*/
if (unmap) {
phys = dma_map_single(trans->dev, page_address(page), len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, phys)))
return -ENOMEM;
} else {
phys = iwl_pcie_get_tso_page_phys(page_address(page));
dma_sync_single_for_device(trans->dev, phys, len,
DMA_TO_DEVICE);
}
ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
if (ret < 0) { if (ret < 0) {
/* unmap the new allocation as single */ /* unmap the new allocation as single */
...@@ -116,6 +134,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, ...@@ -116,6 +134,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
meta = NULL; meta = NULL;
goto unmap; goto unmap;
} }
IWL_DEBUG_TX(trans, IWL_DEBUG_TX(trans,
"TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
len, (unsigned long long)oldphys, len, (unsigned long long)oldphys,
...@@ -123,6 +142,9 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, ...@@ -123,6 +142,9 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
ret = 0; ret = 0;
unmap: unmap:
if (!unmap)
goto trace;
if (meta) if (meta)
dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
else else
...@@ -146,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ...@@ -146,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int mss = skb_shinfo(skb)->gso_size;
dma_addr_t start_hdr_phys;
u16 length, amsdu_pad; u16 length, amsdu_pad;
u8 *start_hdr; u8 *start_hdr;
struct sg_table *sgt; struct sg_table *sgt;
...@@ -168,6 +191,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ...@@ -168,6 +191,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
if (!sgt) if (!sgt)
return -ENOMEM; return -ENOMEM;
start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
/* /*
* Pull the ieee80211 header to be able to use TSO core, * Pull the ieee80211 header to be able to use TSO core,
* we will restore it for the tx_status flow. * we will restore it for the tx_status flow.
...@@ -214,10 +239,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ...@@ -214,10 +239,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
pos_hdr += snap_ip_tcp_hdrlen; pos_hdr += snap_ip_tcp_hdrlen;
tb_len = pos_hdr - start_hdr; tb_len = pos_hdr - start_hdr;
tb_phys = dma_map_single(trans->dev, start_hdr, tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
/* /*
* No need for _with_wa, this is from the TSO page and * No need for _with_wa, this is from the TSO page and
* we leave some space at the end of it so can't hit * we leave some space at the end of it so can't hit
...@@ -237,11 +260,14 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ...@@ -237,11 +260,14 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
int ret; int ret;
tb_len = min_t(unsigned int, tso.size, data_left); tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data, tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
tb_len, DMA_TO_DEVICE); /* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
goto out_err;
ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
tb_phys, tso.data, tb_phys, tso.data,
tb_len, NULL); tb_len, NULL, false);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -250,6 +276,9 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ...@@ -250,6 +276,9 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
} }
} }
dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
DMA_TO_DEVICE);
/* re -add the WiFi header */ /* re -add the WiFi header */
skb_push(skb, hdr_len); skb_push(skb, hdr_len);
...@@ -339,7 +368,7 @@ static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, ...@@ -339,7 +368,7 @@ static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
fragsz, DMA_TO_DEVICE); fragsz, DMA_TO_DEVICE);
ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
skb_frag_address(frag), skb_frag_address(frag),
fragsz, out_meta); fragsz, out_meta, true);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -413,7 +442,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, ...@@ -413,7 +442,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
tb2_len, DMA_TO_DEVICE); tb2_len, DMA_TO_DEVICE);
ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
skb->data + hdr_len, tb2_len, skb->data + hdr_len, tb2_len,
NULL); NULL, true);
if (ret) if (ret)
goto out_err; goto out_err;
} }
...@@ -428,7 +457,8 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, ...@@ -428,7 +457,8 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
skb_headlen(frag), DMA_TO_DEVICE); skb_headlen(frag), DMA_TO_DEVICE);
ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
frag->data, frag->data,
skb_headlen(frag), NULL); skb_headlen(frag), NULL,
true);
if (ret) if (ret)
goto out_err; goto out_err;
if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
...@@ -623,6 +653,10 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, ...@@ -623,6 +653,10 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
return; return;
} }
/* TB1 is mapped directly, the rest is the TSO page and SG list. */
if (meta->sg_offset)
num_tbs = 2;
/* first TB is never freed - it's the bidirectional DMA data */ /* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) { for (i = 1; i < num_tbs; i++) {
if (meta->tbs & BIT(i)) if (meta->tbs & BIT(i))
......
...@@ -303,6 +303,10 @@ static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, ...@@ -303,6 +303,10 @@ static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
return; return;
} }
/* TB1 is mapped directly, the rest is the TSO page and SG list. */
if (meta->sg_offset)
num_tbs = 2;
/* first TB is never freed - it's the bidirectional DMA data */ /* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) { for (i = 1; i < num_tbs; i++) {
...@@ -1892,6 +1896,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -1892,6 +1896,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int mss = skb_shinfo(skb)->gso_size;
u16 length, iv_len, amsdu_pad; u16 length, iv_len, amsdu_pad;
dma_addr_t start_hdr_phys;
u8 *start_hdr, *pos_hdr; u8 *start_hdr, *pos_hdr;
struct sg_table *sgt; struct sg_table *sgt;
struct tso_t tso; struct tso_t tso;
...@@ -1920,6 +1925,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -1920,6 +1925,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
if (!sgt) if (!sgt)
return -ENOMEM; return -ENOMEM;
start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
pos_hdr = start_hdr; pos_hdr = start_hdr;
memcpy(pos_hdr, skb->data + hdr_len, iv_len); memcpy(pos_hdr, skb->data + hdr_len, iv_len);
pos_hdr += iv_len; pos_hdr += iv_len;
...@@ -1971,10 +1977,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -1971,10 +1977,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
pos_hdr += snap_ip_tcp_hdrlen; pos_hdr += snap_ip_tcp_hdrlen;
hdr_tb_len = pos_hdr - start_hdr; hdr_tb_len = pos_hdr - start_hdr;
hdr_tb_phys = dma_map_single(trans->dev, start_hdr, hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
hdr_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false); hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
...@@ -1991,9 +1995,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -1991,9 +1995,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left); data_left);
dma_addr_t tb_phys; dma_addr_t tb_phys;
tb_phys = dma_map_single(trans->dev, tso.data, tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
size, DMA_TO_DEVICE); /* Not a real mapping error, use direct comparison */
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL; return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
...@@ -2006,6 +2010,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -2006,6 +2010,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
} }
} }
dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
DMA_TO_DEVICE);
/* re -add the WiFi header and IV */ /* re -add the WiFi header and IV */
skb_push(skb, hdr_len + iv_len); skb_push(skb, hdr_len + iv_len);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment