Commit af424a41 authored by William Wu's avatar William Wu Committed by Felipe Balbi

usb: dwc2: alloc dma aligned buffer for isoc split in

The commit 3bc04e28 ("usb: dwc2: host: Get aligned DMA in
a more supported way") rips out a lot of code to simplify the
allocation of aligned DMA. However, it also introduces a new
issue when using isoc split-in transfers.

In my test case, I connect the dwc2 controller to a USB HS
hub (GL852G-12), and plug a USB FS audio device (Plantronics
headset) into the downstream port of the hub. Then, using the USB
mic to record, we find noise in the playback.

It's because that the usb Hub uses an MDATA for the first
transaction and a DATA0 for the second transaction for the isoc
split in transaction. A typical isoc split-in transaction sequence
looks like this:

- SSPLIT IN transaction
- CSPLIT IN transaction
  - MDATA packet
- CSPLIT IN transaction
  - DATA0 packet

The DMA address of MDATA (urb->dma) is always DWORD-aligned, but
the DMA address of DATA0 (urb->dma + qtd->isoc_split_offset) may
not be DWORD-aligned; it depends on the qtd->isoc_split_offset (the
length of MDATA). In my test case, the length of MDATA is usually
unaligned, which causes DATA0 packet transmission errors.

This patch uses kmem_cache to allocate an aligned DMA buffer for the
isoc split-in transaction. Note that according to the USB 2.0 spec,
the maximum data payload size is 1023 bytes for each FS isoc endpoint,
and the maximum allowable interrupt data payload size is 64 bytes
or less for FS interrupt endpoints. So we set the object size to
1024 bytes in the kmem cache.
Tested-by: Gevorg Sahakyan <sahakyan@synopsys.com>
Tested-by: Heiko Stuebner <heiko@sntech.de>
Acked-by: Minas Harutyunyan <hminas@synopsys.com>
Signed-off-by: William Wu <william.wu@rock-chips.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
parent 87606759
...@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup { ...@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
* @frame_list_sz: Frame list size * @frame_list_sz: Frame list size
* @desc_gen_cache: Kmem cache for generic descriptors * @desc_gen_cache: Kmem cache for generic descriptors
* @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
* @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf
* *
* These are for peripheral mode: * These are for peripheral mode:
* *
...@@ -1177,6 +1178,8 @@ struct dwc2_hsotg { ...@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
u32 frame_list_sz; u32 frame_list_sz;
struct kmem_cache *desc_gen_cache; struct kmem_cache *desc_gen_cache;
struct kmem_cache *desc_hsisoc_cache; struct kmem_cache *desc_hsisoc_cache;
struct kmem_cache *unaligned_cache;
#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */ #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
......
...@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, ...@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
} }
if (hsotg->params.host_dma) { if (hsotg->params.host_dma) {
dwc2_writel((u32)chan->xfer_dma, dma_addr_t dma_addr;
hsotg->regs + HCDMA(chan->hc_num));
if (chan->align_buf) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "align_buf\n");
dma_addr = chan->align_buf;
} else {
dma_addr = chan->xfer_dma;
}
dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
if (dbg_hc(chan)) if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
(unsigned long)chan->xfer_dma, chan->hc_num); (unsigned long)dma_addr, chan->hc_num);
} }
/* Start the split */ /* Start the split */
...@@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, ...@@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
} }
} }
static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh,
struct dwc2_host_chan *chan)
{
if (!hsotg->unaligned_cache ||
chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
return -ENOMEM;
if (!qh->dw_align_buf) {
qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
GFP_ATOMIC | GFP_DMA);
if (!qh->dw_align_buf)
return -ENOMEM;
}
qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
DWC2_KMEM_UNALIGNED_BUF_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
dev_err(hsotg->dev, "can't map align_buf\n");
chan->align_buf = 0;
return -EINVAL;
}
chan->align_buf = qh->dw_align_buf_dma;
return 0;
}
#define DWC2_USB_DMA_ALIGN 4 #define DWC2_USB_DMA_ALIGN 4
struct dma_aligned_buffer { struct dma_aligned_buffer {
...@@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) ...@@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* Set the transfer attributes */ /* Set the transfer attributes */
dwc2_hc_init_xfer(hsotg, chan, qtd); dwc2_hc_init_xfer(hsotg, chan, qtd);
/* For non-dword aligned buffers */
if (hsotg->params.host_dma && qh->do_split &&
chan->ep_is_in && (chan->xfer_dma & 0x3)) {
dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
dev_err(hsotg->dev,
"Failed to allocate memory to handle non-aligned buffer\n");
/* Add channel back to free list */
chan->align_buf = 0;
chan->multi_count = 0;
list_add_tail(&chan->hc_list_entry,
&hsotg->free_hc_list);
qtd->in_process = 0;
qh->channel = NULL;
return -ENOMEM;
}
} else {
/*
* We assume that DMA is always aligned in non-split
* case or split out case. Warn if not.
*/
WARN_ON_ONCE(hsotg->params.host_dma &&
(chan->xfer_dma & 0x3));
chan->align_buf = 0;
}
if (chan->ep_type == USB_ENDPOINT_XFER_INT || if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/* /*
...@@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) ...@@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
} }
} }
if (hsotg->params.host_dma) {
/*
* Create kmem caches to handle non-aligned buffer
* in Buffer DMA mode.
*/
hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
SLAB_CACHE_DMA, NULL);
if (!hsotg->unaligned_cache)
dev_err(hsotg->dev,
"unable to create dwc2 unaligned cache\n");
}
hsotg->otg_port = 1; hsotg->otg_port = 1;
hsotg->frame_list = NULL; hsotg->frame_list = NULL;
hsotg->frame_list_dma = 0; hsotg->frame_list_dma = 0;
...@@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) ...@@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
return 0; return 0;
error4: error4:
kmem_cache_destroy(hsotg->desc_gen_cache); kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache);
kmem_cache_destroy(hsotg->desc_gen_cache);
error3: error3:
dwc2_hcd_release(hsotg); dwc2_hcd_release(hsotg);
error2: error2:
...@@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) ...@@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
usb_remove_hcd(hcd); usb_remove_hcd(hcd);
hsotg->priv = NULL; hsotg->priv = NULL;
kmem_cache_destroy(hsotg->desc_gen_cache); kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache);
kmem_cache_destroy(hsotg->desc_gen_cache);
dwc2_hcd_release(hsotg); dwc2_hcd_release(hsotg);
usb_put_hcd(hcd); usb_put_hcd(hcd);
......
...@@ -76,6 +76,8 @@ struct dwc2_qh; ...@@ -76,6 +76,8 @@ struct dwc2_qh;
* (micro)frame * (micro)frame
* @xfer_buf: Pointer to current transfer buffer position * @xfer_buf: Pointer to current transfer buffer position
* @xfer_dma: DMA address of xfer_buf * @xfer_dma: DMA address of xfer_buf
* @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
* DWORD aligned
* @xfer_len: Total number of bytes to transfer * @xfer_len: Total number of bytes to transfer
* @xfer_count: Number of bytes transferred so far * @xfer_count: Number of bytes transferred so far
* @start_pkt_count: Packet count at start of transfer * @start_pkt_count: Packet count at start of transfer
...@@ -133,6 +135,7 @@ struct dwc2_host_chan { ...@@ -133,6 +135,7 @@ struct dwc2_host_chan {
u8 *xfer_buf; u8 *xfer_buf;
dma_addr_t xfer_dma; dma_addr_t xfer_dma;
dma_addr_t align_buf;
u32 xfer_len; u32 xfer_len;
u32 xfer_count; u32 xfer_count;
u16 start_pkt_count; u16 start_pkt_count;
...@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time { ...@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
* speed. Note that this is in "schedule slice" which * speed. Note that this is in "schedule slice" which
* is tightly packed. * is tightly packed.
* @ntd: Actual number of transfer descriptors in a list * @ntd: Actual number of transfer descriptors in a list
* @dw_align_buf: Used instead of original buffer if its physical address
* is not dword-aligned
* @dw_align_buf_dma: DMA address for dw_align_buf
* @qtd_list: List of QTDs for this QH * @qtd_list: List of QTDs for this QH
* @channel: Host channel currently processing transfers for this QH * @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic * @qh_list_entry: Entry for QH in either the periodic or non-periodic
...@@ -350,6 +356,8 @@ struct dwc2_qh { ...@@ -350,6 +356,8 @@ struct dwc2_qh {
struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES]; struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
u32 ls_start_schedule_slice; u32 ls_start_schedule_slice;
u16 ntd; u16 ntd;
u8 *dw_align_buf;
dma_addr_t dw_align_buf_dma;
struct list_head qtd_list; struct list_head qtd_list;
struct dwc2_host_chan *channel; struct dwc2_host_chan *channel;
struct list_head qh_list_entry; struct list_head qh_list_entry;
......
...@@ -950,6 +950,14 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, ...@@ -950,6 +950,14 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc->actual_length += len; frame_desc->actual_length += len;
if (chan->align_buf) {
dev_vdbg(hsotg->dev, "non-aligned buffer\n");
dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
chan->qh->dw_align_buf, len);
}
qtd->isoc_split_offset += len; qtd->isoc_split_offset += len;
hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
......
...@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) ...@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if (qh->desc_list) if (qh->desc_list)
dwc2_hcd_qh_free_ddma(hsotg, qh); dwc2_hcd_qh_free_ddma(hsotg, qh);
else if (hsotg->unaligned_cache && qh->dw_align_buf)
kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
kfree(qh); kfree(qh);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment