Commit 95105a99 authored by Gregory Herrero's avatar Gregory Herrero Committed by Felipe Balbi

usb: dwc2: host: avoid usage of dma_alloc_coherent with irqs disabled

Use streaming DMA mappings to handle cache coherency of the frame list and
descriptor list. Caches are always flushed before the controller accesses
them or before the CPU accesses them.
Acked-by: John Youn <johnyoun@synopsys.com>
Signed-off-by: Gregory Herrero <gregory.herrero@intel.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
parent fbb9e22b
...@@ -1934,6 +1934,9 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, ...@@ -1934,6 +1934,9 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
chan->desc_list_sz, DMA_TO_DEVICE);
hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK; hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
/* Always start from first descriptor */ /* Always start from first descriptor */
......
...@@ -685,6 +685,7 @@ struct dwc2_hregs_backup { ...@@ -685,6 +685,7 @@ struct dwc2_hregs_backup {
* @otg_port: OTG port number * @otg_port: OTG port number
* @frame_list: Frame list * @frame_list: Frame list
* @frame_list_dma: Frame list DMA address * @frame_list_dma: Frame list DMA address
* @frame_list_sz: Frame list size
* *
* These are for peripheral mode: * These are for peripheral mode:
* *
...@@ -804,6 +805,7 @@ struct dwc2_hsotg { ...@@ -804,6 +805,7 @@ struct dwc2_hsotg {
u8 otg_port; u8 otg_port;
u32 *frame_list; u32 *frame_list;
dma_addr_t frame_list_dma; dma_addr_t frame_list_dma;
u32 frame_list_sz;
#ifdef DEBUG #ifdef DEBUG
u32 frrem_samples; u32 frrem_samples;
......
...@@ -881,8 +881,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) ...@@ -881,8 +881,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/ */
chan->multi_count = dwc2_hb_mult(qh->maxp); chan->multi_count = dwc2_hb_mult(qh->maxp);
if (hsotg->core_params->dma_desc_enable > 0) if (hsotg->core_params->dma_desc_enable > 0) {
chan->desc_list_addr = qh->desc_list_dma; chan->desc_list_addr = qh->desc_list_dma;
chan->desc_list_sz = qh->desc_list_sz;
}
dwc2_hc_init(hsotg, chan); dwc2_hc_init(hsotg, chan);
chan->qh = qh; chan->qh = qh;
......
...@@ -107,6 +107,7 @@ struct dwc2_qh; ...@@ -107,6 +107,7 @@ struct dwc2_qh;
* @qh: QH for the transfer being processed by this channel * @qh: QH for the transfer being processed by this channel
* @hc_list_entry: For linking to list of host channels * @hc_list_entry: For linking to list of host channels
* @desc_list_addr: Current QH's descriptor list DMA address * @desc_list_addr: Current QH's descriptor list DMA address
* @desc_list_sz: Current QH's descriptor list size
* *
* This structure represents the state of a single host channel when acting in * This structure represents the state of a single host channel when acting in
* host mode. It contains the data items needed to transfer packets to an * host mode. It contains the data items needed to transfer packets to an
...@@ -159,6 +160,7 @@ struct dwc2_host_chan { ...@@ -159,6 +160,7 @@ struct dwc2_host_chan {
struct dwc2_qh *qh; struct dwc2_qh *qh;
struct list_head hc_list_entry; struct list_head hc_list_entry;
dma_addr_t desc_list_addr; dma_addr_t desc_list_addr;
u32 desc_list_sz;
}; };
struct dwc2_hcd_pipe_info { struct dwc2_hcd_pipe_info {
...@@ -251,6 +253,7 @@ enum dwc2_transaction_type { ...@@ -251,6 +253,7 @@ enum dwc2_transaction_type {
* schedule * schedule
* @desc_list: List of transfer descriptors * @desc_list: List of transfer descriptors
* @desc_list_dma: Physical address of desc_list * @desc_list_dma: Physical address of desc_list
* @desc_list_sz: Size of descriptors list
* @n_bytes: Xfer Bytes array. Each element corresponds to a transfer * @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
* descriptor and indicates original XferSize value for the * descriptor and indicates original XferSize value for the
* descriptor * descriptor
...@@ -284,6 +287,7 @@ struct dwc2_qh { ...@@ -284,6 +287,7 @@ struct dwc2_qh {
struct list_head qh_list_entry; struct list_head qh_list_entry;
struct dwc2_hcd_dma_desc *desc_list; struct dwc2_hcd_dma_desc *desc_list;
dma_addr_t desc_list_dma; dma_addr_t desc_list_dma;
u32 desc_list_sz;
u32 *n_bytes; u32 *n_bytes;
unsigned tt_buffer_dirty:1; unsigned tt_buffer_dirty:1;
}; };
......
...@@ -87,22 +87,23 @@ static u16 dwc2_frame_incr_val(struct dwc2_qh *qh) ...@@ -87,22 +87,23 @@ static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
gfp_t flags) gfp_t flags)
{ {
qh->desc_list = dma_alloc_coherent(hsotg->dev, qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh);
dwc2_max_desc_num(qh), &qh->desc_list_dma,
flags);
qh->desc_list = kzalloc(qh->desc_list_sz, flags | GFP_DMA);
if (!qh->desc_list) if (!qh->desc_list)
return -ENOMEM; return -ENOMEM;
memset(qh->desc_list, 0, qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh)); qh->desc_list_sz,
DMA_TO_DEVICE);
qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags); qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
if (!qh->n_bytes) { if (!qh->n_bytes) {
dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc) dma_unmap_single(hsotg->dev, qh->desc_list_dma,
* dwc2_max_desc_num(qh), qh->desc_list, qh->desc_list_sz,
qh->desc_list_dma); DMA_FROM_DEVICE);
kfree(qh->desc_list);
qh->desc_list = NULL; qh->desc_list = NULL;
return -ENOMEM; return -ENOMEM;
} }
...@@ -113,9 +114,9 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, ...@@ -113,9 +114,9 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{ {
if (qh->desc_list) { if (qh->desc_list) {
dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc) dma_unmap_single(hsotg->dev, qh->desc_list_dma,
* dwc2_max_desc_num(qh), qh->desc_list, qh->desc_list_sz, DMA_FROM_DEVICE);
qh->desc_list_dma); kfree(qh->desc_list);
qh->desc_list = NULL; qh->desc_list = NULL;
} }
...@@ -128,21 +129,20 @@ static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags) ...@@ -128,21 +129,20 @@ static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
if (hsotg->frame_list) if (hsotg->frame_list)
return 0; return 0;
hsotg->frame_list = dma_alloc_coherent(hsotg->dev, hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
4 * FRLISTEN_64_SIZE, hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
&hsotg->frame_list_dma,
mem_flags);
if (!hsotg->frame_list) if (!hsotg->frame_list)
return -ENOMEM; return -ENOMEM;
memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE); hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
hsotg->frame_list_sz,
DMA_TO_DEVICE);
return 0; return 0;
} }
static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg) static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{ {
u32 *frame_list;
dma_addr_t frame_list_dma;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags); spin_lock_irqsave(&hsotg->lock, flags);
...@@ -152,14 +152,14 @@ static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg) ...@@ -152,14 +152,14 @@ static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
return; return;
} }
frame_list = hsotg->frame_list; dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
frame_list_dma = hsotg->frame_list_dma; hsotg->frame_list_sz, DMA_FROM_DEVICE);
kfree(hsotg->frame_list);
hsotg->frame_list = NULL; hsotg->frame_list = NULL;
spin_unlock_irqrestore(&hsotg->lock, flags); spin_unlock_irqrestore(&hsotg->lock, flags);
dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
frame_list_dma);
} }
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en) static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
...@@ -249,6 +249,15 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, ...@@ -249,6 +249,15 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
j = (j + inc) & (FRLISTEN_64_SIZE - 1); j = (j + inc) & (FRLISTEN_64_SIZE - 1);
} while (j != i); } while (j != i);
/*
* Sync frame list since controller will access it if periodic
* channel is currently enabled.
*/
dma_sync_single_for_device(hsotg->dev,
hsotg->frame_list_dma,
hsotg->frame_list_sz,
DMA_TO_DEVICE);
if (!enable) if (!enable)
return; return;
...@@ -541,6 +550,11 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -541,6 +550,11 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_desc->status |= HOST_DMA_IOC; dma_desc->status |= HOST_DMA_IOC;
#endif #endif
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
(idx * sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
} }
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
...@@ -610,6 +624,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -610,6 +624,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
if (qh->ntd == ntd_max) { if (qh->ntd == ntd_max) {
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed); idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC; qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (idx *
sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
} }
#else #else
/* /*
...@@ -639,6 +658,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -639,6 +658,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed); idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC; qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
(idx * sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
#endif #endif
} }
...@@ -676,6 +700,12 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -676,6 +700,12 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
dma_desc->buf = (u32)chan->xfer_dma; dma_desc->buf = (u32)chan->xfer_dma;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
(n_desc * sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
/* /*
* Last (or only) descriptor of IN transfer with actual size less * Last (or only) descriptor of IN transfer with actual size less
* than MaxPacket * than MaxPacket
...@@ -726,6 +756,12 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -726,6 +756,12 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
"set A bit in desc %d (%p)\n", "set A bit in desc %d (%p)\n",
n_desc - 1, n_desc - 1,
&qh->desc_list[n_desc - 1]); &qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
((n_desc - 1) *
sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
} }
dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc); dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
dev_vdbg(hsotg->dev, dev_vdbg(hsotg->dev,
...@@ -751,10 +787,19 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -751,10 +787,19 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A; HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n", dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
n_desc - 1, &qh->desc_list[n_desc - 1]); n_desc - 1, &qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (n_desc - 1) *
sizeof(struct dwc2_hcd_dma_desc),
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
if (n_desc > 1) { if (n_desc > 1) {
qh->desc_list[0].status |= HOST_DMA_A; qh->desc_list[0].status |= HOST_DMA_A;
dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n", dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
&qh->desc_list[0]); &qh->desc_list[0]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma,
sizeof(struct dwc2_hcd_dma_desc),
DMA_TO_DEVICE);
} }
chan->ntd = n_desc; chan->ntd = n_desc;
} }
...@@ -829,7 +874,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -829,7 +874,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd, struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u16 idx) struct dwc2_qh *qh, u16 idx)
{ {
struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx]; struct dwc2_hcd_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc; struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 remain = 0; u16 remain = 0;
int rc = 0; int rc = 0;
...@@ -837,6 +882,13 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, ...@@ -837,6 +882,13 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
if (!qtd->urb) if (!qtd->urb)
return -EINVAL; return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last]; frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset); dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
if (chan->ep_is_in) if (chan->ep_is_in)
...@@ -1092,6 +1144,12 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, ...@@ -1092,6 +1144,12 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
if (!urb) if (!urb)
return -EINVAL; return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev,
qh->desc_list_dma + (desc_num *
sizeof(struct dwc2_hcd_dma_desc)),
sizeof(struct dwc2_hcd_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[desc_num]; dma_desc = &qh->desc_list[desc_num];
n_bytes = qh->n_bytes[desc_num]; n_bytes = qh->n_bytes[desc_num];
dev_vdbg(hsotg->dev, dev_vdbg(hsotg->dev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment