Commit 9d1a68c4 authored by Greg Kroah-Hartman


Merge tag 'fixes-for-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-linus

usb: fixes for v4.18-rc1

First set of fixes for the current -rc cycle. The main part is fixes for
warnings of different kinds. We're also adding support for Intel's Ice Lake
devices in dwc3-pci.c.
parents 7daf201d 1d8e5c00
@@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function>
 where <config name>.<number> specify the configuration and <function> is
 a symlink to a function being removed from the configuration, e.g.:
-$ rm configfs/c.1/ncm.usb0
+$ rm configs/c.1/ncm.usb0
......
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
 * @frame_list_sz: Frame list size
 * @desc_gen_cache: Kmem cache for generic descriptors
 * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
+* @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf
 *
 * These are for peripheral mode:
 *
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
         u32 frame_list_sz;
         struct kmem_cache *desc_gen_cache;
         struct kmem_cache *desc_hsisoc_cache;
+        struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
......
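A note on the new @unaligned_cache member: it holds a slab cache of small DMA-able bounce buffers that Buffer DMA mode falls back to when a transfer buffer is not DWORD aligned. Below is a minimal sketch, assuming the names used by the hcd.c hunks further down; the helper functions themselves are hypothetical and only illustrate how such a cache is created and allocated from.

/* Sketch only: a DMA-able bounce-buffer cache as used by the hcd.c changes below. */
#include <linux/errno.h>
#include <linux/slab.h>

#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024

static struct kmem_cache *unaligned_cache;

/* Hypothetical helper: create the cache (compare dwc2_hcd_init() below). */
static int example_create_unaligned_cache(void)
{
        /* 4-byte alignment; SLAB_CACHE_DMA keeps the objects DMA-able. */
        unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
                                            DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
                                            SLAB_CACHE_DMA, NULL);
        return unaligned_cache ? 0 : -ENOMEM;
}

/* Hypothetical helper: grab a bounce buffer in atomic context. */
static void *example_get_bounce_buf(void)
{
        return kmem_cache_alloc(unaligned_cache, GFP_ATOMIC | GFP_DMA);
}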
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
         u32 index;
         u32 maxsize = 0;
         u32 mask = 0;
+        u8 pid = 0;
         maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
                       ((len << DEV_DMA_NBYTES_SHIFT) & mask));
         if (hs_ep->dir_in) {
-                desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+                if (len)
+                        pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+                else
+                        pid = 1;
+                desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
                                  DEV_DMA_ISOC_PID_MASK) |
                                 ((len % hs_ep->ep.maxpacket) ?
                                  DEV_DMA_SHORT : 0) |
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
         struct dwc2_dma_desc *desc;
         if (list_empty(&hs_ep->queue)) {
+                hs_ep->target_frame = TARGET_FRAME_INITIAL;
                 dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
                 return;
         }
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
          */
         tmp = dwc2_hsotg_read_frameno(hsotg);
-        dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
         if (using_desc_dma(hsotg)) {
                 if (ep->target_frame == TARGET_FRAME_INITIAL) {
                         /* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
                 tmp = dwc2_hsotg_read_frameno(hsotg);
                 if (using_desc_dma(hsotg)) {
-                        dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                                    get_ep_head(hs_ep), 0);
-
                         hs_ep->target_frame = tmp;
                         dwc2_gadget_incr_frame_num(hs_ep);
                         dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
         }
         ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-        if (ret)
+        if (ret) {
+                dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+                                           hsotg->ctrl_req);
                 return ret;
+        }
         dwc2_hsotg_dump(hsotg);
         return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
         usb_del_gadget_udc(&hsotg->gadget);
+        dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
         return 0;
 }
......
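The fill_isoc_desc change above stops deriving the descriptor's PID field from hs_ep->mc and instead computes it from the length actually queued. As a hedged illustration of just that arithmetic (the helper below is hypothetical, not part of the driver): with maxpacket 1024, len 3000 gives PID 3, len 1024 gives PID 1, and a zero-length request still gets PID 1.

#include <linux/kernel.h>       /* DIV_ROUND_UP() */
#include <linux/types.h>

/* Hypothetical helper mirroring the new PID calculation in
 * dwc2_gadget_fill_isoc_desc(): packets per (micro)frame for an IN isoc request.
 */
static u8 isoc_pid_for_len(u32 len, u32 maxpacket)
{
        return len ? DIV_ROUND_UP(len, maxpacket) : 1;
}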
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
         }
 
         if (hsotg->params.host_dma) {
-                dwc2_writel((u32)chan->xfer_dma,
-                            hsotg->regs + HCDMA(chan->hc_num));
+                dma_addr_t dma_addr;
+
+                if (chan->align_buf) {
+                        if (dbg_hc(chan))
+                                dev_vdbg(hsotg->dev, "align_buf\n");
+                        dma_addr = chan->align_buf;
+                } else {
+                        dma_addr = chan->xfer_dma;
+                }
+                dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
                 if (dbg_hc(chan))
                         dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
-                                 (unsigned long)chan->xfer_dma, chan->hc_num);
+                                 (unsigned long)dma_addr, chan->hc_num);
         }
 
         /* Start the split */
@@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
         }
 }
 
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+                                            struct dwc2_qh *qh,
+                                            struct dwc2_host_chan *chan)
+{
+        if (!hsotg->unaligned_cache ||
+            chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+                return -ENOMEM;
+
+        if (!qh->dw_align_buf) {
+                qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+                                                    GFP_ATOMIC | GFP_DMA);
+                if (!qh->dw_align_buf)
+                        return -ENOMEM;
+        }
+
+        qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+                                              DWC2_KMEM_UNALIGNED_BUF_SIZE,
+                                              DMA_FROM_DEVICE);
+        if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+                dev_err(hsotg->dev, "can't map align_buf\n");
+                chan->align_buf = 0;
+                return -EINVAL;
+        }
+
+        chan->align_buf = qh->dw_align_buf_dma;
+        return 0;
+}
+
 #define DWC2_USB_DMA_ALIGN 4
 
 struct dma_aligned_buffer {
@@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
         /* Set the transfer attributes */
         dwc2_hc_init_xfer(hsotg, chan, qtd);
 
+        /* For non-dword aligned buffers */
+        if (hsotg->params.host_dma && qh->do_split &&
+            chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+                dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+                if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+                        dev_err(hsotg->dev,
+                                "Failed to allocate memory to handle non-aligned buffer\n");
+                        /* Add channel back to free list */
+                        chan->align_buf = 0;
+                        chan->multi_count = 0;
+                        list_add_tail(&chan->hc_list_entry,
+                                      &hsotg->free_hc_list);
+                        qtd->in_process = 0;
+                        qh->channel = NULL;
+                        return -ENOMEM;
+                }
+        } else {
+                /*
+                 * We assume that DMA is always aligned in non-split
+                 * case or split out case. Warn if not.
+                 */
+                WARN_ON_ONCE(hsotg->params.host_dma &&
+                             (chan->xfer_dma & 0x3));
+                chan->align_buf = 0;
+        }
+
         if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
             chan->ep_type == USB_ENDPOINT_XFER_ISOC)
                 /*
@@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
                 }
         }
 
+        if (hsotg->params.host_dma) {
+                /*
+                 * Create kmem caches to handle non-aligned buffer
+                 * in Buffer DMA mode.
+                 */
+                hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+                                                DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+                                                SLAB_CACHE_DMA, NULL);
+                if (!hsotg->unaligned_cache)
+                        dev_err(hsotg->dev,
+                                "unable to create dwc2 unaligned cache\n");
+        }
+
         hsotg->otg_port = 1;
         hsotg->frame_list = NULL;
         hsotg->frame_list_dma = 0;
@@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
         return 0;
 
 error4:
-        kmem_cache_destroy(hsotg->desc_gen_cache);
+        kmem_cache_destroy(hsotg->unaligned_cache);
         kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+        kmem_cache_destroy(hsotg->desc_gen_cache);
 error3:
         dwc2_hcd_release(hsotg);
 error2:
@@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
         usb_remove_hcd(hcd);
         hsotg->priv = NULL;
 
-        kmem_cache_destroy(hsotg->desc_gen_cache);
+        kmem_cache_destroy(hsotg->unaligned_cache);
         kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+        kmem_cache_destroy(hsotg->desc_gen_cache);
 
         dwc2_hcd_release(hsotg);
         usb_put_hcd(hcd);
@@ -5435,7 +5514,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
         dwc2_writel(hprt0, hsotg->regs + HPRT0);
 
         /* Wait for the HPRT0.PrtSusp register field to be set */
-        if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+        if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
                 dev_warn(hsotg->dev, "Suspend wasn't generated\n");
         /*
@@ -5616,6 +5695,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
                 return ret;
         }
 
+        dwc2_hcd_rem_wakeup(hsotg);
+
         hsotg->hibernated = 0;
         hsotg->bus_suspended = 0;
         hsotg->lx_state = DWC2_L0;
......
@@ -76,6 +76,8 @@ struct dwc2_qh;
 *                      (micro)frame
 * @xfer_buf: Pointer to current transfer buffer position
 * @xfer_dma: DMA address of xfer_buf
+* @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
+*             DWORD aligned
 * @xfer_len: Total number of bytes to transfer
 * @xfer_count: Number of bytes transferred so far
 * @start_pkt_count: Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
         u8 *xfer_buf;
         dma_addr_t xfer_dma;
+        dma_addr_t align_buf;
         u32 xfer_len;
         u32 xfer_count;
         u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
 *                      speed. Note that this is in "schedule slice" which
 *                      is tightly packed.
 * @ntd: Actual number of transfer descriptors in a list
+* @dw_align_buf: Used instead of original buffer if its physical address
+*                is not dword-aligned
+* @dw_align_buf_dma: DMA address for dw_align_buf
 * @qtd_list: List of QTDs for this QH
 * @channel: Host channel currently processing transfers for this QH
 * @qh_list_entry: Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
         struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
         u32 ls_start_schedule_slice;
         u16 ntd;
+        u8 *dw_align_buf;
+        dma_addr_t dw_align_buf_dma;
         struct list_head qtd_list;
         struct dwc2_host_chan *channel;
         struct list_head qh_list_entry;
......
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
         len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
                                           DWC2_HC_XFER_COMPLETE, NULL);
-        if (!len) {
+        if (!len && !qtd->isoc_split_offset) {
                 qtd->complete_split = 0;
-                qtd->isoc_split_offset = 0;
                 return 0;
         }
 
         frame_desc->actual_length += len;
 
+        if (chan->align_buf) {
+                dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+                dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+                                 DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+                memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+                       chan->qh->dw_align_buf, len);
+        }
+
         qtd->isoc_split_offset += len;
 
         hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
......
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
         /* Get the map and adjust if this is a multi_tt hub */
         map = qh->dwc_tt->periodic_bitmaps;
         if (qh->dwc_tt->usb_tt->multi)
-                map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+                map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
 
         return map;
 }
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
         if (qh->desc_list)
                 dwc2_hcd_qh_free_ddma(hsotg, qh);
+        else if (hsotg->unaligned_cache && qh->dw_align_buf)
+                kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
         kfree(qh);
 }
......
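Taken together, the host-side hunks give the bounce buffer a clear lifecycle: dwc2_alloc_split_dma_aligned_buf() allocates and maps it when a channel is assigned, dwc2_xfercomp_isoc_split_in() unmaps it and copies the received bytes back into the URB buffer, and dwc2_hcd_qh_free() returns it to the cache. A minimal sketch of the copy-back step in isolation follows; the function and parameter names are illustrative, not the driver's.

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/types.h>

/* Sketch: hand a completed split-IN bounce buffer back to the CPU and copy it out. */
static void example_copy_back(struct device *dev, u8 *bounce_buf,
                              dma_addr_t bounce_dma, size_t bounce_size,
                              u8 *urb_buf, size_t offset, size_t len)
{
        /* Unmap first so the CPU sees the data the device wrote ... */
        dma_unmap_single(dev, bounce_dma, bounce_size, DMA_FROM_DEVICE);
        /* ... then copy the received bytes to their real destination. */
        memcpy(urb_buf + offset, bounce_buf, len);
}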
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
         if (!dwc->clks)
                 return -ENOMEM;
 
-        dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
         dwc->dev = dev;
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
         if (IS_ERR(dwc->reset))
                 return PTR_ERR(dwc->reset);
 
-        ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
-        if (ret == -EPROBE_DEFER)
-                return ret;
-        /*
-         * Clocks are optional, but new DT platforms should support all clocks
-         * as required by the DT-binding.
-         */
-        if (ret)
-                dwc->num_clks = 0;
+        if (dev->of_node) {
+                dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+                ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+                if (ret == -EPROBE_DEFER)
+                        return ret;
+                /*
+                 * Clocks are optional, but new DT platforms should support all
+                 * clocks as required by the DT-binding.
+                 */
+                if (ret)
+                        dwc->num_clks = 0;
+        }
 
         ret = reset_control_deassert(dwc->reset);
         if (ret)
......
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
         reset_control_put(simple->resets);
 
-        pm_runtime_put_sync(dev);
         pm_runtime_disable(dev);
+        pm_runtime_put_noidle(dev);
+        pm_runtime_set_suspended(dev);
 
         return 0;
 }
......
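The dwc3-of-simple remove() change follows a common runtime-PM teardown ordering: disable runtime PM first, drop the usage count taken at probe without triggering a suspend callback, and record the suspended state so the counters stay balanced for a later rebind. A stripped-down sketch of that ordering, using a hypothetical driver for illustration:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical remove path showing the teardown order used above. */
static int example_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        /* driver-specific resources would be released here first */

        pm_runtime_disable(dev);        /* no further runtime-PM callbacks */
        pm_runtime_put_noidle(dev);     /* drop probe's usage count, no suspend call */
        pm_runtime_set_suspended(dev);  /* leave a consistent runtime-PM state behind */

        return 0;
}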
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK 0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
 
 #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
         {  }    /* Terminating Entry */
 };
......
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
         qcom->dwc3 = of_find_device_by_node(dwc3_np);
         if (!qcom->dwc3) {
                 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+                ret = -ENODEV;
                 goto depopulate;
         }
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
         return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
 {
         struct dwc3_qcom *qcom = dev_get_drvdata(dev);
         int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
         return ret;
 }
 
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
 {
         struct dwc3_qcom *qcom = dev_get_drvdata(dev);
         int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
         return ret;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
 {
         struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
         return dwc3_qcom_suspend(qcom);
 }
 
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
 {
         struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
         return dwc3_qcom_resume(qcom);
 }
-#endif
 
 static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
......
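In the dwc3-qcom hunks the #ifdef CONFIG_PM/CONFIG_PM_SLEEP guards are replaced with __maybe_unused annotations: the SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() macros already compile the references away when those options are off, and __maybe_unused silences the resulting unused-function warnings. A stripped-down sketch of the pattern with illustrative names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
        /* suspend work would go here */
        return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
        /* resume work would go here */
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        /* Expands to nothing when CONFIG_PM_SLEEP is disabled; the callbacks
         * are then referenced nowhere, and __maybe_unused keeps the build quiet.
         */
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};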
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                  */
                 if (w_value && !f->get_alt)
                         break;
+
+                spin_lock(&cdev->lock);
                 value = f->set_alt(f, w_index, w_value);
                 if (value == USB_GADGET_DELAYED_STATUS) {
                         DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                         DBG(cdev, "delayed_status count %d\n",
                                         cdev->delayed_status);
                 }
+                spin_unlock(&cdev->lock);
                 break;
         case USB_REQ_GET_INTERFACE:
                 if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
......
@@ -215,6 +215,7 @@ struct ffs_io_data {
         struct mm_struct *mm;
 
         struct work_struct work;
+        struct work_struct cancellation_work;
 
         struct usb_ep *ep;
         struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
         return 0;
 }
 
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+        struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+                                                   cancellation_work);
+
+        ENTER();
+
+        usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
         struct ffs_io_data *io_data = kiocb->private;
-        struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+        struct ffs_data *ffs = io_data->ffs;
         int value;
 
         ENTER();
 
-        spin_lock_irq(&epfile->ffs->eps_lock);
-
-        if (likely(io_data && io_data->ep && io_data->req))
-                value = usb_ep_dequeue(io_data->ep, io_data->req);
-        else
-                value = -EINVAL;
-
-        spin_unlock_irq(&epfile->ffs->eps_lock);
+        if (likely(io_data && io_data->ep && io_data->req)) {
+                INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+                queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+                value = -EINPROGRESS;
+        } else {
+                value = -EINVAL;
+        }
 
         return value;
 }
......
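The f_fs change makes ffs_aio_cancel() stop calling usb_ep_dequeue() directly under eps_lock; it instead schedules the dequeue on the function's io_completion_wq and reports -EINPROGRESS to the AIO core. The deferral itself is the standard work_struct pattern, sketched below with illustrative names that only mirror the diff:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_io_data {                        /* stand-in for struct ffs_io_data */
        struct work_struct cancellation_work;
        /* endpoint/request pointers would live here */
};

static void example_cancel_worker(struct work_struct *work)
{
        struct example_io_data *io_data =
                container_of(work, struct example_io_data, cancellation_work);

        /* Process context: safe place for the real dequeue/cleanup. */
        (void)io_data;
}

static int example_cancel(struct example_io_data *io_data,
                          struct workqueue_struct *wq)
{
        INIT_WORK(&io_data->cancellation_work, example_cancel_worker);
        queue_work(wq, &io_data->cancellation_work);
        return -EINPROGRESS;    /* completion is reported asynchronously */
}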