Commit 8b435e40 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'usb-6.4-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB fixes from Greg KH:
 "Here are some USB driver and core fixes for 6.4-rc5. Most of these are
  tiny driver fixes, including:

   - udc driver bugfix

   - f_fs gadget driver bugfix

   - cdns3 driver bugfix

   - typec bugfixes

  But the "big" thing in here is a fix yet-again for how the USB buffers
  are handled from userspace when dealing with DMA issues. The changes
  were discussed a lot, and tested a lot, on the list, and acked by the
  relevant mm maintainers and have been in linux-next all this past week
  with no reported problems"

* tag 'usb-6.4-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb:
  usb: typec: tps6598x: Fix broken polling mode after system suspend/resume
  mm: page_table_check: Ensure user pages are not slab pages
  mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM
  usb: usbfs: Use consistent mmap functions
  usb: usbfs: Enforce page requirements for mmap
  dt-bindings: usb: snps,dwc3: Fix "snps,hsphy_interface" type
  usb: gadget: udc: fix NULL dereference in remove()
  usb: gadget: f_fs: Add unbind event before functionfs_unbind
  usb: cdns3: fix NCM gadget RX speed 20x slow than expection at iMX8QM
parents b066935b fcfe8423
...@@ -287,7 +287,7 @@ properties: ...@@ -287,7 +287,7 @@ properties:
description: description:
High-Speed PHY interface selection between UTMI+ and ULPI when the High-Speed PHY interface selection between UTMI+ and ULPI when the
DWC_USB3_HSPHY_INTERFACE has value 3. DWC_USB3_HSPHY_INTERFACE has value 3.
$ref: /schemas/types.yaml#/definitions/uint8 $ref: /schemas/types.yaml#/definitions/string
enum: [utmi, ulpi] enum: [utmi, ulpi]
snps,quirk-frame-length-adjustment: snps,quirk-frame-length-adjustment:
......
...@@ -52,3 +52,22 @@ Build kernel with: ...@@ -52,3 +52,22 @@ Build kernel with:
Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
table support without extra kernel parameter. table support without extra kernel parameter.
Implementation notes
====================
We specifically decided not to use VMA information in order to avoid relying on
MM states (except for limited "struct page" info). The page table check is a
mechanism, separate from the Linux-MM state machine, that verifies that
user-accessible pages are not falsely shared.
PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
regions into the userspace via /dev/mem. At the same time, pages may change
their properties (e.g., from anonymous pages to named pages) while they are
still being mapped in the userspace, leading to "corruption" detected by the
page table check.
Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may still be allowed to be mapped via
/dev/mem. However, these pages are always considered named pages, so they
won't break the logic used in the page table check.
...@@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) ...@@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
else else
priv_ep->trb_burst_size = 16; priv_ep->trb_burst_size = 16;
/*
 * In versions preceding DEV_VER_V2 (for example, iMX8QM) there exist bugs
 * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the
 * address is not aligned to 128 bytes (which is a product of the 64-bit AXI
 * and the AXI maximum burst length of 16, i.e. 0xF+1, dma_axi_ctrl0[3:0]).
 * This results in data corruption when a transfer crosses the 4K border.
 * The corruption specifically occurs from position (4K - (address & 0x7F))
 * to 4K.
 *
 * So force trb_burst_size to 16 on such platforms.
*/
if (priv_dev->dev_ver < DEV_VER_V2)
priv_ep->trb_burst_size = 16;
mult = min_t(u8, mult, EP_CFG_MULT_MAX); mult = min_t(u8, mult, EP_CFG_MULT_MAX);
buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
......
...@@ -172,3 +172,44 @@ void hcd_buffer_free( ...@@ -172,3 +172,44 @@ void hcd_buffer_free(
} }
dma_free_coherent(hcd->self.sysdev, size, addr, dma); dma_free_coherent(hcd->self.sysdev, size, addr, dma);
} }
/**
 * hcd_buffer_alloc_pages - allocate page-aligned memory suitable for mmap
 * @hcd:       the host controller the buffer belongs to
 * @size:      number of bytes requested
 * @mem_flags: gfp allocation flags for the non-DMA fallback path
 * @dma:       out: bus address of the buffer, or DMA_MAPPING_ERROR when the
 *             buffer was taken from normal pages (PIO-only host)
 *
 * Three allocation strategies, tried in order:
 *  1. A host-local memory pool, if present — allocated PAGE_SIZE-aligned
 *     via gen_pool_dma_alloc_align().
 *  2. Plain free pages for hosts that do not use DMA at all; *dma is set to
 *     the DMA_MAPPING_ERROR sentinel so callers (e.g. usbdev_mmap) know to
 *     map the buffer with remap_pfn_range() instead of dma_mmap_coherent().
 *  3. dma_alloc_coherent() on the sysdev for ordinary DMA-capable hosts.
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure
 * (or when @size is zero). Must be freed with hcd_buffer_free_pages(),
 * which distinguishes the three cases the same way.
 */
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
		size_t size, gfp_t mem_flags, dma_addr_t *dma)
{
	if (size == 0)
		return NULL;

	/* Case 1: host-controller-local memory pool, page-aligned. */
	if (hcd->localmem_pool)
		return gen_pool_dma_alloc_align(hcd->localmem_pool,
				size, dma, PAGE_SIZE);

	/* Case 2: some USB hosts just use PIO */
	if (!hcd_uses_dma(hcd)) {
		*dma = DMA_MAPPING_ERROR;
		return (void *)__get_free_pages(mem_flags,
				get_order(size));
	}

	/* Case 3: ordinary coherent DMA allocation. */
	return dma_alloc_coherent(hcd->self.sysdev,
			size, dma, mem_flags);
}
/**
 * hcd_buffer_free_pages - free a buffer from hcd_buffer_alloc_pages()
 * @hcd:  the host controller the buffer belongs to
 * @size: size passed to the matching allocation call
 * @addr: kernel virtual address returned by hcd_buffer_alloc_pages()
 *        (NULL is a no-op)
 * @dma:  bus address returned by the allocation, or DMA_MAPPING_ERROR for
 *        the non-DMA (plain pages) case
 *
 * Releases the buffer using the deallocator that mirrors whichever of the
 * three allocation strategies hcd_buffer_alloc_pages() used: local pool,
 * free_pages() for PIO-only hosts, or dma_free_coherent() otherwise.
 * The branch order must match the allocator's so the pairing stays correct.
 */
void hcd_buffer_free_pages(struct usb_hcd *hcd,
		size_t size, void *addr, dma_addr_t dma)
{
	if (!addr)
		return;

	/* Buffer came from the host-local memory pool. */
	if (hcd->localmem_pool) {
		gen_pool_free(hcd->localmem_pool,
				(unsigned long)addr, size);
		return;
	}

	/* PIO-only host: buffer was plain free pages, no DMA mapping. */
	if (!hcd_uses_dma(hcd)) {
		free_pages((unsigned long)addr, get_order(size));
		return;
	}

	/* Ordinary coherent DMA buffer. */
	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
...@@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps) ...@@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps)
static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
{ {
struct usb_dev_state *ps = usbm->ps; struct usb_dev_state *ps = usbm->ps;
struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&ps->lock, flags); spin_lock_irqsave(&ps->lock, flags);
...@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) ...@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
list_del(&usbm->memlist); list_del(&usbm->memlist);
spin_unlock_irqrestore(&ps->lock, flags); spin_unlock_irqrestore(&ps->lock, flags);
usb_free_coherent(ps->dev, usbm->size, usbm->mem, hcd_buffer_free_pages(hcd, usbm->size,
usbm->dma_handle); usbm->mem, usbm->dma_handle);
usbfs_decrease_memory_usage( usbfs_decrease_memory_usage(
usbm->size + sizeof(struct usb_memory)); usbm->size + sizeof(struct usb_memory));
kfree(usbm); kfree(usbm);
...@@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start; size_t size = vma->vm_end - vma->vm_start;
void *mem; void *mem;
unsigned long flags; unsigned long flags;
dma_addr_t dma_handle; dma_addr_t dma_handle = DMA_MAPPING_ERROR;
int ret; int ret;
ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
...@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem; goto error_decrease_mem;
} }
mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, mem = hcd_buffer_alloc_pages(hcd,
&dma_handle); size, GFP_USER | __GFP_NOWARN, &dma_handle);
if (!mem) { if (!mem) {
ret = -ENOMEM; ret = -ENOMEM;
goto error_free_usbm; goto error_free_usbm;
...@@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1; usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist); INIT_LIST_HEAD(&usbm->memlist);
if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { /*
* In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
* normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
* whether we are in such cases, and then use remap_pfn_range (or
* dma_mmap_coherent) to map normal (or DMA) pages into the user
* space, respectively.
*/
if (dma_handle == DMA_MAPPING_ERROR) {
if (remap_pfn_range(vma, vma->vm_start, if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT, virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) { size, vma->vm_page_prot) < 0) {
......
...@@ -3535,6 +3535,7 @@ static void ffs_func_unbind(struct usb_configuration *c, ...@@ -3535,6 +3535,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
/* Drain any pending AIO completions */ /* Drain any pending AIO completions */
drain_workqueue(ffs->io_completion_wq); drain_workqueue(ffs->io_completion_wq);
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
if (!--opts->refcnt) if (!--opts->refcnt)
functionfs_unbind(ffs); functionfs_unbind(ffs);
...@@ -3559,7 +3560,6 @@ static void ffs_func_unbind(struct usb_configuration *c, ...@@ -3559,7 +3560,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->function.ssp_descriptors = NULL; func->function.ssp_descriptors = NULL;
func->interfaces_nums = NULL; func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
} }
static struct usb_function *ffs_alloc(struct usb_function_instance *fi) static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
......
...@@ -170,6 +170,9 @@ static int udc_pci_probe( ...@@ -170,6 +170,9 @@ static int udc_pci_probe(
retval = -ENODEV; retval = -ENODEV;
goto err_probe; goto err_probe;
} }
udc = dev;
return 0; return 0;
err_probe: err_probe:
......
...@@ -920,7 +920,7 @@ static int __maybe_unused tps6598x_resume(struct device *dev) ...@@ -920,7 +920,7 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
enable_irq(client->irq); enable_irq(client->irq);
} }
if (client->irq) if (!client->irq)
queue_delayed_work(system_power_efficient_wq, &tps->wq_poll, queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
msecs_to_jiffies(POLL_INTERVAL)); msecs_to_jiffies(POLL_INTERVAL));
......
...@@ -617,6 +617,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) ...@@ -617,6 +617,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
* Please note that, confusingly, "page_mapping" refers to the inode * Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped" * address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped. * refers to user virtual address space into which the page is mapped.
*
* For slab pages, since slab reuses the bits in struct page to store its
* internal states, the page->mapping does not exist as such, nor do these
* flags below. So in order to avoid testing non-existent bits, please
* make sure that PageSlab(page) actually evaluates to false before calling
* the following functions (e.g., PageAnon). See mm/slab.h.
*/ */
#define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_ANON 0x1
#define PAGE_MAPPING_MOVABLE 0x2 #define PAGE_MAPPING_MOVABLE 0x2
......
...@@ -501,6 +501,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size, ...@@ -501,6 +501,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
void hcd_buffer_free(struct usb_bus *bus, size_t size, void hcd_buffer_free(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma); void *addr, dma_addr_t dma);
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
size_t size, gfp_t mem_flags, dma_addr_t *dma);
void hcd_buffer_free_pages(struct usb_hcd *hcd,
size_t size, void *addr, dma_addr_t dma);
/* generic bus glue, needed for host controllers that don't use PCI */ /* generic bus glue, needed for host controllers that don't use PCI */
extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
......
...@@ -98,6 +98,7 @@ config PAGE_OWNER ...@@ -98,6 +98,7 @@ config PAGE_OWNER
config PAGE_TABLE_CHECK config PAGE_TABLE_CHECK
bool "Check for invalid mappings in user page tables" bool "Check for invalid mappings in user page tables"
depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK
depends on EXCLUSIVE_SYSTEM_RAM
select PAGE_EXTENSION select PAGE_EXTENSION
help help
Check that anonymous page is not being mapped twice with read write Check that anonymous page is not being mapped twice with read write
......
...@@ -71,6 +71,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr, ...@@ -71,6 +71,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
page_ext = page_ext_get(page); page_ext = page_ext_get(page);
BUG_ON(PageSlab(page));
anon = PageAnon(page); anon = PageAnon(page);
for (i = 0; i < pgcnt; i++) { for (i = 0; i < pgcnt; i++) {
...@@ -107,6 +109,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr, ...@@ -107,6 +109,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
page_ext = page_ext_get(page); page_ext = page_ext_get(page);
BUG_ON(PageSlab(page));
anon = PageAnon(page); anon = PageAnon(page);
for (i = 0; i < pgcnt; i++) { for (i = 0; i < pgcnt; i++) {
...@@ -133,6 +137,8 @@ void __page_table_check_zero(struct page *page, unsigned int order) ...@@ -133,6 +137,8 @@ void __page_table_check_zero(struct page *page, unsigned int order)
struct page_ext *page_ext; struct page_ext *page_ext;
unsigned long i; unsigned long i;
BUG_ON(PageSlab(page));
page_ext = page_ext_get(page); page_ext = page_ext_get(page);
BUG_ON(!page_ext); BUG_ON(!page_ext);
for (i = 0; i < (1ul << order); i++) { for (i = 0; i < (1ul << order); i++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment