Commit 16547b21 authored by Linus Torvalds

Merge tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - revert a panic on swiotlb initialization failure (Yu Zhao)

 - fix the lookup for partial syncs in dma-debug (Robin Murphy)

 - fix a shift overflow in swiotlb (Chao Gao)

 - fix a comment typo in swiotlb (Chao Gao)

 - mark a function static now that all abusers are gone (Christoph
   Hellwig)

* tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: mark dma_supported static
  swiotlb: fix a typo
  swiotlb: avoid potential left shift overflow
  dma-debug: improve search for partial syncs
  Revert "swiotlb: panic if nslabs is too small"
parents ce888220 9fc18f6d
include/linux/dma-mapping.h
@@ -139,7 +139,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
 bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -248,10 +247,6 @@ static inline bool dma_can_mmap(struct device *dev)
 {
 	return false;
 }
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	return 0;
-}
 static inline bool dma_pci_p2pdma_supported(struct device *dev)
 {
 	return false;
kernel/dma/debug.c
@@ -350,11 +350,10 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 					      unsigned long *flags)
 {
-	unsigned int max_range = dma_get_max_seg_size(ref->dev);
 	struct dma_debug_entry *entry, index = *ref;
-	unsigned int range = 0;
+	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
 
-	while (range <= max_range) {
+	for (int i = 0; i < limit; i++) {
 		entry = __hash_bucket_find(*bucket, ref, containing_match);
 
 		if (entry)
@@ -364,7 +363,6 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 		 * Nothing found, go back a hash bucket
 		 */
 		put_hash_bucket(*bucket, *flags);
-		range          += (1 << HASH_FN_SHIFT);
 		index.dev_addr -= (1 << HASH_FN_SHIFT);
 		*bucket = get_hash_bucket(&index, flags);
 	}
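The dma-debug change replaces a search bound taken from dma_get_max_seg_size() with one derived from the hash layout itself: the backwards walk over hash buckets now stops after at most one pass over the table, or sooner if the device address would step below zero. Below is a minimal userspace sketch of that bound, not kernel code; HASH_SIZE and HASH_FN_SHIFT here are illustrative stand-ins for the constants defined in kernel/dma/debug.c.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real constants live in kernel/dma/debug.c. */
#define HASH_FN_SHIFT	13
#define HASH_SIZE	16384UL

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * Upper bound on how many buckets the backwards search may visit:
 * either one pass over the whole table, or however many steps fit
 * before dev_addr would drop below zero.
 */
static unsigned long search_limit(uint64_t dev_addr)
{
	return MIN(HASH_SIZE, (dev_addr >> HASH_FN_SHIFT) + 1);
}

int main(void)
{
	/* a low address can only step back through a handful of buckets */
	printf("limit for 0x4000:     %lu\n", search_limit(0x4000));
	/* a high address is capped at one pass over all HASH_SIZE buckets */
	printf("limit for 0xffff0000: %lu\n", search_limit(0xffff0000));
	return 0;
}

The old loop was instead bounded by the device's maximum segment size, which has nothing to do with how entries are hashed, so an unusually large segment size could make the walk far longer than the table itself.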
kernel/dma/mapping.c
@@ -707,7 +707,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
 
-int dma_supported(struct device *dev, u64 mask)
+static int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -721,7 +721,6 @@ int dma_supported(struct device *dev, u64 mask)
 		return 1;
 	return ops->dma_supported(dev, mask);
 }
-EXPORT_SYMBOL(dma_supported);
 
 bool dma_pci_p2pdma_supported(struct device *dev)
 {
kernel/dma/swiotlb.c
@@ -326,9 +326,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
-	if (nslabs < IO_TLB_MIN_SLABS)
-		panic("%s: nslabs = %lu too small\n", __func__, nslabs);
-
 	/*
 	 * By default allocate the bounce buffer memory from low memory, but
 	 * allow to pick a location everywhere for hypervisors with guest
@@ -341,8 +338,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb) {
-		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
-			__func__, bytes);
+		pr_warn("%s: failed to allocate tlb structure\n", __func__);
 		return;
 	}
@@ -579,7 +575,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	}
 }
 
-#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+	return start + (idx << IO_TLB_SHIFT);
+}
 
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
@@ -765,7 +764,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	/*
 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
 	 * to the tlb buffer, if we knew for sure the device will
-	 * overwirte the entire current content. But we don't. Thus
+	 * overwrite the entire current content. But we don't. Thus
 	 * unconditional bounce may prevent leaking swiotlb content (i.e.
 	 * kernel memory) to user-space.
 	 */
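The slot_addr() rework above is the left-shift-overflow fix: as a macro, the shift was performed in whatever type the caller's index happened to have (typically a 32-bit int), so (idx) << IO_TLB_SHIFT could wrap before being added to the 64-bit start address; as a static inline taking phys_addr_t, the index is widened before the shift. Below is a small standalone sketch of the difference, using made-up values chosen only to make the truncation visible (IO_TLB_SHIFT is 11 in include/linux/swiotlb.h).

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel type */

#define IO_TLB_SHIFT	11

/* Old style: the shift happens in the caller's (often 32-bit) type. */
#define slot_addr_macro(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/* New style: idx is promoted to 64 bits before the shift. */
static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

int main(void)
{
	phys_addr_t start = 0x100000000ULL;	/* bounce buffer base above 4 GiB */
	unsigned int idx = 0x300000;		/* a deliberately large slot index */

	/* 0x300000 << 11 = 0x180000000 no longer fits in 32 bits, so the
	 * macro's shift wraps to 0x80000000 before the 64-bit addition... */
	printf("macro : %#llx\n", (unsigned long long)slot_addr_macro(start, idx));
	/* ...while the inline helper does the shift in 64-bit arithmetic. */
	printf("inline: %#llx\n", (unsigned long long)slot_addr(start, idx));
	return 0;
}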