Commit 1e6d5dea authored by Linus Torvalds

Merge tag 'dma-mapping-6.5-2023-06-28' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - swiotlb cleanups (Petr Tesarik)

 - use kvmalloc_array (gaoxu)

 - a small step towards removing is_swiotlb_active (Christoph Hellwig)

 - fix a Kconfig typo (Sui Jingfeng)

* tag 'dma-mapping-6.5-2023-06-28' of git://git.infradead.org/users/hch/dma-mapping:
  drm/nouveau: stop using is_swiotlb_active
  swiotlb: use the atomic counter of total used slabs if available
  swiotlb: remove unused field "used" from struct io_tlb_mem
  dma-remap: use kvmalloc_array/kvfree for larger dma memory remap
  dma-mapping: fix a Kconfig typo
parents 7ede5f78 0a2f6372
@@ -24,9 +24,9 @@
*/
#include <linux/limits.h>
-#include <linux/swiotlb.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_cache.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
@@ -265,7 +265,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
-bool need_swiotlb = false;
int typei, ret;
ret = nouveau_ttm_init_host(drm, 0);
@@ -300,13 +299,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-need_swiotlb = is_swiotlb_active(dev->dev);
-#endif
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
dev->anon_inode->i_mapping,
-dev->vma_offset_manager, need_swiotlb,
+dev->vma_offset_manager,
+drm_need_swiotlb(drm->client.mmu.dmabits),
drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
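The hunk above replaces nouveau's direct query of swiotlb state with drm_need_swiotlb(), which decides from the device's DMA addressing width instead. Below is a rough, non-authoritative sketch of that decision; the real helper lives in the DRM core (declared in <drm/drm_cache.h>, included above) and also handles special cases such as Xen PV domains that this sketch omits.

#include <linux/ioport.h>
#include <linux/minmax.h>
#include <linux/types.h>

/*
 * Illustrative sketch only, not a copy of drm_need_swiotlb(): bounce
 * buffering is needed when some I/O memory lies above the highest
 * address reachable with dma_bits address bits.
 */
static bool need_swiotlb_sketch(int dma_bits)
{
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	if (dma_bits >= 64)
		return false;	/* device can address everything */

	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
		max_iomem = max(max_iomem, tmp->end);

	return max_iomem > ((u64)1 << dma_bits);
}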
@@ -76,7 +76,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
*          @end. For default swiotlb, this is command line adjustable via
*          setup_io_tlb_npages.
-* @used: The number of used IO TLB block.
* @list: The free list describing the number of free entries available
*        from each index.
* @orig_addr: The original address corresponding to a mapped entry.
@@ -98,7 +97,6 @@ struct io_tlb_mem {
phys_addr_t end;
void *vaddr;
unsigned long nslabs;
-unsigned long used;
struct dentry *debugfs;
bool late_alloc;
bool force_bounce;
@@ -42,7 +42,7 @@ config ARCH_HAS_DMA_SET_MASK
#
# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
-# people thing of when saying write combine, so very few platforms should
+# people think of when saying write combine, so very few platforms should
# need to enable this.
#
config ARCH_HAS_DMA_WRITE_COMBINE
@@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
void *vaddr;
int i;
-pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return NULL;
for (i = 0; i < count; i++)
pages[i] = nth_page(page, i);
vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
-kfree(pages);
+kvfree(pages);
return vaddr;
}
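For context on the change above: kvmalloc_array() first attempts a physically contiguous kmalloc and falls back to vmalloc() when the request is too large or fragmentation prevents it, and kvfree() releases memory from either path. The snippet below only illustrates that allocation pattern with hypothetical helper names; it is not part of the patch. A remap needs one struct page pointer per page of the buffer, so the metadata array itself can reach megabytes.

#include <linux/mm.h>
#include <linux/slab.h>

/* Illustration only (hypothetical helpers): for a 512 MiB remap with
 * 4 KiB pages this is 131072 pointers, i.e. a 1 MiB array that plain
 * kmalloc_array() may fail to satisfy as physically contiguous memory. */
static struct page **alloc_page_ptrs(unsigned int count)
{
	return kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
}

static void free_page_ptrs(struct page **pages)
{
	kvfree(pages);	/* handles both the kmalloc and the vmalloc case */
}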
@@ -717,6 +717,15 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
return -1;
}
+#ifdef CONFIG_DEBUG_FS
+static unsigned long mem_used(struct io_tlb_mem *mem)
+{
+return atomic_long_read(&mem->total_used);
+}
+#else /* !CONFIG_DEBUG_FS */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
int i;
@@ -727,6 +736,8 @@ static unsigned long mem_used(struct io_tlb_mem *mem)
return used;
}
+#endif /* CONFIG_DEBUG_FS */
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
size_t mapping_size, size_t alloc_size,
unsigned int alloc_align_mask, enum dma_data_direction dir,
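The hunks above give mem_used() two variants: with CONFIG_DEBUG_FS the total is read from an atomic counter that swiotlb already maintains, otherwise it is still computed by walking the per-area used counts. A rough sketch of the counter-maintenance pattern this relies on follows; the helper names are hypothetical, and only the atomic_long_t field read in the diff is taken from the actual code.

#include <linux/atomic.h>

/* Hypothetical sketch: each successful slot allocation adds to the
 * running total and each release subtracts from it, so reading the
 * total is O(1) instead of a walk over all areas. */
static inline void sketch_account_alloc(atomic_long_t *total_used,
					unsigned int nslots)
{
	atomic_long_add(nslots, total_used);
}

static inline void sketch_account_free(atomic_long_t *total_used,
				       unsigned int nslots)
{
	atomic_long_sub(nslots, total_used);
}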