Commit ff489fe0 authored by Joerg Roedel

Merge branch 'iommu-memory-accounting' into core

Merge patch-set from Jason:

	"Let iommufd charge IOPTE allocations to the memory cgroup"

Description:

IOMMUFD follows the same design as KVM and uses memory cgroups to limit
the amount of kernel memory an iommufd file descriptor can pin down. The
various internal data structures already use GFP_KERNEL_ACCOUNT to charge
their own memory.
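
For reference, the accounting pattern looks like the sketch below. The
structure name is hypothetical, but GFP_KERNEL_ACCOUNT (GFP_KERNEL |
__GFP_ACCOUNT) is the real flag that charges the allocation to the
caller's memory cgroup:

	/* Hypothetical per-FD state, charged to the opener's memcg */
	struct iopt_example_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL_ACCOUNT);
	if (!state)
		return -ENOMEM;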

However, one of the biggest consumers of kernel memory is the IOPTEs
stored under the iommu_domain, and these allocations are not tracked.

This series is the first step in fixing it.

The iommu driver contract already includes a 'gfp' argument to the
map_pages op, so having iommufd pass GFP_KERNEL_ACCOUNT and having the
driver allocate its IOPTE tables with that flag captures a significant
share of the allocations.

Update the iommu_map() API to take the GFP argument and fix all call
sites. Replace iommu_map_atomic() with iommu_map(..., GFP_ATOMIC).
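
A typical call-site conversion, sketched from the hunks below (names are
illustrative):

	/* Before */
	ret = iommu_map(domain, iova, paddr, size, prot);
	ret = iommu_map_atomic(domain, iova, paddr, size, prot);

	/* After: callers state the allocation context explicitly */
	ret = iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
	ret = iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);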

Audit the "enterprise" iommu drivers to make sure they do the right thing.
Intel and S390 ignore the GFP argument and always use GFP_ATOMIC. This is
problematic for iommufd anyhow, so fix it. AMD and ARM SMMUv2/3 are
already correct.
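
The driver-side fix is to thread the gfp from the map path down to the
page-table allocator instead of hard-coding GFP_ATOMIC; a simplified
sketch of the pattern applied in the Intel hunks below:

	/* Before: always atomic, never charged to a cgroup */
	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);

	/* After: the caller's gfp (e.g. GFP_KERNEL_ACCOUNT) is honored */
	page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);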

A follow up series will be needed to capture the allocations made when the
iommu_domain itself is allocated, which will complete the job.

Link: https://lore.kernel.org/linux-iommu/0-v3-76b587fe28df+6e3-iommu_map_gfp_jgg@nvidia.com/
parents 5cef282e 429f27e3
@@ -984,7 +984,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 		len = (j - i) << PAGE_SHIFT;
 		ret = iommu_map(mapping->domain, iova, phys, len,
-				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
+				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
+				GFP_KERNEL);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1207,7 +1208,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		prot = __dma_info_to_prot(dir, attrs);
-		ret = iommu_map(mapping->domain, iova, phys, len, prot);
+		ret = iommu_map(mapping->domain, iova, phys, len, prot,
+				GFP_KERNEL);
 		if (ret < 0)
 			goto fail;
 		count += len >> PAGE_SHIFT;
@@ -1379,7 +1381,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	prot = __dma_info_to_prot(dir, attrs);
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
+			prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
@@ -1443,7 +1446,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
...
@@ -186,9 +186,10 @@ static inline unsigned long *get_st_pto(unsigned long entry)
 /* Prototypes */
 void dma_free_seg_table(unsigned long);
-unsigned long *dma_alloc_cpu_table(void);
+unsigned long *dma_alloc_cpu_table(gfp_t gfp);
 void dma_cleanup_tables(unsigned long *);
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
+				  gfp_t gfp);
 void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);
 extern const struct dma_map_ops s390_pci_dma_ops;
...
@@ -27,11 +27,11 @@ static int zpci_refresh_global(struct zpci_dev *zdev)
 				  zdev->iommu_pages * PAGE_SIZE);
 }
-unsigned long *dma_alloc_cpu_table(void)
+unsigned long *dma_alloc_cpu_table(gfp_t gfp)
 {
 	unsigned long *table, *entry;
-	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
+	table = kmem_cache_alloc(dma_region_table_cache, gfp);
 	if (!table)
 		return NULL;
@@ -45,11 +45,11 @@ static void dma_free_cpu_table(void *table)
 	kmem_cache_free(dma_region_table_cache, table);
 }
-static unsigned long *dma_alloc_page_table(void)
+static unsigned long *dma_alloc_page_table(gfp_t gfp)
 {
 	unsigned long *table, *entry;
-	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
+	table = kmem_cache_alloc(dma_page_table_cache, gfp);
 	if (!table)
 		return NULL;
@@ -63,7 +63,7 @@ static void dma_free_page_table(void *table)
 	kmem_cache_free(dma_page_table_cache, table);
 }
-static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
+static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
 {
 	unsigned long old_rte, rte;
 	unsigned long *sto;
@@ -72,7 +72,7 @@ static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
 	if (reg_entry_isvalid(rte)) {
 		sto = get_rt_sto(rte);
 	} else {
-		sto = dma_alloc_cpu_table();
+		sto = dma_alloc_cpu_table(gfp);
 		if (!sto)
 			return NULL;
@@ -90,7 +90,7 @@ static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
 	return sto;
 }
-static unsigned long *dma_get_page_table_origin(unsigned long *step)
+static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
 {
 	unsigned long old_ste, ste;
 	unsigned long *pto;
@@ -99,7 +99,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step)
 	if (reg_entry_isvalid(ste)) {
 		pto = get_st_pto(ste);
 	} else {
-		pto = dma_alloc_page_table();
+		pto = dma_alloc_page_table(gfp);
 		if (!pto)
 			return NULL;
 		set_st_pto(&ste, virt_to_phys(pto));
@@ -116,18 +116,19 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step)
 	return pto;
 }
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
+				  gfp_t gfp)
 {
 	unsigned long *sto, *pto;
 	unsigned int rtx, sx, px;
 	rtx = calc_rtx(dma_addr);
-	sto = dma_get_seg_table_origin(&rto[rtx]);
+	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
 	if (!sto)
 		return NULL;
 	sx = calc_sx(dma_addr);
-	pto = dma_get_page_table_origin(&sto[sx]);
+	pto = dma_get_page_table_origin(&sto[sx], gfp);
 	if (!pto)
 		return NULL;
@@ -170,7 +171,8 @@ static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
 		return -EINVAL;
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
+					   GFP_ATOMIC);
 		if (!entry) {
 			rc = -ENOMEM;
 			goto undo_cpu_trans;
@@ -186,7 +188,8 @@ static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
 		while (i-- > 0) {
 			page_addr -= PAGE_SIZE;
 			dma_addr -= PAGE_SIZE;
-			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
+						   GFP_ATOMIC);
 			if (!entry)
 				break;
 			dma_update_cpu_trans(entry, page_addr, flags);
@@ -576,7 +579,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 	spin_lock_init(&zdev->iommu_bitmap_lock);
-	zdev->dma_table = dma_alloc_cpu_table();
+	zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
 	if (!zdev->dma_table) {
 		rc = -ENOMEM;
 		goto out;
...
@@ -475,7 +475,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 		u32 offset = (r->offset + i) << imem->iommu_pgshift;
 		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
-				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
+				GFP_KERNEL);
 		if (ret < 0) {
 			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
...
@@ -1057,7 +1057,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
 	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
 	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
-			size, IOMMU_READ | IOMMU_WRITE);
+			size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 	if (err < 0)
 		goto free_iova;
...
@@ -105,7 +105,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
 		pb->dma = iova_dma_addr(&host1x->iova, alloc);
 		err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
-				IOMMU_READ);
+				IOMMU_READ, GFP_KERNEL);
 		if (err)
 			goto iommu_free_iova;
 	} else {
...
@@ -277,7 +277,7 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
 			usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
 					va_start, &pa_start, size, flags);
 			err = iommu_map(pd->domain, va_start, pa_start,
-					size, flags);
+					size, flags, GFP_KERNEL);
 			if (err) {
 				usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					va_start, &pa_start, size, err);
@@ -294,7 +294,7 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
 		usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
 				va_start, &pa_start, size, flags);
 		err = iommu_map(pd->domain, va_start, pa_start,
-				size, flags);
+				size, flags, GFP_KERNEL);
 		if (err) {
 			usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					va_start, &pa_start, size, err);
...
@@ -716,7 +716,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	if (!iova)
 		return DMA_MAPPING_ERROR;
-	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
+	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
 		iommu_dma_free_iova(cookie, iova, size, NULL);
 		return DMA_MAPPING_ERROR;
 	}
@@ -825,7 +825,14 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 	if (!iova)
 		goto out_free_pages;
-	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
+	/*
+	 * Remove the zone/policy flags from the GFP - these are applied to the
+	 * __iommu_dma_alloc_pages() but are not used for the supporting
+	 * internal allocations that follow.
+	 */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
+	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
 		goto out_free_iova;
 	if (!(ioprot & IOMMU_CACHE)) {
@@ -836,7 +843,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
-	ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
+	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
+			   gfp);
 	if (ret < 0 || ret < size)
 		goto out_free_sg;
@@ -1284,7 +1292,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
 	if (ret < 0 || ret < iova_len)
 		goto out_free_iova;
@@ -1618,7 +1626,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!iova)
 		goto out_free_page;
-	if (iommu_map(domain, iova, msi_addr, size, prot))
+	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
 		goto out_free_iova;
 	INIT_LIST_HEAD(&msi_page->list);
...
@@ -362,12 +362,12 @@ static int __init intel_iommu_setup(char *str)
 }
 __setup("intel_iommu=", intel_iommu_setup);
-void *alloc_pgtable_page(int node)
+void *alloc_pgtable_page(int node, gfp_t gfp)
 {
 	struct page *page;
 	void *vaddr = NULL;
-	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+	page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
 	if (page)
 		vaddr = page_address(page);
 	return vaddr;
@@ -612,7 +612,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 		if (!alloc)
 			return NULL;
-		context = alloc_pgtable_page(iommu->node);
+		context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
 		if (!context)
 			return NULL;
@@ -908,7 +908,8 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
 #endif
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int *target_level)
+				      unsigned long pfn, int *target_level,
+				      gfp_t gfp)
 {
 	struct dma_pte *parent, *pte;
 	int level = agaw_to_level(domain->agaw);
@@ -935,7 +936,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval;
-			tmp_page = alloc_pgtable_page(domain->nid);
+			tmp_page = alloc_pgtable_page(domain->nid, gfp);
 			if (!tmp_page)
 				return NULL;
@@ -1186,7 +1187,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
 	struct root_entry *root;
-	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
+	root = (struct root_entry *)alloc_pgtable_page(iommu->node, GFP_ATOMIC);
 	if (!root) {
 		pr_err("Allocating root entry for %s failed\n",
 			iommu->name);
@@ -2150,7 +2151,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
 	while (start_pfn <= end_pfn) {
 		if (!pte)
-			pte = pfn_to_dma_pte(domain, start_pfn, &level);
+			pte = pfn_to_dma_pte(domain, start_pfn, &level,
+					     GFP_ATOMIC);
 		if (dma_pte_present(pte)) {
 			dma_pte_free_pagetable(domain, start_pfn,
@@ -2172,7 +2174,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
 static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-		 unsigned long phys_pfn, unsigned long nr_pages, int prot)
+		 unsigned long phys_pfn, unsigned long nr_pages, int prot,
+		 gfp_t gfp)
 {
 	struct dma_pte *first_pte = NULL, *pte = NULL;
 	unsigned int largepage_lvl = 0;
@@ -2202,7 +2205,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
 					phys_pfn, nr_pages);
-			pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+			pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,
+					     gfp);
 			if (!pte)
 				return -ENOMEM;
 			first_pte = pte;
@@ -2368,7 +2372,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	return __domain_mapping(domain, first_vpfn,
 				first_vpfn, last_vpfn - first_vpfn + 1,
-				DMA_PTE_READ|DMA_PTE_WRITE);
+				DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL);
 }
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
@@ -2676,7 +2680,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 			if (!old_ce)
 				goto out;
-			new_ce = alloc_pgtable_page(iommu->node);
+			new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
 			if (!new_ce)
 				goto out_unmap;
@@ -4136,7 +4140,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->max_addr = 0;
 	/* always allocate the top pgd */
-	domain->pgd = alloc_pgtable_page(domain->nid);
+	domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -4298,7 +4302,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	   the low bits of hpa would take us onto the next page */
 	size = aligned_nrpages(hpa, size);
 	return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
-				hpa >> VTD_PAGE_SHIFT, size, prot);
+				hpa >> VTD_PAGE_SHIFT, size, prot, gfp);
 }
 static int intel_iommu_map_pages(struct iommu_domain *domain,
@@ -4333,7 +4337,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 	/* Cope with horrid API which requires us to unmap more than the
 	   size argument if it happens to be a large-page mapping. */
-	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+			       GFP_ATOMIC));
 	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
 		size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4392,7 +4397,8 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	int level = 0;
 	u64 phys = 0;
-	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
+	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+			     GFP_ATOMIC);
 	if (pte && dma_pte_present(pte))
 		phys = dma_pte_addr(pte) +
 			(iova & (BIT_MASK(level_to_offset_bits(level) +
...
@@ -737,7 +737,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
 extern int dmar_ir_support(void);
-void *alloc_pgtable_page(int node);
+void *alloc_pgtable_page(int node, gfp_t gfp);
 void free_pgtable_page(void *vaddr);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
...
@@ -200,7 +200,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
 retry:
 	entries = get_pasid_table_from_pde(&dir[dir_index]);
 	if (!entries) {
-		entries = alloc_pgtable_page(info->iommu->node);
+		entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
 		if (!entries)
 			return NULL;
...
@@ -954,7 +954,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 			if (map_size) {
 				ret = iommu_map(domain, addr - map_size,
 						addr - map_size, map_size,
-						entry->prot);
+						entry->prot, GFP_KERNEL);
 				if (ret)
 					goto out;
 				map_size = 0;
@@ -2354,34 +2354,27 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
-static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
-		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	const struct iommu_domain_ops *ops = domain->ops;
 	int ret;
+	might_sleep_if(gfpflags_allow_blocking(gfp));
+
+	/* Discourage passing strange GFP flags */
+	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+				__GFP_HIGHMEM)))
+		return -EINVAL;
 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
 	if (ret == 0 && ops->iotlb_sync_map)
 		ops->iotlb_sync_map(domain, iova, size);
 	return ret;
 }
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, size_t size, int prot)
-{
-	might_sleep();
-	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
-}
 EXPORT_SYMBOL_GPL(iommu_map);
-int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
-		     phys_addr_t paddr, size_t size, int prot)
-{
-	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(iommu_map_atomic);
 static size_t __iommu_unmap_pages(struct iommu_domain *domain,
 				  unsigned long iova, size_t size,
 				  struct iommu_iotlb_gather *iotlb_gather)
@@ -2471,9 +2464,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-			      struct scatterlist *sg, unsigned int nents, int prot,
-			      gfp_t gfp)
+ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+		     struct scatterlist *sg, unsigned int nents, int prot,
+		     gfp_t gfp)
 {
 	const struct iommu_domain_ops *ops = domain->ops;
 	size_t len = 0, mapped = 0;
@@ -2481,6 +2474,13 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	unsigned int i = 0;
 	int ret;
+	might_sleep_if(gfpflags_allow_blocking(gfp));
+
+	/* Discourage passing strange GFP flags */
+	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+				__GFP_HIGHMEM)))
+		return -EINVAL;
 	while (i <= nents) {
 		phys_addr_t s_phys = sg_phys(sg);
@@ -2520,21 +2520,8 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
-ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-		     struct scatterlist *sg, unsigned int nents, int prot)
-{
-	might_sleep();
-	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
-}
 EXPORT_SYMBOL_GPL(iommu_map_sg);
-ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
-		struct scatterlist *sg, unsigned int nents, int prot)
-{
-	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
-}
 /**
  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
  * @domain: the iommu domain where the fault has happened
...
@@ -456,7 +456,8 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
 			size % PAGE_SIZE);
 	while (size) {
-		rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
+		rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
+			       GFP_KERNEL_ACCOUNT);
 		if (rc)
 			goto err_unmap;
 		iova += PAGE_SIZE;
@@ -500,7 +501,8 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
 		else
 			rc = iommu_map(domain, iova,
 				       PFN_PHYS(batch->pfns[cur]) + page_offset,
-				       next_iova - iova, area->iommu_prot);
+				       next_iova - iova, area->iommu_prot,
+				       GFP_KERNEL_ACCOUNT);
 		if (rc)
 			goto err_unmap;
 		iova = next_iova;
...
@@ -52,7 +52,7 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
 	if (!s390_domain)
 		return NULL;
-	s390_domain->dma_table = dma_alloc_cpu_table();
+	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
 	if (!s390_domain->dma_table) {
 		kfree(s390_domain);
 		return NULL;
@@ -257,7 +257,8 @@ static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
 static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 				     phys_addr_t pa, dma_addr_t dma_addr,
-				     unsigned long nr_pages, int flags)
+				     unsigned long nr_pages, int flags,
+				     gfp_t gfp)
 {
 	phys_addr_t page_addr = pa & PAGE_MASK;
 	unsigned long *entry;
@@ -265,7 +266,8 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 	int rc;
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
+					   gfp);
 		if (unlikely(!entry)) {
 			rc = -ENOMEM;
 			goto undo_cpu_trans;
@@ -281,7 +283,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 	while (i-- > 0) {
 		dma_addr -= PAGE_SIZE;
 		entry = dma_walk_cpu_trans(s390_domain->dma_table,
-					   dma_addr);
+					   dma_addr, gfp);
 		if (!entry)
 			break;
 		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -298,7 +300,8 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
 	int rc = 0;
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
+					   GFP_ATOMIC);
 		if (unlikely(!entry)) {
 			rc = -EINVAL;
 			break;
@@ -336,7 +339,7 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
 		flags |= ZPCI_TABLE_PROTECTED;
 	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
-				       pgcount, flags);
+				       pgcount, flags, gfp);
 	if (!rc)
 		*mapped = size;
...
@@ -158,7 +158,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
 	core->fw.mapped_mem_size = mem_size;
 	ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
-			IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
+			IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
 	if (ret) {
 		dev_err(dev, "could not map video firmware region\n");
 		return ret;
...
@@ -466,7 +466,8 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
 	size = PAGE_ALIGN(size + addr - phys);
 	iova = phys;	/* We just want a direct mapping */
-	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+			GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -574,7 +575,8 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
 	size = PAGE_ALIGN(size + addr - phys);
 	iova = phys;	/* We just want a direct mapping */
-	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+			GFP_KERNEL);
 	if (ret)
 		return ret;
...
@@ -1639,7 +1639,7 @@ static int ath10k_fw_init(struct ath10k *ar)
 	ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
 			ar->msa.paddr, ar->msa.mem_size,
-			IOMMU_READ | IOMMU_WRITE);
+			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 	if (ret) {
 		ath10k_err(ar, "failed to map firmware region: %d\n", ret);
 		goto err_iommu_detach;
...
@@ -1021,7 +1021,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
 			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
-			IOMMU_READ | IOMMU_WRITE);
+			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 	if (ret) {
 		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
 		goto err_iommu_detach;
@@ -1029,7 +1029,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
 			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
-			IOMMU_READ | IOMMU_WRITE);
+			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 	if (ret) {
 		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
 		goto err_iommu_unmap;
...
@@ -643,7 +643,8 @@ static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
 	if (!mapping)
 		return -ENOMEM;
-	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags,
+			GFP_KERNEL);
 	if (ret) {
 		dev_err(dev, "failed to map devmem: %d\n", ret);
 		goto out;
@@ -737,7 +738,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
 		}
 		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
-				mem->flags);
+				mem->flags, GFP_KERNEL);
 		if (ret) {
 			dev_err(dev, "iommu_map failed: %d\n", ret);
 			goto free_mapping;
...
@@ -1480,7 +1480,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 	list_for_each_entry(d, &iommu->domain_list, next) {
 		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
-				npage << PAGE_SHIFT, prot | IOMMU_CACHE);
+				npage << PAGE_SHIFT, prot | IOMMU_CACHE,
+				GFP_KERNEL);
 		if (ret)
 			goto unwind;
@@ -1777,8 +1778,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 				size = npage << PAGE_SHIFT;
 			}
-			ret = iommu_map(domain->domain, iova, phys,
-					size, dma->prot | IOMMU_CACHE);
+			ret = iommu_map(domain->domain, iova, phys, size,
+					dma->prot | IOMMU_CACHE, GFP_KERNEL);
 			if (ret) {
 				if (!dma->iommu_mapped) {
 					vfio_unpin_pages_remote(dma, iova,
@@ -1866,7 +1867,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
 		return;
 	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
 	if (!ret) {
 		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
...
@@ -792,7 +792,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
 		r = ops->set_map(vdpa, asid, iotlb);
 	} else {
 		r = iommu_map(v->domain, iova, pa, size,
-			      perm_to_iommu_flags(perm));
+			      perm_to_iommu_flags(perm), GFP_KERNEL);
 	}
 	if (r) {
 		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
...
@@ -471,19 +471,15 @@ extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
-		     phys_addr_t paddr, size_t size, int prot);
-extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
-			    phys_addr_t paddr, size_t size, int prot);
+		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			  size_t size);
 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
 			       unsigned long iova, size_t size,
 			       struct iommu_iotlb_gather *iotlb_gather);
 extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-			    struct scatterlist *sg, unsigned int nents, int prot);
-extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
-				   unsigned long iova, struct scatterlist *sg,
-				   unsigned int nents, int prot);
+			    struct scatterlist *sg, unsigned int nents,
+			    int prot, gfp_t gfp);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
@@ -777,14 +773,7 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
 }
 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
-			    phys_addr_t paddr, size_t size, int prot)
-{
-	return -ENODEV;
-}
-static inline int iommu_map_atomic(struct iommu_domain *domain,
-				   unsigned long iova, phys_addr_t paddr,
-				   size_t size, int prot)
+			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	return -ENODEV;
 }
@@ -804,14 +793,7 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
 static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
 				   unsigned long iova, struct scatterlist *sg,
-				   unsigned int nents, int prot)
-{
-	return -ENODEV;
-}
-static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
-				  unsigned long iova, struct scatterlist *sg,
-				  unsigned int nents, int prot)
+				   unsigned int nents, int prot, gfp_t gfp)
 {
 	return -ENODEV;
 }
@@ -1122,7 +1104,8 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
 static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
 			unsigned long iova, struct sg_table *sgt, int prot)
 {
-	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
+			    GFP_KERNEL);
 }
 #ifdef CONFIG_IOMMU_DEBUGFS
...