Commit 95b18ef9 authored by Pasha Tatashin, committed by Joerg Roedel

iommu/dma: use iommu_put_pages_list() to release freelist

Free the IOMMU page tables via iommu_put_pages_list(). The page tables
were allocated via iommu_alloc_* functions in architecture-specific
places, but are released in dma-iommu when the freelist is gathered into
the iommu_iotlb_gather data structure during map/unmap operations.

Currently, only iommu/intel does that.
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: David Rientjes <rientjes@google.com>
Link: https://lore.kernel.org/r/20240413002522.1101315-3-pasha.tatashin@soleen.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 06c37505
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -32,6 +32,7 @@
 #include <trace/events/swiotlb.h>
 
 #include "dma-iommu.h"
+#include "iommu-pages.h"
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -156,7 +157,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
 		if (fq->entries[idx].counter >= counter)
 			break;
 
-		put_pages_list(&fq->entries[idx].freelist);
+		iommu_put_pages_list(&fq->entries[idx].freelist);
 		free_iova_fast(&cookie->iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
@@ -254,7 +255,7 @@ static void iommu_dma_free_fq_single(struct iova_fq *fq)
 	int idx;
 
 	fq_ring_for_each(idx, fq)
-		put_pages_list(&fq->entries[idx].freelist);
+		iommu_put_pages_list(&fq->entries[idx].freelist);
 	vfree(fq);
 }
@@ -267,7 +268,7 @@ static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
 		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
 
 		fq_ring_for_each(idx, fq)
-			put_pages_list(&fq->entries[idx].freelist);
+			iommu_put_pages_list(&fq->entries[idx].freelist);
 	}
 
 	free_percpu(percpu_fq);
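For context on the pattern these hunks rely on, below is a minimal sketch, not
actual driver code, of how the freelist mentioned in the commit message is
produced and consumed: a driver that retires page-table pages during unmap
links them onto the list carried in struct iommu_iotlb_gather (a plain
struct list_head at the time of this commit), and dma-iommu later hands the
whole batch back with iommu_put_pages_list() once the corresponding IOTLB
flush has completed. The example_* names are hypothetical; only the list
helpers, struct iommu_iotlb_gather, and iommu_put_pages_list() are assumed
from the kernel.

/*
 * Hedged sketch, not the Intel driver implementation.  The example_*
 * functions are hypothetical; struct iommu_iotlb_gather, list_add_tail()
 * and iommu_put_pages_list() are the kernel interfaces assumed here.
 */
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/mm.h>

#include "iommu-pages.h"

/* Driver side: queue a retired page-table page for deferred freeing. */
static void example_gather_pgtable_page(struct iommu_iotlb_gather *gather,
					struct page *pgtable_page)
{
	/* Defer the free until the IOTLB flush covering this range completes. */
	list_add_tail(&pgtable_page->lru, &gather->freelist);
}

/* dma-iommu side: after the flush, release the whole batch at once. */
static void example_release_freelist(struct list_head *freelist)
{
	iommu_put_pages_list(freelist);
}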