Commit 8cc93159 authored by Lu Baolu, committed by Joerg Roedel

iommu: Prepare IOMMU domain for IOPF

This adds some mechanism around the iommu_domain so that the I/O page
fault handling framework can route a page fault to the domain and
call the fault handler installed on it.

Add pointers to the page fault handler and its private data in struct
iommu_domain. The fault handler will be called with the private data
as a parameter once a page fault is routed to the domain. Any kernel
component which owns an iommu domain can install a handler and its
private data so that page faults can be further routed and handled.
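As an illustration, the installation could look like the sketch below. Only the iopf_handler and fault_data fields come from this patch; my_fault_ctx, my_iopf_handler() and my_domain_setup() are hypothetical names made up for the example.

#include <linux/iommu.h>

/* Hypothetical owner-private context, named for this sketch only. */
struct my_fault_ctx {
	struct device *dev;
};

static enum iommu_page_response_code
my_iopf_handler(struct iommu_fault *fault, void *data)
{
	struct my_fault_ctx *ctx = data;	/* the installed fault_data */

	dev_dbg(ctx->dev, "IOPF at %llx\n", fault->prm.addr);
	/* resolve the fault here, then report the outcome */
	return IOMMU_PAGE_RESP_SUCCESS;
}

static void my_domain_setup(struct iommu_domain *domain,
			    struct my_fault_ctx *ctx)
{
	domain->iopf_handler = my_iopf_handler;
	domain->fault_data = ctx;
}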

This also prepares the SVA implementation to be the first consumer of
the per-domain page fault handling model. The I/O page fault handler
for SVA is copied to the SVA file, with mmget_not_zero() added before
mmap_read_lock(): the handler runs asynchronously to the faulting
process, so the mm may already be exiting and must be pinned before
its mmap lock is taken.
Suggested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Tested-by: Tony Zhu <tony.zhu@intel.com>
Link: https://lore.kernel.org/r/20221031005917.45690-12-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 1c263576
...@@ -181,6 +181,13 @@ static void iopf_handle_group(struct work_struct *work)
 * request completes, outstanding faults will have been dealt with by the time
 * the PASID is freed.
 *
* Any valid page fault will be eventually routed to an iommu domain and the
* page fault handler installed there will get called. The users of this
* handling framework should guarantee that the iommu domain could only be
* freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults
* have been flushed.
*
 * Return: 0 on success and <0 on error.
 */
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
...
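The lifetime rule spelled out in the comment above implies a teardown ordering roughly like the following sketch. disable_device_pri() is a placeholder for whatever device-specific step stops new page requests; iopf_queue_flush_dev() and iommu_domain_free() are existing helpers.

/*
 * Sketch of the teardown ordering implied by the comment above:
 * stop the fault source first, then drain, then free.
 */
static void my_sva_teardown(struct device *dev, struct iommu_domain *domain)
{
	disable_device_pri(dev);	/* 1. device generates no new faults */
	iopf_queue_flush_dev(dev);	/* 2. pending faults are flushed */
	iommu_domain_free(domain);	/* 3. only now is freeing safe */
}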
...@@ -180,3 +180,61 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
	return domain->mm->pasid;
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

/*
 * I/O page fault handler for SVA
 */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = find_extend_vma(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
...@@ -26,6 +26,8 @@ int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);

enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
...@@ -63,5 +65,11 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	return IOMMU_PAGE_RESP_INVALID;
}
#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_LIB_H */
...@@ -33,6 +33,8 @@
#include "dma-iommu.h"
#include "iommu-sva-lib.h"
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
...@@ -3309,6 +3311,8 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->iopf_handler = iommu_sva_handle_iopf;
	domain->fault_data = mm;
	return domain;
}
...@@ -98,6 +98,9 @@ struct iommu_domain {
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
...
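With both fields in place, the routing step that later patches in this series wire up can be pictured as little more than the following sketch (dispatch_to_domain() is a made-up name; this patch only adds the fields, not the call site):

/*
 * Sketch of the eventual dispatch: the private fault_data installed by
 * the domain owner is handed back as the handler's "data" argument.
 */
static enum iommu_page_response_code
dispatch_to_domain(struct iommu_domain *domain, struct iommu_fault *fault)
{
	if (!domain->iopf_handler)
		return IOMMU_PAGE_RESP_INVALID;

	return domain->iopf_handler(fault, domain->fault_data);
}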