Commit 24b5d268 authored by Lu Baolu, committed by Joerg Roedel

iommu: Prepare for separating SVA and IOPF

Move the iopf_group data structure to iommu.h so that it represents the
minimal set of faults that a domain's page fault handler should handle.

Add a new function, iopf_free_group(), to free a fault group after all
faults in the group are handled. This function will be made global so
that it can be called from other files, such as iommu-sva.c.

Move the iopf_queue data structure to iommu.h so that work can be
scheduled on the fault workqueue from outside this file.

This will simplify the subsequent patches.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Longfang Liu <liulongfang@huawei.com>
Link: https://lore.kernel.org/r/20240212012227.119381-9-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -13,24 +13,17 @@
 
 #include "iommu-sva.h"
 
-/**
- * struct iopf_queue - IO Page Fault queue
- * @wq: the fault workqueue
- * @devices: devices attached to this queue
- * @lock: protects the device list
- */
-struct iopf_queue {
-	struct workqueue_struct	*wq;
-	struct list_head	devices;
-	struct mutex		lock;
-};
-
-struct iopf_group {
-	struct iopf_fault	last_fault;
-	struct list_head	faults;
-	struct work_struct	work;
-	struct device		*dev;
-};
+static void iopf_free_group(struct iopf_group *group)
+{
+	struct iopf_fault *iopf, *next;
+
+	list_for_each_entry_safe(iopf, next, &group->faults, list) {
+		if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+			kfree(iopf);
+	}
+
+	kfree(group);
+}
 
 static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
 			       enum iommu_page_response_code status)
@@ -50,9 +43,9 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
 
 static void iopf_handler(struct work_struct *work)
 {
+	struct iopf_fault *iopf;
 	struct iopf_group *group;
 	struct iommu_domain *domain;
-	struct iopf_fault *iopf, *next;
 	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 
 	group = container_of(work, struct iopf_group, work);
@@ -61,7 +54,7 @@ static void iopf_handler(struct work_struct *work)
 	if (!domain || !domain->iopf_handler)
 		status = IOMMU_PAGE_RESP_INVALID;
 
-	list_for_each_entry_safe(iopf, next, &group->faults, list) {
+	list_for_each_entry(iopf, &group->faults, list) {
 		/*
 		 * For the moment, errors are sticky: don't handle subsequent
 		 * faults in the group if there is an error.
@@ -69,14 +62,10 @@ static void iopf_handler(struct work_struct *work)
 		if (status == IOMMU_PAGE_RESP_SUCCESS)
 			status = domain->iopf_handler(&iopf->fault,
 						      domain->fault_data);
-
-		if (!(iopf->fault.prm.flags &
-		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
-			kfree(iopf);
 	}
 
 	iopf_complete_group(group->dev, &group->last_fault, status);
-	kfree(group);
+	iopf_free_group(group);
 }
 
 /**
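After this change, iopf_handler() walks the fault list read-only and leaves
all freeing to iopf_free_group(). As a rough sketch of where this is heading
(not part of this patch): once a later patch in the series makes
iopf_free_group() and iopf_complete_group() callable from outside
io-pgfault.c, a consumer such as iommu-sva.c could handle and release a
group along these lines. sva_handle_iopf_group() and sva_handle_one_fault()
are hypothetical names used only for illustration.

#include <linux/iommu.h>

/* Hypothetical per-fault handler provided by the SVA code. */
static enum iommu_page_response_code
sva_handle_one_fault(struct iommu_fault *fault);

/*
 * Sketch only, not part of this patch: handle every fault in a group,
 * respond to the device, then release the group with the new helper.
 */
static void sva_handle_iopf_group(struct iopf_group *group)
{
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	/* Same sticky-error policy as iopf_handler() above. */
	list_for_each_entry(iopf, &group->faults, list) {
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = sva_handle_one_fault(&iopf->fault);
	}

	/* Respond to the device, then free every fault and the group. */
	iopf_complete_group(group->dev, &group->last_fault, status);
	iopf_free_group(group);
}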
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -41,7 +41,6 @@ struct iommu_dirty_ops;
 struct notifier_block;
 struct iommu_sva;
 struct iommu_dma_cookie;
-struct iopf_queue;
 
 #define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
 #define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
@@ -126,6 +125,25 @@ struct iopf_fault {
 	struct list_head list;
 };
 
+struct iopf_group {
+	struct iopf_fault	last_fault;
+	struct list_head	faults;
+	struct work_struct	work;
+	struct device		*dev;
+};
+
+/**
+ * struct iopf_queue - IO Page Fault queue
+ * @wq: the fault workqueue
+ * @devices: devices attached to this queue
+ * @lock: protects the device list
+ */
+struct iopf_queue {
+	struct workqueue_struct	*wq;
+	struct list_head	devices;
+	struct mutex		lock;
+};
+
 /* iommu fault flags */
 #define IOMMU_FAULT_READ	0x0
 #define IOMMU_FAULT_WRITE	0x1
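With struct iopf_group and struct iopf_queue now visible in iommu.h, the
link between the two is the work item: a reporting path collects the faults
of one group and queues group->work on queue->wq. The sketch below mirrors
the group setup io-pgfault.c already performs internally, as if it lived in
that file; queue_fault_group() is a hypothetical name, and partial-fault
matching by PASID/group ID is omitted for brevity.

#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/*
 * Sketch only: build a fault group and hand it to the fault workqueue.
 * iopf_handler() is the work function defined in io-pgfault.c.
 */
static int queue_fault_group(struct iopf_queue *queue, struct device *dev,
			     struct iommu_fault *fault,
			     struct list_head *partials)
{
	struct iopf_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	group->dev = dev;
	group->last_fault.fault = *fault;	/* the LAST_PAGE fault */
	INIT_LIST_HEAD(&group->faults);
	list_add(&group->last_fault.list, &group->faults);
	/* Earlier partial faults run before the last fault. */
	list_splice_init(partials, &group->faults);

	/* Hand the whole group to the fault workqueue in one shot. */
	INIT_WORK(&group->work, iopf_handler);
	queue_work(queue->wq, &group->work);
	return 0;
}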