Commit 3dfa64ae authored by Lu Baolu's avatar Lu Baolu Committed by Joerg Roedel

iommu: Make iommu_report_device_fault() return void

As the iommu_report_device_fault() has been converted to auto-respond a
page fault if it fails to enqueue it, there's no need to return a code
in any case. Make it return void.
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240212012227.119381-17-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent b554e396
...@@ -1455,7 +1455,7 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid) ...@@ -1455,7 +1455,7 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
/* IRQ and event handlers */ /* IRQ and event handlers */
static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
{ {
int ret; int ret = 0;
u32 perm = 0; u32 perm = 0;
struct arm_smmu_master *master; struct arm_smmu_master *master;
bool ssid_valid = evt[0] & EVTQ_0_SSV; bool ssid_valid = evt[0] & EVTQ_0_SSV;
...@@ -1511,7 +1511,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) ...@@ -1511,7 +1511,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
goto out_unlock; goto out_unlock;
} }
ret = iommu_report_device_fault(master->dev, &fault_evt); iommu_report_device_fault(master->dev, &fault_evt);
out_unlock: out_unlock:
mutex_unlock(&smmu->streams_mutex); mutex_unlock(&smmu->streams_mutex);
return ret; return ret;
......
...@@ -561,14 +561,11 @@ static int prq_to_iommu_prot(struct page_req_dsc *req) ...@@ -561,14 +561,11 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
return prot; return prot;
} }
static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
struct page_req_dsc *desc) struct page_req_dsc *desc)
{ {
struct iopf_fault event = { }; struct iopf_fault event = { };
if (!dev || !dev_is_pci(dev))
return -ENODEV;
/* Fill in event data for device specific processing */ /* Fill in event data for device specific processing */
event.fault.type = IOMMU_FAULT_PAGE_REQ; event.fault.type = IOMMU_FAULT_PAGE_REQ;
event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT; event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
...@@ -601,7 +598,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, ...@@ -601,7 +598,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
event.fault.prm.private_data[0] = ktime_to_ns(ktime_get()); event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
} }
return iommu_report_device_fault(dev, &event); iommu_report_device_fault(dev, &event);
} }
static void handle_bad_prq_event(struct intel_iommu *iommu, static void handle_bad_prq_event(struct intel_iommu *iommu,
...@@ -704,12 +701,10 @@ static irqreturn_t prq_event_thread(int irq, void *d) ...@@ -704,12 +701,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!pdev) if (!pdev)
goto bad_req; goto bad_req;
if (intel_svm_prq_report(iommu, &pdev->dev, req)) intel_svm_prq_report(iommu, &pdev->dev, req);
handle_bad_prq_event(iommu, req, QI_RESP_INVALID); trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
else req->priv_data[0], req->priv_data[1],
trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, iommu->prq_seq_number++);
req->priv_data[0], req->priv_data[1],
iommu->prq_seq_number++);
pci_dev_put(pdev); pci_dev_put(pdev);
prq_advance: prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK; head = (head + sizeof(*req)) & PRQ_RING_MASK;
......
...@@ -176,26 +176,22 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param, ...@@ -176,26 +176,22 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
* freed after the device has stopped generating page faults (or the iommu * freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults * hardware has been set to block the page faults) and the pending page faults
* have been flushed. * have been flushed.
*
* Return: 0 on success and <0 on error.
*/ */
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{ {
struct iommu_fault *fault = &evt->fault; struct iommu_fault *fault = &evt->fault;
struct iommu_fault_param *iopf_param; struct iommu_fault_param *iopf_param;
struct iopf_group abort_group = {}; struct iopf_group abort_group = {};
struct iopf_group *group; struct iopf_group *group;
int ret;
iopf_param = iopf_get_dev_fault_param(dev); iopf_param = iopf_get_dev_fault_param(dev);
if (WARN_ON(!iopf_param)) if (WARN_ON(!iopf_param))
return -ENODEV; return;
if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
ret = report_partial_fault(iopf_param, fault); report_partial_fault(iopf_param, fault);
iopf_put_dev_fault_param(iopf_param); iopf_put_dev_fault_param(iopf_param);
/* A request that is not the last does not need to be ack'd */ /* A request that is not the last does not need to be ack'd */
return ret;
} }
/* /*
...@@ -207,25 +203,21 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) ...@@ -207,25 +203,21 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
* leaving, otherwise partial faults will be stuck. * leaving, otherwise partial faults will be stuck.
*/ */
group = iopf_group_alloc(iopf_param, evt, &abort_group); group = iopf_group_alloc(iopf_param, evt, &abort_group);
if (group == &abort_group) { if (group == &abort_group)
ret = -ENOMEM;
goto err_abort; goto err_abort;
}
group->domain = get_domain_for_iopf(dev, fault); group->domain = get_domain_for_iopf(dev, fault);
if (!group->domain) { if (!group->domain)
ret = -EINVAL;
goto err_abort; goto err_abort;
}
/* /*
* On success iopf_handler must call iopf_group_response() and * On success iopf_handler must call iopf_group_response() and
* iopf_free_group() * iopf_free_group()
*/ */
ret = group->domain->iopf_handler(group); if (group->domain->iopf_handler(group))
if (ret)
goto err_abort; goto err_abort;
return 0;
return;
err_abort: err_abort:
iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE); iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
...@@ -233,7 +225,6 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) ...@@ -233,7 +225,6 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
__iopf_free_group(group); __iopf_free_group(group);
else else
iopf_free_group(group); iopf_free_group(group);
return ret;
} }
EXPORT_SYMBOL_GPL(iommu_report_device_fault); EXPORT_SYMBOL_GPL(iommu_report_device_fault);
......
...@@ -1545,7 +1545,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name); ...@@ -1545,7 +1545,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue); void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue); int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group); void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt); void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group, void iopf_group_response(struct iopf_group *group,
enum iommu_page_response_code status); enum iommu_page_response_code status);
#else #else
...@@ -1583,10 +1583,9 @@ static inline void iopf_free_group(struct iopf_group *group) ...@@ -1583,10 +1583,9 @@ static inline void iopf_free_group(struct iopf_group *group)
{ {
} }
static inline int static inline void
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{ {
return -ENODEV;
} }
static inline void iopf_group_response(struct iopf_group *group, static inline void iopf_group_response(struct iopf_group *group,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment