Commit 70eadc7f authored by Jason Gunthorpe

iommufd: Allow a hwpt to be aborted after allocation

During creation the hwpt must have the ioas->mutex held until the object
is finalized. This means we need to be able to call
iommufd_object_abort_and_destroy() while holding the mutex.

Since iommufd_hw_pagetable_destroy() also needs the mutex this is
problematic.

Fix it by creating a special abort op for the object that can assume the
caller is holding the lock, as required by the contract.

The next patch will add another caller of iommufd_object_abort_and_destroy()
for a hwpt.
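
As a rough illustration of that contract, the caller-side flow looks like the
sketch below. example_hwpt_alloc() and example_attach_step() are hypothetical
stand-ins (the real allocation entry point is iommufd_hw_pagetable_alloc(),
whose full argument list is not shown here); only the locking and the
abort/finalize split is the point:

/*
 * Editorial sketch, not part of this commit. example_hwpt_alloc() and
 * example_attach_step() are stand-ins so the locking contract can be
 * shown without pinning down the real argument lists.
 */
static int example_create_hwpt(struct iommufd_ctx *ictx,
			       struct iommufd_ioas *ioas,
			       struct iommufd_device *idev)
{
	struct iommufd_hw_pagetable *hwpt;
	int rc = 0;

	mutex_lock(&ioas->mutex);
	hwpt = example_hwpt_alloc(ictx, ioas, idev);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out_unlock;
	}

	rc = example_attach_step(hwpt, idev);
	if (rc)
		/* ioas->mutex is still held: relies on the new abort op */
		iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	else
		iommufd_object_finalize(ictx, &hwpt->obj);

out_unlock:
	mutex_unlock(&ioas->mutex);
	return rc;
}

The abort op can assume the mutex is already held (hence the
lockdep_assert_held() in the diff below), while the normal destroy path takes
it on its own, which is what made the plain destroy unusable here.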

Fixes: e8d57210 ("iommufd: Add kAPI toward external drivers for physical devices")
Link: https://lore.kernel.org/r/10-v8-6659224517ea+532-iommufd_alloc_jgg@nvidia.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 17bad527
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -25,6 +25,21 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
 	refcount_dec(&hwpt->ioas->obj.users);
 }
 
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj)
+{
+	struct iommufd_hw_pagetable *hwpt =
+		container_of(obj, struct iommufd_hw_pagetable, obj);
+
+	/* The ioas->mutex must be held until finalize is called. */
+	lockdep_assert_held(&hwpt->ioas->mutex);
+
+	if (!list_empty(&hwpt->hwpt_item)) {
+		list_del_init(&hwpt->hwpt_item);
+		iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
+	}
+	iommufd_hw_pagetable_destroy(obj);
+}
+
 int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
 {
 	if (hwpt->enforce_cache_coherency)
@@ -49,6 +64,10 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
  * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
  * will be linked to the given ioas and upon return the underlying iommu_domain
  * is fully popoulated.
+ *
+ * The caller must hold the ioas->mutex until after
+ * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
+ * the returned hwpt.
  */
 struct iommufd_hw_pagetable *
 iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -260,6 +260,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 struct iommufd_hw_pagetable *
 iommufd_hw_pagetable_detach(struct iommufd_device *idev);
 void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj);
 
 static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
 					    struct iommufd_hw_pagetable *hwpt)
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -24,6 +24,7 @@
 
 struct iommufd_object_ops {
 	void (*destroy)(struct iommufd_object *obj);
+	void (*abort)(struct iommufd_object *obj);
 };
 static const struct iommufd_object_ops iommufd_object_ops[];
 static struct miscdevice vfio_misc_dev;
@@ -95,7 +96,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
 void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
 				      struct iommufd_object *obj)
 {
-	iommufd_object_ops[obj->type].destroy(obj);
+	if (iommufd_object_ops[obj->type].abort)
+		iommufd_object_ops[obj->type].abort(obj);
+	else
+		iommufd_object_ops[obj->type].destroy(obj);
 	iommufd_object_abort(ictx, obj);
 }
 
@@ -425,6 +429,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
 	},
 	[IOMMUFD_OBJ_HW_PAGETABLE] = {
 		.destroy = iommufd_hw_pagetable_destroy,
+		.abort = iommufd_hw_pagetable_abort,
 	},
 #ifdef CONFIG_IOMMUFD_TEST
 	[IOMMUFD_OBJ_SELFTEST] = {