Commit 0299a13a authored by Linus Torvalds

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd fixes from Jason Gunthorpe:
 "Two user-triggerable problems:

   - Syzkaller found a way to trigger a WARN_ON and leak memory by
     racing destroy with other actions

   - There is still a bug in the "batch carry" stuff that gets invoked
     for complex cases with accesses and unmapping of huge pages. The
     test suite found this (triggers rarely)"

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
  iommufd: Set end correctly when doing batch carry
  iommufd: IOMMUFD_DESTROY should not increase the refcount
parents c75981a1 b7c822fa
...@@ -109,10 +109,7 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD); ...@@ -109,10 +109,7 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
*/ */
void iommufd_device_unbind(struct iommufd_device *idev) void iommufd_device_unbind(struct iommufd_device *idev)
{ {
bool was_destroyed; iommufd_object_destroy_user(idev->ictx, &idev->obj);
was_destroyed = iommufd_object_destroy_user(idev->ictx, &idev->obj);
WARN_ON(!was_destroyed);
} }
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD); EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);
...@@ -382,7 +379,7 @@ void iommufd_device_detach(struct iommufd_device *idev) ...@@ -382,7 +379,7 @@ void iommufd_device_detach(struct iommufd_device *idev)
mutex_unlock(&hwpt->devices_lock); mutex_unlock(&hwpt->devices_lock);
if (hwpt->auto_domain) if (hwpt->auto_domain)
iommufd_object_destroy_user(idev->ictx, &hwpt->obj); iommufd_object_deref_user(idev->ictx, &hwpt->obj);
else else
refcount_dec(&hwpt->obj.users); refcount_dec(&hwpt->obj.users);
...@@ -456,10 +453,7 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD); ...@@ -456,10 +453,7 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
*/ */
void iommufd_access_destroy(struct iommufd_access *access) void iommufd_access_destroy(struct iommufd_access *access)
{ {
bool was_destroyed; iommufd_object_destroy_user(access->ictx, &access->obj);
was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
WARN_ON(!was_destroyed);
} }
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD); EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
......
...@@ -176,8 +176,19 @@ void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx, ...@@ -176,8 +176,19 @@ void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj); struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx, void iommufd_object_finalize(struct iommufd_ctx *ictx,
struct iommufd_object *obj); struct iommufd_object *obj);
void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj, bool allow_fail);

/*
 * Destroy an object the caller holds a users refcount on. A failure to
 * destroy is a driver bug and triggers a WARN_ON inside
 * __iommufd_object_destroy_user().
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	__iommufd_object_destroy_user(ictx, obj, false);
}

/*
 * Like iommufd_object_destroy_user() but a busy object is tolerated: the
 * caller's reference is put back and destruction is silently skipped.
 */
static inline void iommufd_object_deref_user(struct iommufd_ctx *ictx,
					     struct iommufd_object *obj)
{
	__iommufd_object_destroy_user(ictx, obj, true);
}
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
size_t size, size_t size,
enum iommufd_object_type type); enum iommufd_object_type type);
......
...@@ -116,14 +116,56 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id, ...@@ -116,14 +116,56 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
return obj; return obj;
} }
/*
 * Remove the given object id from the xarray if the only reference to the
 * object is held by the xarray. The caller must call ops destroy().
 *
 * When @extra_put is true the caller also holds a users reference, which is
 * put here under the xa_lock so no racing thread can observe a transiently
 * elevated refcount.
 */
static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx,
						    u32 id, bool extra_put)
{
	struct iommufd_object *obj;
	XA_STATE(xas, &ictx->objects, id);

	xa_lock(&ictx->objects);
	obj = xas_load(&xas);
	/* XA_ZERO_ENTRY means the id is reserved but not yet finalized */
	if (xa_is_zero(obj) || !obj) {
		obj = ERR_PTR(-ENOENT);
		goto out_xa;
	}

	/*
	 * If the caller is holding a ref on obj we put it here under the
	 * spinlock.
	 */
	if (extra_put)
		refcount_dec(&obj->users);

	/* Only the xarray's own reference may remain, otherwise it is busy */
	if (!refcount_dec_if_one(&obj->users)) {
		obj = ERR_PTR(-EBUSY);
		goto out_xa;
	}

	xas_store(&xas, NULL);
	if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
		ictx->vfio_ioas = NULL;

out_xa:
	xa_unlock(&ictx->objects);

	/* The returned object reference count is zero */
	return obj;
}
/* /*
* The caller holds a users refcount and wants to destroy the object. Returns * The caller holds a users refcount and wants to destroy the object. Returns
* true if the object was destroyed. In all cases the caller no longer has a * true if the object was destroyed. In all cases the caller no longer has a
* reference on obj. * reference on obj.
*/ */
bool iommufd_object_destroy_user(struct iommufd_ctx *ictx, void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
struct iommufd_object *obj) struct iommufd_object *obj, bool allow_fail)
{ {
struct iommufd_object *ret;
/* /*
* The purpose of the destroy_rwsem is to ensure deterministic * The purpose of the destroy_rwsem is to ensure deterministic
* destruction of objects used by external drivers and destroyed by this * destruction of objects used by external drivers and destroyed by this
...@@ -131,22 +173,22 @@ bool iommufd_object_destroy_user(struct iommufd_ctx *ictx, ...@@ -131,22 +173,22 @@ bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
* side of this, such as during ioctl execution. * side of this, such as during ioctl execution.
*/ */
down_write(&obj->destroy_rwsem); down_write(&obj->destroy_rwsem);
xa_lock(&ictx->objects); ret = iommufd_object_remove(ictx, obj->id, true);
refcount_dec(&obj->users);
if (!refcount_dec_if_one(&obj->users)) {
xa_unlock(&ictx->objects);
up_write(&obj->destroy_rwsem);
return false;
}
__xa_erase(&ictx->objects, obj->id);
if (ictx->vfio_ioas && &ictx->vfio_ioas->obj == obj)
ictx->vfio_ioas = NULL;
xa_unlock(&ictx->objects);
up_write(&obj->destroy_rwsem); up_write(&obj->destroy_rwsem);
if (allow_fail && IS_ERR(ret))
return;
/*
* If there is a bug and we couldn't destroy the object then we did put
* back the caller's refcount and will eventually try to free it again
* during close.
*/
if (WARN_ON(IS_ERR(ret)))
return;
iommufd_object_ops[obj->type].destroy(obj); iommufd_object_ops[obj->type].destroy(obj);
kfree(obj); kfree(obj);
return true;
} }
static int iommufd_destroy(struct iommufd_ucmd *ucmd) static int iommufd_destroy(struct iommufd_ucmd *ucmd)
...@@ -154,13 +196,11 @@ static int iommufd_destroy(struct iommufd_ucmd *ucmd) ...@@ -154,13 +196,11 @@ static int iommufd_destroy(struct iommufd_ucmd *ucmd)
struct iommu_destroy *cmd = ucmd->cmd; struct iommu_destroy *cmd = ucmd->cmd;
struct iommufd_object *obj; struct iommufd_object *obj;
obj = iommufd_get_object(ucmd->ictx, cmd->id, IOMMUFD_OBJ_ANY); obj = iommufd_object_remove(ucmd->ictx, cmd->id, false);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
iommufd_ref_to_users(obj); iommufd_object_ops[obj->type].destroy(obj);
/* See iommufd_ref_to_users() */ kfree(obj);
if (!iommufd_object_destroy_user(ucmd->ictx, obj))
return -EBUSY;
return 0; return 0;
} }
......
...@@ -297,7 +297,7 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns) ...@@ -297,7 +297,7 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
batch->pfns[0] = batch->pfns[batch->end - 1] + batch->pfns[0] = batch->pfns[batch->end - 1] +
(batch->npfns[batch->end - 1] - keep_pfns); (batch->npfns[batch->end - 1] - keep_pfns);
batch->npfns[0] = keep_pfns; batch->npfns[0] = keep_pfns;
batch->end = 0; batch->end = 1;
} }
static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns) static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment