Commit 6df969b7 authored by Linus Torvalds

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd updates from Jason Gunthorpe:
 "Two series:

   - Reorganize how the hardware page table objects are managed,
     particularly their destruction flow. Increase the selftest test
     coverage in this area by creating a more complete mock iommu
     driver.

     This is preparation to add a replace operation for HWPT binding,
     which is done but waiting for the VFIO parts to complete so there
     is a user.

   - Split the iommufd support for "access" to make it two step -
     allocate an access then link it to an IOAS. Update VFIO and have
     VFIO always create an access even for the VFIO mdevs that never do
     DMA.

     This is also preparation for the replace VFIO series that will
     allow replace to work on access types as well.

  Three minor fixes:

   - Syzkaller found the selftest code didn't check for overflow when
     processing user VAs

   - smatch noted a .data item should have been static

   - Add a selftest that reproduces a syzkaller bug for batch carry
     already fixed in rc"
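
The "access" split changes iommufd_access_create() so it no longer takes an
IOAS ID and instead returns the object ID through a new out parameter, with
the IOAS linked by a separate iommufd_access_attach() call (see the
include/linux/iommufd.h hunk below). A minimal sketch of the new two-step
calling convention, assuming a consumer that already has an iommufd_ctx;
ictx, my_ops, my_data and ioas_id here are placeholders:

	struct iommufd_access *access;
	u32 access_id;
	int rc;

	/* Step 1: allocate the access object; no IOAS is needed yet */
	access = iommufd_access_create(ictx, &my_ops, my_data, &access_id);
	if (IS_ERR(access))
		return PTR_ERR(access);

	/* Step 2: link the access to an IOAS once one is chosen */
	rc = iommufd_access_attach(access, ioas_id);
	if (rc) {
		iommufd_access_destroy(access);
		return rc;
	}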

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd: (21 commits)
  iommufd/selftest: Cover domain unmap with huge pages and access
  iommufd/selftest: Set variable mock_iommu_device storage-class-specifier to static
  vfio: Check the presence for iommufd callbacks in __vfio_register_dev()
  vfio/mdev: Use the vfio emulated iommufd ops set in the mdev sample drivers
  vfio-iommufd: Make vfio_iommufd_emulated_bind() return iommufd_access ID
  vfio-iommufd: No need to record iommufd_ctx in vfio_device
  iommufd: Create access in vfio_iommufd_emulated_bind()
  iommu/iommufd: Pass iommufd_ctx pointer in iommufd_get_ioas()
  iommufd/selftest: Catch overflow of uptr and length
  iommufd/selftest: Add a selftest for iommufd_device_attach() with a hwpt argument
  iommufd/selftest: Make selftest create a more complete mock device
  iommufd/selftest: Rename the remaining mock device_id's to stdev_id
  iommufd/selftest: Rename domain_id to hwpt_id for FIXTURE iommufd_mock_domain
  iommufd/selftest: Rename domain_id to stdev_id for FIXTURE iommufd_ioas
  iommufd/selftest: Rename the selftest 'device_id' to 'stdev_id'
  iommufd: Make iommufd_hw_pagetable_alloc() do iopt_table_add_domain()
  iommufd: Move iommufd_device to iommufd_private.h
  iommufd: Move ioas related HWPT destruction into iommufd_hw_pagetable_destroy()
  iommufd: Consistently manage hwpt_item
  iommufd: Add iommufd_lock_obj() around the auto-domains hwpts
  ...
parents 32f7ad0f 62e37c86
......@@ -13,7 +13,17 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
WARN_ON(!list_empty(&hwpt->devices));
- iommu_domain_free(hwpt->domain);
+ if (!list_empty(&hwpt->hwpt_item)) {
+ mutex_lock(&hwpt->ioas->mutex);
+ list_del(&hwpt->hwpt_item);
+ mutex_unlock(&hwpt->ioas->mutex);
+ iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
+ }
+ if (hwpt->domain)
+ iommu_domain_free(hwpt->domain);
refcount_dec(&hwpt->ioas->obj.users);
mutex_destroy(&hwpt->devices_lock);
}
......@@ -22,36 +32,74 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
* iommufd_hw_pagetable_alloc() - Get an iommu_domain for a device
* @ictx: iommufd context
* @ioas: IOAS to associate the domain with
- * @dev: Device to get an iommu_domain for
+ * @idev: Device to get an iommu_domain for
+ * @immediate_attach: True if idev should be attached to the hwpt
*
- * Allocate a new iommu_domain and return it as a hw_pagetable.
+ * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
+ * will be linked to the given ioas and upon return the underlying iommu_domain
+ * is fully populated.
*/
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
- struct device *dev)
+ struct iommufd_device *idev, bool immediate_attach)
{
struct iommufd_hw_pagetable *hwpt;
int rc;
+ lockdep_assert_held(&ioas->mutex);
hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE);
if (IS_ERR(hwpt))
return hwpt;
- hwpt->domain = iommu_domain_alloc(dev->bus);
- if (!hwpt->domain) {
- rc = -ENOMEM;
- goto out_abort;
- }
INIT_LIST_HEAD(&hwpt->devices);
INIT_LIST_HEAD(&hwpt->hwpt_item);
mutex_init(&hwpt->devices_lock);
/* Pairs with iommufd_hw_pagetable_destroy() */
refcount_inc(&ioas->obj.users);
hwpt->ioas = ioas;
+ hwpt->domain = iommu_domain_alloc(idev->dev->bus);
+ if (!hwpt->domain) {
+ rc = -ENOMEM;
+ goto out_abort;
+ }
+ mutex_lock(&hwpt->devices_lock);
+ /*
+ * immediate_attach exists only to accommodate iommu drivers that cannot
+ * directly allocate a domain. These drivers do not finish creating the
+ * domain until attach is completed. Thus we must have this call
+ * sequence. Once those drivers are fixed this should be removed.
+ */
+ if (immediate_attach) {
+ rc = iommufd_hw_pagetable_attach(hwpt, idev);
+ if (rc)
+ goto out_unlock;
+ }
+ rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain);
+ if (rc)
+ goto out_detach;
+ list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
+ if (immediate_attach) {
+ /* See iommufd_device_do_attach() */
+ refcount_inc(&hwpt->obj.users);
+ idev->hwpt = hwpt;
+ list_add(&idev->devices_item, &hwpt->devices);
+ }
+ mutex_unlock(&hwpt->devices_lock);
return hwpt;
+ out_detach:
+ if (immediate_attach)
+ iommufd_hw_pagetable_detach(hwpt, idev);
+ out_unlock:
+ mutex_unlock(&hwpt->devices_lock);
out_abort:
- iommufd_object_abort(ictx, &hwpt->obj);
+ iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
return ERR_PTR(rc);
}
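
Since iopt_table_add_domain() and the hwpt_list insertion now happen inside
iommufd_hw_pagetable_alloc(), a caller gets back a fully linked HWPT from a
single call. A hypothetical caller sketch, assuming ioas->mutex is already
held as the lockdep assertion above requires:

	/* called with ioas->mutex held */
	hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);
	/* idev is attached and the domain is populated at this point */
	iommufd_object_finalize(idev->ictx, &hwpt->obj);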
......@@ -71,7 +71,7 @@ int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
......@@ -151,7 +151,7 @@ int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
iopt = &ioas->iopt;
......@@ -213,7 +213,7 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
return -EOVERFLOW;
- ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
......@@ -253,7 +253,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
cmd->dst_iova >= ULONG_MAX)
return -EOVERFLOW;
- src_ioas = iommufd_get_ioas(ucmd, cmd->src_ioas_id);
+ src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
if (IS_ERR(src_ioas))
return PTR_ERR(src_ioas);
rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
......@@ -262,7 +262,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
if (rc)
return rc;
- dst_ioas = iommufd_get_ioas(ucmd, cmd->dst_ioas_id);
+ dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id);
if (IS_ERR(dst_ioas)) {
rc = PTR_ERR(dst_ioas);
goto out_pages;
......@@ -292,7 +292,7 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
unsigned long unmapped = 0;
int rc;
- ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
......@@ -381,7 +381,7 @@ int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- ioas = iommufd_get_ioas(ucmd, cmd->object_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
......
......@@ -12,6 +12,7 @@
struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;
struct iommufd_ctx {
struct file *file;
......@@ -211,10 +212,10 @@ struct iommufd_ioas {
struct list_head hwpt_list;
};
- static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ucmd *ucmd,
+ static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
u32 id)
{
- return container_of(iommufd_get_object(ucmd->ictx, id,
+ return container_of(iommufd_get_object(ictx, id,
IOMMUFD_OBJ_IOAS),
struct iommufd_ioas, obj);
}
......@@ -254,9 +255,30 @@ struct iommufd_hw_pagetable {
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
- struct device *dev);
+ struct iommufd_device *idev, bool immediate_attach);
+ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev);
+ void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev);
void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
/*
* A iommufd_device object represents the binding relationship between a
* consuming driver and the iommufd. These objects are created/destroyed by
* external drivers, not by userspace.
*/
struct iommufd_device {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
struct iommufd_hw_pagetable *hwpt;
/* Head at iommufd_hw_pagetable::devices */
struct list_head devices_item;
/* always the physical device */
struct device *dev;
struct iommu_group *group;
bool enforce_cache_coherency;
};
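
With the struct visible here, hw_pagetable.c can set idev->hwpt and link
idev->devices_item itself. The external lifecycle stays the same four calls,
as the reworked selftest below exercises them; a sketch where ictx, dev and
pt_id come from the caller:

	struct iommufd_device *idev;
	u32 idev_id;
	int rc;

	idev = iommufd_device_bind(ictx, dev, &idev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	rc = iommufd_device_attach(idev, &pt_id);
	if (rc) {
		iommufd_device_unbind(idev);
		return rc;
	}
	/* ... do DMA through the HWPT ... */
	iommufd_device_detach(idev);
	iommufd_device_unbind(idev);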
void iommufd_device_destroy(struct iommufd_object *obj);
struct iommufd_access {
......@@ -275,12 +297,6 @@ void iopt_remove_access(struct io_pagetable *iopt,
void iommufd_access_destroy_object(struct iommufd_object *obj);
#ifdef CONFIG_IOMMUFD_TEST
- struct iommufd_hw_pagetable *
- iommufd_device_selftest_attach(struct iommufd_ctx *ictx,
- struct iommufd_ioas *ioas,
- struct device *mock_dev);
- void iommufd_device_selftest_detach(struct iommufd_ctx *ictx,
- struct iommufd_hw_pagetable *hwpt);
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
......@@ -289,6 +305,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
bool iommufd_should_fail(void);
void __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id,
......@@ -305,5 +322,9 @@ static inline void __init iommufd_test_init(void)
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
return false;
}
#endif
#endif
......@@ -49,7 +49,7 @@ struct iommu_test_cmd {
__aligned_u64 length;
} add_reserved;
struct {
- __u32 out_device_id;
+ __u32 out_stdev_id;
__u32 out_hwpt_id;
} mock_domain;
struct {
......
......@@ -75,7 +75,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
return;
*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
- ioas = iommufd_get_ioas(ucmd, ioas_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
if (IS_ERR(ioas))
return;
*iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
......@@ -91,23 +91,50 @@ enum selftest_obj_type {
TYPE_IDEV,
};
struct mock_dev {
struct device dev;
};
struct selftest_obj {
struct iommufd_object obj;
enum selftest_obj_type type;
union {
struct {
- struct iommufd_hw_pagetable *hwpt;
+ struct iommufd_device *idev;
struct iommufd_ctx *ictx;
- struct device mock_dev;
+ struct mock_dev *mock_dev;
} idev;
};
};
static void mock_domain_blocking_free(struct iommu_domain *domain)
{
}
static int mock_domain_nop_attach(struct iommu_domain *domain,
struct device *dev)
{
return 0;
}
static const struct iommu_domain_ops mock_blocking_ops = {
.free = mock_domain_blocking_free,
.attach_dev = mock_domain_nop_attach,
};
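/* The blocking domain holds no per-device state, so one static instance can be shared by every mock device */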
static struct iommu_domain mock_blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &mock_blocking_ops,
};
static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
{
struct mock_iommu_domain *mock;
if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
return &mock_blocking_domain;
if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED))
return NULL;
......@@ -236,19 +263,39 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}
static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
static void mock_domain_set_plaform_dma_ops(struct device *dev)
{
/*
* mock doesn't setup default domains because we can't hook into the
* normal probe path
*/
}
static const struct iommu_ops mock_ops = {
.owner = THIS_MODULE,
.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.domain_alloc = mock_domain_alloc,
.capable = mock_domain_capable,
.set_platform_dma_ops = mock_domain_set_plaform_dma_ops,
.default_domain_ops =
&(struct iommu_domain_ops){
.free = mock_domain_free,
.attach_dev = mock_domain_nop_attach,
.map_pages = mock_domain_map_pages,
.unmap_pages = mock_domain_unmap_pages,
.iova_to_phys = mock_domain_iova_to_phys,
},
};
static struct iommu_device mock_iommu_device = {
.ops = &mock_ops,
};
static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
struct mock_iommu_domain **mock)
......@@ -269,48 +316,142 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
return hwpt;
}
static struct bus_type iommufd_mock_bus_type = {
.name = "iommufd_mock",
.iommu_ops = &mock_ops,
};
static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
kfree(mdev);
}
static struct mock_dev *mock_dev_create(void)
{
struct iommu_group *iommu_group;
struct dev_iommu *dev_iommu;
struct mock_dev *mdev;
int rc;
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return ERR_PTR(-ENOMEM);
device_initialize(&mdev->dev);
mdev->dev.release = mock_dev_release;
mdev->dev.bus = &iommufd_mock_bus_type;
iommu_group = iommu_group_alloc();
if (IS_ERR(iommu_group)) {
rc = PTR_ERR(iommu_group);
goto err_put;
}
rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
iommu_group_id(iommu_group));
if (rc)
goto err_group;
/*
* The iommu core has no way to associate a single device with an iommu
* driver (heck currently it can't even support two iommu_drivers
* registering). Hack it together with an open coded dev_iommu_get().
* Notice that the normal notifier triggered iommu release process also
* does not work here because this bus is not in iommu_buses.
*/
mdev->dev.iommu = kzalloc(sizeof(*dev_iommu), GFP_KERNEL);
if (!mdev->dev.iommu) {
rc = -ENOMEM;
goto err_group;
}
mutex_init(&mdev->dev.iommu->lock);
mdev->dev.iommu->iommu_dev = &mock_iommu_device;
rc = device_add(&mdev->dev);
if (rc)
goto err_dev_iommu;
rc = iommu_group_add_device(iommu_group, &mdev->dev);
if (rc)
goto err_del;
iommu_group_put(iommu_group);
return mdev;
err_del:
device_del(&mdev->dev);
err_dev_iommu:
kfree(mdev->dev.iommu);
mdev->dev.iommu = NULL;
err_group:
iommu_group_put(iommu_group);
err_put:
put_device(&mdev->dev);
return ERR_PTR(rc);
}
static void mock_dev_destroy(struct mock_dev *mdev)
{
iommu_group_remove_device(&mdev->dev);
device_del(&mdev->dev);
kfree(mdev->dev.iommu);
mdev->dev.iommu = NULL;
put_device(&mdev->dev);
}
bool iommufd_selftest_is_mock_dev(struct device *dev)
{
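/* Only mock devices use mock_dev_release, so the release hook doubles as a type tag */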
return dev->release == mock_dev_release;
}
/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
struct iommu_test_cmd *cmd)
{
- static struct bus_type mock_bus = { .iommu_ops = &mock_ops };
- struct iommufd_hw_pagetable *hwpt;
+ struct iommufd_device *idev;
struct selftest_obj *sobj;
- struct iommufd_ioas *ioas;
+ u32 pt_id = cmd->id;
+ u32 idev_id;
int rc;
- ioas = iommufd_get_ioas(ucmd, cmd->id);
- if (IS_ERR(ioas))
- return PTR_ERR(ioas);
sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
- if (IS_ERR(sobj)) {
- rc = PTR_ERR(sobj);
- goto out_ioas;
- }
+ if (IS_ERR(sobj))
+ return PTR_ERR(sobj);
sobj->idev.ictx = ucmd->ictx;
sobj->type = TYPE_IDEV;
- sobj->idev.mock_dev.bus = &mock_bus;
- hwpt = iommufd_device_selftest_attach(ucmd->ictx, ioas,
- &sobj->idev.mock_dev);
- if (IS_ERR(hwpt)) {
- rc = PTR_ERR(hwpt);
+ sobj->idev.mock_dev = mock_dev_create();
+ if (IS_ERR(sobj->idev.mock_dev)) {
+ rc = PTR_ERR(sobj->idev.mock_dev);
goto out_sobj;
}
- sobj->idev.hwpt = hwpt;
- /* Userspace must destroy both of these IDs to destroy the object */
- cmd->mock_domain.out_hwpt_id = hwpt->obj.id;
- cmd->mock_domain.out_device_id = sobj->obj.id;
+ idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
+ &idev_id);
+ if (IS_ERR(idev)) {
+ rc = PTR_ERR(idev);
+ goto out_mdev;
+ }
+ sobj->idev.idev = idev;
+ rc = iommufd_device_attach(idev, &pt_id);
+ if (rc)
+ goto out_unbind;
+ /* Userspace must destroy the device_id to destroy the object */
+ cmd->mock_domain.out_hwpt_id = pt_id;
+ cmd->mock_domain.out_stdev_id = sobj->obj.id;
iommufd_object_finalize(ucmd->ictx, &sobj->obj);
- iommufd_put_object(&ioas->obj);
return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ out_unbind:
+ iommufd_device_unbind(idev);
+ out_mdev:
+ mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
iommufd_object_abort(ucmd->ictx, &sobj->obj);
- out_ioas:
- iommufd_put_object(&ioas->obj);
return rc;
}
......@@ -322,7 +463,7 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
struct iommufd_ioas *ioas;
int rc;
- ioas = iommufd_get_ioas(ucmd, mockpt_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
down_write(&ioas->iopt.iova_rwsem);
......@@ -339,10 +480,12 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
{
struct iommufd_hw_pagetable *hwpt;
struct mock_iommu_domain *mock;
+ uintptr_t end;
int rc;
if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
- (uintptr_t)uptr % MOCK_IO_PAGE_SIZE)
+ (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
+ check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
return -EINVAL;
hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
......@@ -390,7 +533,10 @@ static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
void __user *uptr, size_t length,
unsigned int refs)
{
- if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE)
+ uintptr_t end;
+ if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
+ check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
return -EINVAL;
for (; length; length -= PAGE_SIZE) {
......@@ -554,6 +700,7 @@ static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
struct iommu_test_cmd *cmd = ucmd->cmd;
struct selftest_access *staccess;
struct iommufd_access *access;
u32 id;
int fdno;
int rc;
......@@ -571,15 +718,18 @@ static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
}
access = iommufd_access_create(
- ucmd->ictx, ioas_id,
+ ucmd->ictx,
(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
&selftest_access_ops_pin :
&selftest_access_ops,
- staccess);
+ staccess, &id);
if (IS_ERR(access)) {
rc = PTR_ERR(access);
goto out_put_fdno;
}
rc = iommufd_access_attach(access, ioas_id);
if (rc)
goto out_destroy;
cmd->create_access.out_access_fd = fdno;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
if (rc)
......@@ -780,8 +930,9 @@ void iommufd_selftest_destroy(struct iommufd_object *obj)
switch (sobj->type) {
case TYPE_IDEV:
- iommufd_device_selftest_detach(sobj->idev.ictx,
- sobj->idev.hwpt);
+ iommufd_device_detach(sobj->idev.idev);
+ iommufd_device_unbind(sobj->idev.idev);
+ mock_dev_destroy(sobj->idev.mock_dev);
break;
}
}
......@@ -845,9 +996,11 @@ void __init iommufd_test_init(void)
{
dbgfs_root =
fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
WARN_ON(bus_register(&iommufd_mock_bus_type));
}
void iommufd_test_exit(void)
{
debugfs_remove_recursive(dbgfs_root);
bus_unregister(&iommufd_mock_bus_type);
}
......@@ -137,7 +137,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
case IOMMU_VFIO_IOAS_SET:
- ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
xa_lock(&ucmd->ictx->objects);
......
......@@ -32,13 +32,6 @@ int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
return 0;
}
- /*
- * If the driver doesn't provide this op then it means the device does
- * not do DMA at all. So nothing to do.
- */
- if (!vdev->ops->bind_iommufd)
- return 0;
ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
if (ret)
return ret;
......@@ -119,7 +112,8 @@ EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
/*
* The emulated standard ops mean that vfio_device is going to use the
* "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
- * ops set should call vfio_register_emulated_iommu_dev().
+ * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
+ * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
*/
static void vfio_emulated_unmap(void *data, unsigned long iova,
......@@ -127,7 +121,8 @@ static void vfio_emulated_unmap(void *data, unsigned long iova,
{
struct vfio_device *vdev = data;
- vdev->ops->dma_unmap(vdev, iova, length);
+ if (vdev->ops->dma_unmap)
+ vdev->ops->dma_unmap(vdev, iova, length);
}
static const struct iommufd_access_ops vfio_user_ops = {
......@@ -138,10 +133,14 @@ static const struct iommufd_access_ops vfio_user_ops = {
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id)
{
+ struct iommufd_access *user;
lockdep_assert_held(&vdev->dev_set->lock);
- vdev->iommufd_ictx = ictx;
- iommufd_ctx_get(ictx);
+ user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
+ if (IS_ERR(user))
+ return PTR_ERR(user);
+ vdev->iommufd_access = user;
return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
......@@ -152,24 +151,24 @@ void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
if (vdev->iommufd_access) {
iommufd_access_destroy(vdev->iommufd_access);
+ vdev->iommufd_attached = false;
vdev->iommufd_access = NULL;
}
- iommufd_ctx_put(vdev->iommufd_ictx);
- vdev->iommufd_ictx = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
- struct iommufd_access *user;
+ int rc;
lockdep_assert_held(&vdev->dev_set->lock);
- user = iommufd_access_create(vdev->iommufd_ictx, *pt_id, &vfio_user_ops,
- vdev);
- if (IS_ERR(user))
- return PTR_ERR(user);
- vdev->iommufd_access = user;
+ if (vdev->iommufd_attached)
+ return -EBUSY;
+ rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
+ if (rc)
+ return rc;
vdev->iommufd_attached = true;
return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
......@@ -255,8 +255,9 @@ static int __vfio_register_dev(struct vfio_device *device,
{
int ret;
- if (WARN_ON(device->ops->bind_iommufd &&
- (!device->ops->unbind_iommufd ||
+ if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
+ (!device->ops->bind_iommufd ||
+ !device->ops->unbind_iommufd ||
!device->ops->attach_ioas)))
return -EINVAL;
......
......@@ -40,9 +40,10 @@ enum {
};
struct iommufd_access *
- iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
- const struct iommufd_access_ops *ops, void *data);
+ iommufd_access_create(struct iommufd_ctx *ictx,
+ const struct iommufd_access_ops *ops, void *data, u32 *id);
void iommufd_access_destroy(struct iommufd_access *access);
+ int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
void iommufd_ctx_get(struct iommufd_ctx *ictx);
......
......@@ -60,7 +60,6 @@ struct vfio_device {
void (*put_kvm)(struct kvm *kvm);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
- struct iommufd_ctx *iommufd_ictx;
bool iommufd_attached;
#endif
};
......
......@@ -1374,6 +1374,9 @@ static const struct vfio_device_ops mbochs_dev_ops = {
.write = mbochs_write,
.ioctl = mbochs_ioctl,
.mmap = mbochs_mmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
};
static struct mdev_driver mbochs_driver = {
......
......@@ -663,6 +663,9 @@ static const struct vfio_device_ops mdpy_dev_ops = {
.write = mdpy_write,
.ioctl = mdpy_ioctl,
.mmap = mdpy_mmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
};
static struct mdev_driver mdpy_driver = {
......
......@@ -1269,6 +1269,9 @@ static const struct vfio_device_ops mtty_dev_ops = {
.read = mtty_read,
.write = mtty_write,
.ioctl = mtty_ioctl,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
};
static struct mdev_driver mtty_driver = {
......
......@@ -186,7 +186,8 @@ FIXTURE(iommufd_ioas)
{
int fd;
uint32_t ioas_id;
- uint32_t domain_id;
+ uint32_t stdev_id;
+ uint32_t hwpt_id;
uint64_t base_iova;
};
......@@ -212,7 +213,8 @@ FIXTURE_SETUP(iommufd_ioas)
}
for (i = 0; i != variant->mock_domains; i++) {
- test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_id);
+ test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
+ &self->hwpt_id);
self->base_iova = MOCK_APERTURE_START;
}
}
......@@ -249,8 +251,8 @@ TEST_F(iommufd_ioas, ioas_auto_destroy)
TEST_F(iommufd_ioas, ioas_destroy)
{
- if (self->domain_id) {
- /* IOAS cannot be freed while a domain is on it */
+ if (self->stdev_id) {
+ /* IOAS cannot be freed while a device has a HWPT using it */
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->ioas_id));
} else {
......@@ -259,11 +261,21 @@ TEST_F(iommufd_ioas, ioas_destroy)
}
}
TEST_F(iommufd_ioas, hwpt_attach)
{
/* Create a device attached directly to a hwpt */
if (self->stdev_id) {
test_cmd_mock_domain(self->hwpt_id, NULL, NULL);
} else {
test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
}
}
TEST_F(iommufd_ioas, ioas_area_destroy)
{
/* Adding an area does not change ability to destroy */
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
- if (self->domain_id)
+ if (self->stdev_id)
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->ioas_id));
else
......@@ -382,7 +394,7 @@ TEST_F(iommufd_ioas, area_auto_iova)
for (i = 0; i != 10; i++) {
size_t length = PAGE_SIZE * (i + 1);
- if (self->domain_id) {
+ if (self->stdev_id) {
test_ioctl_ioas_map(buffer, length, &iovas[i]);
} else {
test_ioctl_ioas_map((void *)(1UL << 31), length,
......@@ -418,7 +430,7 @@ TEST_F(iommufd_ioas, area_auto_iova)
ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
/* Allocate from an allowed region */
- if (self->domain_id) {
+ if (self->stdev_id) {
ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
} else {
......@@ -525,7 +537,7 @@ TEST_F(iommufd_ioas, iova_ranges)
/* Range can be read */
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
EXPECT_EQ(1, ranges_cmd.num_iovas);
- if (!self->domain_id) {
+ if (!self->stdev_id) {
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(SIZE_MAX, ranges[0].last);
EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
......@@ -550,7 +562,7 @@ TEST_F(iommufd_ioas, iova_ranges)
&test_cmd));
ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
- if (!self->domain_id) {
+ if (!self->stdev_id) {
EXPECT_EQ(2, ranges_cmd.num_iovas);
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
......@@ -565,7 +577,7 @@ TEST_F(iommufd_ioas, iova_ranges)
/* Buffer too small */
memset(ranges, 0, BUFFER_SIZE);
ranges_cmd.num_iovas = 1;
- if (!self->domain_id) {
+ if (!self->stdev_id) {
EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
&ranges_cmd));
EXPECT_EQ(2, ranges_cmd.num_iovas);
......@@ -582,6 +594,40 @@ TEST_F(iommufd_ioas, iova_ranges)
EXPECT_EQ(0, ranges[1].last);
}
TEST_F(iommufd_ioas, access_domain_destory)
{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_PAGES,
.access_pages = { .iova = self->base_iova + PAGE_SIZE,
.length = PAGE_SIZE},
};
size_t buf_size = 2 * HUGEPAGE_SIZE;
uint8_t *buf;
buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
0);
ASSERT_NE(MAP_FAILED, buf);
test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
test_cmd_create_access(self->ioas_id, &access_cmd.id,
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
/* Causes a complicated unpin across a huge page boundary */
if (self->stdev_id)
test_ioctl_destroy(self->stdev_id);
test_cmd_destroy_access_pages(
access_cmd.id, access_cmd.access_pages.out_access_pages_id);
test_cmd_destroy_access(access_cmd.id);
ASSERT_EQ(0, munmap(buf, buf_size));
}
TEST_F(iommufd_ioas, access_pin)
{
struct iommu_test_cmd access_cmd = {
......@@ -605,7 +651,7 @@ TEST_F(iommufd_ioas, access_pin)
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
- uint32_t mock_device_id;
+ uint32_t mock_stdev_id;
uint32_t mock_hwpt_id;
access_cmd.access_pages.length = npages * PAGE_SIZE;
......@@ -637,15 +683,14 @@ TEST_F(iommufd_ioas, access_pin)
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
- test_cmd_mock_domain(self->ioas_id, &mock_device_id,
+ test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
&mock_hwpt_id);
check_map_cmd.id = mock_hwpt_id;
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
&check_map_cmd));
- test_ioctl_destroy(mock_device_id);
- test_ioctl_destroy(mock_hwpt_id);
+ test_ioctl_destroy(mock_stdev_id);
test_cmd_destroy_access_pages(
access_cmd.id,
access_cmd.access_pages.out_access_pages_id);
......@@ -789,7 +834,7 @@ TEST_F(iommufd_ioas, fork_gone)
ASSERT_NE(-1, child);
ASSERT_EQ(child, waitpid(child, NULL, 0));
- if (self->domain_id) {
+ if (self->stdev_id) {
/*
* If a domain already existed then everything was pinned within
* the fork, so this copies from one domain to another.
......@@ -988,8 +1033,8 @@ FIXTURE(iommufd_mock_domain)
{
int fd;
uint32_t ioas_id;
- uint32_t domain_id;
- uint32_t domain_ids[2];
+ uint32_t hwpt_id;
+ uint32_t hwpt_ids[2];
int mmap_flags;
size_t mmap_buf_size;
};
......@@ -1008,11 +1053,11 @@ FIXTURE_SETUP(iommufd_mock_domain)
ASSERT_NE(-1, self->fd);
test_ioctl_ioas_alloc(&self->ioas_id);
- ASSERT_GE(ARRAY_SIZE(self->domain_ids), variant->mock_domains);
+ ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
for (i = 0; i != variant->mock_domains; i++)
- test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_ids[i]);
- self->domain_id = self->domain_ids[0];
+ test_cmd_mock_domain(self->ioas_id, NULL, &self->hwpt_ids[i]);
+ self->hwpt_id = self->hwpt_ids[0];
self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
self->mmap_buf_size = PAGE_SIZE * 8;
......@@ -1061,7 +1106,7 @@ FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
struct iommu_test_cmd check_map_cmd = { \
.size = sizeof(check_map_cmd), \
.op = IOMMU_TEST_OP_MD_CHECK_MAP, \
- .id = self->domain_id, \
+ .id = self->hwpt_id, \
.check_map = { .iova = _iova, \
.length = _length, \
.uptr = (uintptr_t)(_ptr) }, \
......@@ -1070,8 +1115,8 @@ FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
ioctl(self->fd, \
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
&check_map_cmd)); \
- if (self->domain_ids[1]) { \
- check_map_cmd.id = self->domain_ids[1]; \
+ if (self->hwpt_ids[1]) { \
+ check_map_cmd.id = self->hwpt_ids[1]; \
ASSERT_EQ(0, \
ioctl(self->fd, \
_IOMMU_TEST_CMD( \
......@@ -1197,15 +1242,15 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
for (; end < buf_size; end += MOCK_PAGE_SIZE) {
size_t length = end - start;
unsigned int old_id;
- uint32_t mock_device_id;
+ uint32_t mock_stdev_id;
__u64 iova;
test_ioctl_ioas_map(buf + start, length, &iova);
/* Add and destroy a domain while the area exists */
- old_id = self->domain_ids[1];
- test_cmd_mock_domain(self->ioas_id, &mock_device_id,
- &self->domain_ids[1]);
+ old_id = self->hwpt_ids[1];
+ test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
+ &self->hwpt_ids[1]);
check_mock_iova(buf + start, iova, length);
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
......@@ -1213,9 +1258,8 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
start / PAGE_SIZE * PAGE_SIZE,
1);
- test_ioctl_destroy(mock_device_id);
- test_ioctl_destroy(self->domain_ids[1]);
- self->domain_ids[1] = old_id;
+ test_ioctl_destroy(mock_stdev_id);
+ self->hwpt_ids[1] = old_id;
test_ioctl_ioas_unmap(iova, length);
}
......
......@@ -297,7 +297,7 @@ TEST_FAIL_NTH(basic_fail_nth, basic)
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
uint32_t ioas_id;
- __u32 device_id;
+ __u32 stdev_id;
__u32 hwpt_id;
__u64 iova;
......@@ -313,7 +313,7 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
fail_nth_enable();
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
......@@ -321,12 +321,10 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
IOMMU_IOAS_MAP_READABLE))
return -1;
- if (_test_ioctl_destroy(self->fd, device_id))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id))
+ if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
return 0;
}
......@@ -334,8 +332,8 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
uint32_t ioas_id;
- __u32 device_id2;
- __u32 device_id;
+ __u32 stdev_id2;
+ __u32 stdev_id;
__u32 hwpt_id2;
__u32 hwpt_id;
__u64 iova;
......@@ -350,12 +348,12 @@ TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
fail_nth_enable();
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
......@@ -363,19 +361,15 @@ TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
IOMMU_IOAS_MAP_READABLE))
return -1;
- if (_test_ioctl_destroy(self->fd, device_id))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id))
+ if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
- if (_test_ioctl_destroy(self->fd, device_id2))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id2))
+ if (_test_ioctl_destroy(self->fd, stdev_id2))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2))
return -1;
return 0;
}
......@@ -518,7 +512,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
uint32_t access_pages_id;
uint32_t ioas_id;
- __u32 device_id;
+ __u32 stdev_id;
__u32 hwpt_id;
__u64 iova;
......@@ -532,7 +526,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
......@@ -570,9 +564,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
return -1;
self->access_id = 0;
- if (_test_ioctl_destroy(self->fd, device_id))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id))
+ if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
return 0;
}
......
......@@ -38,7 +38,7 @@ static unsigned long BUFFER_SIZE;
&test_cmd)); \
})
- static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id,
+ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
__u32 *hwpt_id)
{
struct iommu_test_cmd cmd = {
......@@ -52,19 +52,19 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id,
ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
if (ret)
return ret;
- if (device_id)
- *device_id = cmd.mock_domain.out_device_id;
+ if (stdev_id)
+ *stdev_id = cmd.mock_domain.out_stdev_id;
assert(cmd.id != 0);
if (hwpt_id)
*hwpt_id = cmd.mock_domain.out_hwpt_id;
return 0;
}
- #define test_cmd_mock_domain(ioas_id, device_id, hwpt_id) \
- ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, device_id, \
- hwpt_id))
- #define test_err_mock_domain(_errno, ioas_id, device_id, hwpt_id) \
+ #define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id) \
+ ASSERT_EQ(0, \
+ _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, hwpt_id))
+ #define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
- device_id, hwpt_id))
+ stdev_id, hwpt_id))
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
__u32 *access_id, unsigned int flags)
......