Commit 395f9d89 authored by Jason Gunthorpe

Merge patch series "Move group specific code into group.c"

Yi Liu <yi.l.liu@intel.com> says:

==================
With the introduction of iommufd, VFIO is moving toward providing a
device-centric uAPI after adapting to iommufd. With this trend, the
existing VFIO group infrastructure becomes optional once VFIO is
converted to be device centric.

This series moves the group-specific code out of vfio_main.c and
prepares for compiling the group infrastructure out once the vfio
device cdev [2] is added.

[2] https://github.com/yiliu1765/iommufd/tree/wip/vfio_device_cdev
==================

Link: https://lore.kernel.org/all/20221201145535.589687-1-yi.l.liu@intel.com/
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parents 90337f52 9eefba80
drivers/vfio/Makefile
@@ -4,6 +4,7 @@ vfio_virqfd-y := virqfd.o
 obj-$(CONFIG_VFIO) += vfio.o
 
 vfio-y += vfio_main.o \
+	  group.o \
 	  iova_bitmap.o
 vfio-$(CONFIG_IOMMUFD) += iommufd.o
 vfio-$(CONFIG_VFIO_CONTAINER) += container.o
drivers/vfio/container.c
@@ -540,10 +540,12 @@ void vfio_group_unuse_container(struct vfio_group *group)
 	fput(group->opened_file);
 }
 
-int vfio_container_pin_pages(struct vfio_container *container,
-			     struct iommu_group *iommu_group, dma_addr_t iova,
-			     int npage, int prot, struct page **pages)
+int vfio_device_container_pin_pages(struct vfio_device *device,
+				    dma_addr_t iova, int npage,
+				    int prot, struct page **pages)
 {
+	struct vfio_container *container = device->group->container;
+	struct iommu_group *iommu_group = device->group->iommu_group;
 	struct vfio_iommu_driver *driver = container->iommu_driver;
 
 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -555,9 +557,11 @@ int vfio_container_pin_pages(struct vfio_container *container,
 				      npage, prot, pages);
 }
 
-void vfio_container_unpin_pages(struct vfio_container *container,
-				dma_addr_t iova, int npage)
+void vfio_device_container_unpin_pages(struct vfio_device *device,
+				       dma_addr_t iova, int npage)
 {
+	struct vfio_container *container = device->group->container;
+
 	if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
 		return;
@@ -565,9 +569,11 @@ void vfio_container_unpin_pages(struct vfio_container *container,
 					  npage);
 }
 
-int vfio_container_dma_rw(struct vfio_container *container, dma_addr_t iova,
-			  void *data, size_t len, bool write)
+int vfio_device_container_dma_rw(struct vfio_device *device,
+				 dma_addr_t iova, void *data,
+				 size_t len, bool write)
 {
+	struct vfio_container *container = device->group->container;
 	struct vfio_iommu_driver *driver = container->iommu_driver;
 
 	if (unlikely(!driver || !driver->ops->dma_rw))
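
As a reading aid, a minimal caller sketch for the renamed helpers follows. It is not part of this diff: example_pin_one(), the prot flags, and the single-page count are illustrative assumptions. It shows that callers now pass only the vfio_device; the container and iommu_group are resolved internally via device->group.

/*
 * Illustrative sketch only (would live alongside drivers/vfio, which
 * provides the vfio_device_container_*() declarations in vfio.h).
 */
static int example_pin_one(struct vfio_device *device, dma_addr_t iova,
                           struct page **page)
{
        /* Pin a single page, read/write, through the device's container. */
        return vfio_device_container_pin_pages(device, iova, 1,
                                               IOMMU_READ | IOMMU_WRITE, page);
}

In-tree, the expected callers are the vfio_pin_pages()/vfio_dma_rw() paths in vfio_main.c, which already hold the vfio_device.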
drivers/vfio/vfio.h
@@ -6,6 +6,7 @@
 #ifndef __VFIO_VFIO_H__
 #define __VFIO_VFIO_H__
 
+#include <linux/file.h>
 #include <linux/device.h>
 #include <linux/cdev.h>
 #include <linux/module.h>
@@ -15,6 +16,15 @@ struct iommu_group;
 struct vfio_device;
 struct vfio_container;
 
+void vfio_device_put_registration(struct vfio_device *device);
+bool vfio_device_try_get_registration(struct vfio_device *device);
+int vfio_device_open(struct vfio_device *device,
+		     struct iommufd_ctx *iommufd, struct kvm *kvm);
+void vfio_device_close(struct vfio_device *device,
+		       struct iommufd_ctx *iommufd);
+
+extern const struct file_operations vfio_device_fops;
+
 enum vfio_group_type {
 	/*
 	 * Physical device with IOMMU backing.
@@ -66,6 +76,18 @@ struct vfio_group {
 	struct iommufd_ctx		*iommufd;
 };
 
+int vfio_device_set_group(struct vfio_device *device,
+			  enum vfio_group_type type);
+void vfio_device_remove_group(struct vfio_device *device);
+void vfio_device_group_register(struct vfio_device *device);
+void vfio_device_group_unregister(struct vfio_device *device);
+int vfio_device_group_use_iommu(struct vfio_device *device);
+void vfio_device_group_unuse_iommu(struct vfio_device *device);
+void vfio_device_group_close(struct vfio_device *device);
+bool vfio_device_has_container(struct vfio_device *device);
+int __init vfio_group_init(void);
+void vfio_group_cleanup(void);
+
 #if IS_ENABLED(CONFIG_VFIO_CONTAINER)
 /* events for the backend driver notify callback */
 enum vfio_iommu_notify_type {
@@ -122,13 +144,14 @@ int vfio_container_attach_group(struct vfio_container *container,
 void vfio_group_detach_container(struct vfio_group *group);
 void vfio_device_container_register(struct vfio_device *device);
 void vfio_device_container_unregister(struct vfio_device *device);
-int vfio_container_pin_pages(struct vfio_container *container,
-			     struct iommu_group *iommu_group, dma_addr_t iova,
-			     int npage, int prot, struct page **pages);
-void vfio_container_unpin_pages(struct vfio_container *container,
-				dma_addr_t iova, int npage);
-int vfio_container_dma_rw(struct vfio_container *container, dma_addr_t iova,
-			  void *data, size_t len, bool write);
+int vfio_device_container_pin_pages(struct vfio_device *device,
+				    dma_addr_t iova, int npage,
+				    int prot, struct page **pages);
+void vfio_device_container_unpin_pages(struct vfio_device *device,
+				       dma_addr_t iova, int npage);
+int vfio_device_container_dma_rw(struct vfio_device *device,
+				 dma_addr_t iova, void *data,
+				 size_t len, bool write);
 
 int __init vfio_container_init(void);
 void vfio_container_cleanup(void);
@@ -166,22 +189,21 @@ static inline void vfio_device_container_unregister(struct vfio_device *device)
 {
 }
 
-static inline int vfio_container_pin_pages(struct vfio_container *container,
-					   struct iommu_group *iommu_group,
-					   dma_addr_t iova, int npage, int prot,
-					   struct page **pages)
+static inline int vfio_device_container_pin_pages(struct vfio_device *device,
+						  dma_addr_t iova, int npage,
+						  int prot, struct page **pages)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline void vfio_container_unpin_pages(struct vfio_container *container,
-					      dma_addr_t iova, int npage)
+static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
+						     dma_addr_t iova, int npage)
 {
 }
 
-static inline int vfio_container_dma_rw(struct vfio_container *container,
-					dma_addr_t iova, void *data, size_t len,
-					bool write)
+static inline int vfio_device_container_dma_rw(struct vfio_device *device,
+					       dma_addr_t iova, void *data,
+					       size_t len, bool write)
 {
 	return -EOPNOTSUPP;
 }
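
The !CONFIG_VFIO_CONTAINER stubs above return -EOPNOTSUPP (or do nothing), so common code needs no #ifdefs at the call sites. A hypothetical illustration, assuming an example_dma_read() wrapper that is not part of this series:

/*
 * Illustrative sketch only: call the device-based helper unconditionally.
 * With CONFIG_VFIO_CONTAINER=n the static inline stub is used and simply
 * returns -EOPNOTSUPP at runtime.
 */
static int example_dma_read(struct vfio_device *device, dma_addr_t iova,
                            void *buf, size_t len)
{
        int ret = vfio_device_container_dma_rw(device, iova, buf, len, false);

        if (ret == -EOPNOTSUPP)
                pr_debug("no container backend compiled in\n");
        return ret;
}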