Commit ffc17596 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - new vdpa features to allow creation and deletion of new devices

 - virtio-blk support for per-device queue depth

 - fixes, cleanups all over the place

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (31 commits)
  virtio-input: add multi-touch support
  virtio_mmio: fix one typo
  vdpa/mlx5: fix param validation in mlx5_vdpa_get_config()
  virtio_net: Fix fall-through warnings for Clang
  virtio_input: Prevent EV_MSC/MSC_TIMESTAMP loop storm for MT.
  virtio-blk: support per-device queue depth
  virtio_vdpa: don't warn when fail to disable vq
  virtio-pci: introduce modern device module
  virtio-pci-modern: rename map_capability() to vp_modern_map_capability()
  virtio-pci-modern: introduce helper to get notification offset
  virtio-pci-modern: introduce helper for getting queue nums
  virtio-pci-modern: introduce helper for setting/getting queue size
  virtio-pci-modern: introduce helper to set/get queue_enable
  virtio-pci-modern: introduce vp_modern_queue_address()
  virtio-pci-modern: introduce vp_modern_set_queue_vector()
  virtio-pci-modern: introduce vp_modern_generation()
  virtio-pci-modern: introduce helpers for setting and getting features
  virtio-pci-modern: introduce helpers for setting and getting status
  virtio-pci-modern: introduce helper to set config vector
  virtio-pci-modern: introduce vp_modern_remove()
  ...
parents a6525b99 16c10bed
drivers/block/virtio_blk.c
@@ -705,6 +705,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	u32 v, blk_size, max_size, sg_elems, opt_io_size;
 	u16 min_io_size;
 	u8 physical_block_exp, alignment_offset;
+	unsigned int queue_depth;
 
 	if (!vdev->config->get) {
 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -756,16 +757,18 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 
 	/* Default queue sizing is to fill the ring. */
-	if (!virtblk_queue_depth) {
-		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
+	if (likely(!virtblk_queue_depth)) {
+		queue_depth = vblk->vqs[0].vq->num_free;
 		/* ... but without indirect descs, we use 2 descs per req */
 		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
-			virtblk_queue_depth /= 2;
+			queue_depth /= 2;
+	} else {
+		queue_depth = virtblk_queue_depth;
 	}
 
 	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
 	vblk->tag_set.ops = &virtio_mq_ops;
-	vblk->tag_set.queue_depth = virtblk_queue_depth;
+	vblk->tag_set.queue_depth = queue_depth;
 	vblk->tag_set.numa_node = NUMA_NO_NODE;
 	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	vblk->tag_set.cmd_size =
......
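For context: virtblk_queue_depth above is the driver's existing module parameter, where zero means "fill the ring"; the patch computes into a local queue_depth so that the first probed device no longer overwrites the global value for every later device. As a reminder, the parameter's declaration elsewhere in the file looks roughly like the following sketch (not part of this hunk; the 0444 permissions are an assumption):

	static unsigned int virtblk_queue_depth;
	module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);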
drivers/net/virtio_net.c
@@ -729,6 +729,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		fallthrough;
 	case XDP_ABORTED:
 		trace_xdp_exception(vi->dev, xdp_prog, act);
+		goto err_xdp;
 	case XDP_DROP:
 		goto err_xdp;
 	}
......
drivers/vdpa/Kconfig
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig VDPA
 	tristate "vDPA drivers"
+	depends on NET
 	help
 	  Enable this module to support vDPA device that uses a
 	  datapath which complies with virtio specifications with
......
drivers/vdpa/ifcvf/ifcvf_main.c
@@ -432,7 +432,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
 				    dev, &ifc_vdpa_ops,
-				    IFCVF_MAX_QUEUE_PAIRS * 2);
+				    IFCVF_MAX_QUEUE_PAIRS * 2, NULL);
 	if (adapter == NULL) {
 		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
 		return -ENOMEM;
......
drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1820,7 +1820,7 @@ static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 
-	if (offset + len < sizeof(struct virtio_net_config))
+	if (offset + len <= sizeof(struct virtio_net_config))
 		memcpy(buf, (u8 *)&ndev->config + offset, len);
 }
@@ -1982,7 +1982,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
 	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
 
 	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
-				 2 * mlx5_vdpa_max_qps(max_vqs));
+				 2 * mlx5_vdpa_max_qps(max_vqs), NULL);
 	if (IS_ERR(ndev))
 		return PTR_ERR(ndev);
......
drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -235,7 +235,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 		ops = &vdpasim_config_ops;
 
 	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
-				    dev_attr->nvqs);
+				    dev_attr->nvqs, dev_attr->name);
 	if (!vdpasim)
 		goto err_alloc;
@@ -249,6 +249,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 		goto err_iommu;
 	set_dma_ops(dev, &vdpasim_dma_ops);
+	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;
 
 	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
 	if (!vdpasim->config)
......
drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -33,6 +33,8 @@ struct vdpasim_virtqueue {
 };
 
 struct vdpasim_dev_attr {
+	struct vdpa_mgmt_dev *mgmt_dev;
+	const char *name;
 	u64 supported_features;
 	size_t config_size;
 	size_t buffer_size;
......
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -33,9 +33,7 @@ static char *macaddr;
 module_param(macaddr, charp, 0);
 MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
 
-u8 macaddr_buf[ETH_ALEN];
-
-static struct vdpasim *vdpasim_net_dev;
+static u8 macaddr_buf[ETH_ALEN];
 
 static void vdpasim_net_work(struct work_struct *work)
 {
@@ -120,21 +118,23 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
 	memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
 }
 
-static int __init vdpasim_net_init(void)
+static void vdpasim_net_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vdpasim_net_mgmtdev = {
+	.init_name = "vdpasim_net",
+	.release = vdpasim_net_mgmtdev_release,
+};
+
+static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
 {
 	struct vdpasim_dev_attr dev_attr = {};
+	struct vdpasim *simdev;
 	int ret;
 
-	if (macaddr) {
-		mac_pton(macaddr, macaddr_buf);
-		if (!is_valid_ether_addr(macaddr_buf)) {
-			ret = -EADDRNOTAVAIL;
-			goto out;
-		}
-	} else {
-		eth_random_addr(macaddr_buf);
-	}
-
+	dev_attr.mgmt_dev = mdev;
+	dev_attr.name = name;
 	dev_attr.id = VIRTIO_ID_NET;
 	dev_attr.supported_features = VDPASIM_NET_FEATURES;
 	dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
@@ -143,29 +143,75 @@ static int __init vdpasim_net_init(void)
 	dev_attr.work_fn = vdpasim_net_work;
 	dev_attr.buffer_size = PAGE_SIZE;
 
-	vdpasim_net_dev = vdpasim_create(&dev_attr);
-	if (IS_ERR(vdpasim_net_dev)) {
-		ret = PTR_ERR(vdpasim_net_dev);
-		goto out;
+	simdev = vdpasim_create(&dev_attr);
+	if (IS_ERR(simdev))
+		return PTR_ERR(simdev);
+
+	ret = _vdpa_register_device(&simdev->vdpa);
+	if (ret)
+		goto reg_err;
+
+	return 0;
+
+reg_err:
+	put_device(&simdev->vdpa.dev);
+	return ret;
+}
+
+static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
+				struct vdpa_device *dev)
+{
+	struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);
+
+	_vdpa_unregister_device(&simdev->vdpa);
+}
+
+static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
+	.dev_add = vdpasim_net_dev_add,
+	.dev_del = vdpasim_net_dev_del
+};
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+	.device = &vdpasim_net_mgmtdev,
+	.id_table = id_table,
+	.ops = &vdpasim_net_mgmtdev_ops,
+};
+
+static int __init vdpasim_net_init(void)
+{
+	int ret;
+
+	if (macaddr) {
+		mac_pton(macaddr, macaddr_buf);
+		if (!is_valid_ether_addr(macaddr_buf))
+			return -EADDRNOTAVAIL;
+	} else {
+		eth_random_addr(macaddr_buf);
 	}
 
-	ret = vdpa_register_device(&vdpasim_net_dev->vdpa);
+	ret = device_register(&vdpasim_net_mgmtdev);
 	if (ret)
-		goto put_dev;
+		return ret;
 
+	ret = vdpa_mgmtdev_register(&mgmt_dev);
+	if (ret)
+		goto parent_err;
 	return 0;
 
-put_dev:
-	put_device(&vdpasim_net_dev->vdpa.dev);
-out:
+parent_err:
+	device_unregister(&vdpasim_net_mgmtdev);
 	return ret;
 }
 
 static void __exit vdpasim_net_exit(void)
 {
-	struct vdpa_device *vdpa = &vdpasim_net_dev->vdpa;
-
-	vdpa_unregister_device(vdpa);
+	vdpa_mgmtdev_unregister(&mgmt_dev);
+	device_unregister(&vdpasim_net_mgmtdev);
 }
 
 module_init(vdpasim_net_init);
......
drivers/vhost/scsi.c
@@ -1814,12 +1814,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 	struct vhost_virtqueue **vqs;
 	int r = -ENOMEM, i;
 
-	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
-	if (!vs) {
-		vs = vzalloc(sizeof(*vs));
-		if (!vs)
-			goto err_vs;
-	}
+	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
+	if (!vs)
+		goto err_vs;
 
 	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
 	if (!vqs)
......
drivers/virtio/Kconfig
@@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
 	  This option is selected if the architecture may need to enforce
 	  VIRTIO_F_ACCESS_PLATFORM
 
+config VIRTIO_PCI_LIB
+	tristate
+	help
+	  Modern PCI device implementation. This module implements the
+	  basic probe and control for devices which are based on modern
+	  PCI device with possible vendor specific extensions. Any
+	  module that selects this module must depend on PCI.
+
 menuconfig VIRTIO_MENU
 	bool "Virtio drivers"
 	default y
@@ -21,6 +29,7 @@ if VIRTIO_MENU
 config VIRTIO_PCI
 	tristate "PCI driver for virtio devices"
 	depends on PCI
+	select VIRTIO_PCI_LIB
 	select VIRTIO
 	help
 	  This driver provides support for virtio based paravirtual device
......
drivers/virtio/Makefile
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
+obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o
 obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
......
drivers/virtio/virtio_input.c
@@ -7,6 +7,7 @@
 #include <uapi/linux/virtio_ids.h>
 #include <uapi/linux/virtio_input.h>
+#include <linux/input/mt.h>
 
 struct virtio_input {
 	struct virtio_device *vdev;
@@ -64,6 +65,21 @@ static int virtinput_send_status(struct virtio_input *vi,
 	unsigned long flags;
 	int rc;
 
+	/*
+	 * Since 29cc309d8bf1 (HID: hid-multitouch: forward MSC_TIMESTAMP),
+	 * EV_MSC/MSC_TIMESTAMP is added to each before EV_SYN event.
+	 * EV_MSC is configured as INPUT_PASS_TO_ALL.
+	 * In case of touch device:
+	 *   BE pass EV_MSC/MSC_TIMESTAMP to FE on receiving event from evdev.
+	 *   FE pass EV_MSC/MSC_TIMESTAMP back to BE.
+	 *   BE writes EV_MSC/MSC_TIMESTAMP to evdev due to INPUT_PASS_TO_ALL.
+	 *   BE receives extra EV_MSC/MSC_TIMESTAMP and pass to FE.
+	 *   >>> Each new frame becomes larger and larger.
+	 * Disable EV_MSC/MSC_TIMESTAMP forwarding for MT.
+	 */
+	if (vi->idev->mt && type == EV_MSC && code == MSC_TIMESTAMP)
+		return 0;
+
 	stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
 	if (!stsbuf)
 		return -ENOMEM;
@@ -204,7 +220,7 @@ static int virtinput_probe(struct virtio_device *vdev)
 	struct virtio_input *vi;
 	unsigned long flags;
 	size_t size;
-	int abs, err;
+	int abs, err, nslots;
 
 	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 		return -ENODEV;
@@ -289,6 +305,13 @@ static int virtinput_probe(struct virtio_device *vdev)
 				continue;
 			virtinput_cfg_abs(vi, abs);
 		}
+
+		if (test_bit(ABS_MT_SLOT, vi->idev->absbit)) {
+			nslots = input_abs_get_max(vi->idev, ABS_MT_SLOT) + 1;
+			err = input_mt_init_slots(vi->idev, nslots, 0);
+			if (err)
+				goto err_mt_init_slots;
+		}
 	}
 
 	virtio_device_ready(vdev);
@@ -304,6 +327,7 @@ static int virtinput_probe(struct virtio_device *vdev)
 	spin_lock_irqsave(&vi->lock, flags);
 	vi->ready = false;
 	spin_unlock_irqrestore(&vi->lock, flags);
+err_mt_init_slots:
 	input_free_device(vi->idev);
 err_input_alloc:
 	vdev->config->del_vqs(vdev);
......
drivers/virtio/virtio_mem.c
@@ -2577,7 +2577,7 @@ static int virtio_mem_probe(struct virtio_device *vdev)
 	 * actually in use (e.g., trying to reload the driver).
 	 */
 	if (vm->plugged_size) {
-		vm->unplug_all_required = 1;
+		vm->unplug_all_required = true;
 		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
 	}
......
drivers/virtio/virtio_mmio.c
@@ -126,7 +126,7 @@ static int vm_finalize_features(struct virtio_device *vdev)
 	/* Give virtio_ring a chance to accept features. */
 	vring_transport_features(vdev);
 
-	/* Make sure there is are no mixed devices */
+	/* Make sure there are no mixed devices */
 	if (vm_dev->version == 2 &&
 			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
 		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
......
drivers/virtio/virtio_pci_common.h
@@ -25,6 +25,7 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/virtio_pci.h>
+#include <linux/virtio_pci_modern.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
@@ -43,31 +44,12 @@ struct virtio_pci_vq_info {
 struct virtio_pci_device {
 	struct virtio_device vdev;
 	struct pci_dev *pci_dev;
+	struct virtio_pci_modern_device mdev;
 
 	/* In legacy mode, these two point to within ->legacy. */
 	/* Where to read and clear interrupt */
 	u8 __iomem *isr;
 
-	/* Modern only fields */
-	/* The IO mapping for the PCI config space (non-legacy mode) */
-	struct virtio_pci_common_cfg __iomem *common;
-	/* Device-specific data (non-legacy mode) */
-	void __iomem *device;
-	/* Base of vq notifications (non-legacy mode). */
-	void __iomem *notify_base;
-	/* So we can sanity-check accesses. */
-	size_t notify_len;
-	size_t device_len;
-	/* Capability for when we need to map notifications per-vq. */
-	int notify_map_cap;
-	/* Multiply queue_notify_off by this value. (non-legacy mode). */
-	u32 notify_offset_multiplier;
-	int modern_bars;
-
 	/* Legacy only field */
 	/* the IO mapping for the PCI config space */
 	void __iomem *ioaddr;
......
drivers/virtio/virtio_vdpa.c
@@ -225,9 +225,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vd_dev->lock, flags);
 
-	/* Select and deactivate the queue */
+	/* Select and deactivate the queue (best effort) */
 	ops->set_vq_ready(vdpa, index, 0);
-	WARN_ON(ops->get_vq_ready(vdpa, index));
 
 	vring_del_virtqueue(vq);
......
include/linux/vdpa.h
@@ -35,6 +35,8 @@ struct vdpa_vq_state {
 	u16 avail_index;
 };
 
+struct vdpa_mgmt_dev;
+
 /**
  * vDPA device - representation of a vDPA device
  * @dev: underlying device
@@ -43,6 +45,8 @@ struct vdpa_vq_state {
  * @index: device index
  * @features_valid: were features initialized? for legacy guests
  * @nvqs: maximum number of supported virtqueues
+ * @mdev: management device pointer; caller must setup when registering device as part
+ *	  of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
  */
 struct vdpa_device {
 	struct device dev;
@@ -51,6 +55,7 @@ struct vdpa_device {
 	unsigned int index;
 	bool features_valid;
 	int nvqs;
+	struct vdpa_mgmt_dev *mdev;
 };
 
 /**
@@ -245,20 +250,22 @@ struct vdpa_config_ops {
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
 					const struct vdpa_config_ops *config,
-					int nvqs,
-					size_t size);
+					int nvqs, size_t size, const char *name);
 
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
+#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \
 			  container_of(__vdpa_alloc_device( \
 				       parent, config, nvqs, \
 				       sizeof(dev_struct) + \
 				       BUILD_BUG_ON_ZERO(offsetof( \
-				       dev_struct, member))), \
+				       dev_struct, member)), name), \
 				       dev_struct, member)
 
 int vdpa_register_device(struct vdpa_device *vdev);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
+int _vdpa_register_device(struct vdpa_device *vdev);
+void _vdpa_unregister_device(struct vdpa_device *vdev);
+
 /**
  * vdpa_driver - operations for a vDPA driver
  * @driver: underlying device driver
@@ -336,4 +343,33 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
 	ops->get_config(vdev, offset, buf, len);
 }
 
+/**
+ * vdpa_mgmtdev_ops - vdpa device ops
+ * @dev_add: Add a vdpa device using alloc and register
+ *	     @mdev: parent device to use for device addition
+ *	     @name: name of the new vdpa device
+ *	     Driver need to add a new device using _vdpa_register_device()
+ *	     after fully initializing the vdpa device. Driver must return 0
+ *	     on success or appropriate error code.
+ * @dev_del: Remove a vdpa device using unregister
+ *	     @mdev: parent device to use for device removal
+ *	     @dev: vdpa device to remove
+ *	     Driver need to remove the specified device by calling
+ *	     _vdpa_unregister_device().
+ */
+struct vdpa_mgmtdev_ops {
+	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
+	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+};
+
+struct vdpa_mgmt_dev {
+	struct device *device;
+	const struct vdpa_mgmtdev_ops *ops;
+	const struct virtio_device_id *id_table; /* supported ids */
+	struct list_head list;
+};
+
+int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
+void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
+
 #endif /* _LINUX_VDPA_H */
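Putting the new interface together: a vendor driver implements vdpa_mgmtdev_ops and registers a vdpa_mgmt_dev, exactly as the vdpa_sim_net.c hunk above does. Below is a condensed sketch of the same pattern; the demo_* names, demo_vdpa_ops, and DEMO_NVQS are placeholders invented for illustration, not part of this series:

	/* Sketch only; assumes struct demo_adapter has a struct vdpa_device
	 * member named "vdpa", and that demo_vdpa_ops and DEMO_NVQS are
	 * defined elsewhere. */
	static int demo_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
	{
		struct demo_adapter *adapter;
		int err;

		/* Pass the user-supplied name through to the allocator. */
		adapter = vdpa_alloc_device(struct demo_adapter, vdpa, NULL,
					    &demo_vdpa_ops, DEMO_NVQS, name);
		if (!adapter)
			return -ENOMEM;

		/* Required before _vdpa_register_device(), per @mdev above. */
		adapter->vdpa.mdev = mdev;
		err = _vdpa_register_device(&adapter->vdpa);
		if (err)
			put_device(&adapter->vdpa.dev);
		return err;
	}

	static void demo_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
	{
		_vdpa_unregister_device(dev);
	}

	static const struct vdpa_mgmtdev_ops demo_mgmtdev_ops = {
		.dev_add = demo_dev_add,
		.dev_del = demo_dev_del,
	};

The driver then fills a struct vdpa_mgmt_dev with its parent device, demo_mgmtdev_ops and an id_table, and calls vdpa_mgmtdev_register(), as vdpasim_net_init() does above.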
include/linux/virtio_pci_modern.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_PCI_MODERN_H
#define _LINUX_VIRTIO_PCI_MODERN_H
#include <linux/pci.h>
#include <linux/virtio_pci.h>
struct virtio_pci_modern_device {
struct pci_dev *pci_dev;
struct virtio_pci_common_cfg __iomem *common;
/* Device-specific data (non-legacy mode) */
void __iomem *device;
/* Base of vq notifications (non-legacy mode). */
void __iomem *notify_base;
/* Where to read and clear interrupt */
u8 __iomem *isr;
/* So we can sanity-check accesses. */
size_t notify_len;
size_t device_len;
/* Capability for when we need to map notifications per-vq. */
int notify_map_cap;
/* Multiply queue_notify_off by this value. (non-legacy mode). */
u32 notify_offset_multiplier;
int modern_bars;
struct virtio_device_id id;
};
/*
* Type-safe wrappers for io accesses.
* Use these to enforce at compile time the following spec requirement:
*
* The driver MUST access each field using the “natural” access
* method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
* for 16-bit fields and 8-bit accesses for 8-bit fields.
*/
static inline u8 vp_ioread8(const u8 __iomem *addr)
{
return ioread8(addr);
}
static inline u16 vp_ioread16(const __le16 __iomem *addr)
{
return ioread16(addr);
}
static inline u32 vp_ioread32(const __le32 __iomem *addr)
{
return ioread32(addr);
}
static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
iowrite8(value, addr);
}
static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
iowrite16(value, addr);
}
static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
iowrite32(value, addr);
}
static inline void vp_iowrite64_twopart(u64 val,
__le32 __iomem *lo,
__le32 __iomem *hi)
{
vp_iowrite32((u32)val, lo);
vp_iowrite32(val >> 32, hi);
}
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
u64 features);
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
u8 status);
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
u16 idx, u16 vector);
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
u16 vector);
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
u16 index, u64 desc_addr, u64 driver_addr,
u64 device_addr);
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
u16 idx, bool enable);
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
u16 idx);
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
u16 idx, u16 size);
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
u16 idx);
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
u16 idx);
void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
size_t minlen,
u32 align,
u32 start, u32 size,
size_t *len);
int vp_modern_probe(struct virtio_pci_modern_device *mdev);
void vp_modern_remove(struct virtio_pci_modern_device *mdev);
#endif
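To show how these helpers compose, here is a hypothetical consumer of the new library; the demo_* names and the abbreviated error handling are invented for illustration and are not part of this series:

	#include <linux/pci.h>
	#include <linux/slab.h>
	#include <linux/virtio_config.h>
	#include <linux/virtio_pci_modern.h>

	static int demo_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
	{
		struct virtio_pci_modern_device *mdev;
		int err;

		mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
		if (!mdev)
			return -ENOMEM;
		mdev->pci_dev = pci_dev;

		/* Finds and maps the common, isr, notify and device-specific
		 * capabilities described by the virtio spec. */
		err = vp_modern_probe(mdev);
		if (err) {
			kfree(mdev);
			return err;
		}

		vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
		/* ... feature negotiation and vq setup would follow, using
		 * the other vp_modern_* helpers declared above ... */

		pci_set_drvdata(pci_dev, mdev);
		return 0;
	}

	static void demo_remove(struct pci_dev *pci_dev)
	{
		struct virtio_pci_modern_device *mdev = pci_get_drvdata(pci_dev);

		vp_modern_remove(mdev);	/* unmaps what vp_modern_probe() mapped */
		kfree(mdev);
	}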
include/uapi/linux/vdpa.h (new file)
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* vdpa device management interface
* Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved.
*/
#ifndef _UAPI_LINUX_VDPA_H_
#define _UAPI_LINUX_VDPA_H_
#define VDPA_GENL_NAME "vdpa"
#define VDPA_GENL_VERSION 0x1
enum vdpa_command {
VDPA_CMD_UNSPEC,
VDPA_CMD_MGMTDEV_NEW,
VDPA_CMD_MGMTDEV_GET, /* can dump */
VDPA_CMD_DEV_NEW,
VDPA_CMD_DEV_DEL,
VDPA_CMD_DEV_GET, /* can dump */
};
enum vdpa_attr {
VDPA_ATTR_UNSPEC,
/* bus name (optional) + dev name together make the parent device handle */
VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */
VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */
VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, /* u64 */
VDPA_ATTR_DEV_NAME, /* string */
VDPA_ATTR_DEV_ID, /* u32 */
VDPA_ATTR_DEV_VENDOR_ID, /* u32 */
VDPA_ATTR_DEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
};
#endif
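For completeness, one way to drive this interface from userspace: the iproute2 "vdpa" tool is the intended consumer, and the following libnl-3 sketch (an illustration, not part of the series) sends the equivalent of "vdpa dev add name vdpa0 mgmtdev vdpasim_net" against the simulator registered above:

	/* Build with: cc demo.c $(pkg-config --cflags --libs libnl-genl-3.0) */
	#include <linux/vdpa.h>
	#include <netlink/genl/ctrl.h>
	#include <netlink/genl/genl.h>

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg;
		int family;

		if (!sk || genl_connect(sk))
			return 1;
		family = genl_ctrl_resolve(sk, VDPA_GENL_NAME);
		if (family < 0)
			return 1;

		msg = nlmsg_alloc();
		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
			    NLM_F_REQUEST | NLM_F_ACK, VDPA_CMD_DEV_NEW,
			    VDPA_GENL_VERSION);
		/* Parent management device, then the name of the new device. */
		nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, "vdpasim_net");
		nla_put_string(msg, VDPA_ATTR_DEV_NAME, "vdpa0");

		nl_send_auto(sk, msg);
		nl_wait_for_ack(sk);

		nlmsg_free(msg);
		nl_socket_free(sk);
		return 0;
	}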