Commit 94e95d58 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "Fixes in virtio, vhost, and vdpa drivers"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa/mlx5: Fix queue type selection logic
  vdpa/mlx5: Avoid destroying MR on empty iotlb
  tools/virtio: fix build
  virtio_ring: pull in spinlock header
  vringh: pull in spinlock header
  virtio-blk: Add validation for block size in config space
  vringh: Use wiov->used to check for read/write desc order
  virtio_vdpa: reject invalid vq indices
  vdpa: Add documentation for vdpa_alloc_device() macro
  vDPA/ifcvf: Fix return value check for vdpa_alloc_device()
  vp_vdpa: Fix return value check for vdpa_alloc_device()
  vdpa_sim: Fix return value check for vdpa_alloc_device()
  vhost: Fix the calculation in vhost_overflow()
  vhost-vdpa: Fix integer overflow in vhost_vdpa_process_iotlb_update()
  virtio_pci: Support surprise removal of virtio pci device
  virtio: Protect vqs list access
  virtio: Keep vring_del_virtqueue() mirror of VQ create
  virtio: Improve vq->broken access to avoid any compiler optimization
parents 7c60610d 879753c8
...@@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = { ...@@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth; static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
/*
 * virtblk_validate - virtio .validate hook, runs before feature negotiation.
 *
 * Config-space access is mandatory for this driver, so bail out with
 * -EINVAL when the transport provides no ->get.  If the device offers
 * VIRTIO_BLK_F_BLK_SIZE but advertises a block size outside the sane
 * [SECTOR_SIZE, PAGE_SIZE] range, withdraw the feature bit so the core
 * falls back to the default block size instead of trusting bad config.
 */
static int virtblk_validate(struct virtio_device *vdev)
{
	u32 advertised_size;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE)) {
		advertised_size = virtio_cread32(vdev,
				offsetof(struct virtio_blk_config, blk_size));

		/* Out-of-range size: drop the feature rather than fail probe. */
		if (advertised_size < SECTOR_SIZE || advertised_size > PAGE_SIZE)
			__virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
	}

	return 0;
}
static int virtblk_probe(struct virtio_device *vdev) static int virtblk_probe(struct virtio_device *vdev)
{ {
struct virtio_blk *vblk; struct virtio_blk *vblk;
...@@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev) ...@@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev)
u8 physical_block_exp, alignment_offset; u8 physical_block_exp, alignment_offset;
unsigned int queue_depth; unsigned int queue_depth;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
GFP_KERNEL); GFP_KERNEL);
if (err < 0) if (err < 0)
...@@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev) ...@@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev)
else else
blk_size = queue_logical_block_size(q); blk_size = queue_logical_block_size(q);
if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
dev_err(&vdev->dev,
"block size is changed unexpectedly, now is %u\n",
blk_size);
err = -EINVAL;
goto err_cleanup_disk;
}
/* Use topology information if available */ /* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp, struct virtio_blk_config, physical_block_exp,
...@@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev) ...@@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev)
device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0; return 0;
err_cleanup_disk:
blk_cleanup_disk(vblk->disk);
out_free_tags: out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set); blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq: out_free_vq:
...@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = { ...@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
.driver.name = KBUILD_MODNAME, .driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE, .driver.owner = THIS_MODULE,
.id_table = id_table, .id_table = id_table,
.validate = virtblk_validate,
.probe = virtblk_probe, .probe = virtblk_probe,
.remove = virtblk_remove, .remove = virtblk_remove,
.config_changed = virtblk_config_changed, .config_changed = virtblk_config_changed,
......
...@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops, NULL); dev, &ifc_vdpa_ops, NULL);
if (adapter == NULL) { if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM; return PTR_ERR(adapter);
} }
pci_set_master(pdev); pci_set_master(pdev);
......
...@@ -512,11 +512,6 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) ...@@ -512,11 +512,6 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
mutex_unlock(&mr->mkey_mtx); mutex_unlock(&mr->mkey_mtx);
} }
static bool map_empty(struct vhost_iotlb *iotlb)
{
return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map) bool *change_map)
{ {
...@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io ...@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
int err = 0; int err = 0;
*change_map = false; *change_map = false;
if (map_empty(iotlb)) {
mlx5_vdpa_destroy_mr(mvdev);
return 0;
}
mutex_lock(&mr->mkey_mtx); mutex_lock(&mr->mkey_mtx);
if (mr->initialized) { if (mr->initialized) {
mlx5_vdpa_info(mvdev, "memory map update\n"); mlx5_vdpa_info(mvdev, "memory map update\n");
......
...@@ -752,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev) ...@@ -752,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
/* prefer split queue */ /* prefer split queue */
if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED) if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED; return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)); WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT; return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
} }
static bool vq_is_tx(u16 idx) static bool vq_is_tx(u16 idx)
...@@ -2029,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) ...@@ -2029,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return -ENOSPC; return -ENOSPC;
mdev = mgtdev->madev->mdev; mdev = mgtdev->madev->mdev;
if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
dev_warn(mdev->device, "missing support for split virtqueues\n");
return -EOPNOTSUPP;
}
/* we save one virtqueue for control virtqueue should we require it */ /* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues); max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
......
...@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) ...@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
dev_attr->name); dev_attr->name);
if (!vdpasim) if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
goto err_alloc; goto err_alloc;
}
vdpasim->dev_attr = *dev_attr; vdpasim->dev_attr = *dev_attr;
INIT_WORK(&vdpasim->work, dev_attr->work_fn); INIT_WORK(&vdpasim->work, dev_attr->work_fn);
......
...@@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa, vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
dev, &vp_vdpa_ops, NULL); dev, &vp_vdpa_ops, NULL);
if (vp_vdpa == NULL) { if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n"); dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return -ENOMEM; return PTR_ERR(vp_vdpa);
} }
mdev = &vp_vdpa->mdev; mdev = &vp_vdpa->mdev;
......
...@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, ...@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
long pinned; long pinned;
int ret = 0; int ret = 0;
if (msg->iova < v->range.first || if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 ||
msg->iova + msg->size - 1 > v->range.last) msg->iova + msg->size - 1 > v->range.last)
return -EINVAL; return -EINVAL;
......
...@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz) ...@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
} }
/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size) static bool vhost_overflow(u64 uaddr, u64 size)
{ {
/* Make sure 64 bit math will not overflow. */ if (uaddr > ULONG_MAX || size > ULONG_MAX)
return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size; return true;
if (!size)
return false;
return uaddr > ULONG_MAX - size + 1;
} }
/* Caller should have vq mutex and device mutex. */ /* Caller should have vq mutex and device mutex. */
......
...@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i, ...@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov = wiov; iov = wiov;
else { else {
iov = riov; iov = riov;
if (unlikely(wiov && wiov->i)) { if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable", vringh_bad("Readable desc %p after writable",
&descs[i]); &descs[i]);
err = -EINVAL; err = -EINVAL;
......
...@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev) ...@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs); INIT_LIST_HEAD(&dev->vqs);
spin_lock_init(&dev->vqs_list_lock);
/* /*
* device_add() causes the bus infrastructure to look for a matching * device_add() causes the bus infrastructure to look for a matching
......
...@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) ...@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct device *dev = get_device(&vp_dev->vdev.dev); struct device *dev = get_device(&vp_dev->vdev.dev);
/*
* Device is marked broken on surprise removal so that virtio upper
* layers can abort any ongoing operation.
*/
if (!pci_device_is_present(pci_dev))
virtio_break_device(&vp_dev->vdev);
pci_disable_sriov(pci_dev); pci_disable_sriov(pci_dev);
unregister_virtio_device(&vp_dev->vdev); unregister_virtio_device(&vp_dev->vdev);
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <xen/xen.h> #include <xen/xen.h>
#ifdef DEBUG #ifdef DEBUG
...@@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed( ...@@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
cpu_to_le16(vq->packed.event_flags_shadow); cpu_to_le16(vq->packed.event_flags_shadow);
} }
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs); list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq; return &vq->vq;
err_desc_extra: err_desc_extra:
...@@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, ...@@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
memset(vq->split.desc_state, 0, vring.num * memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split)); sizeof(struct vring_desc_state_split));
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs); list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq; return &vq->vq;
err_extra: err_extra:
...@@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq) ...@@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
{ {
struct vring_virtqueue *vq = to_vvq(_vq); struct vring_virtqueue *vq = to_vvq(_vq);
spin_lock(&vq->vq.vdev->vqs_list_lock);
list_del(&_vq->list);
spin_unlock(&vq->vq.vdev->vqs_list_lock);
if (vq->we_own_ring) { if (vq->we_own_ring) {
if (vq->packed_ring) { if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev, vring_free_queue(vq->vq.vdev,
...@@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq) ...@@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state); kfree(vq->split.desc_state);
kfree(vq->split.desc_extra); kfree(vq->split.desc_extra);
} }
list_del(&_vq->list);
kfree(vq); kfree(vq);
} }
EXPORT_SYMBOL_GPL(vring_del_virtqueue); EXPORT_SYMBOL_GPL(vring_del_virtqueue);
...@@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq) ...@@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
{ {
struct vring_virtqueue *vq = to_vvq(_vq); struct vring_virtqueue *vq = to_vvq(_vq);
return vq->broken; return READ_ONCE(vq->broken);
} }
EXPORT_SYMBOL_GPL(virtqueue_is_broken); EXPORT_SYMBOL_GPL(virtqueue_is_broken);
...@@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev) ...@@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev)
{ {
struct virtqueue *_vq; struct virtqueue *_vq;
spin_lock(&dev->vqs_list_lock);
list_for_each_entry(_vq, &dev->vqs, list) { list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq); struct vring_virtqueue *vq = to_vvq(_vq);
vq->broken = true;
/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
WRITE_ONCE(vq->broken, true);
} }
spin_unlock(&dev->vqs_list_lock);
} }
EXPORT_SYMBOL_GPL(virtio_break_device); EXPORT_SYMBOL_GPL(virtio_break_device);
......
...@@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, ...@@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!name) if (!name)
return NULL; return NULL;
if (index >= vdpa->nvqs)
return ERR_PTR(-ENOENT);
/* Queue shouldn't already be set up. */ /* Queue shouldn't already be set up. */
if (ops->get_vq_ready(vdpa, index)) if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
......
...@@ -11,13 +11,15 @@ enum { ...@@ -11,13 +11,15 @@ enum {
}; };
enum { enum {
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps? MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
}; };
enum { enum {
MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
}; };
struct mlx5_ifc_virtio_q_bits { struct mlx5_ifc_virtio_q_bits {
......
...@@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, ...@@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config, const struct vdpa_config_ops *config,
size_t size, const char *name); size_t size, const char *name);
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
*
* @dev_struct: the type of the parent structure
* @member: the name of struct vdpa_device within the @dev_struct
* @parent: the parent device
 * @config: the bus operations that are supported by this device
* @name: name of the vdpa device
*
* Return allocated data structure or ERR_PTR upon error
*/
#define vdpa_alloc_device(dev_struct, member, parent, config, name) \ #define vdpa_alloc_device(dev_struct, member, parent, config, name) \
container_of(__vdpa_alloc_device( \ container_of(__vdpa_alloc_device( \
parent, config, \ parent, config, \
......
...@@ -110,6 +110,7 @@ struct virtio_device { ...@@ -110,6 +110,7 @@ struct virtio_device {
bool config_enabled; bool config_enabled;
bool config_change_pending; bool config_change_pending;
spinlock_t config_lock; spinlock_t config_lock;
spinlock_t vqs_list_lock; /* Protects VQs list access */
struct device dev; struct device dev;
struct virtio_device_id id; struct virtio_device_id id;
const struct virtio_config_ops *config; const struct virtio_config_ops *config;
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/virtio_byteorder.h> #include <linux/virtio_byteorder.h>
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB) #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h> #include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h> #include <linux/vhost_iotlb.h>
......
...@@ -4,7 +4,8 @@ test: virtio_test vringh_test ...@@ -4,7 +4,8 @@ test: virtio_test vringh_test
virtio_test: virtio_ring.o virtio_test.o virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o vringh_test: vringh_test.o vringh.o virtio_ring.o
CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
LDFLAGS += -lpthread
vpath %.c ../../drivers/virtio ../../drivers/vhost vpath %.c ../../drivers/virtio ../../drivers/vhost
mod: mod:
${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V} ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
......
#ifndef SPINLOCK_H_STUB
#define SPINLOCK_H_STUB

/* Pull in assert() explicitly instead of relying on a transitive include. */
#include <assert.h>
#include <pthread.h>

/*
 * Userspace stub of the kernel spinlock API for the tools/virtio tests:
 * every kernel spin_lock flavour maps onto a pthread spinlock.  In
 * userspace the _bh/_irq/_irqsave variants need no extra semantics, so
 * they all forward to the plain lock/unlock helpers.
 */
typedef pthread_spinlock_t spinlock_t;

static inline void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static inline void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static inline void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static inline void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	(void)f; /* no interrupt state to save in userspace */
	spin_lock(lock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	(void)f; /* no interrupt state to restore in userspace */
	spin_unlock(lock);
}

#endif
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#define LINUX_VIRTIO_H #define LINUX_VIRTIO_H
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/spinlock.h>
struct device { struct device {
void *parent; void *parent;
...@@ -12,6 +13,7 @@ struct virtio_device { ...@@ -12,6 +13,7 @@ struct virtio_device {
struct device dev; struct device dev;
u64 features; u64 features;
struct list_head vqs; struct list_head vqs;
spinlock_t vqs_list_lock;
}; };
struct virtqueue { struct virtqueue {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment