Commit 43e1b129 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "vhost and virtio fixes and features:

   - Hardening work by Jason

   - vdpa driver for Alibaba ENI

   - Performance tweaks for virtio blk

   - virtio rng rework using an internal buffer

   - mac/mtu programming for mlx5 vdpa

   - Misc fixes, cleanups"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (45 commits)
  vdpa/mlx5: Forward only packets with allowed MAC address
  vdpa/mlx5: Support configuration of MAC
  vdpa/mlx5: Fix clearing of VIRTIO_NET_F_MAC feature bit
  vdpa_sim_net: Enable user to set mac address and mtu
  vdpa: Enable user to set mac and mtu of vdpa device
  vdpa: Use kernel coding style for structure comments
  vdpa: Introduce query of device config layout
  vdpa: Introduce and use vdpa device get, set config helpers
  virtio-scsi: don't let virtio core to validate used buffer length
  virtio-blk: don't let virtio core to validate used length
  virtio-net: don't let virtio core to validate used length
  virtio_ring: validate used buffer length
  virtio_blk: correct types for status handling
  virtio_blk: allow 0 as num_request_queues
  i2c: virtio: Add support for zero-length requests
  virtio-blk: fixup coccinelle warnings
  virtio_ring: fix typos in vring_desc_extra
  virtio-pci: harden INTX interrupts
  virtio_pci: harden MSI-X interrupts
  virtio_config: introduce a new .enable_cbs method
  ...
parents d4ec3d55 540061ac
......@@ -20083,6 +20083,13 @@ S: Maintained
F: drivers/i2c/busses/i2c-virtio.c
F: include/uapi/linux/virtio_i2c.h
VIRTIO PMEM DRIVER
M: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/nvdimm/virtio_pmem.c
F: drivers/nvdimm/nd_virtio.c
VIRTUAL BOX GUEST DEVICE DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Arnd Bergmann <arnd@arndb.de>
......
......@@ -371,6 +371,7 @@ config XEN_BLKDEV_BACKEND
config VIRTIO_BLK
tristate "Virtio block driver"
depends on VIRTIO
select SG_POOL
help
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
......
......@@ -24,6 +24,19 @@
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT 0
#else
#define VIRTIO_BLK_INLINE_SG_CNT 2
#endif
static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
"Limit the number of request queues to use for blk device. "
"0 for no limit. "
"Values > nr_cpu_ids truncated to nr_cpu_ids.");
static int major;
static DEFINE_IDA(vd_index_ida);
......@@ -77,6 +90,7 @@ struct virtio_blk {
struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
u8 status;
struct sg_table sg_table;
struct scatterlist sg[];
};
......@@ -162,12 +176,93 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
return 0;
}
static inline void virtblk_request_done(struct request *req)
static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
if (blk_rq_nr_phys_segments(req))
sg_free_table_chained(&vbr->sg_table,
VIRTIO_BLK_INLINE_SG_CNT);
}
static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
struct virtblk_req *vbr)
{
int err;
if (!blk_rq_nr_phys_segments(req))
return 0;
vbr->sg_table.sgl = vbr->sg;
err = sg_alloc_table_chained(&vbr->sg_table,
blk_rq_nr_phys_segments(req),
vbr->sg_table.sgl,
VIRTIO_BLK_INLINE_SG_CNT);
if (unlikely(err))
return -ENOMEM;
return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
kfree(bvec_virt(&req->special_vec));
}
static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
struct request *req,
struct virtblk_req *vbr)
{
bool unmap = false;
u32 type;
vbr->out_hdr.sector = 0;
switch (req_op(req)) {
case REQ_OP_READ:
type = VIRTIO_BLK_T_IN;
vbr->out_hdr.sector = cpu_to_virtio64(vdev,
blk_rq_pos(req));
break;
case REQ_OP_WRITE:
type = VIRTIO_BLK_T_OUT;
vbr->out_hdr.sector = cpu_to_virtio64(vdev,
blk_rq_pos(req));
break;
case REQ_OP_FLUSH:
type = VIRTIO_BLK_T_FLUSH;
break;
case REQ_OP_DISCARD:
type = VIRTIO_BLK_T_DISCARD;
break;
case REQ_OP_WRITE_ZEROES:
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
}
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
if (virtblk_setup_discard_write_zeroes(req, unmap))
return BLK_STS_RESOURCE;
}
return 0;
}
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
blk_mq_end_request(req, virtblk_result(vbr));
}
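
Taken together, the hunks above break the old monolithic queue_rq path into discrete steps. A summary sketch of the resulting request life cycle (names from this series; error paths elided):

/*
 * virtio_queue_rq()
 *   -> virtblk_setup_cmd()    build out_hdr (type/sector/ioprio) and,
 *                             for discard/write-zeroes, the payload
 *   -> virtblk_map_data()     sg_alloc_table_chained() + blk_rq_map_sg()
 *   -> virtblk_add_req()      post vbr->sg_table.sgl to the virtqueue
 *
 * virtblk_request_done()      (completion path)
 *   -> virtblk_unmap_data()   sg_free_table_chained()
 *   -> virtblk_cleanup_cmd()  free any special payload
 *   -> blk_mq_end_request()
 */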
......@@ -223,59 +318,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
unsigned long flags;
unsigned int num;
int qid = hctx->queue_num;
int err;
bool notify = false;
bool unmap = false;
u32 type;
blk_status_t status;
int err;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
switch (req_op(req)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
type = 0;
break;
case REQ_OP_FLUSH:
type = VIRTIO_BLK_T_FLUSH;
break;
case REQ_OP_DISCARD:
type = VIRTIO_BLK_T_DISCARD;
break;
case REQ_OP_WRITE_ZEROES:
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
}
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
vbr->out_hdr.sector = type ?
0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
return status;
blk_mq_start_request(req);
if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
err = virtblk_setup_discard_write_zeroes(req, unmap);
if (err)
num = virtblk_map_data(hctx, req, vbr);
if (unlikely(num < 0)) {
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
if (num) {
if (rq_data_dir(req) == WRITE)
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
......@@ -284,6 +346,8 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
switch (err) {
case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
......@@ -497,8 +561,14 @@ static int init_vq(struct virtio_blk *vblk)
&num_vqs);
if (err)
num_vqs = 1;
if (!err && !num_vqs) {
dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
return -EINVAL;
}
num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
num_vqs = min_t(unsigned int,
min_not_zero(num_request_queues, nr_cpu_ids),
num_vqs);
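
A worked example of the queue-count clamping above, with assumed values:

/*
 * Assume the device offers num_vqs = 8 on a host with nr_cpu_ids = 4:
 *
 *   num_request_queues = 0 (default, no limit):
 *       min_not_zero(0, 4) = 4   ->  num_vqs = min(4, 8) = 4
 *   num_request_queues = 2:
 *       min_not_zero(2, 4) = 2   ->  num_vqs = min(2, 8) = 2
 *   num_request_queues = 16 (> nr_cpu_ids):
 *       min_not_zero(16, 4) = 4  ->  num_vqs = min(4, 8) = 4
 */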
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
......@@ -624,7 +694,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
u8 writeback = virtblk_get_cache_mode(vblk->vdev);
BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}
static DEVICE_ATTR_RW(cache_type);
......@@ -660,16 +730,6 @@ static const struct attribute_group *virtblk_attr_groups[] = {
NULL,
};
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct virtio_blk *vblk = set->driver_data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
......@@ -682,7 +742,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
.map_queues = virtblk_map_queues,
};
......@@ -762,7 +821,7 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
......@@ -990,6 +1049,7 @@ static struct virtio_driver virtio_blk = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
......
......@@ -18,13 +18,20 @@ static DEFINE_IDA(rng_index_ida);
struct virtrng_info {
struct hwrng hwrng;
struct virtqueue *vq;
struct completion have_data;
char name[25];
unsigned int data_avail;
int index;
bool busy;
bool hwrng_register_done;
bool hwrng_removed;
/* data transfer */
struct completion have_data;
unsigned int data_avail;
unsigned int data_idx;
/* minimal size returned by rng_buffer_size() */
#if SMP_CACHE_BYTES < 32
u8 data[32];
#else
u8 data[SMP_CACHE_BYTES];
#endif
};
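
The data[] sizing mirrors rng_buffer_size() in the hwrng core, which returns SMP_CACHE_BYTES but never less than 32; with assumed cache-line sizes:

/*
 * SMP_CACHE_BYTES = 64  ->  u8 data[64]  (one cache line)
 * SMP_CACHE_BYTES = 16  ->  u8 data[32]  (floor applied by the #if)
 *
 * Either way sizeof(vi->data) >= rng_buffer_size(), so one posted
 * buffer can always satisfy the core's largest read request.
 */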
static void random_recv_done(struct virtqueue *vq)
......@@ -35,54 +42,88 @@ static void random_recv_done(struct virtqueue *vq)
if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
return;
vi->data_idx = 0;
complete(&vi->have_data);
}
/* The host will fill any buffer we give it with sweet, sweet randomness. */
static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
static void request_entropy(struct virtrng_info *vi)
{
struct scatterlist sg;
sg_init_one(&sg, buf, size);
reinit_completion(&vi->have_data);
vi->data_avail = 0;
vi->data_idx = 0;
sg_init_one(&sg, vi->data, sizeof(vi->data));
/* There should always be room for one buffer. */
virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
virtqueue_kick(vi->vq);
}
static unsigned int copy_data(struct virtrng_info *vi, void *buf,
unsigned int size)
{
size = min_t(unsigned int, size, vi->data_avail);
memcpy(buf, vi->data + vi->data_idx, size);
vi->data_idx += size;
vi->data_avail -= size;
if (vi->data_avail == 0)
request_entropy(vi);
return size;
}
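
A worked example of copy_data(), assuming a 64-byte cache line (so sizeof(vi->data) == 64) and a freshly completed request with data_avail = 64:

/*
 * virtio_read(buf, 16): copies data[0..15],  data_idx = 16, avail = 48
 * virtio_read(buf, 48): copies data[16..63], data_idx = 64, avail = 0
 *                       -> request_entropy() re-posts the buffer
 *
 * A request is therefore always pending, and readers only block in
 * wait_for_completion_killable() when the internal buffer is drained.
 */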
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
unsigned int chunk;
size_t read;
if (vi->hwrng_removed)
return -ENODEV;
if (!vi->busy) {
vi->busy = true;
reinit_completion(&vi->have_data);
register_buffer(vi, buf, size);
read = 0;
/* copy available data */
if (vi->data_avail) {
chunk = copy_data(vi, buf, size);
size -= chunk;
read += chunk;
}
if (!wait)
return 0;
return read;
/* We have already copied available entropy,
* so either size is 0 or data_avail is 0
*/
while (size != 0) {
/* data_avail is 0 but a request is pending */
ret = wait_for_completion_killable(&vi->have_data);
if (ret < 0)
return ret;
/* if vi->data_avail is 0, we have been interrupted
* by a cleanup, but buffer stays in the queue
*/
if (vi->data_avail == 0)
return read;
vi->busy = false;
chunk = copy_data(vi, buf + read, size);
size -= chunk;
read += chunk;
}
return vi->data_avail;
return read;
}
static void virtio_cleanup(struct hwrng *rng)
{
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
if (vi->busy)
wait_for_completion(&vi->have_data);
complete(&vi->have_data);
}
static int probe_common(struct virtio_device *vdev)
......@@ -118,6 +159,9 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
/* we always have a pending entropy request */
request_entropy(vi);
return 0;
err_find:
......@@ -133,9 +177,9 @@ static void remove_common(struct virtio_device *vdev)
vi->hwrng_removed = true;
vi->data_avail = 0;
vi->data_idx = 0;
complete(&vi->have_data);
vdev->config->reset(vdev);
vi->busy = false;
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);
......
......@@ -28,6 +28,7 @@
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
#define VIRTCONS_MAX_PORTS 0x8000
/*
* This is a global struct for storing common data for all the devices
......@@ -2036,6 +2037,14 @@ static int virtcons_probe(struct virtio_device *vdev)
virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
struct virtio_console_config, max_nr_ports,
&portdev->max_nr_ports) == 0) {
if (portdev->max_nr_ports == 0 ||
portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
dev_err(&vdev->dev,
"Invalidate max_nr_ports %d",
portdev->max_nr_ports);
err = -EINVAL;
goto free;
}
multiport = true;
}
......
......@@ -62,25 +62,22 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
for (i = 0; i < num; i++) {
int outcnt = 0, incnt = 0;
/*
* We don't support 0 length messages and so filter out
* 0 length transfers by using i2c_adapter_quirks.
*/
if (!msgs[i].len)
break;
/*
* Only 7-bit mode supported for this moment. For the address
* format, Please check the Virtio I2C Specification.
*/
reqs[i].out_hdr.addr = cpu_to_le16(msgs[i].addr << 1);
if (msgs[i].flags & I2C_M_RD)
reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_M_RD);
if (i != num - 1)
reqs[i].out_hdr.flags = cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
sg_init_one(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr));
sgs[outcnt++] = &out_hdr;
if (msgs[i].len) {
reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
if (!reqs[i].buf)
break;
......@@ -91,6 +88,7 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
sgs[outcnt + incnt++] = &msg_buf;
else
sgs[outcnt++] = &msg_buf;
}
sg_init_one(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr));
sgs[outcnt + incnt++] = &in_hdr;
......@@ -191,7 +189,7 @@ static int virtio_i2c_setup_vqs(struct virtio_i2c *vi)
static u32 virtio_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm virtio_algorithm = {
......@@ -199,15 +197,16 @@ static struct i2c_algorithm virtio_algorithm = {
.functionality = virtio_i2c_func,
};
static const struct i2c_adapter_quirks virtio_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
static int virtio_i2c_probe(struct virtio_device *vdev)
{
struct virtio_i2c *vi;
int ret;
if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
return -EINVAL;
}
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
return -ENOMEM;
......@@ -225,7 +224,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
snprintf(vi->adap.name, sizeof(vi->adap.name),
"i2c_virtio at virtio bus %d", vdev->index);
vi->adap.algo = &virtio_algorithm;
vi->adap.quirks = &virtio_i2c_quirks;
vi->adap.dev.parent = &vdev->dev;
vi->adap.dev.of_node = vdev->dev.of_node;
i2c_set_adapdata(&vi->adap, vi);
......@@ -270,7 +268,13 @@ static int virtio_i2c_restore(struct virtio_device *vdev)
}
#endif
static const unsigned int features[] = {
VIRTIO_I2C_F_ZERO_LENGTH_REQUEST,
};
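
With the quirk gone, a zero-length transfer (for example the SMBus quick probe re-enabled in virtio_i2c_func() above) now reaches the driver. A hypothetical caller-side sketch, assuming target address 0x50:

/* A 0-byte write used as a presence probe: virtio_i2c_prepare_reqs()
 * emits only out_hdr + in_hdr for it, skipping the data buffer
 * (see the msgs[i].len check above). */
struct i2c_msg probe = {
        .addr  = 0x50,   /* assumed target address */
        .flags = 0,      /* write direction */
        .len   = 0,      /* zero-length request */
        .buf   = NULL,
};
/* ret = i2c_transfer(adap, &probe, 1); */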
static struct virtio_driver virtio_i2c_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
.probe = virtio_i2c_probe,
.remove = virtio_i2c_remove,
......
......@@ -408,12 +408,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
truesize = headroom ? PAGE_SIZE : truesize;
tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
tailroom = truesize - headroom;
buf = p - headroom;
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
tailroom -= hdr_padded_len + len;
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
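
The reworked tailroom arithmetic is easiest to check with assumed values:

/*
 * Assume truesize = 4096, headroom = 256, hdr_padded_len = 16,
 * hdr_len = 12, len = 1512 (packet including the virtio-net header):
 *
 *   tailroom  = truesize - headroom        = 3840
 *   len      -= hdr_len                    -> 1500
 *   tailroom -= hdr_padded_len + len       -> 3840 - 16 - 1500 = 2324
 *
 * i.e. tailroom is what remains of the buffer after the headroom,
 * the padded header, and the packet payload.
 */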
......@@ -3422,6 +3423,7 @@ static struct virtio_driver virtio_net_driver = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
......
......@@ -978,6 +978,7 @@ static unsigned int features[] = {
static struct virtio_driver virtio_scsi_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
......
......@@ -78,4 +78,12 @@ config VP_VDPA
help
This kernel module bridges virtio PCI device to vDPA bus.
config ALIBABA_ENI_VDPA
tristate "vDPA driver for Alibaba ENI"
select VIRTIO_PCI_LIB_LEGACY
depends on PCI_MSI && X86
help
vDPA driver for Alibaba ENI (Elastic Network Interface), which is
built upon the virtio 0.9.5 specification.
endif # VDPA
......@@ -5,3 +5,4 @@ obj-$(CONFIG_VDPA_USER) += vdpa_user/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
obj-$(CONFIG_VP_VDPA) += virtio_pci/
obj-$(CONFIG_ALIBABA_ENI_VDPA) += alibaba/
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ALIBABA_ENI_VDPA) += eni_vdpa.o
......@@ -499,7 +499,8 @@ static u32 get_dev_type(struct pci_dev *pdev)
return dev_type;
}
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
......
......@@ -63,7 +63,7 @@ struct mlx5_control_vq {
unsigned short head;
};
struct mlx5_ctrl_wq_ent {
struct mlx5_vdpa_wq_ent {
struct work_struct work;
struct mlx5_vdpa_dev *mvdev;
};
......
......@@ -248,7 +248,8 @@ static struct device vdpasim_blk_mgmtdev = {
.release = vdpasim_blk_mgmtdev_release,
};
static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
struct vdpasim *simdev;
......
......@@ -16,6 +16,7 @@
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/vdpa.h>
#include "vdpa_sim.h"
......@@ -29,12 +30,6 @@
#define VDPASIM_NET_VQ_NUM 2
static char *macaddr;
module_param(macaddr, charp, 0);
MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
static u8 macaddr_buf[ETH_ALEN];
static void vdpasim_net_work(struct work_struct *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
......@@ -112,9 +107,21 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
{
struct virtio_net_config *net_config = config;
net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
}
static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
const struct vdpa_dev_set_config *config)
{
struct virtio_net_config *vio_config = vdpasim->config;
if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR))
memcpy(vio_config->mac, config->net.mac, ETH_ALEN);
if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
vio_config->mtu = cpu_to_vdpasim16(vdpasim, config->net.mtu);
else
/* Setup default MTU to be 1500 */
vio_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
}
static void vdpasim_net_mgmtdev_release(struct device *dev)
......@@ -126,7 +133,8 @@ static struct device vdpasim_net_mgmtdev = {
.release = vdpasim_net_mgmtdev_release,
};
static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
struct vdpasim *simdev;
......@@ -146,6 +154,8 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
if (IS_ERR(simdev))
return PTR_ERR(simdev);
vdpasim_net_setup_config(simdev, config);
ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
if (ret)
goto reg_err;
......@@ -179,20 +189,14 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.device = &vdpasim_net_mgmtdev,
.id_table = id_table,
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
1 << VDPA_ATTR_DEV_NET_CFG_MTU),
};
static int __init vdpasim_net_init(void)
{
int ret;
if (macaddr) {
mac_pton(macaddr, macaddr_buf);
if (!is_valid_ether_addr(macaddr_buf))
return -EADDRNOTAVAIL;
} else {
eth_random_addr(macaddr_buf);
}
ret = device_register(&vdpasim_net_mgmtdev);
if (ret)
return ret;
......
......@@ -1503,7 +1503,8 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
return 0;
}
static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct vduse_dev *dev;
int ret;
......
......@@ -76,6 +76,17 @@ static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
return vp_modern_get_status(mdev);
}
static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
int irq = vp_vdpa->vring[idx].irq;
if (irq == VIRTIO_MSI_NO_VECTOR)
return -EINVAL;
return irq;
}
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
......@@ -427,6 +438,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.get_config = vp_vdpa_get_config,
.set_config = vp_vdpa_set_config,
.set_config_cb = vp_vdpa_set_config_cb,
.get_vq_irq = vp_vdpa_get_vq_irq,
};
static void vp_vdpa_free_irq_vectors(void *data)
......
......@@ -237,7 +237,6 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
struct vhost_vdpa_config __user *c)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa_config config;
unsigned long size = offsetof(struct vhost_vdpa_config, buf);
u8 *buf;
......@@ -251,7 +250,7 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
if (IS_ERR(buf))
return PTR_ERR(buf);
ops->set_config(vdpa, config.off, buf, config.len);
vdpa_set_config(vdpa, config.off, buf, config.len);
kvfree(buf);
return 0;
......
......@@ -20,6 +20,15 @@ config VIRTIO_PCI_LIB
PCI device with possible vendor specific extensions. Any
module that selects this module must depend on PCI.
config VIRTIO_PCI_LIB_LEGACY
tristate
help
Legacy PCI device (Virtio PCI Card 0.9.x Draft and older devices)
implementation.
This module implements the basic probe and control for devices
based on the legacy PCI interface. Any module that selects this
module must depend on PCI.
menuconfig VIRTIO_MENU
bool "Virtio drivers"
default y
......@@ -43,6 +52,7 @@ config VIRTIO_PCI_LEGACY
bool "Support for legacy virtio draft 0.9.X and older devices"
default y
depends on VIRTIO_PCI
select VIRTIO_PCI_LIB_LEGACY
help
Virtio PCI Card 0.9.X Draft (circa 2014) and older device support.
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o
obj-$(CONFIG_VIRTIO_PCI_LIB_LEGACY) += virtio_pci_legacy_dev.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
......
......@@ -24,17 +24,46 @@ MODULE_PARM_DESC(force_legacy,
"Force legacy mode for transitional virtio 1 devices");
#endif
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
/* disable irq handlers */
void vp_disable_cbs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
if (vp_dev->intx_enabled)
if (vp_dev->intx_enabled) {
/*
* The below synchronize() guarantees that any
* interrupt for this line arriving after
* synchronize_irq() has completed is guaranteed to see
* intx_soft_enabled == false.
*/
WRITE_ONCE(vp_dev->intx_soft_enabled, false);
synchronize_irq(vp_dev->pci_dev->irq);
}
for (i = 0; i < vp_dev->msix_vectors; ++i)
synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
/* enable irq handlers */
void vp_enable_cbs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
if (vp_dev->intx_enabled) {
disable_irq(vp_dev->pci_dev->irq);
/*
* The above disable_irq() provides TSO ordering and
* as such promotes the below store to store-release.
*/
WRITE_ONCE(vp_dev->intx_soft_enabled, true);
enable_irq(vp_dev->pci_dev->irq);
return;
}
for (i = 0; i < vp_dev->msix_vectors; ++i)
enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
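
The INTX path relies on a publish/consume pairing between vp_enable_cbs() and the handler shown in the next hunk; schematically:

/*
 * Writer (vp_enable_cbs)               Reader (vp_interrupt)
 * ----------------------               ---------------------
 * disable_irq(irq);
 * WRITE_ONCE(intx_soft_enabled, true); if (!READ_ONCE(intx_soft_enabled))
 * enable_irq(irq);                             return IRQ_NONE;
 *                                      isr = ioread8(...); ...
 *
 * disable_irq()/enable_irq() order the store, so the handler can never
 * observe a half-initialized device; vp_disable_cbs() is the mirror
 * image, using synchronize_irq() after clearing the flag.
 */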
/* the notify function used when creating a virt queue */
......@@ -84,6 +113,9 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
struct virtio_pci_device *vp_dev = opaque;
u8 isr;
if (!READ_ONCE(vp_dev->intx_soft_enabled))
return IRQ_NONE;
/* reading the ISR has the effect of also clearing it so it's very
* important to save off the value. */
isr = ioread8(vp_dev->isr);
......@@ -141,7 +173,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_config_changed, 0, vp_dev->msix_names[v],
vp_config_changed, IRQF_NO_AUTOEN,
vp_dev->msix_names[v],
vp_dev);
if (err)
goto error;
......@@ -160,7 +193,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_vring_interrupt, IRQF_NO_AUTOEN,
vp_dev->msix_names[v],
vp_dev);
if (err)
goto error;
......@@ -337,7 +371,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
"%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, 0,
vring_interrupt, IRQF_NO_AUTOEN,
vp_dev->msix_names[msix_vec],
vqs[i]);
if (err)
......@@ -549,6 +583,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;
rc = register_virtio_device(&vp_dev->vdev);
reg_dev = vp_dev;
if (rc)
......@@ -557,7 +593,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
return 0;
err_register:
if (vp_dev->ioaddr)
if (vp_dev->is_legacy)
virtio_pci_legacy_remove(vp_dev);
else
virtio_pci_modern_remove(vp_dev);
......@@ -587,7 +623,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
unregister_virtio_device(&vp_dev->vdev);
if (vp_dev->ioaddr)
if (vp_dev->is_legacy)
virtio_pci_legacy_remove(vp_dev);
else
virtio_pci_modern_remove(vp_dev);
......
......@@ -25,6 +25,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
......@@ -44,16 +45,14 @@ struct virtio_pci_vq_info {
struct virtio_pci_device {
struct virtio_device vdev;
struct pci_dev *pci_dev;
struct virtio_pci_legacy_device ldev;
struct virtio_pci_modern_device mdev;
/* In legacy mode, these two point to within ->legacy. */
bool is_legacy;
/* Where to read and clear interrupt */
u8 __iomem *isr;
/* Legacy only field */
/* the IO mapping for the PCI config space */
void __iomem *ioaddr;
/* a list of queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
......@@ -64,6 +63,7 @@ struct virtio_pci_device {
/* MSI-X support */
int msix_enabled;
int intx_enabled;
bool intx_soft_enabled;
cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
......@@ -102,8 +102,10 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
return container_of(vdev, struct virtio_pci_device, vdev);
}
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* disable irq handlers */
void vp_disable_cbs(struct virtio_device *vdev);
/* enable irq handlers */
void vp_enable_cbs(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
......
......@@ -14,6 +14,7 @@
* Michael S. Tsirkin <mst@redhat.com>
*/
#include "linux/virtio_pci_legacy.h"
#include "virtio_pci_common.h"
/* virtio config->get_features() implementation */
......@@ -23,7 +24,7 @@ static u64 vp_get_features(struct virtio_device *vdev)
/* When someone needs more than 32 feature bits, we'll need to
* steal a bit to indicate that the rest are somewhere else. */
return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
return vp_legacy_get_features(&vp_dev->ldev);
}
/* virtio config->finalize_features() implementation */
......@@ -38,7 +39,7 @@ static int vp_finalize_features(struct virtio_device *vdev)
BUG_ON((u32)vdev->features != vdev->features);
/* We only support 32 feature bits. */
iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
vp_legacy_set_features(&vp_dev->ldev, vdev->features);
return 0;
}
......@@ -48,7 +49,7 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ioaddr +
void __iomem *ioaddr = vp_dev->ldev.ioaddr +
VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
offset;
u8 *ptr = buf;
......@@ -64,7 +65,7 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ioaddr +
void __iomem *ioaddr = vp_dev->ldev.ioaddr +
VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
offset;
const u8 *ptr = buf;
......@@ -78,7 +79,7 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
return vp_legacy_get_status(&vp_dev->ldev);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
......@@ -86,28 +87,24 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* We should never be setting status to 0. */
BUG_ON(status == 0);
iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
vp_legacy_set_status(&vp_dev->ldev, status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
vp_legacy_set_status(&vp_dev->ldev, 0);
/* Flush out the status write, and flush in device writes,
* including MSi-X interrupts, if any. */
ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
vp_legacy_get_status(&vp_dev->ldev);
/* Disable VQ/configuration callbacks. */
vp_disable_cbs(vdev);
}
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
/* Setup the vector used for configuration events */
iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
/* Verify we had enough resources to assign the vector */
/* Will also flush the write out to device */
return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
return vp_legacy_config_vector(&vp_dev->ldev, vector);
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
......@@ -123,12 +120,9 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
int err;
u64 q_pfn;
/* Select the queue we're interested in */
iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
/* Check if queue is either not available or already active. */
num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
num = vp_legacy_get_queue_size(&vp_dev->ldev, index);
if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
return ERR_PTR(-ENOENT);
info->msix_vector = msix_vec;
......@@ -151,13 +145,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* activate the queue */
iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
vp_legacy_set_queue_address(&vp_dev->ldev, index, q_pfn);
vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
vq->priv = (void __force *)vp_dev->ldev.ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
msix_vec = vp_legacy_queue_vector(&vp_dev->ldev, index, msix_vec);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto out_deactivate;
......@@ -167,7 +160,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return vq;
out_deactivate:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
vp_legacy_set_queue_address(&vp_dev->ldev, index, 0);
out_del_vq:
vring_del_virtqueue(vq);
return ERR_PTR(err);
......@@ -178,22 +171,21 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
vp_legacy_queue_vector(&vp_dev->ldev, vq->index,
VIRTIO_MSI_NO_VECTOR);
/* Flush the write out to device */
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
ioread8(vp_dev->ldev.ioaddr + VIRTIO_PCI_ISR);
}
/* Select and deactivate the queue */
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
vp_legacy_set_queue_address(&vp_dev->ldev, vq->index, 0);
vring_del_virtqueue(vq);
}
static const struct virtio_config_ops virtio_pci_config_ops = {
.enable_cbs = vp_enable_cbs,
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
......@@ -211,51 +203,18 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;
struct pci_dev *pci_dev = vp_dev->pci_dev;
int rc;
/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
return -ENODEV;
if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
return -ENODEV;
}
rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
if (rc) {
rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
} else {
/*
* The virtio ring base address is expressed as a 32-bit PFN,
* with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
*/
dma_set_coherent_mask(&pci_dev->dev,
DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
}
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
ldev->pci_dev = pci_dev;
rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
rc = vp_legacy_probe(ldev);
if (rc)
return rc;
rc = -ENOMEM;
vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
if (!vp_dev->ioaddr)
goto err_iomap;
vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
* virtio devices and to identify the particular virtio driver by
* the subsystem ids */
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
vp_dev->isr = ldev->isr;
vp_dev->vdev.id = ldev->id;
vp_dev->vdev.config = &virtio_pci_config_ops;
......@@ -264,16 +223,11 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
vp_dev->del_vq = del_vq;
return 0;
err_iomap:
pci_release_region(pci_dev, 0);
return rc;
}
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
struct pci_dev *pci_dev = vp_dev->pci_dev;
struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_region(pci_dev, 0);
vp_legacy_remove(ldev);
}
// SPDX-License-Identifier: GPL-2.0-or-later
#include "linux/virtio_pci.h"
#include <linux/virtio_pci_legacy.h>
#include <linux/module.h>
#include <linux/pci.h>
/*
* vp_legacy_probe: probe the legacy virtio pci device; note that the
* caller is required to enable the PCI device before calling this function.
* @ldev: the legacy virtio-pci device
*
* Returns 0 on success, or a negative errno on failure
*/
int vp_legacy_probe(struct virtio_pci_legacy_device *ldev)
{
struct pci_dev *pci_dev = ldev->pci_dev;
int rc;
/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
return -ENODEV;
if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION)
return -ENODEV;
rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
if (rc) {
rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
} else {
/*
* The virtio ring base address is expressed as a 32-bit PFN,
* with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
*/
dma_set_coherent_mask(&pci_dev->dev,
DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
}
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
if (rc)
return rc;
ldev->ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ldev->ioaddr)
goto err_iomap;
ldev->isr = ldev->ioaddr + VIRTIO_PCI_ISR;
ldev->id.vendor = pci_dev->subsystem_vendor;
ldev->id.device = pci_dev->subsystem_device;
return 0;
err_iomap:
pci_release_region(pci_dev, 0);
return rc;
}
EXPORT_SYMBOL_GPL(vp_legacy_probe);
/*
* vp_legacy_remove: remove and clean up the legacy virtio pci device
* @ldev: the legacy virtio-pci device
*/
void vp_legacy_remove(struct virtio_pci_legacy_device *ldev)
{
struct pci_dev *pci_dev = ldev->pci_dev;
pci_iounmap(pci_dev, ldev->ioaddr);
pci_release_region(pci_dev, 0);
}
EXPORT_SYMBOL_GPL(vp_legacy_remove);
/*
* vp_legacy_get_features - get features from device
* @ldev: the legacy virtio-pci device
*
* Returns the features read from the device
*/
u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev)
{
return ioread32(ldev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}
EXPORT_SYMBOL_GPL(vp_legacy_get_features);
/*
* vp_legacy_get_driver_features - get driver features from device
* @ldev: the legacy virtio-pci device
*
* Returns the driver features read from the device
*/
u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev)
{
return ioread32(ldev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
}
EXPORT_SYMBOL_GPL(vp_legacy_get_driver_features);
/*
* vp_legacy_set_features - set features to device
* @ldev: the legacy virtio-pci device
* @features: the features set to device
*/
void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev,
u32 features)
{
iowrite32(features, ldev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
}
EXPORT_SYMBOL_GPL(vp_legacy_set_features);
/*
* vp_legacy_get_status - get the device status
* @ldev: the legacy virtio-pci device
*
* Returns the status read from device
*/
u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev)
{
return ioread8(ldev->ioaddr + VIRTIO_PCI_STATUS);
}
EXPORT_SYMBOL_GPL(vp_legacy_get_status);
/*
* vp_legacy_set_status - set status to device
* @ldev: the legacy virtio-pci device
* @status: the status set to device
*/
void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev,
u8 status)
{
iowrite8(status, ldev->ioaddr + VIRTIO_PCI_STATUS);
}
EXPORT_SYMBOL_GPL(vp_legacy_set_status);
/*
* vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue
* @ldev: the legacy virtio-pci device
* @index: queue index
* @vector: the MSI-X vector to use for this virtqueue
*
* Returns the vector read back from the device
*/
u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
u16 index, u16 vector)
{
iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
iowrite16(vector, ldev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
/* Flush the write out to device */
return ioread16(ldev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
}
EXPORT_SYMBOL_GPL(vp_legacy_queue_vector);
/*
* vp_legacy_config_vector - set the vector for config interrupt
* @ldev: the legacy virtio-pci device
* @vector: the config vector
*
* Returns the config vector read from the device
*/
u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev,
u16 vector)
{
/* Setup the vector used for configuration events */
iowrite16(vector, ldev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
/* Verify we had enough resources to assign the vector */
/* Will also flush the write out to device */
return ioread16(ldev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}
EXPORT_SYMBOL_GPL(vp_legacy_config_vector);
/*
* vp_legacy_set_queue_address - set the virtqueue address
* @ldev: the legacy virtio-pci device
* @index: the queue index
* @queue_pfn: pfn of the virtqueue
*/
void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
u16 index, u32 queue_pfn)
{
iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
iowrite32(queue_pfn, ldev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
}
EXPORT_SYMBOL_GPL(vp_legacy_set_queue_address);
/*
* vp_legacy_get_queue_enable - report whether a virtqueue is enabled
* @ldev: the legacy virtio-pci device
* @index: the queue index
*
* Returns whether a virtqueue is enabled or not
*/
bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
u16 index)
{
iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
return ioread32(ldev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
}
EXPORT_SYMBOL_GPL(vp_legacy_get_queue_enable);
/*
* vp_legacy_get_queue_size - get size for a virtqueue
* @ldev: the legacy virtio-pci device
* @index: the queue index
*
* Returns the size of the virtqueue
*/
u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
u16 index)
{
iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
return ioread16(ldev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
}
EXPORT_SYMBOL_GPL(vp_legacy_get_queue_size);
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Legacy Virtio PCI Device");
MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
MODULE_LICENSE("GPL");
......@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
*/
while (vp_modern_get_status(mdev))
msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
/* Disable VQ/configuration callbacks. */
vp_disable_cbs(vdev);
}
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
......@@ -380,6 +380,7 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
}
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.enable_cbs = vp_enable_cbs,
.get = NULL,
.set = NULL,
.generation = vp_generation,
......@@ -397,6 +398,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
};
static const struct virtio_config_ops virtio_pci_config_ops = {
.enable_cbs = vp_enable_cbs,
.get = vp_get,
.set = vp_set,
.generation = vp_generation,
......
......@@ -14,6 +14,9 @@
#include <linux/spinlock.h>
#include <xen/xen.h>
static bool force_used_validation = false;
module_param(force_used_validation, bool, 0444);
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
......@@ -79,8 +82,8 @@ struct vring_desc_state_packed {
};
struct vring_desc_extra {
dma_addr_t addr; /* Buffer DMA addr. */
u32 len; /* Buffer length. */
dma_addr_t addr; /* Descriptor DMA addr. */
u32 len; /* Descriptor length. */
u16 flags; /* Descriptor flags. */
u16 next; /* The next desc state in a list. */
};
......@@ -182,6 +185,9 @@ struct vring_virtqueue {
} packed;
};
/* Per-descriptor in buffer length */
u32 *buflen;
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
......@@ -490,6 +496,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
unsigned int i, n, avail, descs_used, prev, err_idx;
int head;
bool indirect;
u32 buflen = 0;
START_USE(vq);
......@@ -571,6 +578,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
indirect);
buflen += sg->length;
}
}
/* Last one doesn't continue. */
......@@ -610,6 +618,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
else
vq->split.desc_state[head].indir_desc = ctx;
/* Store in buffer length if necessary */
if (vq->buflen)
vq->buflen[head] = buflen;
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
......@@ -784,6 +796,11 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
if (vq->buflen && unlikely(*len > vq->buflen[i])) {
BAD_RING(vq, "used len %d is larger than in buflen %u\n",
*len, vq->buflen[i]);
return NULL;
}
/* detach_buf_split clears data, so grab it now. */
ret = vq->split.desc_state[i].data;
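
The effect of the new check, with assumed values:

/*
 * Suppose the driver queued in-buffers totalling vq->buflen[i] = 1500
 * bytes for head i, and a buggy or hostile device then reports a used
 * length of 4096:
 *
 *   *len (4096) > vq->buflen[i] (1500)  ->  BAD_RING(), return NULL
 *
 * A driver that trusts *len can no longer be tricked into reading
 * past the buffer it actually provided; drivers that opt out set
 * .suppress_used_validation (see the virtio-blk/net/scsi hunks).
 */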
......@@ -1062,9 +1079,12 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
unsigned int i, n, err_idx;
u16 head, id;
dma_addr_t addr;
u32 buflen = 0;
head = vq->packed.next_avail_idx;
desc = alloc_indirect_packed(total_sg, gfp);
if (!desc)
return -ENOMEM;
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
......@@ -1089,6 +1109,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
desc[i].addr = cpu_to_le64(addr);
desc[i].len = cpu_to_le32(sg->length);
i++;
if (n >= out_sgs)
buflen += sg->length;
}
}
......@@ -1142,6 +1164,10 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
vq->packed.desc_state[id].indir_desc = desc;
vq->packed.desc_state[id].last = id;
/* Store in buffer length if necessary */
if (vq->buflen)
vq->buflen[id] = buflen;
vq->num_added += 1;
pr_debug("Added buffer head %i to %p\n", head, vq);
......@@ -1176,6 +1202,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
unsigned int i, n, c, descs_used, err_idx;
__le16 head_flags, flags;
u16 head, id, prev, curr, avail_used_flags;
int err;
u32 buflen = 0;
START_USE(vq);
......@@ -1191,9 +1219,14 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
BUG_ON(total_sg == 0);
if (virtqueue_use_indirect(_vq, total_sg))
return virtqueue_add_indirect_packed(vq, sgs, total_sg,
out_sgs, in_sgs, data, gfp);
if (virtqueue_use_indirect(_vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
in_sgs, data, gfp);
if (err != -ENOMEM)
return err;
/* fall back on direct */
}
head = vq->packed.next_avail_idx;
avail_used_flags = vq->packed.avail_used_flags;
......@@ -1250,6 +1283,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
1 << VRING_PACKED_DESC_F_AVAIL |
1 << VRING_PACKED_DESC_F_USED;
}
if (n >= out_sgs)
buflen += sg->length;
}
}
......@@ -1269,6 +1304,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
vq->packed.desc_state[id].indir_desc = ctx;
vq->packed.desc_state[id].last = prev;
/* Store in buffer length if necessary */
if (vq->buflen)
vq->buflen[id] = buflen;
/*
* A driver MUST NOT make the first descriptor in the list
* available before all subsequent descriptors comprising
......@@ -1455,6 +1494,11 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
BAD_RING(vq, "id %u is not a head!\n", id);
return NULL;
}
if (vq->buflen && unlikely(*len > vq->buflen[id])) {
BAD_RING(vq, "used len %d is larger than in buflen %u\n",
*len, vq->buflen[id]);
return NULL;
}
/* detach_buf_packed clears data, so grab it now. */
ret = vq->packed.desc_state[id].data;
......@@ -1660,6 +1704,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
size_t ring_size_in_bytes, event_size_in_bytes;
......@@ -1749,6 +1794,15 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (!vq->packed.desc_extra)
goto err_desc_extra;
if (!drv->suppress_used_validation || force_used_validation) {
vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
GFP_KERNEL);
if (!vq->buflen)
goto err_buflen;
} else {
vq->buflen = NULL;
}
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
......@@ -1761,6 +1815,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
err_buflen:
kfree(vq->packed.desc_extra);
err_desc_extra:
kfree(vq->packed.desc_state);
err_desc_state:
......@@ -2168,6 +2224,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *),
const char *name)
{
struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
struct vring_virtqueue *vq;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
......@@ -2227,6 +2284,15 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (!vq->split.desc_extra)
goto err_extra;
if (!drv->suppress_used_validation || force_used_validation) {
vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
GFP_KERNEL);
if (!vq->buflen)
goto err_buflen;
} else {
vq->buflen = NULL;
}
/* Put everything in free lists. */
vq->free_head = 0;
memset(vq->split.desc_state, 0, vring.num *
......@@ -2237,6 +2303,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
err_buflen:
kfree(vq->split.desc_extra);
err_extra:
kfree(vq->split.desc_state);
err_state:
......
......@@ -65,9 +65,8 @@ static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
ops->set_config(vdpa, offset, buf, len);
vdpa_set_config(vdpa, offset, buf, len);
}
static u32 virtio_vdpa_generation(struct virtio_device *vdev)
......@@ -145,7 +144,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
/* Assume split virtqueue, switch to packed if necessary */
struct vdpa_vq_state state = {0};
unsigned long flags;
u32 align, num;
u32 align, max_num, min_num = 1;
bool may_reduce_num = true;
int err;
if (!name)
......@@ -163,16 +163,21 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!info)
return ERR_PTR(-ENOMEM);
num = ops->get_vq_num_max(vdpa);
if (num == 0) {
max_num = ops->get_vq_num_max(vdpa);
if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
}
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
may_reduce_num = (max_num == min_num) ? false : true;
/* Create the vring */
align = ops->get_vq_align(vdpa);
vq = vring_create_virtqueue(index, num, align, vdev,
true, true, ctx,
vq = vring_create_virtqueue(index, max_num, align, vdev,
true, may_reduce_num, ctx,
virtio_vdpa_notify, callback, name);
if (!vq) {
err = -ENOMEM;
......
......@@ -6,6 +6,8 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
#include <linux/virtio_net.h>
#include <linux/if_ether.h>
/**
* struct vdpa_callback - vDPA callback definition.
......@@ -63,6 +65,7 @@ struct vdpa_mgmt_dev;
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
* @config: the configuration ops for this device.
* @cf_mutex: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
* @use_va: indicate whether virtual address must be used by this device
......@@ -74,6 +77,7 @@ struct vdpa_device {
struct device dev;
struct device *dma_dev;
const struct vdpa_config_ops *config;
struct mutex cf_mutex; /* Protects get/set config */
unsigned int index;
bool features_valid;
bool use_va;
......@@ -91,6 +95,14 @@ struct vdpa_iova_range {
u64 last;
};
struct vdpa_dev_set_config {
struct {
u8 mac[ETH_ALEN];
u16 mtu;
} net;
u64 mask;
};
/**
* Corresponding file area for device memory mapping
* @file: vma->vm_file for the mapping
......@@ -171,6 +183,9 @@ struct vdpa_map_file {
* @get_vq_num_max: Get the max size of virtqueue
* @vdev: vdpa device
* Returns u16: max size of virtqueue
* @get_vq_num_min: Get the min size of virtqueue (optional)
* @vdev: vdpa device
* Returns u16: min size of virtqueue
* @get_device_id: Get virtio device id
* @vdev: vdpa device
* Returns u32: virtio device id
......@@ -257,7 +272,7 @@ struct vdpa_config_ops {
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
......@@ -266,6 +281,7 @@ struct vdpa_config_ops {
void (*set_config_cb)(struct vdpa_device *vdev,
struct vdpa_callback *cb);
u16 (*get_vq_num_max)(struct vdpa_device *vdev);
u16 (*get_vq_num_min)(struct vdpa_device *vdev);
u32 (*get_device_id)(struct vdpa_device *vdev);
u32 (*get_vendor_id)(struct vdpa_device *vdev);
u8 (*get_status)(struct vdpa_device *vdev);
......@@ -382,26 +398,16 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
return ops->set_features(vdev, features);
}
static inline void vdpa_get_config(struct vdpa_device *vdev,
unsigned int offset, void *buf,
unsigned int len)
{
const struct vdpa_config_ops *ops = vdev->config;
/*
* Config accesses aren't supposed to trigger before features are set.
* If it does happen we assume a legacy guest.
*/
if (!vdev->features_valid)
vdpa_set_features(vdev, 0);
ops->get_config(vdev, offset, buf, len);
}
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
const void *buf, unsigned int length);
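
The helper bodies live in drivers/vdpa/vdpa.c and are not part of this diff; a minimal sketch of what vdpa_set_config() is assumed to do, given the new cf_mutex:

/* Sketch, assuming only cf_mutex serialization around the op: */
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
                     const void *buf, unsigned int length)
{
        mutex_lock(&dev->cf_mutex);     /* serialize vs. vdpa_get_config() */
        dev->config->set_config(dev, offset, buf, length);
        mutex_unlock(&dev->cf_mutex);
}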
/**
* struct vdpa_mgmtdev_ops - vdpa device ops
* @dev_add: Add a vdpa device using alloc and register
* @mdev: parent device to use for device addition
* @name: name of the new vdpa device
* @config: config attributes to apply to the device under creation
* Driver need to add a new device using _vdpa_register_device()
* after fully initializing the vdpa device. Driver must return 0
* on success or appropriate error code.
......@@ -412,14 +418,25 @@ static inline void vdpa_get_config(struct vdpa_device *vdev,
* _vdpa_unregister_device().
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
/**
* struct vdpa_mgmt_dev - vdpa management device
* @device: Management parent device
* @ops: operations supported by management device
* @id_table: Pointer to device id table of supported ids
* @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
* management device support during dev_add callback
* @list: list entry
*/
struct vdpa_mgmt_dev {
struct device *device;
const struct vdpa_mgmtdev_ops *ops;
const struct virtio_device_id *id_table; /* supported ids */
const struct virtio_device_id *id_table;
u64 config_attr_mask;
struct list_head list;
};
......
......@@ -152,6 +152,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
* @feature_table_size: number of entries in the feature table array.
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
* @suppress_used_validation: set to not have core validate used length
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @scan: optional function to call after successful probe; intended
* for virtio-scsi to invoke a scan.
......@@ -168,6 +169,7 @@ struct virtio_driver {
unsigned int feature_table_size;
const unsigned int *feature_table_legacy;
unsigned int feature_table_size_legacy;
bool suppress_used_validation;
int (*validate)(struct virtio_device *dev);
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
......
......@@ -23,6 +23,8 @@ struct virtio_shm_region {
* any of @get/@set, @get_status/@set_status, or @get_features/
* @finalize_features are NOT safe to be called from an atomic
* context.
* @enable_cbs: enable the callbacks
* vdev: the virtio_device
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
......@@ -75,6 +77,7 @@ struct virtio_shm_region {
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
void (*enable_cbs)(struct virtio_device *vdev);
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
......@@ -229,6 +232,9 @@ void virtio_device_ready(struct virtio_device *dev)
{
unsigned status = dev->config->get_status(dev);
if (dev->config->enable_cbs)
dev->config->enable_cbs(dev);
BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
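
The net effect for drivers: vring callbacks stay masked from request_irq() (IRQF_NO_AUTOEN) until virtio_device_ready() runs. A sketch of a typical probe tail under the new scheme (hypothetical driver):

static int example_probe(struct virtio_device *vdev)
{
        /* find vqs, allocate state: no vq callback can fire yet */

        virtio_device_ready(vdev);      /* enable_cbs(), then DRIVER_OK */

        /* only from this point on may vq callbacks run */
        return 0;
}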
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_PCI_LEGACY_H
#define _LINUX_VIRTIO_PCI_LEGACY_H
#include "linux/mod_devicetable.h"
#include <linux/pci.h>
#include <linux/virtio_pci.h>
struct virtio_pci_legacy_device {
struct pci_dev *pci_dev;
/* Where to read and clear interrupt */
u8 __iomem *isr;
/* The IO mapping for the PCI config space (legacy mode only) */
void __iomem *ioaddr;
struct virtio_device_id id;
};
u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev);
u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev);
void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev,
u32 features);
u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev);
void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev,
u8 status);
u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
u16 idx, u16 vector);
u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev,
u16 vector);
void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
u16 index, u32 queue_pfn);
bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
u16 idx);
void vp_legacy_set_queue_size(struct virtio_pci_legacy_device *ldev,
u16 idx, u16 size);
u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
u16 idx);
int vp_legacy_probe(struct virtio_pci_legacy_device *ldev);
void vp_legacy_remove(struct virtio_pci_legacy_device *ldev);
#endif
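
A condensed, hypothetical sketch of how a transport drives this API, mirroring the virtio_pci_legacy.c conversion above (assumes <linux/virtio_config.h> for the status bits; error handling elided):

static int bringup_vq0(struct virtio_pci_legacy_device *ldev, u32 pfn)
{
        u16 num;

        if (vp_legacy_probe(ldev))              /* map BAR0, read IDs */
                return -ENODEV;

        num = vp_legacy_get_queue_size(ldev, 0);
        if (!num || vp_legacy_get_queue_enable(ldev, 0))
                return -ENOENT;                 /* absent or already live */

        vp_legacy_set_queue_address(ldev, 0, pfn);
        vp_legacy_set_status(ldev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        return 0;
}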
......@@ -17,6 +17,7 @@ enum vdpa_command {
VDPA_CMD_DEV_NEW,
VDPA_CMD_DEV_DEL,
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
};
enum vdpa_attr {
......@@ -32,6 +33,12 @@ enum vdpa_attr {
VDPA_ATTR_DEV_VENDOR_ID, /* u32 */
VDPA_ATTR_DEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */
VDPA_ATTR_DEV_MIN_VQ_SIZE, /* u16 */
VDPA_ATTR_DEV_NET_CFG_MACADDR, /* binary */
VDPA_ATTR_DEV_NET_STATUS, /* u8 */
VDPA_ATTR_DEV_NET_CFG_MAX_VQP, /* u16 */
VDPA_ATTR_DEV_NET_CFG_MTU, /* u16 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
......
......@@ -11,9 +11,15 @@
#include <linux/const.h>
#include <linux/types.h>
/* Virtio I2C Feature bits */
#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0
/* The bit 0 of the @virtio_i2c_out_hdr.@flags, used to group the requests */
#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0)
/* The bit 1 of the @virtio_i2c_out_hdr.@flags, used to mark a buffer as read */
#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1)
/**
* struct virtio_i2c_out_hdr - the virtio I2C message OUT header
* @addr: the controlled device address
......