Commit 70575e77 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "Some last minute fixes.

  The virtio-blk one is the most important one since it was actually
  seen in the field, but the rest of them are small and clearly safe,
  everything here has been in next for a while"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa/mlx5: Fix MQ to support non power of two num queues
  vduse: prevent uninitialized memory accesses
  virtio-blk: Fix WARN_ON_ONCE in virtio_queue_rq()
  virtio_test: fixup for vq reset
  virtio-crypto: fix memory-leak
  vdpa/ifcvf: fix the calculation of queuepair
parents 7bc6e90d a43ae805
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -322,14 +322,14 @@ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(status))
 		return status;
 
-	blk_mq_start_request(req);
-
 	vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
 	if (unlikely(vbr->sg_table.nents < 0)) {
 		virtblk_cleanup_cmd(req);
 		return BLK_STS_RESOURCE;
 	}
 
+	blk_mq_start_request(req);
+
 	return BLK_STS_OK;
 }
@@ -391,8 +391,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
 }
 
 static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
-				  struct request **rqlist,
-				  struct request **requeue_list)
+				  struct request **rqlist)
 {
 	unsigned long flags;
 	int err;
@@ -408,7 +407,7 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
 		if (err) {
 			virtblk_unmap_data(req, vbr);
 			virtblk_cleanup_cmd(req);
-			rq_list_add(requeue_list, req);
+			blk_mq_requeue_request(req, true);
 		}
 	}
@@ -436,7 +435,7 @@ static void virtio_queue_rqs(struct request **rqlist)
 		if (!next || req->mq_hctx != next->mq_hctx) {
 			req->rq_next = NULL;
-			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+			kick = virtblk_add_req_batch(vq, rqlist);
 			if (kick)
 				virtqueue_notify(vq->vq);
...
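Note on the virtio-blk fix: blk_mq_start_request() warns if the request is not idle, so a request that was started and then failed mapping cannot simply be fed back through the normal prep path; that resubmission is what tripped the WARN_ON_ONCE. Starting the request only after virtblk_map_data() succeeds means the BLK_STS_RESOURCE early return happens on a never-started request, and the batch error path now goes through blk_mq_requeue_request(), the block-layer interface that does accept started requests. A minimal userspace model of why the ordering matters (the fake_request struct and prep_* functions are hypothetical, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct fake_request { bool started; bool mapped; };

/* Old ordering: start first, map second. A mapping failure leaves a
 * started request behind, which the simple retry path must not re-prep. */
static bool prep_old(struct fake_request *rq, bool map_ok)
{
	rq->started = true;
	rq->mapped = map_ok;
	return map_ok;
}

/* Fixed ordering: map first, start only on success, so a failed prep
 * never produces a started-but-unmapped request. */
static bool prep_fixed(struct fake_request *rq, bool map_ok)
{
	rq->mapped = map_ok;
	if (!map_ok)
		return false;
	rq->started = true;
	return true;
}

int main(void)
{
	struct fake_request a = { 0 }, b = { 0 };

	prep_old(&a, false);
	prep_fixed(&b, false);
	printf("on failure: old started=%d, fixed started=%d\n",
	       a.started, b.started); /* old 1 (unsafe to retry), fixed 0 (safe) */
	return 0;
}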
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req(
 	struct virtio_crypto_akcipher_request *vc_akcipher_req,
 	struct akcipher_request *req, int err)
 {
+	kfree(vc_akcipher_req->src_buf);
+	kfree(vc_akcipher_req->dst_buf);
+	vc_akcipher_req->src_buf = NULL;
+	vc_akcipher_req->dst_buf = NULL;
 	virtcrypto_clear_request(&vc_akcipher_req->base);
 	crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
...
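Note: the virtio-crypto leak fix releases the source and destination buffers when the request is finalized and clears the pointers afterwards, so any later cleanup path sees NULL and cannot double-free (kfree(NULL) is a no-op). The same free-and-reset idiom in standalone C (the req_bufs struct is illustrative, not the driver's type):

#include <stdlib.h>

struct req_bufs {
	void *src_buf;
	void *dst_buf;
};

/* Free both buffers and clear the pointers so a repeated cleanup
 * cannot double-free; free(NULL), like kfree(NULL), is a no-op. */
static void req_bufs_release(struct req_bufs *r)
{
	free(r->src_buf);
	free(r->dst_buf);
	r->src_buf = NULL;
	r->dst_buf = NULL;
}

int main(void)
{
	struct req_bufs r = { malloc(16), malloc(16) };

	req_bufs_release(&r);
	req_bufs_release(&r); /* safe: pointers are already NULL */
	return 0;
}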
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -323,7 +323,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
 	u32 q_pair_id;
 
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
-	q_pair_id = qid / hw->nr_vring;
+	q_pair_id = qid / 2;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
 	last_avail_idx = vp_ioread16(avail_idx_addr);
 
@@ -337,7 +337,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 	u32 q_pair_id;
 
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
-	q_pair_id = qid / hw->nr_vring;
+	q_pair_id = qid / 2;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
 	hw->vring[qid].last_avail_idx = num;
 	vp_iowrite16(num, avail_idx_addr);
...
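Note: virtqueues come in RX/TX pairs, so queue qid lives in pair qid / 2 at slot qid % 2; dividing by hw->nr_vring instead collapses every valid qid into pair 0, so the state of every pair but the first was read from and written to the wrong registers. A quick standalone check of the arithmetic (6 rings, i.e. 3 queue pairs, chosen purely as an example):

#include <stdio.h>

int main(void)
{
	unsigned int nr_vring = 6; /* example device: 3 RX/TX queue pairs */
	unsigned int qid;

	for (qid = 0; qid < nr_vring; qid++)
		printf("qid %u: buggy pair %u, fixed pair %u, slot %u\n",
		       qid, qid / nr_vring, qid / 2, qid % 2);
	/* buggy pair is 0 for every qid; fixed pairs come out 0,0,1,1,2,2 */
	return 0;
}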
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1320,6 +1320,8 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
 
 static int create_rqt(struct mlx5_vdpa_net *ndev)
 {
+	int rqt_table_size = roundup_pow_of_two(ndev->rqt_size);
+	int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2);
 	__be32 *list;
 	void *rqtc;
 	int inlen;
@@ -1327,7 +1329,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 	int i, j;
 	int err;
 
-	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num);
 	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1336,12 +1338,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
 
 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
-	MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+	MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size);
 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
-	for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+	for (i = 0, j = 0; i < act_sz; i++, j += 2)
 		list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
 
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
 	err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
 	kfree(in);
 	if (err)
@@ -1354,6 +1356,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 
 static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 {
+	int act_sz = roundup_pow_of_two(num / 2);
 	__be32 *list;
 	void *rqtc;
 	int inlen;
@@ -1361,7 +1364,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 	int i, j;
 	int err;
 
-	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num);
 	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1372,10 +1375,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
 
 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
-	for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+	for (i = 0, j = 0; i < act_sz; i++, j = j + 2)
 		list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
 
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
 	err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
 	kfree(in);
 	if (err)
...
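Note: the mlx5 RQT must be sized to a power of two, but a multiqueue device may legitimately be configured with a non-power-of-two number of queue pairs. The fix rounds the table and actual sizes up with roundup_pow_of_two() and lets j % cur_num_vqs wrap, so the padding entries simply repeat the leading even-indexed (receive) virtqueues. A standalone sketch of the arithmetic for 3 queue pairs, with roundup_pow_of_two() reimplemented for userspace:

#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int cur_num_vqs = 6; /* 3 RX/TX pairs: 3 is not a power of two */
	unsigned int act_sz = roundup_pow_of_two(cur_num_vqs / 2);
	unsigned int i, j;

	printf("act_sz = %u\n", act_sz); /* 4 */
	for (i = 0, j = 0; i < act_sz; i++, j += 2)
		printf("RQT entry %u -> data vq %u\n", i, j % cur_num_vqs);
	/* entries point at vqs 0, 2, 4 and wrap back to 0 to pad the table */
	return 0;
}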
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -673,10 +673,15 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
 {
 	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
 
-	if (offset > dev->config_size ||
-	    len > dev->config_size - offset)
+	/* Initialize the buffer in case of partial copy. */
+	memset(buf, 0, len);
+
+	if (offset > dev->config_size)
 		return;
 
+	if (len > dev->config_size - offset)
+		len = dev->config_size - offset;
+
 	memcpy(buf, dev->config + offset, len);
 }
...
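Note: vduse_vdpa_get_config() returns void, so with the old bounds check an out-of-range or oversized read silently left buf untouched, and a caller could go on to expose whatever uninitialized bytes it held. Zero-filling the whole buffer up front and clamping len makes every path produce fully initialized, bounded output. The same clamp-and-zero pattern in standalone C (hypothetical names):

#include <stdint.h>
#include <string.h>

/* Copy a window of a bounded config space into buf: pre-zero so no
 * path can leak uninitialized bytes, then clamp len to what exists. */
static void config_read(const uint8_t *config, size_t config_size,
			size_t offset, uint8_t *buf, size_t len)
{
	memset(buf, 0, len);
	if (offset > config_size)
		return;
	if (len > config_size - offset)
		len = config_size - offset; /* partial copy; the rest stays zero */
	memcpy(buf, config + offset, len);
}

int main(void)
{
	const uint8_t cfg[4] = { 1, 2, 3, 4 };
	uint8_t out[8];

	config_read(cfg, sizeof(cfg), 2, out, sizeof(out)); /* copies 2 bytes, zeros 6 */
	return 0;
}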
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -14,6 +14,7 @@ struct virtio_device {
 	u64 features;
 	struct list_head vqs;
 	spinlock_t vqs_list_lock;
+	const struct virtio_config_ops *config;
 };
 
 struct virtqueue {
@@ -23,7 +24,9 @@ struct virtqueue {
 	struct virtio_device *vdev;
 	unsigned int index;
 	unsigned int num_free;
+	unsigned int num_max;
 	void *priv;
+	bool reset;
 };
 
 /* Interfaces exported by virtio_ring. */
...
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -3,6 +3,11 @@
 #include <linux/virtio.h>
 #include <uapi/linux/virtio_config.h>
 
+struct virtio_config_ops {
+	int (*disable_vq_and_reset)(struct virtqueue *vq);
+	int (*enable_vq_after_reset)(struct virtqueue *vq);
+};
+
 /*
  * __virtio_test_bit - helper to test feature bits. For use by transports.
  * Devices should normally use virtio_has_feature,
...
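Note: these stubs exist only so the userspace harness in tools/virtio keeps building now that the kernel headers grew num_max, the reset flag, and the vq-reset config ops; they implement no behavior themselves. A self-contained illustration of how such an ops table gets wired and invoked (the fake_* callbacks and the trimmed-down structs are hypothetical, for demonstration only):

#include <stdbool.h>
#include <stdio.h>

struct virtqueue;

struct virtio_config_ops {
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
};

struct virtio_device {
	const struct virtio_config_ops *config;
};

struct virtqueue {
	struct virtio_device *vdev;
	unsigned int num_max; /* largest ring size the device accepts */
	bool reset;           /* true while the queue is quiesced */
};

static int fake_disable(struct virtqueue *vq)
{
	vq->reset = true;
	return 0;
}

static int fake_enable(struct virtqueue *vq)
{
	vq->reset = false;
	return 0;
}

int main(void)
{
	const struct virtio_config_ops ops = { fake_disable, fake_enable };
	struct virtio_device vdev = { &ops };
	struct virtqueue vq = { &vdev, 256, false };

	vq.vdev->config->disable_vq_and_reset(&vq);
	printf("after disable: reset=%d\n", vq.reset);
	vq.vdev->config->enable_vq_after_reset(&vq);
	printf("after enable: reset=%d\n", vq.reset);
	return 0;
}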