Commit 41c03ba9 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "Mostly fixes all over the place, a couple of cleanups"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (32 commits)
  virtio_blk: Fix signedness bug in virtblk_prep_rq()
  vdpa_sim_net: should not drop the multicast/broadcast packet
  vdpasim: fix memory leak when freeing IOTLBs
  vdpa: conditionally fill max max queue pair for stats
  vdpa/vp_vdpa: fix kfree a wrong pointer in vp_vdpa_remove
  vduse: Validate vq_num in vduse_validate_config()
  tools/virtio: remove smp_read_barrier_depends()
  tools/virtio: remove stray characters
  vhost_vdpa: fix the crash in unmap a large memory
  virtio: Implementing attribute show with sysfs_emit
  virtio-crypto: fix memory leak in virtio_crypto_alg_skcipher_close_session()
  tools/virtio: Variable type completion
  vdpa_sim: fix vringh initialization in vdpasim_queue_ready()
  virtio_blk: use UINT_MAX instead of -1U
  vhost-vdpa: fix an iotlb memory leak
  vhost: fix range used in translate_desc()
  vringh: fix range used in iotlb_translate()
  vhost/vsock: Fix error handling in vhost_vsock_init()
  vdpa_sim: fix possible memory leak in vdpasim_net_init() and vdpasim_blk_init()
  tools: Delete the unneeded semicolon after curly braces
  ...
parents 512dee0c a26116c1
@@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 		virtqueue_notify(vq->vq);
 }
 
+static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
+{
+	virtblk_cleanup_cmd(req);
+	switch (rc) {
+	case -ENOSPC:
+		return BLK_STS_DEV_RESOURCE;
+	case -ENOMEM:
+		return BLK_STS_RESOURCE;
+	default:
+		return BLK_STS_IOERR;
+	}
+}
+
 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
 				    struct virtio_blk *vblk,
 				    struct request *req,
 				    struct virtblk_req *vbr)
 {
 	blk_status_t status;
+	int num;
 
 	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
 	if (unlikely(status))
 		return status;
 
-	vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
-	if (unlikely(vbr->sg_table.nents < 0)) {
-		virtblk_cleanup_cmd(req);
-		return BLK_STS_RESOURCE;
-	}
+	num = virtblk_map_data(hctx, req, vbr);
+	if (unlikely(num < 0))
+		return virtblk_fail_to_queue(req, -ENOMEM);
+	vbr->sg_table.nents = num;
 
 	blk_mq_start_request(req);
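
Note on the signedness fix above: sg_table.nents is unsigned, so storing a negative return from virtblk_map_data() into it makes the error check compare an unsigned value against zero, which is always false. A minimal userspace sketch (illustrative types, not the driver's):

    #include <stdio.h>

    struct sg_table_like { unsigned int nents; };  /* stands in for sg_table */

    static int map_data(void) { return -12; }      /* pretend -ENOMEM */

    int main(void)
    {
            struct sg_table_like t;
            int num;

            t.nents = map_data();   /* wraps to a huge positive value */
            printf("old check fires? %d\n", t.nents < 0 ? 1 : 0);  /* 0: missed */

            num = map_data();       /* signed temporary catches it */
            printf("new check fires? %d\n", num < 0 ? 1 : 0);      /* 1 */
            return 0;
    }
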
@@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 			blk_mq_stop_hw_queue(hctx);
 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 		virtblk_unmap_data(req, vbr);
-		virtblk_cleanup_cmd(req);
-		switch (err) {
-		case -ENOSPC:
-			return BLK_STS_DEV_RESOURCE;
-		case -ENOMEM:
-			return BLK_STS_RESOURCE;
-		default:
-			return BLK_STS_IOERR;
-		}
+		return virtblk_fail_to_queue(req, err);
 	}
 
 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -991,7 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	blk_queue_max_segments(q, sg_elems);
 
 	/* No real sector limit. */
-	blk_queue_max_hw_sectors(q, -1U);
+	blk_queue_max_hw_sectors(q, UINT_MAX);
 
 	max_size = virtio_max_dma_size(vdev);
...
@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
 		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
 		       ctrl_status->status, destroy_session->session_id);
-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
 	}
 
 	err = 0;
...
@@ -116,8 +116,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
 			  int inlen);
 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+			     bool *change_map, unsigned int asid);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid);
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
 
 #define mlx5_vdpa_warn(__dev, format, ...)                                        \
...
@@ -311,7 +311,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
 	u64 st;
 	u64 sz;
 	int err;
-	int i = 0;
 
 	st = start;
 	while (size) {
@@ -336,7 +335,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
 		mr->num_directs++;
 		mr->num_klms++;
 		st += sz;
-		i++;
 	}
 
 	list_splice_tail(&tmp, &mr->head);
 	return 0;
@@ -511,7 +509,8 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
 	mutex_unlock(&mr->mkey_mtx);
 }
 
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+				struct vhost_iotlb *iotlb, unsigned int asid)
 {
 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	int err;
@@ -519,42 +518,49 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	if (mr->initialized)
 		return 0;
 
-	if (iotlb)
-		err = create_user_mr(mvdev, iotlb);
-	else
-		err = create_dma_mr(mvdev, mr);
+	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+		if (iotlb)
+			err = create_user_mr(mvdev, iotlb);
+		else
+			err = create_dma_mr(mvdev, mr);
 
-	if (err)
-		return err;
+		if (err)
+			return err;
+	}
 
-	err = dup_iotlb(mvdev, iotlb);
-	if (err)
-		goto out_err;
+	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+		err = dup_iotlb(mvdev, iotlb);
+		if (err)
+			goto out_err;
+	}
 
 	mr->initialized = true;
 	return 0;
 
 out_err:
-	if (iotlb)
-		destroy_user_mr(mvdev, mr);
-	else
-		destroy_dma_mr(mvdev, mr);
+	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+		if (iotlb)
+			destroy_user_mr(mvdev, mr);
+		else
+			destroy_dma_mr(mvdev, mr);
+	}
 
 	return err;
 }
 
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid)
 {
 	int err;
 
 	mutex_lock(&mvdev->mr.mkey_mtx);
-	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+	err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	mutex_unlock(&mvdev->mr.mkey_mtx);
 
 	return err;
 }
 
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map)
+			     bool *change_map, unsigned int asid)
 {
 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	int err = 0;
@@ -566,7 +572,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
 		*change_map = true;
 	}
 	if (!*change_map)
-		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+		err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	mutex_unlock(&mr->mkey_mtx);
 
 	return err;
...
@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
 	eth_broadcast_addr(dmac_c);
 	ether_addr_copy(dmac_v, mac);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+	}
 	if (tagged) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
 	}
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 		/* Need recreate the flow table entry, so that the packet could forward back
 		 */
-		mac_vlan_del(ndev, ndev->config.mac, 0, false);
+		mac_vlan_del(ndev, mac_back, 0, false);
 
 		if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
 			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
@@ -1821,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 	size_t read;
 	u16 id;
 
+	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
+		return status;
+
 	switch (cmd) {
 	case VIRTIO_NET_CTRL_VLAN_ADD:
 		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
@@ -2389,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
 	}
 }
 
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+				struct vhost_iotlb *iotlb, unsigned int asid)
 {
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
@@ -2401,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	teardown_driver(ndev);
 	mlx5_vdpa_destroy_mr(mvdev);
-	err = mlx5_vdpa_create_mr(mvdev, iotlb);
+	err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	if (err)
 		goto err_mr;
@@ -2582,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 	++mvdev->generation;
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-		if (mlx5_vdpa_create_mr(mvdev, NULL))
+		if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
 	}
 	up_write(&ndev->reslock);
@@ -2618,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 	return mvdev->generation;
 }
 
-static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
-{
-	u64 start = 0ULL, last = 0ULL - 1;
-	struct vhost_iotlb_map *map;
-	int err = 0;
-
-	spin_lock(&mvdev->cvq.iommu_lock);
-	vhost_iotlb_reset(mvdev->cvq.iotlb);
-
-	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
-	     map = vhost_iotlb_itree_next(map, start, last)) {
-		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
-					    map->last, map->addr, map->perm);
-		if (err)
-			goto out;
-	}
-
-out:
-	spin_unlock(&mvdev->cvq.iommu_lock);
-	return err;
-}
-
-static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid)
 {
 	bool change_map;
 	int err;
 
-	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
 	if (err) {
 		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
 		return err;
 	}
 
 	if (change_map)
-		err = mlx5_vdpa_change_map(mvdev, iotlb);
+		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
 
 	return err;
 }
@@ -2665,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
 	int err = -EINVAL;
 
 	down_write(&ndev->reslock);
-	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
-		err = set_map_data(mvdev, iotlb);
-		if (err)
-			goto out;
-	}
-
-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
-		err = set_map_control(mvdev, iotlb);
-
-out:
+	err = set_map_data(mvdev, iotlb, asid);
 	up_write(&ndev->reslock);
 	return err;
 }
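
Background for the asid parameter threaded through these hunks: each virtqueue group (MLX5_VDPA_DATAVQ_GROUP, MLX5_VDPA_CVQ_GROUP) is bound to an address space ID in group2asid[], and a set_map() call for one ASID should only rebuild the state of the groups bound to it. A toy model of that routing (group names from the diff; everything else is illustrative):

    #include <stdio.h>

    enum { DATAVQ_GROUP, CVQ_GROUP, NGROUPS };

    static unsigned int group2asid[NGROUPS]; /* both groups start in ASID 0 */

    static void set_map(unsigned int asid)
    {
            if (group2asid[DATAVQ_GROUP] == asid)
                    puts("rebuild data-path memory key (create_user_mr/create_dma_mr)");
            if (group2asid[CVQ_GROUP] == asid)
                    puts("duplicate IOTLB for the control VQ (dup_iotlb)");
    }

    int main(void)
    {
            set_map(0);                /* hits both groups while they share ASID 0 */
            group2asid[CVQ_GROUP] = 1; /* control VQ moved to its own address space */
            set_map(1);                /* now only the control VQ path runs */
            return 0;
    }
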
@@ -2840,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 	int i;
 
 	down_write(&ndev->reslock);
-	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	ndev->nb_registered = false;
+	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	flush_workqueue(ndev->mvdev.wq);
 	for (i = 0; i < ndev->cur_num_vqs; i++) {
 		mvq = &ndev->vqs[i];
@@ -3019,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
 	else
 		ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
 
-	if (ndev->config_cb.callback)
+	if (ndev->nb_registered && ndev->config_cb.callback)
 		ndev->config_cb.callback(ndev->config_cb.private);
 
 	kfree(wqent);
@@ -3036,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
 		switch (eqe->sub_type) {
 		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
 		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
-			down_read(&ndev->reslock);
-			if (!ndev->nb_registered) {
-				up_read(&ndev->reslock);
-				return NOTIFY_DONE;
-			}
 			wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-			if (!wqent) {
-				up_read(&ndev->reslock);
+			if (!wqent)
 				return NOTIFY_DONE;
-			}
 
 			wqent->mvdev = &ndev->mvdev;
 			INIT_WORK(&wqent->work, update_carrier);
 			queue_work(ndev->mvdev.wq, &wqent->work);
-			up_read(&ndev->reslock);
 			ret = NOTIFY_OK;
 			break;
 		default:
@@ -3185,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		goto err_mpfs;
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-		err = mlx5_vdpa_create_mr(mvdev, NULL);
+		err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
 		if (err)
 			goto err_res;
 	}
@@ -3237,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
 	struct workqueue_struct *wq;
 
 	if (ndev->nb_registered) {
-		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 		ndev->nb_registered = false;
+		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	}
 	wq = mvdev->wq;
 	mvdev->wq = NULL;
...
@@ -855,7 +855,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
 
 	features_device = vdev->config->get_device_features(vdev);
 
-	if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
 			      VDPA_ATTR_PAD))
 		return -EMSGSIZE;
@@ -935,7 +935,6 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
 {
 	struct virtio_net_config config = {};
 	u64 features;
-	u16 max_vqp;
 	u8 status;
 	int err;
@@ -946,15 +945,15 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
 	}
 	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
 
-	max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
-	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
-		return -EMSGSIZE;
-
 	features = vdev->config->get_driver_features(vdev);
 	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
 			      features, VDPA_ATTR_PAD))
 		return -EMSGSIZE;
 
+	err = vdpa_dev_net_mq_config_fill(msg, features, &config);
+	if (err)
+		return err;
+
 	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
 		return -EMSGSIZE;
...
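
vdpa_dev_net_mq_config_fill() appears here only as a call; the point of the change is that config.max_virtqueue_pairs is only defined once VIRTIO_NET_F_MQ has been negotiated, so the MAX_VQP attribute must be emitted conditionally rather than always. A hedged userspace sketch of the gating (feature-bit value is the virtio-net one; types simplified):

    #include <stdint.h>
    #include <stdio.h>

    #define VIRTIO_NET_F_MQ 22 /* from the virtio-net spec */

    static int mq_config_fill(uint64_t features, uint16_t max_vqp)
    {
            if (!(features & (1ULL << VIRTIO_NET_F_MQ)))
                    return 0; /* field is undefined: emit nothing */
            printf("VDPA_ATTR_DEV_NET_CFG_MAX_VQP = %u\n", max_vqp);
            return 0;
    }

    int main(void)
    {
            mq_config_fill(0, 3);                       /* silent */
            mq_config_fill(1ULL << VIRTIO_NET_F_MQ, 3); /* reports 3 */
            return 0;
    }
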
@@ -67,8 +67,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 {
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 
-	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
-			  VDPASIM_QUEUE_MAX, false,
+	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
 			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
 			  (struct vring_avail *)
 			  (uintptr_t)vq->driver_addr,
@@ -690,7 +689,9 @@ static void vdpasim_free(struct vdpa_device *vdpa)
 	}
 	kvfree(vdpasim->buffer);
-	vhost_iotlb_free(vdpasim->iommu);
+	for (i = 0; i < vdpasim->dev_attr.nas; i++)
+		vhost_iotlb_reset(&vdpasim->iommu[i]);
+	kfree(vdpasim->iommu);
 	kfree(vdpasim->vqs);
 	kfree(vdpasim->config);
 }
...
@@ -427,8 +427,10 @@ static int __init vdpasim_blk_init(void)
 	int ret;
 
 	ret = device_register(&vdpasim_blk_mgmtdev);
-	if (ret)
+	if (ret) {
+		put_device(&vdpasim_blk_mgmtdev);
 		return ret;
+	}
 
 	ret = vdpa_mgmtdev_register(&mgmt_dev);
 	if (ret)
...
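
The error path here follows the device_register() contract: the call initializes the embedded kobject before anything can fail, so even a failed registration holds a reference that only put_device() can drop; returning without it leaks the object. The same fix is applied to vdpasim_net_init() below. A stripped-down refcount model (userspace, illustrative):

    #include <stdio.h>

    struct dev { int refs; };

    static void put_device_like(struct dev *d)
    {
            if (--d->refs == 0)
                    puts("release() runs, memory can be freed");
    }

    static int register_like(struct dev *d)
    {
            d->refs = 1; /* the initialize half always happens...          */
            return -22;  /* ...even when the add half fails (-EINVAL here) */
    }

    int main(void)
    {
            static struct dev d;

            if (register_like(&d)) {
                    put_device_like(&d); /* mirrors put_device() in the fix */
                    return 1;
            }
            return 0;
    }
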
@@ -62,6 +62,9 @@ static bool receive_filter(struct vdpasim *vdpasim, size_t len)
 	if (len < ETH_ALEN + hdr_len)
 		return false;
 
+	if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
+	    is_multicast_ether_addr(vdpasim->buffer + hdr_len))
+		return true;
+
 	if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
 		return true;
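
With this hunk the simulator stops dropping every frame whose destination is not its own unicast MAC; broadcast and multicast traffic now passes the filter. The two predicates are cheap: broadcast is all-ones, multicast is the I/G bit in the first destination byte. Userspace equivalents of the helpers (simplified, fixed 6-byte addresses):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_multicast(const uint8_t *a) { return a[0] & 0x01; }

    static bool is_broadcast(const uint8_t *a)
    {
            for (int i = 0; i < 6; i++)
                    if (a[i] != 0xff)
                            return false;
            return true;
    }

    int main(void)
    {
            const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            printf("bcast: broadcast=%d multicast=%d\n",
                   is_broadcast(bcast), is_multicast(bcast)); /* 1 1 */
            printf("mcast: broadcast=%d multicast=%d\n",
                   is_broadcast(mcast), is_multicast(mcast)); /* 0 1 */
            return 0;
    }
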
@@ -305,8 +308,10 @@ static int __init vdpasim_net_init(void)
 	int ret;
 
 	ret = device_register(&vdpasim_net_mgmtdev);
-	if (ret)
+	if (ret) {
+		put_device(&vdpasim_net_mgmtdev);
 		return ret;
+	}
 
 	ret = vdpa_mgmtdev_register(&mgmt_dev);
 	if (ret)
...
@@ -1440,6 +1440,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
 	if (config->config_size > PAGE_SIZE)
 		return false;
 
+	if (config->vq_num > 0xffff)
+		return false;
+
 	if (!device_is_allowed(config->device_id))
 		return false;
...
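
The rationale for the 0xffff cap: virtio queue indexes are 16-bit on the transport, so a userspace-supplied config advertising more queues than that could never be addressed, and validating up front keeps VDUSE from sizing allocations off an absurd value. A tiny sketch of the check with simplified types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dev_config_like { uint32_t vq_num; };

    static bool vq_num_ok(const struct dev_config_like *c)
    {
            return c->vq_num <= UINT16_MAX; /* mirrors the 0xffff bound */
    }

    int main(void)
    {
            struct dev_config_like ok = { .vq_num = 16 };
            struct dev_config_like bad = { .vq_num = 70000 };

            printf("%d %d\n", vq_num_ok(&ok), vq_num_ok(&bad)); /* 1 0 */
            return 0;
    }
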
@@ -647,7 +647,7 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
 	mdev = vp_vdpa_mgtdev->mdev;
 	vp_modern_remove(mdev);
 	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
-	kfree(&vp_vdpa_mgtdev->mgtdev.id_table);
+	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
 	kfree(mdev);
 	kfree(vp_vdpa_mgtdev);
 }
...
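
The one-character bug above is worth spelling out: &vp_vdpa_mgtdev->mgtdev.id_table is the address of the pointer member inside the enclosing allocation, not an address returned by the allocator, so freeing it corrupts the heap; the member's value is what was actually allocated. A userspace sketch of the distinction:

    #include <stdio.h>
    #include <stdlib.h>

    struct mgtdev_like { const int *id_table; };

    int main(void)
    {
            struct mgtdev_like *m = malloc(sizeof(*m));
            int *tbl = malloc(4 * sizeof(*tbl));

            m->id_table = tbl;
            printf("&m->id_table = %p (interior, never free this)\n",
                   (void *)&m->id_table);
            printf(" m->id_table = %p (heap allocation)\n",
                   (void *)m->id_table);

            free((void *)m->id_table); /* correct: free what was allocated */
            free(m);
            return 0;
    }
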
@@ -65,6 +65,10 @@ static DEFINE_IDA(vhost_vdpa_ida);
 
 static dev_t vhost_vdpa_major;
 
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+				   struct vhost_iotlb *iotlb, u64 start,
+				   u64 last, u32 asid);
+
 static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
 {
 	struct vhost_vdpa_as *as = container_of(iotlb, struct
@@ -135,7 +139,7 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
 		return -EINVAL;
 
 	hlist_del(&as->hash_link);
-	vhost_iotlb_reset(&as->iotlb);
+	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
 	kfree(as);
 
 	return 0;
@@ -683,10 +687,20 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	mutex_unlock(&d->mutex);
 	return r;
 }
 
+static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
+				     struct vhost_iotlb_map *map, u32 asid)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+
+	if (ops->dma_map) {
+		ops->dma_unmap(vdpa, asid, map->start, map->size);
+	} else if (ops->set_map == NULL) {
+		iommu_unmap(v->domain, map->start, map->size);
+	}
+}
+
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
-				struct vhost_iotlb *iotlb,
-				u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+				u64 start, u64 last, u32 asid)
 {
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb_map *map;
@@ -703,13 +717,13 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
 			unpin_user_page(page);
 		}
 		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+		vhost_vdpa_general_unmap(v, map, asid);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }
 
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
-				struct vhost_iotlb *iotlb,
-				u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+				u64 start, u64 last, u32 asid)
 {
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
@@ -718,20 +732,21 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
 		map_file = (struct vdpa_map_file *)map->opaque;
 		fput(map_file->file);
 		kfree(map_file);
+		vhost_vdpa_general_unmap(v, map, asid);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }
 
 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
-				   struct vhost_iotlb *iotlb,
-				   u64 start, u64 last)
+				   struct vhost_iotlb *iotlb, u64 start,
+				   u64 last, u32 asid)
 {
 	struct vdpa_device *vdpa = v->vdpa;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_unmap(v, iotlb, start, last);
+		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
 
-	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
+	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
 }
 
 static int perm_to_iommu_flags(u32 perm)
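
The new vhost_vdpa_general_unmap() is the heart of the "unmap a large memory" fix: the backend unmap now happens per IOTLB entry as each entry is freed, instead of once over the whole requested range. A region built from several map entries therefore gets every entry torn down with its own start and size. A schematic walk (userspace, illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct map_like { uint64_t start, size; };

    static void general_unmap(const struct map_like *m)
    {
            printf("unmap [0x%llx, +0x%llx)\n",
                   (unsigned long long)m->start, (unsigned long long)m->size);
    }

    int main(void)
    {
            /* one large guest region backed by three separate map entries */
            const struct map_like maps[] = {
                    { 0x00000, 0x10000 },
                    { 0x10000, 0x20000 },
                    { 0x30000, 0x10000 },
            };

            for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
                    general_unmap(&maps[i]); /* one call per entry, not per range */
            return 0;
    }
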
@@ -798,17 +813,12 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
 	const struct vdpa_config_ops *ops = vdpa->config;
 	u32 asid = iotlb_to_asid(iotlb);
 
-	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
 
-	if (ops->dma_map) {
-		ops->dma_unmap(vdpa, asid, iova, size);
-	} else if (ops->set_map) {
+	if (ops->set_map) {
 		if (!v->in_batch)
 			ops->set_map(vdpa, asid, iotlb);
-	} else {
-		iommu_unmap(v->domain, iova, size);
 	}
-
 	/* If we are in the middle of batch processing, delay the free
 	 * of AS until BATCH_END.
 	 */
@@ -1162,14 +1172,14 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
 	struct vhost_vdpa_as *as;
 	u32 asid;
 
-	vhost_dev_cleanup(&v->vdev);
-	kfree(v->vdev.vqs);
 	for (asid = 0; asid < v->vdpa->nas; asid++) {
 		as = asid_to_as(v, asid);
 		if (as)
 			vhost_vdpa_remove_as(v, asid);
 	}
+
+	vhost_dev_cleanup(&v->vdev);
+	kfree(v->vdev.vqs);
 }
 
 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
...
@@ -2053,7 +2053,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 	struct vhost_dev *dev = vq->dev;
 	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
 	struct iovec *_iov;
-	u64 s = 0;
+	u64 s = 0, last = addr + len - 1;
 	int ret = 0;
 
 	while ((u64)len > s) {
@@ -2063,7 +2063,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 			break;
 		}
 
-		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
+		map = vhost_iotlb_itree_first(umem, addr, last);
 		if (map == NULL || map->start > addr) {
 			if (umem != dev->iotlb) {
 				ret = -EFAULT;
...
@@ -1102,7 +1102,7 @@ static int iotlb_translate(const struct vringh *vrh,
 	struct vhost_iotlb_map *map;
 	struct vhost_iotlb *iotlb = vrh->iotlb;
 	int ret = 0;
-	u64 s = 0;
+	u64 s = 0, last = addr + len - 1;
 
 	spin_lock(vrh->iotlb_lock);
 
@@ -1114,8 +1114,7 @@ static int iotlb_translate(const struct vringh *vrh,
 			break;
 		}
 
-		map = vhost_iotlb_itree_first(iotlb, addr,
-					      addr + len - 1);
+		map = vhost_iotlb_itree_first(iotlb, addr, last);
 		if (!map || map->start > addr) {
 			ret = -EINVAL;
 			break;
...
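
translate_desc() and iotlb_translate() share the same arithmetic bug: inside the copy loop addr advances while len keeps the total, so recomputing the interval end as addr + len - 1 on every iteration drifts past the real end of the requested range. Caching last once before the loop pins the lookup interval. A worked example of the drift:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t addr = 0x1000, len = 0x3000;
            const uint64_t last = addr + len - 1; /* 0x3fff, the true end */

            addr += 0x1000;                       /* first chunk consumed */
            printf("recomputed end: 0x%llx (overshoots)\n",
                   (unsigned long long)(addr + len - 1)); /* 0x4fff */
            printf("cached last:    0x%llx\n", (unsigned long long)last);
            return 0;
    }
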
@@ -959,7 +959,14 @@ static int __init vhost_vsock_init(void)
 				  VSOCK_TRANSPORT_F_H2G);
 	if (ret < 0)
 		return ret;
-	return misc_register(&vhost_vsock_misc);
+
+	ret = misc_register(&vhost_vsock_misc);
+	if (ret) {
+		vsock_core_unregister(&vhost_transport.transport);
+		return ret;
+	}
+
+	return 0;
 };
 
 static void __exit vhost_vsock_exit(void)
...
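
The vsock change adds the missing unwind: if misc_register() fails after vsock_core_register() succeeded, the transport would otherwise remain registered for a module whose init failed. The general rule is that each init step's failure path must undo every earlier step. Generic shape of the pattern (userspace stubs, illustrative names):

    #include <stdio.h>

    static int register_core(void)    { puts("core registered");   return 0; }
    static void unregister_core(void) { puts("core unregistered"); }
    static int register_misc(void)    { puts("misc failed");       return -1; }

    static int example_init(void)
    {
            int ret = register_core();
            if (ret)
                    return ret;

            ret = register_misc();
            if (ret) {
                    unregister_core(); /* mirrors vsock_core_unregister() */
                    return ret;
            }
            return 0;
    }

    int main(void) { return example_init() ? 1 : 0; }
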
@@ -15,7 +15,7 @@ static ssize_t device_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "0x%04x\n", dev->id.device);
+	return sysfs_emit(buf, "0x%04x\n", dev->id.device);
 }
 static DEVICE_ATTR_RO(device);
@@ -23,7 +23,7 @@ static ssize_t vendor_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "0x%04x\n", dev->id.vendor);
+	return sysfs_emit(buf, "0x%04x\n", dev->id.vendor);
 }
 static DEVICE_ATTR_RO(vendor);
@@ -31,7 +31,7 @@ static ssize_t status_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
+	return sysfs_emit(buf, "0x%08x\n", dev->config->get_status(dev));
 }
 static DEVICE_ATTR_RO(status);
@@ -39,7 +39,7 @@ static ssize_t modalias_show(struct device *_d,
 			     struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "virtio:d%08Xv%08X\n",
+	return sysfs_emit(buf, "virtio:d%08Xv%08X\n",
 		       dev->id.device, dev->id.vendor);
 }
 static DEVICE_ATTR_RO(modalias);
@@ -54,9 +54,9 @@ static ssize_t features_show(struct device *_d,
 	/* We actually represent this as a bitstring, as it could be
 	 * arbitrary length in future. */
 	for (i = 0; i < sizeof(dev->features)*8; i++)
-		len += sprintf(buf+len, "%c",
-			       __virtio_test_bit(dev, i) ? '1' : '0');
-	len += sprintf(buf+len, "\n");
+		len += sysfs_emit_at(buf, len, "%c",
+				     __virtio_test_bit(dev, i) ? '1' : '0');
+	len += sysfs_emit_at(buf, len, "\n");
 	return len;
 }
 static DEVICE_ATTR_RO(features);
...
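
sysfs_emit() and sysfs_emit_at() are the recommended way to format sysfs show() output: they know the buffer sysfs passes in is exactly one page and refuse to write past it (or from a bogus offset), where sprintf() simply trusts the caller. A userspace approximation of the bound the _at variant enforces (not the kernel implementation):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* bounded formatter in the spirit of sysfs_emit_at() */
    static int emit_at(char *buf, int at, const char *s)
    {
            if (at < 0 || at >= PAGE_SIZE)
                    return 0; /* invalid offset: emit nothing */
            return snprintf(buf + at, PAGE_SIZE - at, "%s", s);
    }

    int main(void)
    {
            char page[PAGE_SIZE];
            int len = 0;

            len += emit_at(page, len, "0x0001");
            len += emit_at(page, len, "\n");
            fputs(page, stdout);
            return 0;
    }
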
@@ -303,14 +303,14 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	int err;
 
 	if (index >= vp_modern_get_num_queues(mdev))
-		return ERR_PTR(-ENOENT);
+		return ERR_PTR(-EINVAL);
 
 	/* Check if queue is either not available or already active. */
 	num = vp_modern_get_queue_size(mdev, index);
 	if (!num || vp_modern_get_queue_enable(mdev, index))
 		return ERR_PTR(-ENOENT);
 
-	if (num & (num - 1)) {
+	if (!is_power_of_2(num)) {
 		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
 		return ERR_PTR(-EINVAL);
 	}
...
@@ -1052,7 +1052,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
 	dma_addr_t dma_addr;
 
 	/* We assume num is a power of 2. */
-	if (num & (num - 1)) {
+	if (!is_power_of_2(num)) {
 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
 		return -EINVAL;
 	}
...
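
Both is_power_of_2() conversions preserve behavior in context, but the helper is not a drop-in for the open-coded test everywhere: (num & (num - 1)) == 0 accepts num == 0, while is_power_of_2() rejects it. The callers above are safe because a zero queue size is rejected before this check. A small demonstration:

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the definition in include/linux/log2.h */
    static bool is_power_of_2(unsigned long n)
    {
            return n != 0 && ((n & (n - 1)) == 0);
    }

    int main(void)
    {
            unsigned int zero = 0;

            printf("open-coded, n=0: %d (accepts)\n", (zero & (zero - 1)) == 0);
            printf("is_power_of_2(0): %d (rejects)\n", is_power_of_2(0));
            printf("is_power_of_2(256): %d\n", is_power_of_2(256));
            return 0;
    }
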
@@ -53,11 +53,9 @@ enum vdpa_attr {
 	VDPA_ATTR_DEV_VENDOR_ATTR_NAME,		/* string */
 	VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,	/* u64 */
 
+	/* virtio features that are provisioned to the vDPA device */
 	VDPA_ATTR_DEV_FEATURES,			/* u64 */
 
-	/* virtio features that are supported by the vDPA device */
-	VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES,	/* u64 */
-
 	/* new attributes must be added above here */
 	VDPA_ATTR_MAX,
 };
...
@@ -140,25 +140,19 @@ static inline void busy_wait(void)
 #define smp_wmb() smp_release()
 #endif
 
-#ifdef __alpha__
-#define smp_read_barrier_depends() smp_acquire()
-#else
-#define smp_read_barrier_depends() do {} while(0)
-#endif
-
 static __always_inline
 void __read_once_size(const volatile void *p, void *res, int size)
 {
-	switch (size) {                                                 \
-	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;           \
-	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;         \
-	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;             \
-	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break; \
-	default:                                                        \
-		barrier();                                              \
-		__builtin_memcpy((void *)res, (const void *)p, size);   \
-		barrier();                                              \
-	}                                                               \
+	switch (size) {
+	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
+	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
+	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
+	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
 }
 
 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
@@ -175,13 +169,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 	}
 }
 
+#ifdef __alpha__
 #define READ_ONCE(x) \
 ({ \
 	union { typeof(x) __val; char __c[1]; } __u; \
 	__read_once_size(&(x), __u.__c, sizeof(x)); \
-	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
+	smp_mb(); /* Enforce dependency ordering from x */ \
+	__u.__val; \
+})
+#else
+#define READ_ONCE(x) \
+({ \
+	union { typeof(x) __val; char __c[1]; } __u; \
+	__read_once_size(&(x), __u.__c, sizeof(x)); \
 	__u.__val; \
 })
+#endif
 
 #define WRITE_ONCE(x, val) \
 ({ \
...
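
This mirrors how the kernel proper retired smp_read_barrier_depends(): only Alpha fails to order a load through a just-loaded pointer, so the barrier (a full smp_mb() here, which is what the tools-side headers can portably express) lives in an Alpha-only READ_ONCE, and every other architecture gets the cheap variant. A simplified userspace READ_ONCE showing the read-exactly-once half of the contract:

    #include <stdio.h>

    /* simplified: a volatile access forces a real load at every use */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static int flag = 1;

    int main(void)
    {
            int spins = 0;

            while (READ_ONCE(flag)) { /* compiler must reload flag each pass */
                    spins++;
                    flag = 0;         /* the loop sees the store and exits */
            }
            printf("spins: %d\n", spins); /* 1 */
            return 0;
    }
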
@@ -75,7 +75,7 @@ static int wait_order(int ctl_fd)
 		if (ret)
 			break;
-	};
+	}
 
 	return ret;
...
@@ -173,7 +173,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
 	long started = 0, completed = 0, next_reset = reset_n;
 	long completed_before, started_before;
 	int r, test = 1;
-	unsigned len;
+	unsigned int len;
 	long long spurious = 0;
 	const bool random_batch = batch == RANDOM_BATCH;
...
@@ -308,6 +308,7 @@ static int parallel_test(u64 features,
 		gvdev.vdev.features = features;
 		INIT_LIST_HEAD(&gvdev.vdev.vqs);
+		spin_lock_init(&gvdev.vdev.vqs_list_lock);
 		gvdev.to_host_fd = to_host[1];
 		gvdev.notifies = 0;
@@ -455,6 +456,7 @@ int main(int argc, char *argv[])
 		getrange = getrange_iov;
 	vdev.features = 0;
 	INIT_LIST_HEAD(&vdev.vqs);
+	spin_lock_init(&vdev.vqs_list_lock);
 
 	while (argv[1]) {
 		if (strcmp(argv[1], "--indirect") == 0)
...