Commit b2ffa407 authored by Mike Christie's avatar Mike Christie Committed by Michael S. Tsirkin

vhost: rename vhost_work_dev_flush

This patch renames vhost_work_dev_flush to just vhost_dev_flush to
reflect that it flushes everything on the device and that drivers
don't know/care that polls are based on vhost_works. Drivers just
flush the entire device; polls, works for vhost-scsi management
TMFs, IO net virtqueues, etc. are all flushed.
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-Id: <20220517180850.198915-9-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent f3a1aad9
...@@ -1376,7 +1376,7 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock, ...@@ -1376,7 +1376,7 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
static void vhost_net_flush(struct vhost_net *n) static void vhost_net_flush(struct vhost_net *n)
{ {
vhost_work_dev_flush(&n->dev); vhost_dev_flush(&n->dev);
if (n->vqs[VHOST_NET_VQ_TX].ubufs) { if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true; n->tx_flush = true;
...@@ -1565,7 +1565,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -1565,7 +1565,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
} }
if (oldsock) { if (oldsock) {
vhost_work_dev_flush(&n->dev); vhost_dev_flush(&n->dev);
sockfd_put(oldsock); sockfd_put(oldsock);
} }
......
...@@ -1436,7 +1436,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) ...@@ -1436,7 +1436,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight); kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */ /* Flush both the vhost poll and vhost work */
vhost_work_dev_flush(&vs->dev); vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */ /* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
......
...@@ -146,7 +146,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep) ...@@ -146,7 +146,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
static void vhost_test_flush(struct vhost_test *n) static void vhost_test_flush(struct vhost_test *n)
{ {
vhost_work_dev_flush(&n->dev); vhost_dev_flush(&n->dev);
} }
static int vhost_test_release(struct inode *inode, struct file *f) static int vhost_test_release(struct inode *inode, struct file *f)
......
...@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll) ...@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
} }
EXPORT_SYMBOL_GPL(vhost_poll_stop); EXPORT_SYMBOL_GPL(vhost_poll_stop);
void vhost_work_dev_flush(struct vhost_dev *dev) void vhost_dev_flush(struct vhost_dev *dev)
{ {
struct vhost_flush_struct flush; struct vhost_flush_struct flush;
...@@ -243,7 +243,7 @@ void vhost_work_dev_flush(struct vhost_dev *dev) ...@@ -243,7 +243,7 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
wait_for_completion(&flush.wait_event); wait_for_completion(&flush.wait_event);
} }
} }
EXPORT_SYMBOL_GPL(vhost_work_dev_flush); EXPORT_SYMBOL_GPL(vhost_dev_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{ {
...@@ -530,7 +530,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev) ...@@ -530,7 +530,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
attach.owner = current; attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work); vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work); vhost_work_queue(dev, &attach.work);
vhost_work_dev_flush(dev); vhost_dev_flush(dev);
return attach.ret; return attach.ret;
} }
...@@ -657,7 +657,7 @@ void vhost_dev_stop(struct vhost_dev *dev) ...@@ -657,7 +657,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
vhost_poll_stop(&dev->vqs[i]->poll); vhost_poll_stop(&dev->vqs[i]->poll);
} }
vhost_work_dev_flush(dev); vhost_dev_flush(dev);
} }
EXPORT_SYMBOL_GPL(vhost_dev_stop); EXPORT_SYMBOL_GPL(vhost_dev_stop);
...@@ -1724,7 +1724,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg ...@@ -1724,7 +1724,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick) if (pollstop && vq->handle_kick)
vhost_work_dev_flush(vq->poll.dev); vhost_dev_flush(vq->poll.dev);
return r; return r;
} }
EXPORT_SYMBOL_GPL(vhost_vring_ioctl); EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
......
...@@ -45,7 +45,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, ...@@ -45,7 +45,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
int vhost_poll_start(struct vhost_poll *poll, struct file *file); int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll); void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll); void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_dev_flush(struct vhost_dev *dev); void vhost_dev_flush(struct vhost_dev *dev);
struct vhost_log { struct vhost_log {
u64 addr; u64 addr;
......
...@@ -705,7 +705,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) ...@@ -705,7 +705,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
static void vhost_vsock_flush(struct vhost_vsock *vsock) static void vhost_vsock_flush(struct vhost_vsock *vsock)
{ {
vhost_work_dev_flush(&vsock->dev); vhost_dev_flush(&vsock->dev);
} }
static void vhost_vsock_reset_orphans(struct sock *sk) static void vhost_vsock_reset_orphans(struct sock *sk)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment