Commit 3ab2e420 authored by Asias He, committed by Michael S. Tsirkin

vhost: Allow device specific fields per vq

This is useful for any device that wants device-specific fields per vq.
For example, tcm_vhost wants a per-vq field to track requests that are
in flight on the vq. On top of this we can also add patches to move
things like ubufs from vhost.h out to net.c.
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent bc756235
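
The pattern this commit applies to both net and scsi is to embed the generic struct vhost_virtqueue inside a device-specific wrapper, hand the vhost core an array of pointers to the embedded vqs, and recover the wrapper with container_of() whenever only the generic vq is at hand. Below is a minimal userspace sketch of that pattern; the demo_* names and struct bodies are illustrative stand-ins, not the kernel API:

/* Minimal sketch of the embed-and-container_of pattern introduced by
 * this commit.  demo_net_virtqueue stands in for the new
 * vhost_net_virtqueue/vhost_scsi_virtqueue wrappers. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vhost_virtqueue {	/* generic, core-owned state (placeholder body) */
	int last_used_idx;
};

struct demo_net_virtqueue {	/* device-specific wrapper */
	struct vhost_virtqueue vq;
	int tx_in_flight;	/* per-vq field only this device needs */
};

/* Core code sees only the generic vq; device code recovers its wrapper. */
static void demo_handle_kick(struct vhost_virtqueue *vq)
{
	struct demo_net_virtqueue *nvq =
		container_of(vq, struct demo_net_virtqueue, vq);
	nvq->tx_in_flight++;
}

int main(void)
{
	struct demo_net_virtqueue nvqs[2] = { 0 };
	struct vhost_virtqueue *vqs[2] = { &nvqs[0].vq, &nvqs[1].vq };

	demo_handle_kick(vqs[0]);	/* core hands back only the generic vq */
	printf("in flight on vq0: %d\n", nvqs[0].tx_in_flight);
	return 0;
}

With this shape, tcm_vhost can later grow an inflight-tracking field in its wrapper without the vhost core or other devices ever seeing it.
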
@@ -64,9 +64,13 @@ enum {
 	VHOST_NET_VQ_MAX = 2,
 };
 
+struct vhost_net_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_net {
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
 	/* Number of TX recently submitted.
 	 * Protected by tx vq lock. */
@@ -198,7 +202,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_TX].vq;
 	unsigned out, in, s;
 	int head;
 	struct msghdr msg = {
@@ -417,7 +421,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
  * read-size critical section for our kind of RCU. */
 static void handle_rx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_RX].vq;
 	unsigned uninitialized_var(in), log;
 	struct vhost_log *vq_log;
 	struct msghdr msg = {
@@ -559,17 +563,26 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
+	struct vhost_virtqueue **vqs;
 	int r;
 
 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
 
 	dev = &n->dev;
-	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
-	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
+	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
+		kfree(vqs);
 		return r;
 	}
@@ -584,7 +597,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 static void vhost_net_disable_vq(struct vhost_net *n,
 				 struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	if (!vq->private_data)
 		return;
 	vhost_poll_stop(poll);
@@ -593,7 +608,9 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 static int vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
 
 	sock = rcu_dereference_protected(vq->private_data,
@@ -621,30 +638,30 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
 			   struct socket **rx_sock)
 {
-	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
-	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
+	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
 }
 
 static void vhost_net_flush_vq(struct vhost_net *n, int index)
 {
 	vhost_poll_flush(n->poll + index);
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].vq.poll);
 }
 
 static void vhost_net_flush(struct vhost_net *n)
 {
 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
-	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+	if (n->vqs[VHOST_NET_VQ_TX].vq.ubufs) {
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = true;
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		/* Wait for all lower device DMAs done. */
-		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].vq.ubufs);
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
-		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		kref_init(&n->vqs[VHOST_NET_VQ_TX].vq.ubufs->kref);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 	}
 }
@@ -665,6 +682,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }
@@ -750,7 +768,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		r = -ENOBUFS;
 		goto err;
 	}
-	vq = n->vqs + index;
+	vq = &n->vqs[index].vq;
 	mutex_lock(&vq->mutex);
 
 	/* Verify that ring has been setup correctly. */
@@ -870,10 +888,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 	n->dev.acked_features = features;
 	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
-		mutex_lock(&n->vqs[i].mutex);
-		n->vqs[i].vhost_hlen = vhost_hlen;
-		n->vqs[i].sock_hlen = sock_hlen;
-		mutex_unlock(&n->vqs[i].mutex);
+		mutex_lock(&n->vqs[i].vq.mutex);
+		n->vqs[i].vq.vhost_hlen = vhost_hlen;
+		n->vqs[i].vq.sock_hlen = sock_hlen;
+		mutex_unlock(&n->vqs[i].vq.mutex);
 	}
 	vhost_net_flush(n);
 	mutex_unlock(&n->dev.mutex);
...
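
With vhost_dev_init() now taking struct vhost_virtqueue ** (see the vhost.c and vhost.h hunks further below), the pointer array becomes a device-owned allocation: open allocates it and points it at the embedded vqs, the core merely stores it, and release frees it via the kfree(n->dev.vqs) added above. A sketch of that lifecycle, again with illustrative demo_* names and placeholder struct bodies rather than the kernel definitions:

/* Sketch of the alloc/init/free lifecycle that the vhost_net_open()
 * and vhost_net_release() hunks above follow. */
#include <stdlib.h>

struct demo_virtqueue { int placeholder; };	/* stands in for vhost_virtqueue */
struct demo_dev { struct demo_virtqueue **vqs; int nvqs; };
struct demo_net_virtqueue { struct demo_virtqueue vq; };

#define DEMO_NVQS 2

struct demo_net {
	struct demo_dev dev;
	struct demo_net_virtqueue vqs[DEMO_NVQS];
};

/* Core side: just remembers the caller-owned pointer array. */
static long demo_dev_init(struct demo_dev *dev,
			  struct demo_virtqueue **vqs, int nvqs)
{
	dev->vqs = vqs;
	dev->nvqs = nvqs;
	return 0;
}

/* Device open: allocate the pointer array, aim it at the embedded vqs. */
static struct demo_net *demo_open(void)
{
	struct demo_net *n = calloc(1, sizeof(*n));
	struct demo_virtqueue **vqs;
	int i;

	if (!n)
		return NULL;
	vqs = malloc(DEMO_NVQS * sizeof(*vqs));
	if (!vqs) {
		free(n);
		return NULL;
	}
	for (i = 0; i < DEMO_NVQS; i++)
		vqs[i] = &n->vqs[i].vq;
	demo_dev_init(&n->dev, vqs, DEMO_NVQS);
	return n;
}

/* Device release: the array was device-allocated, so the device frees it,
 * mirroring the kfree(n->dev.vqs) added in vhost_net_release() above. */
static void demo_release(struct demo_net *n)
{
	free(n->dev.vqs);
	free(n);
}

int main(void)
{
	struct demo_net *n = demo_open();
	if (n)
		demo_release(n);
	return 0;
}
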
@@ -74,13 +74,17 @@ enum {
 #define VHOST_SCSI_MAX_VQ	128
 #define VHOST_SCSI_MAX_EVENT	128
 
+struct vhost_scsi_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
 	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
@@ -366,7 +370,7 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 	u32 event, u32 reason)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct tcm_vhost_evt *evt;
 
 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
@@ -409,7 +413,7 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
 	struct tcm_vhost_evt *evt)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct virtio_scsi_event *event = &evt->event;
 	struct virtio_scsi_event __user *eventp;
 	unsigned out, in;
@@ -460,7 +464,7 @@ static void tcm_vhost_evt_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_event_work);
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct tcm_vhost_evt *evt;
 	struct llist_node *llnode;
@@ -511,8 +515,10 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 					v_rsp.sense_len);
 		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
 		if (likely(ret == 0)) {
+			struct vhost_scsi_virtqueue *q;
 			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
-			vq = tv_cmd->tvc_vq - vs->vqs;
+			q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+			vq = q - vs->vqs;
 			__set_bit(vq, signal);
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -523,7 +529,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 	vq = -1;
 	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 		< VHOST_SCSI_MAX_VQ)
-		vhost_signal(&vs->dev, &vs->vqs[vq]);
+		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
@@ -938,7 +944,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
 {
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
+	vhost_poll_flush(&vs->vqs[index].vq.poll);
 }
 
 static void vhost_scsi_flush(struct vhost_scsi *vs)
@@ -975,7 +981,7 @@ static int vhost_scsi_set_endpoint(
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
-		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
 			ret = -EFAULT;
 			goto out;
 		}
@@ -1022,7 +1028,7 @@ static int vhost_scsi_set_endpoint(
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
-			vq = &vs->vqs[i];
+			vq = &vs->vqs[i].vq;
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, vs_tpg);
@@ -1063,7 +1069,7 @@ static int vhost_scsi_clear_endpoint(
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
-		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
 			ret = -EFAULT;
 			goto err_dev;
 		}
@@ -1103,7 +1109,7 @@ static int vhost_scsi_clear_endpoint(
 	}
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
-			vq = &vs->vqs[i];
+			vq = &vs->vqs[i].vq;
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, NULL);
@@ -1151,24 +1157,36 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
+	struct vhost_virtqueue **vqs;
 	int r, i;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
+	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(s);
+		return -ENOMEM;
+	}
+
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
 	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
 
 	s->vs_events_nr = 0;
 	s->vs_events_missed = false;
 
-	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
-	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
-	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
-		s->vqs[i].handle_kick = vhost_scsi_handle_kick;
-	r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
+	vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
+	vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
+	s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+	s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+		vqs[i] = &s->vqs[i].vq;
+		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+	}
+	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
 	if (r < 0) {
+		kfree(vqs);
 		kfree(s);
 		return r;
 	}
@@ -1190,6 +1208,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_dev_cleanup(&s->dev, false);
 	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
 	vhost_scsi_flush(s);
+	kfree(s->dev.vqs);
 	kfree(s);
 	return 0;
 }
@@ -1205,7 +1224,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 	u32 events_missed;
 	u64 features;
 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 
 	switch (ioctl) {
 	case VHOST_SCSI_SET_ENDPOINT:
@@ -1333,7 +1352,7 @@ static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 	else
 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
 
-	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	mutex_lock(&vq->mutex);
 	tcm_vhost_send_evt(vs, tpg, lun,
 			   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
...
@@ -269,27 +269,27 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 	bool zcopy;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+		dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
 					       UIO_MAXIOV, GFP_KERNEL);
-		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+		dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
 					  GFP_KERNEL);
-		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+		dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
 					    UIO_MAXIOV, GFP_KERNEL);
 		zcopy = vhost_zcopy_mask & (0x1 << i);
 		if (zcopy)
-			dev->vqs[i].ubuf_info =
-				kmalloc(sizeof *dev->vqs[i].ubuf_info *
+			dev->vqs[i]->ubuf_info =
+				kmalloc(sizeof *dev->vqs[i]->ubuf_info *
 					UIO_MAXIOV, GFP_KERNEL);
-		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
-		    !dev->vqs[i].heads ||
-		    (zcopy && !dev->vqs[i].ubuf_info))
+		if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
+		    !dev->vqs[i]->heads ||
+		    (zcopy && !dev->vqs[i]->ubuf_info))
 			goto err_nomem;
 	}
 	return 0;
 
 err_nomem:
 	for (; i >= 0; --i)
-		vhost_vq_free_iovecs(&dev->vqs[i]);
+		vhost_vq_free_iovecs(dev->vqs[i]);
 	return -ENOMEM;
 }
@@ -298,11 +298,11 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i)
-		vhost_vq_free_iovecs(&dev->vqs[i]);
+		vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
 long vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue *vqs, int nvqs)
+		    struct vhost_virtqueue **vqs, int nvqs)
 {
 	int i;
@@ -318,16 +318,16 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i].log = NULL;
-		dev->vqs[i].indirect = NULL;
-		dev->vqs[i].heads = NULL;
-		dev->vqs[i].ubuf_info = NULL;
-		dev->vqs[i].dev = dev;
-		mutex_init(&dev->vqs[i].mutex);
-		vhost_vq_reset(dev, dev->vqs + i);
-		if (dev->vqs[i].handle_kick)
-			vhost_poll_init(&dev->vqs[i].poll,
-					dev->vqs[i].handle_kick, POLLIN, dev);
+		dev->vqs[i]->log = NULL;
+		dev->vqs[i]->indirect = NULL;
+		dev->vqs[i]->heads = NULL;
+		dev->vqs[i]->ubuf_info = NULL;
+		dev->vqs[i]->dev = dev;
+		mutex_init(&dev->vqs[i]->mutex);
+		vhost_vq_reset(dev, dev->vqs[i]);
+		if (dev->vqs[i]->handle_kick)
+			vhost_poll_init(&dev->vqs[i]->poll,
+					dev->vqs[i]->handle_kick, POLLIN, dev);
 	}
 
 	return 0;
@@ -430,9 +430,9 @@ void vhost_dev_stop(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
-			vhost_poll_stop(&dev->vqs[i].poll);
-			vhost_poll_flush(&dev->vqs[i].poll);
+		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+			vhost_poll_stop(&dev->vqs[i]->poll);
+			vhost_poll_flush(&dev->vqs[i]->poll);
 		}
 	}
 }
@@ -443,17 +443,17 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].error_ctx)
-			eventfd_ctx_put(dev->vqs[i].error_ctx);
-		if (dev->vqs[i].error)
-			fput(dev->vqs[i].error);
-		if (dev->vqs[i].kick)
-			fput(dev->vqs[i].kick);
-		if (dev->vqs[i].call_ctx)
-			eventfd_ctx_put(dev->vqs[i].call_ctx);
-		if (dev->vqs[i].call)
-			fput(dev->vqs[i].call);
-		vhost_vq_reset(dev, dev->vqs + i);
+		if (dev->vqs[i]->error_ctx)
+			eventfd_ctx_put(dev->vqs[i]->error_ctx);
+		if (dev->vqs[i]->error)
+			fput(dev->vqs[i]->error);
+		if (dev->vqs[i]->kick)
+			fput(dev->vqs[i]->kick);
+		if (dev->vqs[i]->call_ctx)
+			eventfd_ctx_put(dev->vqs[i]->call_ctx);
+		if (dev->vqs[i]->call)
+			fput(dev->vqs[i]->call);
+		vhost_vq_reset(dev, dev->vqs[i]);
 	}
 	vhost_dev_free_iovecs(dev);
 	if (dev->log_ctx)
@@ -524,14 +524,14 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 	for (i = 0; i < d->nvqs; ++i) {
 		int ok;
-		mutex_lock(&d->vqs[i].mutex);
+		mutex_lock(&d->vqs[i]->mutex);
 		/* If ring is inactive, will check when it's enabled. */
-		if (d->vqs[i].private_data)
-			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
+		if (d->vqs[i]->private_data)
+			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
 						 log_all);
 		else
 			ok = 1;
-		mutex_unlock(&d->vqs[i].mutex);
+		mutex_unlock(&d->vqs[i]->mutex);
 		if (!ok)
 			return 0;
 	}
@@ -641,7 +641,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 	if (idx >= d->nvqs)
 		return -ENOBUFS;
 
-	vq = d->vqs + idx;
+	vq = d->vqs[idx];
 	mutex_lock(&vq->mutex);
@@ -852,7 +852,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 	for (i = 0; i < d->nvqs; ++i) {
 		struct vhost_virtqueue *vq;
 		void __user *base = (void __user *)(unsigned long)p;
-		vq = d->vqs + i;
+		vq = d->vqs[i];
 		mutex_lock(&vq->mutex);
 		/* If ring is inactive, will check when it's enabled. */
 		if (vq->private_data && !vq_log_access_ok(d, vq, base))
@@ -879,9 +879,9 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 	} else
 		filep = eventfp;
 	for (i = 0; i < d->nvqs; ++i) {
-		mutex_lock(&d->vqs[i].mutex);
-		d->vqs[i].log_ctx = d->log_ctx;
-		mutex_unlock(&d->vqs[i].mutex);
+		mutex_lock(&d->vqs[i]->mutex);
+		d->vqs[i]->log_ctx = d->log_ctx;
+		mutex_unlock(&d->vqs[i]->mutex);
 	}
 	if (ctx)
 		eventfd_ctx_put(ctx);
...
@@ -150,7 +150,7 @@ struct vhost_dev {
 	struct mm_struct *mm;
 	struct mutex mutex;
 	unsigned acked_features;
-	struct vhost_virtqueue *vqs;
+	struct vhost_virtqueue **vqs;
 	int nvqs;
 	struct file *log_file;
 	struct eventfd_ctx *log_ctx;
@@ -159,7 +159,7 @@ struct vhost_dev {
 	struct task_struct *worker;
 };
 
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
 long vhost_dev_check_owner(struct vhost_dev *);
 long vhost_dev_reset_owner(struct vhost_dev *);
 void vhost_dev_cleanup(struct vhost_dev *, bool locked);