Commit 138fd251 authored by Tiwei Bie, committed by David S. Miller

virtio_ring: add _split suffix for split ring functions

Add a _split suffix to the split-ring-specific functions. This is in
preparation for introducing packed ring support. There is no
functional change.
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 89a9157e
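The pattern in the diff below is mechanical: each split-ring helper gains a _split suffix, and the existing exported entry point is kept as a thin wrapper that forwards to it, so that a future _packed variant can later be dispatched from the same entry point. A minimal standalone sketch of that wrapper pattern follows (toy struct and main(), not kernel code; only the names more_used()/more_used_split() mirror the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for struct vring_virtqueue; illustration only. */
    struct toy_vq {
            unsigned short last_used_idx;
            unsigned short used_idx;   /* stand-in for vq->vring.used->idx */
    };

    /* Ring-layout-specific check, named after more_used_split() in the patch. */
    static bool more_used_split(const struct toy_vq *vq)
    {
            return vq->last_used_idx != vq->used_idx;
    }

    /* The generic entry point stays put and simply forwards to the split
     * variant; a packed variant could later be selected here without
     * touching any callers. */
    static bool more_used(const struct toy_vq *vq)
    {
            return more_used_split(vq);
    }

    int main(void)
    {
            struct toy_vq vq = { .last_used_idx = 0, .used_idx = 1 };

            printf("more_used: %d\n", more_used(&vq));   /* prints 1 */
            return 0;
    }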
@@ -200,8 +200,8 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
                               cpu_addr, size, direction);
 }
 
-static void vring_unmap_one(const struct vring_virtqueue *vq,
-                            struct vring_desc *desc)
+static void vring_unmap_one_split(const struct vring_virtqueue *vq,
+                                  struct vring_desc *desc)
 {
         u16 flags;
 
@@ -234,8 +234,9 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
         return dma_mapping_error(vring_dma_dev(vq), addr);
 }
 
-static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
-                                         unsigned int total_sg, gfp_t gfp)
+static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
+                                               unsigned int total_sg,
+                                               gfp_t gfp)
 {
         struct vring_desc *desc;
         unsigned int i;
@@ -256,14 +257,14 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
         return desc;
 }
 
-static inline int virtqueue_add(struct virtqueue *_vq,
-                                struct scatterlist *sgs[],
-                                unsigned int total_sg,
-                                unsigned int out_sgs,
-                                unsigned int in_sgs,
-                                void *data,
-                                void *ctx,
-                                gfp_t gfp)
+static inline int virtqueue_add_split(struct virtqueue *_vq,
+                                      struct scatterlist *sgs[],
+                                      unsigned int total_sg,
+                                      unsigned int out_sgs,
+                                      unsigned int in_sgs,
+                                      void *data,
+                                      void *ctx,
+                                      gfp_t gfp)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         struct scatterlist *sg;
@@ -302,7 +303,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
         /* If the host supports indirect descriptor tables, and we have multiple
          * buffers, then go indirect. FIXME: tune this threshold */
         if (vq->indirect && total_sg > 1 && vq->vq.num_free)
-                desc = alloc_indirect(_vq, total_sg, gfp);
+                desc = alloc_indirect_split(_vq, total_sg, gfp);
         else {
                 desc = NULL;
                 WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
@@ -423,7 +424,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
         for (n = 0; n < total_sg; n++) {
                 if (i == err_idx)
                         break;
-                vring_unmap_one(vq, &desc[i]);
+                vring_unmap_one_split(vq, &desc[i]);
                 i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
         }
 
@@ -434,6 +435,19 @@ static inline int virtqueue_add(struct virtqueue *_vq,
         return -EIO;
 }
 
+static inline int virtqueue_add(struct virtqueue *_vq,
+                                struct scatterlist *sgs[],
+                                unsigned int total_sg,
+                                unsigned int out_sgs,
+                                unsigned int in_sgs,
+                                void *data,
+                                void *ctx,
+                                gfp_t gfp)
+{
+        return virtqueue_add_split(_vq, sgs, total_sg,
+                                   out_sgs, in_sgs, data, ctx, gfp);
+}
+
 /**
  * virtqueue_add_sgs - expose buffers to other end
  * @vq: the struct virtqueue we're talking about.
@@ -536,18 +550,7 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
 
-/**
- * virtqueue_kick_prepare - first half of split virtqueue_kick call.
- * @vq: the struct virtqueue
- *
- * Instead of virtqueue_kick(), you can do:
- *      if (virtqueue_kick_prepare(vq))
- *              virtqueue_notify(vq);
- *
- * This is sometimes useful because the virtqueue_kick_prepare() needs
- * to be serialized, but the actual virtqueue_notify() call does not.
- */
-bool virtqueue_kick_prepare(struct virtqueue *_vq)
+static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         u16 new, old;
@@ -579,6 +582,22 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
         END_USE(vq);
         return needs_kick;
 }
+
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ *      if (virtqueue_kick_prepare(vq))
+ *              virtqueue_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_notify() call does not.
+ */
+bool virtqueue_kick_prepare(struct virtqueue *_vq)
+{
+        return virtqueue_kick_prepare_split(_vq);
+}
 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
 
 /**
@@ -625,8 +644,8 @@ bool virtqueue_kick(struct virtqueue *vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_kick);
 
-static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
-                       void **ctx)
+static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
+                             void **ctx)
 {
         unsigned int i, j;
         __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
@@ -638,12 +657,12 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
 
         i = head;
         while (vq->vring.desc[i].flags & nextflag) {
-                vring_unmap_one(vq, &vq->vring.desc[i]);
+                vring_unmap_one_split(vq, &vq->vring.desc[i]);
                 i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
                 vq->vq.num_free++;
         }
 
-        vring_unmap_one(vq, &vq->vring.desc[i]);
+        vring_unmap_one_split(vq, &vq->vring.desc[i]);
         vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
         vq->free_head = head;
 
@@ -665,7 +684,7 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
                 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
                 for (j = 0; j < len / sizeof(struct vring_desc); j++)
-                        vring_unmap_one(vq, &indir_desc[j]);
+                        vring_unmap_one_split(vq, &indir_desc[j]);
 
                 kfree(indir_desc);
                 vq->desc_state[head].indir_desc = NULL;
@@ -674,29 +693,14 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
         }
 }
 
-static inline bool more_used(const struct vring_virtqueue *vq)
+static inline bool more_used_split(const struct vring_virtqueue *vq)
 {
         return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
 }
 
-/**
- * virtqueue_get_buf - get the next used buffer
- * @vq: the struct virtqueue we're talking about.
- * @len: the length written into the buffer
- *
- * If the device wrote data into the buffer, @len will be set to the
- * amount written. This means you don't need to clear the buffer
- * beforehand to ensure there's no data leakage in the case of short
- * writes.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- *
- * Returns NULL if there are no used buffers, or the "data" token
- * handed to virtqueue_add_*().
- */
-void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
-                            void **ctx)
+static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
+                                         unsigned int *len,
+                                         void **ctx)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         void *ret;
@@ -710,7 +714,7 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
                 return NULL;
         }
 
-        if (!more_used(vq)) {
+        if (!more_used_split(vq)) {
                 pr_debug("No more buffers in queue\n");
                 END_USE(vq);
                 return NULL;
@@ -732,9 +736,9 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
                 return NULL;
         }
 
-        /* detach_buf clears data, so grab it now. */
+        /* detach_buf_split clears data, so grab it now. */
         ret = vq->desc_state[i].data;
-        detach_buf(vq, i, ctx);
+        detach_buf_split(vq, i, ctx);
         vq->last_used_idx++;
         /* If we expect an interrupt for the next entry, tell host
          * by writing event index and flush out the write before
@@ -751,6 +755,28 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
         END_USE(vq);
         return ret;
 }
+
+/**
+ * virtqueue_get_buf - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ *
+ * If the device wrote data into the buffer, @len will be set to the
+ * amount written. This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_*().
+ */
+void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
+                            void **ctx)
+{
+        return virtqueue_get_buf_ctx_split(_vq, len, ctx);
+}
 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
 
 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
@@ -758,16 +784,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
         return virtqueue_get_buf_ctx(_vq, len, NULL);
 }
 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
-/**
- * virtqueue_disable_cb - disable callbacks
- * @vq: the struct virtqueue we're talking about.
- *
- * Note that this is not necessarily synchronous, hence unreliable and only
- * useful as an optimization.
- *
- * Unlike other operations, this need not be serialized.
- */
-void virtqueue_disable_cb(struct virtqueue *_vq)
+
+static void virtqueue_disable_cb_split(struct virtqueue *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -776,23 +794,24 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
                 if (!vq->event)
                         vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
         }
-
 }
-EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * virtqueue_disable_cb - disable callbacks
  * @vq: the struct virtqueue we're talking about.
  *
- * This re-enables callbacks; it returns current queue state
- * in an opaque unsigned value. This value should be later tested by
- * virtqueue_poll, to detect a possible race between the driver checking for
- * more work, and enabling callbacks.
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
  *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
+ * Unlike other operations, this need not be serialized.
  */
-unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+void virtqueue_disable_cb(struct virtqueue *_vq)
+{
+        virtqueue_disable_cb_split(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+
+static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         u16 last_used_idx;
@@ -813,8 +832,33 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
         END_USE(vq);
         return last_used_idx;
 }
+
+/**
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+{
+        return virtqueue_enable_cb_prepare_split(_vq);
+}
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
 
+static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
+{
+        struct vring_virtqueue *vq = to_vvq(_vq);
+
+        return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
+                        vq->vring.used->idx);
+}
+
 /**
  * virtqueue_poll - query pending used buffers
  * @vq: the struct virtqueue we're talking about.
@@ -829,7 +873,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
         struct vring_virtqueue *vq = to_vvq(_vq);
 
         virtio_mb(vq->weak_barriers);
-        return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+        return virtqueue_poll_split(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_poll);
 
@@ -851,20 +895,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
-/**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
- *
- * This re-enables callbacks but hints to the other side to delay
- * interrupts until most of the available buffers have been processed;
- * it returns "false" if there are many pending buffers in the queue,
- * to detect a possible race between the driver checking for more work,
- * and enabling callbacks.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- */
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         u16 bufs;
@@ -896,17 +927,27 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
         END_USE(vq);
         return true;
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
 
 /**
- * virtqueue_detach_unused_buf - detach first unused buffer
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
  * @vq: the struct virtqueue we're talking about.
  *
- * Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
  */
-void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+        return virtqueue_enable_cb_delayed_split(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
+static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
         unsigned int i;
@@ -917,9 +958,9 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
         for (i = 0; i < vq->vring.num; i++) {
                 if (!vq->desc_state[i].data)
                         continue;
-                /* detach_buf clears data, so grab it now. */
+                /* detach_buf_split clears data, so grab it now. */
                 buf = vq->desc_state[i].data;
-                detach_buf(vq, i, NULL);
+                detach_buf_split(vq, i, NULL);
                 vq->avail_idx_shadow--;
                 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
                 END_USE(vq);
@@ -931,8 +972,26 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
         END_USE(vq);
         return NULL;
 }
+
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_*().
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown.
+ */
+void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+{
+        return virtqueue_detach_unused_buf_split(_vq);
+}
 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
+static inline bool more_used(const struct vring_virtqueue *vq)
+{
+        return more_used_split(vq);
+}
+
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
...