Commit 5dfc1762 authored by Rusty Russell

virtio: document functions better.

The old documentation is left over from when we used a structure with
strategy pointers.

And move the documentation to the C file as per kernel practice.
Though I disagree...
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 1e214a5c
@@ -166,6 +166,23 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
	return head;
}
/**
* virtqueue_add_buf_gfp - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
* @sg: the description of the buffer(s).
* @out_num: the number of sg readable by other side
* @in_num: the number of sg which are writable (after readable ones)
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
* Returns remaining capacity of queue or a negative error
* (ie. ENOSPC). Note that it only really makes sense to treat all
* positive return values as "available": indirect buffers mean that
* we can put an entire sg[] array inside a single queue entry.
*/
int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
@@ -244,6 +261,16 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
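
For illustration, a hedged sketch of exposing a single readable buffer and then notifying the other side with virtqueue_kick() (documented below). It assumes <linux/virtio.h>, <linux/scatterlist.h> and <linux/gfp.h>; example_send() and its buffer are hypothetical names, not part of this commit:

static int example_send(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int ret;

	sg_init_one(&sg, buf, len);

	/* One readable (out) entry, no writable (in) entries; buf is the
	 * token virtqueue_get_buf() hands back once the other side is done. */
	ret = virtqueue_add_buf_gfp(vq, &sg, 1, 0, buf, GFP_ATOMIC);
	if (ret < 0)
		return ret;	/* e.g. -ENOSPC when the ring is full */

	virtqueue_kick(vq);
	return 0;
}
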
/**
* virtqueue_kick - update after add_buf
* @vq: the struct virtqueue
*
* After one or more virtqueue_add_buf_gfp calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -300,6 +327,22 @@ static inline bool more_used(const struct vring_virtqueue *vq)
	return vq->last_used_idx != vq->vring.used->idx;
}
/**
* virtqueue_get_buf - get the next used buffer
* @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
*
* If the driver wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
* beforehand to ensure there's no data leakage in the case of short
* writes.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
* handed to virtqueue_add_buf_gfp().
*/
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -351,6 +394,15 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
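
A hedged sketch of the usual consumption loop; example_drain() and example_consume() are hypothetical names, not part of this commit:

static void example_drain(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
		/* Only the first @len bytes were written by the other
		 * side; the rest of the buffer is untouched. */
		example_consume(buf, len);	/* hypothetical helper */
	}
}
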
/**
* virtqueue_disable_cb - disable callbacks
* @vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
*
* Unlike other operations, this need not be serialized.
*/
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -359,6 +411,17 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
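
A hedged sketch of the common pattern in a virtqueue callback (struct example_dev and its work item are assumptions, not from this commit):

static void example_vq_callback(struct virtqueue *vq)
{
	struct example_dev *dev = vq->vdev->priv;	/* hypothetical driver state */

	/* Best-effort suppression only (see above); the deferred worker
	 * re-enables callbacks once the queue has been drained. */
	virtqueue_disable_cb(vq);
	schedule_work(&dev->work);	/* hypothetical work item */
}
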
/**
* virtqueue_enable_cb - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -383,6 +446,19 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
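
A hedged sketch of the deferred side handling the re-enable race; it reuses the hypothetical example_drain() above, and struct example_dev is an assumption:

static void example_work_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);
	struct virtqueue *vq = dev->vq;	/* hypothetical field */

	for (;;) {
		example_drain(vq);
		if (virtqueue_enable_cb(vq))
			break;
		/* Buffers arrived while callbacks were off: switch them
		 * back off and drain again instead of missing work. */
		virtqueue_disable_cb(vq);
	}
}
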
/**
* virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
* it returns "false" if there are many pending buffers in the queue,
* to detect a possible race between the driver checking for more work,
* and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -410,6 +486,14 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
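
A hedged sketch of transmit-completion mitigation using the delayed variant (tx_vq and example_free_tx() are hypothetical, not part of this commit):

static void example_reclaim_tx(struct example_dev *dev)
{
	unsigned int len;
	void *buf;

	do {
		while ((buf = virtqueue_get_buf(dev->tx_vq, &len)) != NULL)
			example_free_tx(buf);	/* hypothetical helper */
		/* Hint that an interrupt is only wanted once most of the
		 * in-flight buffers are used; a false return means many
		 * are already pending, so reclaim again immediately. */
	} while (!virtqueue_enable_cb_delayed(dev->tx_vq));
}
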
/**
* virtqueue_detach_unused_buf - detach first unused buffer
* @vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_buf_gfp().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -538,7 +622,13 @@ void vring_transport_features(struct virtio_device *vdev)
}
EXPORT_SYMBOL_GPL(vring_transport_features);
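
A hedged teardown sketch, assuming the tokens are kmalloc'ed buffers of a hypothetical driver and the queue has already been quiesced by the transport:

static void example_free_unused(struct virtqueue *vq)
{
	void *buf;

	/* Reclaim buffers we added but the other side never used. */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}
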
/* return the size of the vring within the virtqueue */
/**
* virtqueue_get_vring_size - return the size of the virtqueue's vring
* @vq: the struct virtqueue containing the vring of interest.
*
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
*/
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
...
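
A hedged sketch of using the ring size to dimension per-entry driver state (the example_dev fields and kcalloc usage are illustrative assumptions, not from this commit):

static int example_alloc_state(struct example_dev *dev)
{
	unsigned int num = virtqueue_get_vring_size(dev->vq);

	/* One bookkeeping slot per ring entry; this query needs no
	 * serialization against other virtqueue operations. */
	dev->slots = kcalloc(num, sizeof(*dev->slots), GFP_KERNEL);
	return dev->slots ? 0 : -ENOMEM;
}
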
@@ -25,53 +25,6 @@ struct virtqueue {
	void *priv;
};
/**
* operations for virtqueue
* virtqueue_add_buf: expose buffer to other end
* vq: the struct virtqueue we're talking about.
* sg: the description of the buffer(s).
* out_num: the number of sg readable by other side
* in_num: the number of sg which are writable (after readable ones)
* data: the token identifying the buffer.
* gfp: how to do memory allocations (if necessary).
* Returns remaining capacity of queue (sg segments) or a negative error.
* virtqueue_kick: update after add_buf
* vq: the struct virtqueue
* After one or more add_buf calls, invoke this to kick the other side.
* virtqueue_get_buf: get the next used buffer
* vq: the struct virtqueue we're talking about.
* len: the length written into the buffer
* Returns NULL or the "data" token handed to add_buf.
* virtqueue_disable_cb: disable callbacks
* vq: the struct virtqueue we're talking about.
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
* virtqueue_enable_cb: restart callbacks after disable_cb.
* vq: the struct virtqueue we're talking about.
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
* virtqueue_enable_cb_delayed: restart callbacks after disable_cb.
* vq: the struct virtqueue we're talking about.
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
* it returns "false" if there are many pending buffers in the queue,
* to detect a possible race between the driver checking for more work,
* and enabling callbacks.
* virtqueue_detach_unused_buf: detach first unused buffer
* vq: the struct virtqueue we're talking about.
* Returns NULL or the "data" token handed to add_buf
* virtqueue_get_vring_size: return the size of the virtqueue's vring
* vq: the struct virtqueue containing the vring of interest.
* Returns the size of the vring.
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously, with the exception
* of virtqueue_disable_cb.
*
* All operations can be called in any context.
*/
int virtqueue_add_buf_gfp(struct virtqueue *vq,
			  struct scatterlist sg[],
			  unsigned int out_num,
...