Commit 1adbd6b2 authored by Feng Liu, committed by Michael S. Tsirkin

virtio_ring: Avoid using inline for small functions

According to kernel coding style [1], defining inline functions is neither
necessary nor beneficial for simple functions. Hence clean up the code by
removing the inline keyword.

It is verified with GCC 12.2.0 that the generated code is identical with
and without inline. Additionally tested with pktgen and iperf: neither
iperf throughput nor pktgen pps for virtio-net changed before and after
the change.
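
As an illustrative sketch (not part of this patch; the file and function
names below are hypothetical), the claim can be reproduced on any small
static function: at -O2, GCC inlines it on its own, so deleting the
inline keyword leaves the generated assembly unchanged.

/* inline_demo.c - hypothetical standalone example, not kernel code.
 * At -O2, GCC inlines small static functions by itself, so the
 * `inline` keyword below does not change the generated assembly.
 * Check: gcc -O2 -S inline_demo.c, then drop `inline`, recompile,
 * and diff the two .s files.
 */
#include <stdbool.h>

struct ring {
	unsigned int num;
	bool indirect;
};

/* Small predicate, shaped like virtqueue_use_indirect(). */
static inline bool ring_use_indirect(const struct ring *r,
				     unsigned int total_sg)
{
	return r->indirect && total_sg > 1 && r->num;
}

bool ring_caller(const struct ring *r, unsigned int total_sg)
{
	return ring_use_indirect(r, total_sg);
}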

[1] https://www.kernel.org/doc/html/v6.2-rc3/process/coding-style.html#the-inline-disease

Signed-off-by: Feng Liu <feliu@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Gavin Li <gavinl@nvidia.com>
Reviewed-by: Bodong Wang <bodong@nvidia.com>
Reviewed-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20230310053428.3376-3-feliu@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 6b27cd84
@@ -233,8 +233,8 @@ static void vring_free(struct virtqueue *_vq);
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
-static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
-					  unsigned int total_sg)
+static bool virtqueue_use_indirect(struct vring_virtqueue *vq,
+				   unsigned int total_sg)
 {
 	/*
 	 * If the host supports indirect descriptor tables, and we have multiple
@@ -349,7 +349,7 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
  * making all of the arch DMA ops work on the vring device itself
  * is a mess.
  */
-static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 {
 	return vq->dma_dev;
 }
@@ -784,7 +784,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 	}
 }
 
-static inline bool more_used_split(const struct vring_virtqueue *vq)
+static bool more_used_split(const struct vring_virtqueue *vq)
 {
 	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
 			vq->split.vring.used->idx);
@@ -1172,12 +1172,12 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
 /*
  * Packed ring specific functions - *_packed().
  */
-static inline bool packed_used_wrap_counter(u16 last_used_idx)
+static bool packed_used_wrap_counter(u16 last_used_idx)
 {
 	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
 }
 
-static inline u16 packed_last_used(u16 last_used_idx)
+static u16 packed_last_used(u16 last_used_idx)
 {
 	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
 }
@@ -1612,7 +1612,7 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
 	return avail == used && used == used_wrap_counter;
 }
 
-static inline bool more_used_packed(const struct vring_virtqueue *vq)
+static bool more_used_packed(const struct vring_virtqueue *vq)
 {
 	u16 last_used;
 	u16 last_used_idx;