Commit ddd3d408 authored by Stefan Hajnoczi, committed by David S. Miller

vhost: return bool from *_access_ok() functions

Currently vhost *_access_ok() functions return int.  This is error-prone
because there are two popular conventions:

1. 0 means failure, 1 means success
2. -errno means failure, 0 means success

Although vhost mostly uses #1, it does not do so consistently.
umem_access_ok() uses #2.

This patch changes the return type from int to bool so that false means
failure and true means success.  This eliminates a potential source of
errors.
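
To see why the mix is error-prone, consider a caller written against the
wrong convention. The sketch below is illustrative userspace code, not
kernel code: the stub stands in for the pre-patch umem_access_ok()
(convention #2), and caller_ok()/caller_buggy() are invented names for
this sketch.

  #include <errno.h>
  #include <stdint.h>

  typedef uint64_t u64;

  /* Stub following convention #2 (-errno on failure, 0 on success),
   * standing in for the pre-patch umem_access_ok(). */
  static int umem_access_ok(u64 uaddr, u64 size, int access)
  {
          (void)uaddr; (void)size; (void)access;
          return -EFAULT;         /* pretend validation failed */
  }

  /* Correct under convention #2: any nonzero result is failure. */
  static int caller_ok(u64 uaddr, u64 size, int perm)
  {
          if (umem_access_ok(uaddr, size, perm))
                  return -EFAULT;
          return 0;
  }

  /* Wrong under convention #2, but natural if the reader assumes
   * convention #1: -EFAULT is truthy, so !umem_access_ok(...) is
   * false and the failure path is silently skipped. */
  static int caller_buggy(u64 uaddr, u64 size, int perm)
  {
          if (!umem_access_ok(uaddr, size, perm))
                  return -EFAULT; /* never taken here */
          return 0;
  }

Returning bool makes the negated form correct everywhere and removes
the ambiguity.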
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d14d2b78
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -641,14 +641,14 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 
-static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
 	u64 a = addr / VHOST_PAGE_SIZE / 8;
 	/* Make sure 64 bit math will not overflow. */
 	if (a > ULONG_MAX - (unsigned long)log_base ||
 	    a + (unsigned long)log_base > ULONG_MAX)
-		return 0;
+		return false;
 	return access_ok(VERIFY_WRITE, log_base + a,
 			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
@@ -661,30 +661,30 @@ static bool vhost_overflow(u64 uaddr, u64 size)
 }
 
 /* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
-			       int log_all)
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
+				int log_all)
 {
 	struct vhost_umem_node *node;
 
 	if (!umem)
-		return 0;
+		return false;
 
 	list_for_each_entry(node, &umem->umem_list, link) {
 		unsigned long a = node->userspace_addr;
 
 		if (vhost_overflow(node->userspace_addr, node->size))
-			return 0;
+			return false;
 
 		if (!access_ok(VERIFY_WRITE, (void __user *)a,
 				    node->size))
-			return 0;
+			return false;
 		else if (log_all && !log_access_ok(log_base,
 						   node->start,
 						   node->size))
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
 }
 
 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
@@ -701,13 +701,13 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
 
 /* Can we switch to this memory table? */
 /* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
-			    int log_all)
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
+			     int log_all)
 {
 	int i;
 
 	for (i = 0; i < d->nvqs; ++i) {
-		int ok;
+		bool ok;
 		bool log;
 
 		mutex_lock(&d->vqs[i]->mutex);
@@ -717,12 +717,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
 			ok = vq_memory_access_ok(d->vqs[i]->log_base,
 						 umem, log);
 		else
-			ok = 1;
+			ok = true;
 		mutex_unlock(&d->vqs[i]->mutex);
 		if (!ok)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
 }
 
 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
@@ -959,21 +959,21 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 	spin_unlock(&d->iotlb_lock);
 }
 
-static int umem_access_ok(u64 uaddr, u64 size, int access)
+static bool umem_access_ok(u64 uaddr, u64 size, int access)
 {
 	unsigned long a = uaddr;
 
 	/* Make sure 64 bit math will not overflow. */
 	if (vhost_overflow(uaddr, size))
-		return -EFAULT;
+		return false;
 
 	if ((access & VHOST_ACCESS_RO) &&
 	    !access_ok(VERIFY_READ, (void __user *)a, size))
-		return -EFAULT;
+		return false;
 	if ((access & VHOST_ACCESS_WO) &&
 	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
-		return -EFAULT;
-	return 0;
+		return false;
+	return true;
 }
 
 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
@@ -988,7 +988,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 			ret = -EFAULT;
 			break;
 		}
-		if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
 			ret = -EFAULT;
 			break;
 		}
@@ -1135,10 +1135,10 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
 	return 0;
 }
 
-static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
-			struct vring_desc __user *desc,
-			struct vring_avail __user *avail,
-			struct vring_used __user *used)
+static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
+			 struct vring_desc __user *desc,
+			 struct vring_avail __user *avail,
+			 struct vring_used __user *used)
 {
 	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
@@ -1161,8 +1161,8 @@ static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
 		vq->meta_iotlb[type] = node;
 }
 
-static int iotlb_access_ok(struct vhost_virtqueue *vq,
-			   int access, u64 addr, u64 len, int type)
+static bool iotlb_access_ok(struct vhost_virtqueue *vq,
+			    int access, u64 addr, u64 len, int type)
 {
 	const struct vhost_umem_node *node;
 	struct vhost_umem *umem = vq->iotlb;
@@ -1220,7 +1220,7 @@ EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
 
 /* Can we log writes? */
 /* Caller should have device mutex but not vq mutex */
-int vhost_log_access_ok(struct vhost_dev *dev)
+bool vhost_log_access_ok(struct vhost_dev *dev)
 {
 	return memory_access_ok(dev, dev->umem, 1);
 }
@@ -1228,8 +1228,8 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq,
-			    void __user *log_base)
+static bool vq_log_access_ok(struct vhost_virtqueue *vq,
+			     void __user *log_base)
 {
 	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
@@ -1242,14 +1242,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
 
 /* Can we start vq? */
 /* Caller should have vq mutex and device mutex */
-int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
 	if (!vq_log_access_ok(vq, vq->log_base))
-		return 0;
+		return false;
 
 	/* Access validation occurs at prefetch time with IOTLB */
 	if (vq->iotlb)
-		return 1;
+		return true;
 
 	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
 }
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -178,8 +178,8 @@ void vhost_dev_cleanup(struct vhost_dev *);
 void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
-int vhost_vq_access_ok(struct vhost_virtqueue *vq);
-int vhost_log_access_ok(struct vhost_dev *);
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
+bool vhost_log_access_ok(struct vhost_dev *);
 int vhost_get_vq_desc(struct vhost_virtqueue *,
 		      struct iovec iov[], unsigned int iov_count,
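
With the conversion applied, call sites of the exported helpers read
unambiguously. A minimal sketch of a hypothetical caller, assuming only
the post-patch prototypes above; start_vq() is an invented name, not
part of this patch:

  /* Hypothetical caller; vhost_vq_access_ok() is the patched helper. */
  static int start_vq(struct vhost_virtqueue *vq)
  {
          if (!vhost_vq_access_ok(vq))
                  return -EFAULT; /* false now always means failure */

          /* ... safe to start the virtqueue ... */
          return 0;
  }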