Commit 3ec60b92 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost fixes from Michael Tsirkin:
 - test fixes
 - a vsock fix

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  tools/virtio: add dma stubs
  vhost/test: fix after swiotlb changes
  vhost/vsock: drop space available check for TX vq
  ringtest: test build fix
parents 45b6ae76 6be3ffaa
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n)
 {
 	void *priv = NULL;
 	long err;
-	struct vhost_memory *memory;
+	struct vhost_umem *umem;
 
 	mutex_lock(&n->dev.mutex);
 	err = vhost_dev_check_owner(&n->dev);
 	if (err)
 		goto done;
-	memory = vhost_dev_reset_owner_prepare();
-	if (!memory) {
+	umem = vhost_dev_reset_owner_prepare();
+	if (!umem) {
 		err = -ENOMEM;
 		goto done;
 	}
 	vhost_test_stop(n, &priv);
 	vhost_test_flush(n);
-	vhost_dev_reset_owner(&n->dev, memory);
+	vhost_dev_reset_owner(&n->dev, umem);
 done:
 	mutex_unlock(&n->dev.mutex);
 	return err;
...
@@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 	vq = vsock->vqs[VSOCK_VQ_TX];
 
-	/* Avoid unnecessary interrupts while we're processing the ring */
-	virtqueue_disable_cb(vq);
-
 	for (;;) {
 		struct virtio_vsock_pkt *pkt;
 		struct scatterlist hdr, buf, *sgs[2];
@@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		if (list_empty(&vsock->send_pkt_list)) {
 			spin_unlock_bh(&vsock->send_pkt_list_lock);
-			virtqueue_enable_cb(vq);
 			break;
 		}
@@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 		}
 
 		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+		/* Usually this means that there is no more space available in
+		 * the vq
+		 */
 		if (ret < 0) {
 			spin_lock_bh(&vsock->send_pkt_list_lock);
 			list_add(&pkt->list, &vsock->send_pkt_list);
 			spin_unlock_bh(&vsock->send_pkt_list_lock);
-
-			if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
-				continue; /* retry now that we have more space */
 			break;
 		}
...
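With the -ENOSPC retry dropped, a failed virtqueue_add_sgs() simply parks the packet back on send_pkt_list and ends the pass; progress resumes when the device consumes TX buffers and the reclaim path kicks the send worker again. A minimal sketch of that reclaim side, assuming the driver's usual tx_work/send_pkt_work structure (the function name and exact fields here are illustrative, not taken from this patch):

/* Illustrative reclaim worker: free completed TX buffers and, if any
 * were reclaimed, requeue the send worker so packets parked back on
 * send_pkt_list get another chance at the now-emptier ring.
 */
static void example_tx_reclaim_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	bool added = false;

	mutex_lock(&vsock->tx_lock);
	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

Read this way, the interrupt-suppression dance inside the send loop bought little: as long as the reclaim side reliably requeues the send worker, breaking out on a full ring is enough.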
@@ -14,4 +14,20 @@ enum dma_data_direction {
 	DMA_NONE = 3,
 };
 
+#define dma_alloc_coherent(d, s, hp, f) ({ \
+	void *__dma_alloc_coherent_p = kmalloc((s), (f)); \
+	*(hp) = (unsigned long)__dma_alloc_coherent_p; \
+	__dma_alloc_coherent_p; \
+})
+
+#define dma_free_coherent(d, s, p, h) kfree(p)
+
+#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
+
+#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_mapping_error(...) (0)
+
+#define dma_unmap_single(...) do { } while (0)
+#define dma_unmap_page(...) do { } while (0)
+
 #endif
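Taken together, these stubs reduce the DMA API to plain heap operations so the ring code still builds and runs in userspace after the DMA rework: coherent allocations become kmalloc(), mappings collapse to the buffer address, unmaps are no-ops, and dma_mapping_error() always reports success. A hedged sketch of what a caller compiled against this stub header ends up doing (the example_* helpers are hypothetical, not part of the patch):

#include <linux/dma-mapping.h>	/* the userspace stub header above */
#include <linux/slab.h>		/* GFP_KERNEL stub */

/* Hypothetical helper: allocate a "coherent" buffer for a ring. With
 * the stubs this is just kmalloc(), and the returned handle is the
 * buffer's address stored into *handle. */
static void *example_alloc_ring(size_t size, dma_addr_t *handle)
{
	return dma_alloc_coherent(NULL /* no real device */, size,
				  handle, GFP_KERNEL);
}

static void example_free_ring(size_t size, void *ring, dma_addr_t handle)
{
	dma_free_coherent(NULL, size, ring, handle);	/* kfree() */
}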
@@ -20,7 +20,9 @@
 #define PAGE_SIZE getpagesize()
 #define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
 
+typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
 typedef unsigned int __wsum;
@@ -57,6 +59,11 @@ static inline void *kzalloc(size_t s, gfp_t gfp)
 	return p;
 }
 
+static inline void *alloc_pages_exact(size_t s, gfp_t gfp)
+{
+	return kmalloc(s, gfp);
+}
+
 static inline void kfree(void *p)
 {
 	if (p >= __kfree_ignore_start && p < __kfree_ignore_end)
@@ -64,6 +71,11 @@ static inline void kfree(void *p)
 	free(p);
 }
 
+static inline void free_pages_exact(void *p, size_t s)
+{
+	kfree(p);
+}
+
 static inline void *krealloc(void *p, size_t s, gfp_t gfp)
 {
 	return realloc(p, s);
@@ -105,6 +117,8 @@ static inline void free_page(unsigned long addr)
 #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 
+#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+
 #define min(x, y) ({ \
 	typeof(x) _min1 = (x); \
 	typeof(y) _min2 = (y); \
...
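These kernel.h additions are equally thin: alloc_pages_exact()/free_pages_exact() fall back to the malloc-backed kmalloc()/kfree(), PAGE_ALIGN() rounds a length up to getpagesize(), and WARN_ON_ONCE() merely prints to stderr. A small hypothetical illustration of how setup code exercises them in this harness (example_alloc_queue is not from the patch):

#include <linux/kernel.h>	/* the userspace shim above */
#include <linux/slab.h>		/* GFP_KERNEL stub */

/* Hypothetical snippet: size a queue the way kernel-side code would,
 * then grab page-sized memory for it; in userspace this is ordinary
 * heap memory. */
static void *example_alloc_queue(size_t bytes)
{
	size_t len = PAGE_ALIGN(bytes);		/* round up to PAGE_SIZE */
	void *q = alloc_pages_exact(len, GFP_KERNEL);

	WARN_ON_ONCE(!q);			/* prints "WARNING" on failure */
	return q;
}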
 #ifndef LINUX_SLAB_H
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+#define __GFP_NOWARN 0
+#define __GFP_ZERO 0
 #endif
@@ -3,8 +3,12 @@
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
 
+struct device {
+	void *parent;
+};
+
 struct virtio_device {
-	void *dev;
+	struct device dev;
 	u64 features;
 };
...
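Replacing the opaque dev pointer with a minimal embedded struct device keeps the stub layout close enough to the kernel's struct virtio_device that core code which dereferences vdev->dev.parent still compiles in the harness. A hedged sketch of how a userspace test might initialize it (the helper and the chosen values are illustrative):

#include <linux/virtio.h>	/* the stub above */

/* Hypothetical initialization: a harness-side virtio device with no
 * parent (nothing above it that could impose an IOMMU) and a caller-
 * chosen feature mask. */
static void example_init_vdev(struct virtio_device *vdev, u64 features)
{
	vdev->dev.parent = NULL;	/* no bus/parent in userspace */
	vdev->features = features;
}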
@@ -40,6 +40,19 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
 #define virtio_has_feature(dev, feature) \
 	(__virtio_test_bit((dev), feature))
 
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+	/*
+	 * Note the reverse polarity of the quirk feature (compared to most
+	 * other features), this is for compatibility with legacy systems.
+	 */
+	return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
 {
 	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
...
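Because the quirk feature has inverted polarity, call sites typically negate it once and reason in terms of "use the DMA API or not". A simplified, hypothetical illustration of that pattern (the real in-kernel decision also considers platform details omitted here):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical policy helper: if the quirk is present (i.e.
 * VIRTIO_F_IOMMU_PLATFORM was not negotiated), the device bypasses
 * the platform IOMMU and plain physical addresses are used directly;
 * otherwise buffers should go through the DMA API. */
static bool example_use_dma_api(const struct virtio_device *vdev)
{
	return !virtio_has_iommu_quirk(vdev);
}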
@@ -13,6 +13,7 @@
 #define cache_line_size() SMP_CACHE_BYTES
 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
 #define unlikely(x) (__builtin_expect(!!(x), 0))
+#define likely(x) (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
 
 typedef pthread_spinlock_t spinlock_t;
...