Commit 6b8a7946 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull last minute virtio bugfixes from Michael Tsirkin:
 "Minor bugfixes all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_balloon: fix shrinker count
  virtio_balloon: fix shrinker scan number of pages
  virtio_console: allocate inbufs in add_port() only if it is needed
  virtio_ring: fix return code on DMA mapping fails
parents 2027cabe c9a6820f
...@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols) ...@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
port->cons.ws.ws_col = cols; port->cons.ws.ws_col = cols;
} }
static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{ {
struct port_buffer *buf; struct port_buffer *buf;
unsigned int nr_added_bufs; int nr_added_bufs;
int ret; int ret;
nr_added_bufs = 0; nr_added_bufs = 0;
do { do {
buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf) if (!buf)
break; return -ENOMEM;
spin_lock_irq(lock); spin_lock_irq(lock);
ret = add_inbuf(vq, buf); ret = add_inbuf(vq, buf);
if (ret < 0) { if (ret < 0) {
spin_unlock_irq(lock); spin_unlock_irq(lock);
free_buf(buf, true); free_buf(buf, true);
break; return ret;
} }
nr_added_bufs++; nr_added_bufs++;
spin_unlock_irq(lock); spin_unlock_irq(lock);
...@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id) ...@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id)
char debugfs_name[16]; char debugfs_name[16];
struct port *port; struct port *port;
dev_t devt; dev_t devt;
unsigned int nr_added_bufs;
int err; int err;
port = kmalloc(sizeof(*port), GFP_KERNEL); port = kmalloc(sizeof(*port), GFP_KERNEL);
...@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id) ...@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock); spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue); init_waitqueue_head(&port->waitqueue);
/* Fill the in_vq with buffers so the host can send us data. */ /* We can safely ignore ENOSPC because it means
nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); * the queue already has buffers. Buffers are removed
if (!nr_added_bufs) { * only by virtcons_remove(), not by unplug_port()
*/
err = fill_queue(port->in_vq, &port->inbuf_lock);
if (err < 0 && err != -ENOSPC) {
dev_err(port->dev, "Error allocating inbufs\n"); dev_err(port->dev, "Error allocating inbufs\n");
err = -ENOMEM;
goto free_device; goto free_device;
} }
...@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev) ...@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev)
INIT_WORK(&portdev->control_work, &control_work_handler); INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) { if (multiport) {
unsigned int nr_added_bufs;
spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock); spin_lock_init(&portdev->c_ovq_lock);
nr_added_bufs = fill_queue(portdev->c_ivq, err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
&portdev->c_ivq_lock); if (err < 0) {
if (!nr_added_bufs) {
dev_err(&vdev->dev, dev_err(&vdev->dev,
"Error allocating buffers for control queue\n"); "Error allocating buffers for control queue\n");
/* /*
...@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev) ...@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev)
VIRTIO_CONSOLE_DEVICE_READY, 0); VIRTIO_CONSOLE_DEVICE_READY, 0);
/* Device was functional: we need full cleanup. */ /* Device was functional: we need full cleanup. */
virtcons_remove(vdev); virtcons_remove(vdev);
return -ENOMEM; return err;
} }
} else { } else {
/* /*
......
...@@ -772,6 +772,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb, ...@@ -772,6 +772,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER; return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
} }
static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
unsigned long pages_to_free)
{
return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
VIRTIO_BALLOON_PAGES_PER_PAGE;
}
static unsigned long shrink_balloon_pages(struct virtio_balloon *vb, static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
unsigned long pages_to_free) unsigned long pages_to_free)
{ {
...@@ -782,11 +789,10 @@ static unsigned long shrink_balloon_pages(struct virtio_balloon *vb, ...@@ -782,11 +789,10 @@ static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
* VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
* multiple times to deflate pages till reaching pages_to_free. * multiple times to deflate pages till reaching pages_to_free.
*/ */
while (vb->num_pages && pages_to_free) { while (vb->num_pages && pages_freed < pages_to_free)
pages_freed += leak_balloon(vb, pages_to_free) / pages_freed += leak_balloon_pages(vb,
VIRTIO_BALLOON_PAGES_PER_PAGE; pages_to_free - pages_freed);
pages_to_free -= pages_freed;
}
update_balloon_size(vb); update_balloon_size(vb);
return pages_freed; return pages_freed;
...@@ -799,7 +805,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, ...@@ -799,7 +805,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct virtio_balloon *vb = container_of(shrinker, struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker); struct virtio_balloon, shrinker);
pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE; pages_to_free = sc->nr_to_scan;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
pages_freed = shrink_free_pages(vb, pages_to_free); pages_freed = shrink_free_pages(vb, pages_to_free);
...@@ -820,7 +826,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, ...@@ -820,7 +826,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
unsigned long count; unsigned long count;
count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER; count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
return count; return count;
} }
......
...@@ -583,7 +583,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, ...@@ -583,7 +583,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
kfree(desc); kfree(desc);
END_USE(vq); END_USE(vq);
return -EIO; return -ENOMEM;
} }
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
...@@ -1085,7 +1085,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, ...@@ -1085,7 +1085,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
kfree(desc); kfree(desc);
END_USE(vq); END_USE(vq);
return -EIO; return -ENOMEM;
} }
static inline int virtqueue_add_packed(struct virtqueue *_vq, static inline int virtqueue_add_packed(struct virtqueue *_vq,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment