Commit d5c0ed17 authored by Xuan Zhuo, committed by Michael S. Tsirkin

virtio: packed: fix unmap leak for indirect desc table

When use_dma_api and premapped are both true, do_unmap is false.
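
(Reference sketch, not part of this patch: the flags relate roughly as
shown below; the actual assignment in virtio_ring.c is spread across
queue setup and the premapped toggle, and vq->premapped is assumed here
to be the field carrying the premapped state.)

  /* the core only unmaps on detach if it created the mapping itself */
  vq->do_unmap = vq->use_dma_api && !vq->premapped;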

Because do_unmap is false, detach_buf_packed does not call
vring_unmap_extra_packed:

  if (unlikely(vq->do_unmap)) {
          curr = id;
          for (i = 0; i < state->num; i++) {
                  vring_unmap_extra_packed(vq,
                                           &vq->packed.desc_extra[curr]);
                  curr = vq->packed.desc_extra[curr].next;
          }
  }

As a result, the indirect desc table is never unmapped, and its DMA
mapping is leaked.

Fix this by checking vq->use_dma_api instead. Correspondingly, the DMA
info in desc_extra is now recorded under the same use_dma_api check, so
the add and detach paths stay in sync.

In practice this bug is not triggered, because no driver currently uses
premapped mode together with indirect descriptors.

Fixes: b319940f ("virtio_ring: skip unmap for premapped")
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Message-Id: <20240223071833.26095-1-xuanzhuo@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 1ac61ddf
drivers/virtio/virtio_ring.c

@@ -1340,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 					sizeof(struct vring_packed_desc));
 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
 
-	if (vq->do_unmap) {
+	if (vq->use_dma_api) {
 		vq->packed.desc_extra[id].addr = addr;
 		vq->packed.desc_extra[id].len = total_sg *
 				sizeof(struct vring_packed_desc);
@@ -1481,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 			desc[i].len = cpu_to_le32(sg->length);
 			desc[i].id = cpu_to_le16(id);
 
-			if (unlikely(vq->do_unmap)) {
+			if (unlikely(vq->use_dma_api)) {
 				vq->packed.desc_extra[curr].addr = addr;
 				vq->packed.desc_extra[curr].len = sg->length;
 				vq->packed.desc_extra[curr].flags =
@@ -1615,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 	vq->free_head = id;
 	vq->vq.num_free += state->num;
 
-	if (unlikely(vq->do_unmap)) {
+	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_extra_packed(vq,