Commit 65f8453d authored by Gurchetan Singh's avatar Gurchetan Singh Committed by Gerd Hoffmann

drm/virtio: rename sync_seq and last_seq

To be clearer about our intentions to associate sequence numbers
and fence IDs, let's rename these variables.
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Reviewed-by: Anthoine Bourgeois <anthoine.bourgeois@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20201119010809.528-5-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent bb53a604
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -67,8 +67,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
 	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
 	seq_printf(m, "fence %llu %lld\n",
-		   (u64)atomic64_read(&vgdev->fence_drv.last_seq),
-		   vgdev->fence_drv.sync_seq);
+		   (u64)atomic64_read(&vgdev->fence_drv.last_fence_id),
+		   vgdev->fence_drv.current_fence_id);
 	return 0;
 }
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -127,8 +127,8 @@ typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
 				   struct virtio_gpu_vbuffer *vbuf);
 
 struct virtio_gpu_fence_driver {
-	atomic64_t       last_seq;
-	uint64_t         sync_seq;
+	atomic64_t       last_fence_id;
+	uint64_t         current_fence_id;
 	uint64_t         context;
 	struct list_head fences;
 	spinlock_t       lock;
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -48,7 +48,7 @@ static bool virtio_fence_signaled(struct dma_fence *f)
 		/* leaked fence outside driver before completing
 		 * initialization with virtio_gpu_fence_emit */
 		return false;
-	if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
+	if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
 		return true;
 	return false;
 }
@@ -62,7 +62,8 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
-	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
+	snprintf(str, size, "%llu",
+		 (u64)atomic64_read(&fence->drv->last_fence_id));
 }
 
 static const struct dma_fence_ops virtio_fence_ops = {
@@ -100,7 +101,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	fence->f.seqno = ++drv->sync_seq;
+	fence->f.seqno = ++drv->current_fence_id;
 	dma_fence_get(&fence->f);
 	list_add_tail(&fence->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -119,7 +120,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	atomic64_set(&vgdev->fence_drv.last_seq, fence_id);
+	atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
 	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
 		if (fence_id < fence->f.seqno)
 			continue;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment