Commit 1152b168 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2021-12-02' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Switch back to drm_poll for virtio, multiple fixes (memory leak,
improper error check, some functional fixes too) for vc4, and a memory
leak fix in dma-buf.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20211202084440.u3b7lbeulj7k3ltg@houat
parents 52e81b69 679d94cd
...@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf) ...@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
int i; int i;
table = &buffer->sg_table; table = &buffer->sg_table;
for_each_sg(table->sgl, sg, table->nents, i) { for_each_sgtable_sg(table, sg, i) {
struct page *page = sg_page(sg); struct page *page = sg_page(sg);
__free_pages(page, compound_order(page)); __free_pages(page, compound_order(page));
......
...@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) ...@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_device *dev = state->dev; struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs; struct vc4_hvs *hvs = vc4->hvs;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state; struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state; struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
int i; int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
...@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) ...@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
} }
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
old_hvs_state = vc4_hvs_get_old_global_state(state); old_hvs_state = vc4_hvs_get_old_global_state(state);
if (!old_hvs_state) if (IS_ERR(old_hvs_state))
return; return;
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
struct vc4_crtc_state *vc4_crtc_state = struct drm_crtc_commit *commit;
to_vc4_crtc_state(old_crtc_state);
unsigned int channel = vc4_crtc_state->assigned_channel;
int ret; int ret;
if (channel == VC4_HVS_CHANNEL_DISABLED) if (!old_hvs_state->fifo_state[channel].in_use)
continue; continue;
if (!old_hvs_state->fifo_state[channel].in_use) commit = old_hvs_state->fifo_state[channel].pending_commit;
if (!commit)
continue; continue;
ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit); ret = drm_crtc_commit_wait(commit);
if (ret) if (ret)
drm_err(dev, "Timed out waiting for commit\n"); drm_err(dev, "Timed out waiting for commit\n");
drm_crtc_commit_put(commit);
old_hvs_state->fifo_state[channel].pending_commit = NULL;
} }
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state); vc4_ctm_commit(vc4, state);
...@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state) ...@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
unsigned int i; unsigned int i;
hvs_state = vc4_hvs_get_new_global_state(state); hvs_state = vc4_hvs_get_new_global_state(state);
if (!hvs_state) if (WARN_ON(IS_ERR(hvs_state)))
return -EINVAL; return PTR_ERR(hvs_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i) { for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state = struct vc4_crtc_state *vc4_crtc_state =
...@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) ...@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
for (i = 0; i < HVS_NUM_CHANNELS; i++) { for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use; state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
if (!old_state->fifo_state[i].pending_commit)
continue;
state->fifo_state[i].pending_commit =
drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
} }
return &state->base; return &state->base;
...@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, ...@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
unsigned int i; unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state); hvs_new_state = vc4_hvs_get_global_state(state);
if (!hvs_new_state) if (IS_ERR(hvs_new_state))
return -EINVAL; return PTR_ERR(hvs_new_state);
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++) for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
if (!hvs_new_state->fifo_state[i].in_use) if (!hvs_new_state->fifo_state[i].in_use)
......
...@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev) ...@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work); schedule_work(&vgdev->config_changed_work);
} }
static __poll_t virtio_gpu_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct drm_file *drm_file = filp->private_data;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_device *dev = drm_file->minor->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_pending_event *e = NULL;
__poll_t mask = 0;
if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
return drm_poll(filp, wait);
poll_wait(filp, &drm_file->event_wait, wait);
if (!list_empty(&drm_file->event_list)) {
spin_lock_irq(&dev->event_lock);
e = list_first_entry(&drm_file->event_list,
struct drm_pending_event, link);
drm_file->event_space += e->event->length;
list_del(&e->link);
spin_unlock_irq(&dev->event_lock);
kfree(e);
mask |= EPOLLIN | EPOLLRDNORM;
}
return mask;
}
static struct virtio_device_id id_table[] = { static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID }, { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 }, { 0 },
...@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); ...@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy"); MODULE_AUTHOR("Alon Levy");
static const struct file_operations virtio_gpu_driver_fops = { DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.poll = virtio_gpu_poll,
.read = drm_read,
.llseek = noop_llseek,
.mmap = drm_gem_mmap
};
static const struct drm_driver driver = { static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
......
...@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver { ...@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
spinlock_t lock; spinlock_t lock;
}; };
#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
struct virtio_gpu_fence_event { struct virtio_gpu_fence_event {
struct drm_pending_event base; struct drm_pending_event base;
struct drm_event event; struct drm_event event;
......
...@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev, ...@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
if (!e) if (!e)
return -ENOMEM; return -ENOMEM;
e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL; e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
e->event.length = sizeof(e->event); e->event.length = sizeof(e->event);
ret = drm_event_reserve_init(dev, file, &e->base, &e->event); ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
......
...@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init { ...@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
__u64 ctx_set_params; __u64 ctx_set_params;
}; };
/*
* Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
* effect. The event size is sizeof(drm_event), since there is no additional
* payload.
*/
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
#define DRM_IOCTL_VIRTGPU_MAP \ #define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment