Commit 61502e3b authored by Dave Airlie

Merge tag 'drm-misc-fixes-2021-01-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Short summary of fixes pull:

 * dma-buf: Fix a memory leak in CMA heap
 * drm: Fix format check for legacy pageflips
 * ttm: Pass correct address to dma_mapping_error(); Use mutex in pool
   shrinker
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/X/2iXO4ofFSZ39/v@linux-uq9g
parents 7c53f6b6 bb52cb0d
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
 		buffer->vaddr = NULL;
 	}
 
+	/* free page list */
+	kfree(buffer->pages);
+	/* release memory */
 	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
 	kfree(buffer);
 }
...
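The leak being fixed: when a CMA heap buffer is created, a kernel array of page pointers is allocated alongside the CMA allocation itself, but the release path only returned the CMA memory and freed the buffer struct, leaving that array behind. A rough sketch of the pairing, assuming the allocation side uses kmalloc_array() as elsewhere in the cma_heap code (the allocation line is not part of this diff):

	/* allocation side (assumed, for illustration only; not in this diff) */
	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);

	/* release side now undoes it, per the hunk above */
	kfree(buffer->pages);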
@@ -1163,7 +1163,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	if (ret)
 		goto out;
 
-	if (old_fb->format != fb->format) {
+	/*
+	 * Only check the FOURCC format code, excluding modifiers. This is
+	 * enough for all legacy drivers. Atomic drivers have their own
+	 * checks in their ->atomic_check implementation, which will
+	 * return -EINVAL if any hw or driver constraint is violated due
+	 * to modifier changes.
+	 */
+	if (old_fb->format->format != fb->format->format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
 		ret = -EINVAL;
 		goto out;
...
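Background on the format check: drm_framebuffer::format points to a struct drm_format_info, and drivers can supply their own format-info structures (for example via a get_format_info hook), so two framebuffers carrying the same pixel format are not guaranteed to share the same pointer. Comparing the u32 FOURCC code inside the info is what a legacy page flip actually needs. An abridged sketch of the relevant fields (everything else omitted):

	struct drm_framebuffer {
		const struct drm_format_info *format;	/* may be driver-provided */
		...
	};

	struct drm_format_info {
		u32 format;				/* FOURCC code */
		...
	};

so the fixed condition compares fb->format->format, the FOURCC value, instead of the info pointers.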
@@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 
-static spinlock_t shrinker_lock;
+static struct mutex shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker mm_shrinker;
@@ -190,7 +190,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
 		size_t size = (1ULL << order) * PAGE_SIZE;
 
 		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(pool->dev, **dma_addr))
+		if (dma_mapping_error(pool->dev, addr))
 			return -EFAULT;
 	}
@@ -249,9 +249,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
 	spin_lock_init(&pt->lock);
 	INIT_LIST_HEAD(&pt->pages);
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	list_add_tail(&pt->shrinker_list, &shrinker_list);
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 }
 
 /* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +259,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
 	struct page *p, *tmp;
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	list_del(&pt->shrinker_list);
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 
 	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +302,7 @@ static unsigned int ttm_pool_shrink(void)
 	unsigned int num_freed;
 	struct page *p;
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
 	p = ttm_pool_type_take(pt);
@@ -314,7 +314,7 @@ static unsigned int ttm_pool_shrink(void)
 	}
 
 	list_move_tail(&pt->shrinker_list, &shrinker_list);
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 
 	return num_freed;
 }
@@ -564,7 +564,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 {
 	unsigned int i;
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	seq_puts(m, "\t ");
 	for (i = 0; i < MAX_ORDER; ++i)
@@ -600,7 +600,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
 		   atomic_long_read(&allocated_pages), page_pool_size);
 
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 
 	return 0;
 }
@@ -644,7 +644,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	if (!page_pool_size)
 		page_pool_size = num_pages;
 
-	spin_lock_init(&shrinker_lock);
+	mutex_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
 	for (i = 0; i < MAX_ORDER; ++i) {
...
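Two separate fixes land in ttm_pool.c above. First, dma_mapping_error() has to be given the dma_addr_t handle that dma_map_page() just returned; the old code dereferenced the caller's output pointer, so it validated a value that had not been written yet. A generic sketch of the correct pattern (dev, p and size are placeholders here, not the ttm_pool_map() variables):

	dma_addr_t addr;

	addr = dma_map_page(dev, p, 0, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))	/* check the handle just returned */
		return -EFAULT;
	/* only after the check is the address stored for the caller */

Second, the shrinker list lock changes from a spinlock to a mutex: freeing pooled pages can sleep (for instance when write-back caching attributes have to be restored), and sleeping is not allowed while a spinlock is held, so the list needs a sleepable lock.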