Commit 67b7836d authored by Dmitry Osipenko

drm/shmem-helper: Switch to reservation lock

Replace all drm-shmem locks with the GEM reservation lock. This makes the
locking consistent with the dma-buf locking convention, where importers are
responsible for holding the reservation lock for all operations performed
on dma-bufs, preventing deadlocks between dma-buf importers and exporters.
Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://lore.kernel.org/all/20230108210445.3948344-8-dmitry.osipenko@collabora.com/
parent 67fe7487
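
For orientation, the caller-side pattern this commit establishes looks roughly like the sketch below. It is illustrative only and not part of the patch; my_driver_purge_object() is a made-up name, while dma_resv_lock()/dma_resv_unlock(), drm_gem_shmem_is_purgeable() and drm_gem_shmem_purge() are the real interfaces involved.

/* Illustrative sketch, not part of the patch. */
static void my_driver_purge_object(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;

        /* The caller (importer) takes the GEM reservation lock ... */
        dma_resv_lock(obj->resv, NULL);

        /* ... and the shmem helpers now expect it to be held. */
        if (drm_gem_shmem_is_purgeable(shmem))
                drm_gem_shmem_purge(shmem);

        dma_resv_unlock(obj->resv);
}
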
This diff is collapsed and is not shown here.
@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
         new_size = min(new_size, bo->base.base.size);
 
-        mutex_lock(&bo->base.pages_lock);
+        dma_resv_lock(bo->base.base.resv, NULL);
 
         if (bo->base.pages) {
                 pages = bo->base.pages;
@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
                                        sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
                 if (!pages) {
-                        mutex_unlock(&bo->base.pages_lock);
+                        dma_resv_unlock(bo->base.base.resv);
                         return -ENOMEM;
                 }
@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                 struct page *page = shmem_read_mapping_page(mapping, i);
 
                 if (IS_ERR(page)) {
-                        mutex_unlock(&bo->base.pages_lock);
+                        dma_resv_unlock(bo->base.base.resv);
                         return PTR_ERR(page);
                 }
                 pages[i] = page;
         }
 
-        mutex_unlock(&bo->base.pages_lock);
+        dma_resv_unlock(bo->base.base.resv);
 
         ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
                                         new_size, GFP_KERNEL);
@@ -407,6 +407,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
         bo = to_panfrost_bo(gem_obj);
 
+        ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+        if (ret)
+                goto out_put_object;
+
         mutex_lock(&pfdev->shrinker_lock);
         mutex_lock(&bo->mappings.lock);
         if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -444,7 +448,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 out_unlock_mappings:
         mutex_unlock(&bo->mappings.lock);
         mutex_unlock(&pfdev->shrinker_lock);
+        dma_resv_unlock(bo->base.base.resv);
 
+out_put_object:
         drm_gem_object_put(gem_obj);
         return ret;
 }
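
A rough sketch of the interruptible variant used on the ioctl path above; my_set_madvise_locked() is a made-up placeholder for the driver's madvise bookkeeping, everything else is the dma-resv API.

/* Illustrative sketch; my_set_madvise_locked() is a placeholder. */
static int my_madvise(struct drm_gem_object *obj, u32 madv)
{
        int ret;

        /* Backs off with an error (typically -EINTR) if a signal interrupts the wait. */
        ret = dma_resv_lock_interruptible(obj->resv, NULL);
        if (ret)
                return ret;

        ret = my_set_madvise_locked(obj, madv);

        dma_resv_unlock(obj->resv);
        return ret;
}
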
@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
         if (!mutex_trylock(&bo->mappings.lock))
                 return false;
 
-        if (!mutex_trylock(&shmem->pages_lock))
+        if (!dma_resv_trylock(shmem->base.resv))
                 goto unlock_mappings;
 
         panfrost_gem_teardown_mappings_locked(bo);
-        drm_gem_shmem_purge_locked(&bo->base);
+        drm_gem_shmem_purge(&bo->base);
         ret = true;
 
-        mutex_unlock(&shmem->pages_lock);
+        dma_resv_unlock(shmem->base.resv);
 
 unlock_mappings:
         mutex_unlock(&bo->mappings.lock);
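
Reclaim context should not block on a contended lock, which is why the shrinker path above uses dma_resv_trylock(). A minimal sketch of that shape, with my_try_purge() as a made-up wrapper around the real helpers:

/* Illustrative sketch; skip busy objects instead of blocking in reclaim. */
static bool my_try_purge(struct drm_gem_shmem_object *shmem)
{
        if (!dma_resv_trylock(shmem->base.resv))
                return false;           /* contended: retry on a later scan */

        if (drm_gem_shmem_is_purgeable(shmem))
                drm_gem_shmem_purge(shmem);

        dma_resv_unlock(shmem->base.resv);
        return true;
}
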
@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         struct panfrost_gem_mapping *bomapping;
         struct panfrost_gem_object *bo;
         struct address_space *mapping;
+        struct drm_gem_object *obj;
         pgoff_t page_offset;
         struct sg_table *sgt;
         struct page **pages;
@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         page_offset = addr >> PAGE_SHIFT;
         page_offset -= bomapping->mmnode.start;
 
-        mutex_lock(&bo->base.pages_lock);
+        obj = &bo->base.base;
+
+        dma_resv_lock(obj->resv, NULL);
 
         if (!bo->base.pages) {
                 bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
                                      sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
                 if (!bo->sgts) {
-                        mutex_unlock(&bo->base.pages_lock);
                         ret = -ENOMEM;
-                        goto err_bo;
+                        goto err_unlock;
                 }
 
                 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                 if (!pages) {
                         kvfree(bo->sgts);
                         bo->sgts = NULL;
-                        mutex_unlock(&bo->base.pages_lock);
                         ret = -ENOMEM;
-                        goto err_bo;
+                        goto err_unlock;
                 }
                 bo->base.pages = pages;
                 bo->base.pages_use_count = 1;
@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                 pages = bo->base.pages;
                 if (pages[page_offset]) {
                         /* Pages are already mapped, bail out. */
-                        mutex_unlock(&bo->base.pages_lock);
                         goto out;
                 }
         }
@@ -502,14 +502,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
                 pages[i] = shmem_read_mapping_page(mapping, i);
                 if (IS_ERR(pages[i])) {
-                        mutex_unlock(&bo->base.pages_lock);
                         ret = PTR_ERR(pages[i]);
                         goto err_pages;
                 }
         }
 
-        mutex_unlock(&bo->base.pages_lock);
-
         sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
         ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                         NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -528,6 +525,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
 out:
+        dma_resv_unlock(obj->resv);
+
         panfrost_gem_mapping_put(bomapping);
 
         return 0;
@@ -536,6 +535,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
         sg_free_table(sgt);
 err_pages:
         drm_gem_shmem_put_pages(&bo->base);
+err_unlock:
+        dma_resv_unlock(obj->resv);
 err_bo:
         panfrost_gem_mapping_put(bomapping);
         return ret;
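
Because the reservation lock is now held across the whole fault-handling path, the per-branch unlock calls above collapse into single unlock sites behind goto labels. A generic sketch of that unwind shape, where my_prepare() and my_map() are made-up placeholder steps:

/* Illustrative sketch of the single-unlock error unwind. */
static int my_fault_path(struct drm_gem_object *obj)
{
        int ret;

        dma_resv_lock(obj->resv, NULL);

        ret = my_prepare(obj);          /* placeholder step */
        if (ret)
                goto err_unlock;

        ret = my_map(obj);              /* placeholder step */
        if (ret)
                goto err_unlock;

        dma_resv_unlock(obj->resv);
        return 0;

err_unlock:
        dma_resv_unlock(obj->resv);
        return ret;
}
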
@@ -26,11 +26,6 @@ struct drm_gem_shmem_object {
          */
         struct drm_gem_object base;
 
-        /**
-         * @pages_lock: Protects the page table and use count
-         */
-        struct mutex pages_lock;
-
         /**
          * @pages: Page table
          */
@@ -65,11 +60,6 @@ struct drm_gem_shmem_object {
          */
         struct sg_table *sgt;
 
-        /**
-         * @vmap_lock: Protects the vmap address and use count
-         */
-        struct mutex vmap_lock;
-
         /**
          * @vaddr: Kernel virtual address of the backing memory
          */
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
                 !shmem->base.dma_buf && !shmem->base.import_attach;
 }
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
 
 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
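
With @pages_lock and @vmap_lock gone from struct drm_gem_shmem_object, internal helpers can document their locking requirement against the reservation lock instead. A sketch of that idea; my_shmem_op_locked() is made up, while dma_resv_assert_held() is the real lockdep annotation:

/* Illustrative sketch of asserting the new locking rule. */
static void my_shmem_op_locked(struct drm_gem_shmem_object *shmem)
{
        dma_resv_assert_held(shmem->base.resv);

        /* shmem->pages and shmem->vaddr may be touched safely here. */
}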