Commit 2f73503e authored by Dave Airlie

Merge tag 'drm-misc-fixes-2024-03-28' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

bridge:
- select DRM_KMS_HELPER

dma-buf:
- fix NULL-pointer deref

dp:
- fix div-by-zero in DP MST unplug code

fbdev:
- select FB_IOMEM_FOPS for SBus

nouveau:
- dmem: handle kcalloc() allocation failures

qxl:
- remove unused variables

rockchip:
- vop2: remove support for AR30 and AB30 formats

sched:
- fix NULL-pointer deref

vmwgfx:
- debugfs: create ttm_resource_manager entry only if needed

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20240328134417.GA8673@localhost.localdomain
parents 197aa825 aba2a144
@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
 		return -ENOMEM;
 
 	chain = mock_chain(NULL, f, 1);
-	if (!chain)
+	if (chain)
+		dma_fence_enable_sw_signaling(chain);
+	else
 		err = -ENOMEM;
 
-	dma_fence_enable_sw_signaling(chain);
-
 	dma_fence_signal(f);
 	dma_fence_put(f);
...
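Note: the dma-buf fix is a classic use-before-NULL-check. mock_chain() can fail, and the old code set err but still passed the NULL chain to dma_fence_enable_sw_signaling(). A minimal userspace sketch of the fixed control flow (make_chain() and enable_signaling() are hypothetical stand-ins, not kernel API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for mock_chain()/dma_fence_enable_sw_signaling(). */
struct fence { int enabled; };

static struct fence *make_chain(int fail)
{
	return fail ? NULL : calloc(1, sizeof(struct fence));
}

static void enable_signaling(struct fence *f)
{
	f->enabled = 1;		/* dereferences f: caller must NULL-check first */
}

int main(void)
{
	struct fence *chain = make_chain(1);	/* simulate allocation failure */
	int err = 0;

	/* Fixed ordering: the object is only touched inside the NULL check.
	 * The old pattern set err, then called enable_signaling(chain) anyway. */
	if (chain)
		enable_signaling(chain);
	else
		err = -ENOMEM;

	printf("err = %d\n", err);
	free(chain);
	return 0;
}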
@@ -4111,6 +4111,13 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
 	u32 overhead = 1000000;
 	int symbol_cycles;
 
+	if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+		DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n",
+			      lane_count, hactive,
+			      bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
+		return 0;
+	}
+
 	/*
 	 * DP Standard v2.1 2.6.4.1
 	 * SSC downspread and ref clock variation margin:
...
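Note on the debug format: bpp_x16 carries bits per pixel in 1/16 units, so the integer part is bpp_x16 >> 4 and the low nibble is scaled by 625 because 1/16 = 625/10000. A standalone check of the formatting:

#include <stdio.h>

int main(void)
{
	/* bpp_x16 = 504 encodes 504/16 = 31.5 bits per pixel */
	int bpp_x16 = 504;

	/* Integer part: bpp_x16 >> 4. Fractional part: the low 4 bits count
	 * sixteenths, and 1/16 = 625/10000, so scaling by 625 yields exactly
	 * four decimal places. */
	printf("%d.%04d bpp\n", bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);	/* 31.5000 bpp */
	return 0;
}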
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 	dma_addr_t *dma_addrs;
 	struct nouveau_fence *fence;
 
-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
-	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
 
 	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
 			     npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 	migrate_device_pages(src_pfns, dst_pfns, npages);
 	nouveau_dmem_fence_done(&fence);
 	migrate_device_finalize(src_pfns, dst_pfns, npages);
-	kfree(src_pfns);
-	kfree(dst_pfns);
+	kvfree(src_pfns);
+	kvfree(dst_pfns);
 	for (i = 0; i < npages; i++)
 		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
-	kfree(dma_addrs);
+	kvfree(dma_addrs);
 }
 
 void
...
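Note: the eviction path has no way to report an error, so instead of crashing on an unchecked NULL the allocations now use __GFP_NOFAIL and kvcalloc(), which can fall back to vmalloc for large npages; the matching frees must become kvfree(). A kernel-style sketch of the pairing (alloc_pfn_array()/free_pfn_array() are illustrative names, not from the patch):

/* Kernel-style sketch (not from the patch): kvmalloc-family buffers may come
 * from kmalloc or vmalloc internally, so they must be released with kvfree(). */
#include <linux/slab.h>

static unsigned long *alloc_pfn_array(unsigned long npages)
{
	/* __GFP_NOFAIL: block until the allocation succeeds. The eviction
	 * path has no error return, so a NULL here would only defer the
	 * crash to the first dereference. */
	return kvcalloc(npages, sizeof(unsigned long),
			GFP_KERNEL | __GFP_NOFAIL);
}

static void free_pfn_array(unsigned long *pfns)
{
	kvfree(pfns);	/* never plain kfree() for kvmalloc-family memory */
}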
@@ -421,7 +421,6 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
 {
 	uint32_t handle;
 	int idr_ret;
-	int count = 0;
 again:
 	idr_preload(GFP_ATOMIC);
 	spin_lock(&qdev->surf_id_idr_lock);
@@ -433,7 +432,6 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
 	handle = idr_ret;
 
 	if (handle >= qdev->rom->n_surfaces) {
-		count++;
 		spin_lock(&qdev->surf_id_idr_lock);
 		idr_remove(&qdev->surf_id_idr, handle);
 		spin_unlock(&qdev->surf_id_idr_lock);
...
@@ -145,7 +145,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 	struct qxl_release *release;
 	struct qxl_bo *cmd_bo;
 	void *fb_cmd;
-	int i, ret, num_relocs;
+	int i, ret;
 	int unwritten;
 
 	switch (cmd->type) {
@@ -200,7 +200,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 	}
 
 	/* fill out reloc info structs */
-	num_relocs = 0;
 	for (i = 0; i < cmd->relocs_num; ++i) {
 		struct drm_qxl_reloc reloc;
 		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
@@ -230,7 +229,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 			reloc_info[i].dst_bo = cmd_bo;
 			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
 		}
-		num_relocs++;
 
 		/* reserve and validate the reloc dst bo */
 		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
...
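Note: both qxl hunks delete counters that are written but never read; gcc's -Wunused-but-set-variable (enabled by -Wall) flags exactly this pattern, which is the likely trigger for the cleanup. A minimal reproduction (hypothetical code, not the qxl sources):

/* Compile with: gcc -Wall -c example.c */
int sum_relocs(int n)
{
	int i, sum = 0;
	int num_relocs;		/* written below but never read: warns */

	num_relocs = 0;
	for (i = 0; i < n; i++) {
		sum += i;
		num_relocs++;	/* the value is never consumed */
	}
	return sum;
}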
@@ -17,9 +17,7 @@
 
 static const uint32_t formats_cluster[] = {
 	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_ARGB2101010,
 	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
 	DRM_FORMAT_XBGR8888,
...
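Note: the "AR30 and AB30" names in the pull summary are the fourcc codes behind DRM_FORMAT_ARGB2101010 and DRM_FORMAT_ABGR2101010. DRM formats are 32-bit little-endian fourcc values, which this standalone snippet demonstrates (FOURCC mirrors fourcc_code() from include/uapi/drm/drm_fourcc.h):

#include <stdio.h>
#include <stdint.h>

/* Same packing as fourcc_code() in include/uapi/drm/drm_fourcc.h */
#define FOURCC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

static void print_fourcc(uint32_t fmt)
{
	/* Unpack byte by byte to recover the human-readable name */
	for (int i = 0; i < 32; i += 8)
		putchar((fmt >> i) & 0xff);
	putchar('\n');
}

int main(void)
{
	print_fourcc(FOURCC('A', 'R', '3', '0'));	/* DRM_FORMAT_ARGB2101010 */
	print_fourcc(FOURCC('A', 'B', '3', '0'));	/* DRM_FORMAT_ABGR2101010 */
	return 0;
}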
@@ -71,13 +71,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	entity->guilty = guilty;
 	entity->num_sched_list = num_sched_list;
 	entity->priority = priority;
+	/*
+	 * It's perfectly valid to initialize an entity without having a valid
+	 * scheduler attached. It's just not valid to use the scheduler before it
+	 * is initialized itself.
+	 */
 	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
 	RCU_INIT_POINTER(entity->last_scheduled, NULL);
 	RB_CLEAR_NODE(&entity->rb_tree_node);
 
-	if (!sched_list[0]->sched_rq) {
-		/* Warn drivers not to do this and to fix their DRM
-		 * calling order.
+	if (num_sched_list && !sched_list[0]->sched_rq) {
+		/* Since every entry covered by num_sched_list
+		 * should be non-NULL and therefore we warn drivers
+		 * not to do this and to fix their DRM calling order.
 		 */
 		pr_warn("%s: called with uninitialized scheduler\n", __func__);
 	} else if (num_sched_list) {
...
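Note: with the num_sched_list guard, drm_sched_entity_init() tolerates an entity that has no ready scheduler yet, exactly as the new comment states; previously sched_list[0]->sched_rq was read unconditionally and oopsed. A hedged sketch of the now-valid early-init call, assuming the upstream signature from <drm/gpu_scheduler.h> (my_entity and empty_list are hypothetical driver-side names):

/* Kernel-style sketch; my_entity and empty_list are hypothetical driver-side
 * names, not from the patch. */
#include <drm/gpu_scheduler.h>

static struct drm_sched_entity my_entity;
static struct drm_gpu_scheduler *empty_list[] = { NULL };

static int example_early_init(void)
{
	/* num_sched_list == 0: no scheduler is attached yet. Before this
	 * fix, sched_list[0]->sched_rq was dereferenced unconditionally. */
	return drm_sched_entity_init(&my_entity, DRM_SCHED_PRIORITY_NORMAL,
				     empty_list, 0, NULL);
}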
@@ -1444,12 +1444,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
 					    root, "system_ttm");
 	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
 					    root, "vram_ttm");
-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
-					    root, "gmr_ttm");
-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
-					    root, "mob_ttm");
-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
-					    root, "system_mob_ttm");
+	if (vmw->has_gmr)
+		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+						    root, "gmr_ttm");
+	if (vmw->has_mob) {
+		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+						    root, "mob_ttm");
+		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+						    root, "system_mob_ttm");
+	}
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
...
@@ -494,6 +494,7 @@ config FB_SBUS_HELPERS
 	select FB_CFB_COPYAREA
 	select FB_CFB_FILLRECT
 	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_FOPS
 
 config FB_BW2
 	bool "BWtwo support"
@@ -514,6 +515,7 @@ config FB_CG6
 	depends on (FB = y) && (SPARC && FB_SBUS)
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_FOPS
 	help
 	  This is the frame buffer device driver for the CGsix (GX, TurboGX)
 	  frame buffer.
@@ -523,6 +525,7 @@ config FB_FFB
 	depends on FB_SBUS && SPARC64
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_FOPS
 	help
 	  This is the frame buffer device driver for the Creator, Creator3D,
 	  and Elite3D graphics boards.
...
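Note: FB_IOMEM_FOPS builds the fb_io_read()/fb_io_write()/fb_io_mmap() helpers that were split out of the fbdev core; a driver whose fb_ops references them fails to link unless its Kconfig entry selects the symbol. A sketch of how such a reference typically looks (examplefb is a made-up driver name; the macros come from <linux/fb.h>):

/* Kernel-style sketch; "examplefb" is hypothetical. The __FB_DEFAULT_IOMEM_OPS_*
 * macros wire up fb_io_read()/fb_io_write()/fb_io_mmap(), which are only built
 * when CONFIG_FB_IOMEM_FOPS is enabled. */
#include <linux/fb.h>
#include <linux/module.h>

static const struct fb_ops examplefb_ops = {
	.owner		= THIS_MODULE,
	__FB_DEFAULT_IOMEM_OPS_RDWR,	/* .fb_read/.fb_write = fb_io_read/fb_io_write */
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	__FB_DEFAULT_IOMEM_OPS_MMAP,	/* .fb_mmap = fb_io_mmap */
};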