Commit ce22e89e authored by Dave Airlie

Merge tag 'drm-misc-fixes-2023-08-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

A samsung-dsim initialization fix, a devfreq fix for panfrost, a DP DSC
define fix, a recursive lock fix for dma-buf, a shader validation fix
and a reference counting fix for vmwgfx
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/amy26vu5xbeeikswpx7nt6rddwfocdidshrtt2qovipihx5poj@y45p3dtzrloc
parents 706a7415 f9e96bf1
@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
LIST_HEAD(signalled);
struct sync_pt *pt, *next;
trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;
list_del_init(&pt->link);
dma_fence_get(&pt->base);
list_move_tail(&pt->link, &signalled);
rb_erase(&pt->node, &obj->pt_tree);
/*
* A signal callback may release the last reference to this
* fence, causing it to be freed. That operation has to be
* last to avoid a use after free inside this loop, and must
* be after we remove the fence from the timeline in order to
* prevent deadlocking on timeline->lock inside
* timeline_fence_release().
*/
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
list_for_each_entry_safe(pt, next, &signalled, link) {
list_del_init(&pt->link);
dma_fence_put(&pt->base);
}
}
/**
......
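Note on the dma-buf fix above: the deleted comment explained why dma_fence_signal_locked() had to come last; the new code additionally defers the final dma_fence_put() until after obj->lock is dropped, because a last-reference put runs timeline_fence_release(), which takes the same lock. Below is a minimal, self-contained sketch of that deferred-put pattern using generic kernel primitives; the item/owner types and names are hypothetical, not sw_sync symbols.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {				/* hypothetical, for illustration only */
	struct list_head link;
	struct kref ref;
	bool ready;
};

struct owner {
	spinlock_t lock;
	struct list_head pending;
};

static void item_release(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

static void signal_ready_items(struct owner *owner)
{
	LIST_HEAD(done);
	struct item *it, *next;

	spin_lock_irq(&owner->lock);
	list_for_each_entry_safe(it, next, &owner->pending, link) {
		if (!it->ready)
			break;
		kref_get(&it->ref);		  /* keep 'it' alive past the unlock */
		list_move_tail(&it->link, &done); /* detach from the shared list */
	}
	spin_unlock_irq(&owner->lock);

	/*
	 * The final put may free the item (and, in sw_sync, the fence
	 * release path re-takes the timeline lock), so it runs unlocked.
	 */
	list_for_each_entry_safe(it, next, &done, link) {
		list_del_init(&it->link);
		kref_put(&it->ref, item_release);
	}
}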
@@ -1386,6 +1386,18 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
disable_irq(dsi->irq);
}
static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
{
u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
if (enable)
reg |= DSIM_FORCE_STOP_STATE;
else
reg &= ~DSIM_FORCE_STOP_STATE;
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
}
static int samsung_dsim_init(struct samsung_dsim *dsi)
{
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1445,15 +1457,12 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
u32 reg;
if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
samsung_dsim_set_display_mode(dsi);
samsung_dsim_set_display_enable(dsi, true);
} else {
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
reg &= ~DSIM_FORCE_STOP_STATE;
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
samsung_dsim_set_stop_state(dsi, false);
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1463,16 +1472,12 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
u32 reg;
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
reg |= DSIM_FORCE_STOP_STATE;
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
}
if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
samsung_dsim_set_stop_state(dsi, true);
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1775,6 +1780,8 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
if (ret)
return ret;
samsung_dsim_set_stop_state(dsi, false);
ret = mipi_dsi_create_packet(&xfer.packet, msg);
if (ret < 0)
return ret;
......
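Note on the samsung-dsim fix above: besides atomic_enable()/atomic_disable(), the new samsung_dsim_set_stop_state() helper is called from samsung_dsim_host_transfer(), so the forced stop state is cleared before any packet is sent, even when a downstream device transfers DSI commands before the bridge is enabled. A hedged sketch of such a caller, using the standard mipi_dsi DCS helpers (the panel function itself is hypothetical):

#include <drm/drm_mipi_dsi.h>
#include <linux/delay.h>

/*
 * Hypothetical panel prepare step: these transfers end up in
 * samsung_dsim_host_transfer(), which now drops the forced stop
 * state before building and sending the packet.
 */
static int example_panel_prepare(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;

	msleep(120);	/* typical sleep-out delay */

	return mipi_dsi_dcs_set_display_on(dsi);
}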
@@ -96,7 +96,7 @@ static int panfrost_read_speedbin(struct device *dev)
* keep going without it; any other error means that we are
* supposed to read the bin value, but we failed doing so.
*/
if (ret != -ENOENT) {
if (ret != -ENOENT && ret != -EOPNOTSUPP) {
DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
return ret;
}
......
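Note on the panfrost fix above: the check now also tolerates -EOPNOTSUPP, which the nvmem consumer API returns when nvmem support is not built in, so such a kernel simply skips speed binning instead of failing devfreq setup. A minimal sketch of that tolerant lookup (the wrapper name is hypothetical; nvmem_cell_read_variable_le_u32() is the standard consumer helper):

#include <linux/device.h>
#include <linux/nvmem-consumer.h>

/*
 * Hypothetical helper: treat "no speed bin available" the same whether the
 * platform simply lacks the cell (-ENOENT) or nvmem support is not built in
 * at all (-EOPNOTSUPP); only real read failures are propagated.
 */
static int example_read_speedbin(struct device *dev, u32 *val)
{
	int ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", val);

	if (ret == -ENOENT || ret == -EOPNOTSUPP) {
		*val = 0;	/* no binning: keep the default OPP table */
		return 0;
	}

	return ret;
}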
@@ -497,10 +497,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
ttm_bo_put(&vmw_bo->tbo);
vmw_user_bo_unref(vmw_bo);
}
drm_gem_object_put(&vmw_bo->tbo.base);
return ret;
}
@@ -540,8 +539,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return ret;
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
drm_gem_object_put(&vbo->tbo.base);
vmw_user_bo_unref(vbo);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
......
@@ -195,6 +195,14 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
return buf;
}
static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
{
if (vbo) {
ttm_bo_put(&vbo->tbo);
drm_gem_object_put(&vbo->tbo.base);
}
}
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
return container_of((gobj), struct vmw_bo, tbo.base);
......
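Note on the vmwgfx reference-counting fix: the vmw_user_bo_unref() helper added above pairs the TTM put with the GEM put, so every successful user-space lookup is balanced by exactly one call instead of open-coded puts (some of which, as in the synccpu paths, ran even when the lookup had failed). A hedged sketch of the intended caller pattern; the example function is hypothetical and the vmw_user_bo_lookup() signature is assumed:

/*
 * Sketch of the lookup/unref pairing (the vmw_user_bo_lookup() signature is
 * assumed here for illustration): every successful lookup is balanced by
 * exactly one vmw_user_bo_unref(), never by open-coded ttm/gem puts, and
 * nothing is put when the lookup itself failed.
 */
static int example_use_user_bo(struct drm_file *filp, u32 handle)
{
	struct vmw_bo *vbo;
	int ret = vmw_user_bo_lookup(filp, handle, &vbo);

	if (ret)
		return ret;		/* lookup failed: no reference to drop */

	/* ... use vbo ... */

	vmw_user_bo_unref(vbo);		/* drops the TTM and GEM references together */
	return 0;
}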
@@ -1513,4 +1513,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}
static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
u32 shader_type)
{
SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
if (shader_model >= VMW_SM_5)
max_allowed = SVGA3D_SHADERTYPE_MAX;
else if (shader_model >= VMW_SM_4)
max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
}
#endif
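Note on the shader validation fix: vmw_shadertype_is_valid() centralizes the range check that the DX command paths previously open-coded, adding the lower SVGA3D_SHADERTYPE_MIN bound that some of them missed and picking the upper bound from the device's shader model. A few illustrative checks, assuming the usual SVGA3D_SHADERTYPE_* ordering (MIN = VS, then PS, with GS at the pre-DX limit and HS/DS/CS only above the DX10 limit); the function below is purely illustrative:

/*
 * Illustrative expectations under the ordering assumption stated above;
 * this helper exists only for illustration and is not part of the driver.
 */
static void example_shadertype_checks(void)
{
	WARN_ON(!vmw_shadertype_is_valid(VMW_SM_LEGACY, SVGA3D_SHADERTYPE_PS));
	WARN_ON(vmw_shadertype_is_valid(VMW_SM_LEGACY, SVGA3D_SHADERTYPE_GS));
	WARN_ON(!vmw_shadertype_is_valid(VMW_SM_4, SVGA3D_SHADERTYPE_GS));
	WARN_ON(vmw_shadertype_is_valid(VMW_SM_4, SVGA3D_SHADERTYPE_CS));
	WARN_ON(!vmw_shadertype_is_valid(VMW_SM_5, SVGA3D_SHADERTYPE_CS));
	WARN_ON(vmw_shadertype_is_valid(VMW_SM_5, SVGA3D_SHADERTYPE_INVALID));
}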
@@ -1164,8 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->tbo);
drm_gem_object_put(&vmw_bo->tbo.base);
vmw_user_bo_unref(vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1221,8 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->tbo);
drm_gem_object_put(&vmw_bo->tbo.base);
vmw_user_bo_unref(vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1992,7 +1990,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
@@ -2115,8 +2113,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
@@ -2133,6 +2129,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
(unsigned int) cmd->body.slot);
return -EINVAL;
}
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
@@ -2141,14 +2145,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
if (binding.shader_slot >= max_shader_num ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
(unsigned int) binding.slot);
return -EINVAL;
}
vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
binding.slot);
@@ -2207,15 +2203,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
container_of(header, typeof(*cmd), header);
SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= max_allowed) {
!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2239,8 +2233,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
@@ -2251,8 +2243,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= max_allowed ||
cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
......
@@ -1665,10 +1665,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) {
vmw_bo_unreference(&bo);
drm_gem_object_put(&bo->tbo.base);
}
if (bo)
vmw_user_bo_unref(bo);
if (surface)
vmw_surface_unreference(&surface);
......
@@ -451,8 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
drm_gem_object_put(&buf->tbo.base);
vmw_user_bo_unref(buf);
out_unlock:
mutex_unlock(&overlay->mutex);
......
@@ -809,8 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
drm_gem_object_put(&buffer->tbo.base);
vmw_user_bo_unref(buffer);
return ret;
}
......
@@ -1537,7 +1537,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
#define DP_LTTPR_COMMON_CAP_SIZE 8
......
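Note on the DP define fix: the DSC receiver capability block covers DPCD registers 0x60 through 0x6F inclusive, i.e. 16 bytes, so the old 0xf value left the last byte unread. A hedged sketch of reading the full block with the core DPCD helper (the wrapper function is hypothetical; drm_dp_dpcd_read() is the standard AUX helper):

#include <drm/display/drm_dp_helper.h>

/*
 * Sketch: cache the full DSC capability block (DPCD 0x60-0x6F). With the
 * old 0xf size the final byte at 0x6F was never read.
 */
static int example_read_dsc_caps(struct drm_dp_aux *aux,
				 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
	ssize_t ret = drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
				       DP_DSC_RECEIVER_CAP_SIZE);

	if (ret < 0)
		return ret;

	return ret == DP_DSC_RECEIVER_CAP_SIZE ? 0 : -EIO;
}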