Commit 591a495d authored by Linus Torvalds

Merge tag 'drm-fixes-2021-10-15-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "It has a few scattered msm and i915 fixes, a few core fixes and a
  mediatek feature revert.

  I've had to cherry-pick a number of patches into this, as the
  drm-misc-fixes tree had a bunch of vc4 patches I wasn't comfortable
  sending to you as part of this pull; they had been delayed by your
  reverts. If they're really useful as fixes, I'll do a separate pull.

  Summary:

  Core:
   - clamp fbdev size
   - cap EDID extension blocks read to avoid out-of-bounds access

  panel:
   - fix missing crc32 dependency

  msm:
   - Fix a new crash on dev file close if the dev file was opened when
     the GPU is not loaded (e.g. missing firmware in the initrd)
   - Switch to single drm_sched_entity per priority level per drm_file
     to unbreak multi-context userspace
   - Serialize GMU access to fix GMU OOB errors
   - Various error path fixes
   - A couple of integer overflow fixes
   - Fix mdp5 cursor plane WARNs

  i915:
   - Fix ACPI object leak
   - Fix context leak in user proto-context creation
   - Fix missing i915_sw_fence_fini call

  hyperv:
   - hide hw pointer

  nouveau:
   - fix engine selection bit

  r128:
   - fix UML build

  rcar-du:
   - unconnected LVDS regression fix

  mediatek:
   - revert CMDQ refinement patches"

* tag 'drm-fixes-2021-10-15-1' of git://anongit.freedesktop.org/drm/drm: (34 commits)
  drm/panel: olimex-lcd-olinuxino: select CRC32
  drm/r128: fix build for UML
  drm/nouveau/fifo: Reinstate the correct engine bit programming
  drm/hyperv: Fix double mouse pointers
  drm/fbdev: Clamp fbdev surface size if too large
  drm/edid: In connector_bad_edid() cap num_of_ext by num_blocks read
  drm/i915: Free the returned object of acpi_evaluate_dsm()
  drm/i915: Fix bug in user proto-context creation that leaked contexts
  drm: rcar-du: Don't create encoder for unconnected LVDS outputs
  drm/msm/dsi: fix off by one in dsi_bus_clk_enable error handling
  drm/msm/dsi: Fix an error code in msm_dsi_modeset_init()
  drm/msm/dsi: dsi_phy_14nm: Take ready-bit into account in poll_for_ready
  drm/msm/dsi/phy: fix clock names in 28nm_8960 phy
  drm/msm/dpu: Fix address of SM8150 PINGPONG5 IRQ register
  drm/msm: Do not run snapshot on non-DPU devices
  drm/msm/a3xx: fix error handling in a3xx_gpu_init()
  drm/msm/a4xx: fix error handling in a4xx_gpu_init()
  drm/msm: Fix null pointer dereference on pointer edp
  drm/msm/mdp5: fix cursor-related warnings
  drm/msm: Avoid potential overflow in timeout_to_jiffies()
  ...
parents 86a44e90 a14bc107
@@ -1834,11 +1834,20 @@ static void connector_bad_edid(struct drm_connector *connector,
                               u8 *edid, int num_blocks)
 {
     int i;
-    u8 num_of_ext = edid[0x7e];
+    u8 last_block;
+
+    /*
+     * 0x7e in the EDID is the number of extension blocks. The EDID
+     * is 1 (base block) + num_ext_blocks big. That means we can think
+     * of 0x7e in the EDID of the _index_ of the last block in the
+     * combined chunk of memory.
+     */
+    last_block = edid[0x7e];

     /* Calculate real checksum for the last edid extension block data */
-    connector->real_edid_checksum =
-        drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
+    if (last_block < num_blocks)
+        connector->real_edid_checksum =
+            drm_edid_block_checksum(edid + last_block * EDID_LENGTH);

     if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
         return;
...
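For illustration, here is a minimal, self-contained sketch of the out-of-bounds read this hunk closes. The helper names are hypothetical (this is not the kernel code): byte 0x7e of the base block is device-controlled and may claim more extension blocks than were actually read into the buffer, so indexing by it without a cap reads past the allocation.

#include <stdint.h>
#include <stddef.h>

#define EDID_LENGTH 128

static uint8_t block_checksum(const uint8_t *block)
{
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < EDID_LENGTH; i++)
        sum += block[i];
    return sum;
}

/* num_blocks is how many 128-byte blocks were actually read. */
static uint8_t last_block_checksum(const uint8_t *edid, int num_blocks)
{
    uint8_t last_block = edid[0x7e];    /* index of the last block */

    /* Without this cap, a bogus 0x7e value indexes past the buffer. */
    if (last_block >= num_blocks)
        return 0;

    return block_checksum(edid + (size_t)last_block * EDID_LENGTH);
}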
@@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 {
     struct drm_client_dev *client = &fb_helper->client;
     struct drm_device *dev = fb_helper->dev;
+    struct drm_mode_config *config = &dev->mode_config;
     int ret = 0;
     int crtc_count = 0;
     struct drm_connector_list_iter conn_iter;
@@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
     /* Handle our overallocation */
     sizes.surface_height *= drm_fbdev_overalloc;
     sizes.surface_height /= 100;
+    if (sizes.surface_height > config->max_height) {
+        drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
+                    config->max_height);
+        sizes.surface_height = config->max_height;
+    }

     /* push down into drivers */
     ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
...
@@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv);
 int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
 int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
                             u32 w, u32 h, u32 pitch);
+int hyperv_hide_hw_ptr(struct hv_device *hdev);
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
 int hyperv_connect_vsp(struct hv_device *hdev);
...
@@ -101,6 +101,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
     struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
     struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);

+    hyperv_hide_hw_ptr(hv->hdev);
     hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
                             crtc_state->mode.hdisplay,
                             crtc_state->mode.vdisplay,
...
@@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
     return 0;
 }

+/*
+ * Hyper-V supports a hardware cursor feature. It's not used by Linux VM,
+ * but the Hyper-V host still draws a point as an extra mouse pointer,
+ * which is unwanted, especially when Xorg is running.
+ *
+ * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted
+ * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the
+ * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't
+ * work in tests.
+ *
+ * Copy synthvid_send_ptr() to hyperv_drm and rename it to
+ * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the
+ * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still
+ * draws an extra unwanted mouse pointer after the VM Connection window is
+ * closed and reopened.
+ */
+int hyperv_hide_hw_ptr(struct hv_device *hdev)
+{
+    struct synthvid_msg msg;
+
+    memset(&msg, 0, sizeof(struct synthvid_msg));
+    msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
+    msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+        sizeof(struct synthvid_pointer_position);
+    msg.ptr_pos.is_visible = 1;
+    msg.ptr_pos.video_output = 0;
+    msg.ptr_pos.image_x = 0;
+    msg.ptr_pos.image_y = 0;
+    hyperv_sendpacket(hdev, &msg);
+
+    memset(&msg, 0, sizeof(struct synthvid_msg));
+    msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
+    msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+        sizeof(struct synthvid_pointer_shape);
+    msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE;
+    msg.ptr_shape.is_argb = 1;
+    msg.ptr_shape.width = 1;
+    msg.ptr_shape.height = 1;
+    msg.ptr_shape.hot_x = 0;
+    msg.ptr_shape.hot_y = 0;
+    msg.ptr_shape.data[0] = 0;
+    msg.ptr_shape.data[1] = 1;
+    msg.ptr_shape.data[2] = 1;
+    msg.ptr_shape.data[3] = 1;
+    hyperv_sendpacket(hdev, &msg);
+
+    return 0;
+}
+
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
 {
     struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
@@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev)
         return;
     }

-    if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+    if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
         hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+        if (hv->dirt_needed)
+            hyperv_hide_hw_ptr(hv->hdev);
+    }
 }

 static void hyperv_receive(void *ctx)
...
@@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
 {
     struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
     acpi_handle dhandle;
+    union acpi_object *obj;

     dhandle = ACPI_HANDLE(&pdev->dev);
     if (!dhandle)
         return;

-    acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
-                      INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+    obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+                            INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+    if (obj)
+        ACPI_FREE(obj);
 }

 /*
...
@@ -937,6 +937,10 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
     unsigned int n;

     e = alloc_engines(num_engines);
+    if (!e)
+        return ERR_PTR(-ENOMEM);
+    e->num_engines = num_engines;
+
     for (n = 0; n < num_engines; n++) {
         struct intel_context *ce;
         int ret;
@@ -970,7 +974,6 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
             goto free_engines;
         }
     }
-    e->num_engines = num_engines;

     return e;
...
@@ -421,6 +421,7 @@ void intel_context_fini(struct intel_context *ce)

     mutex_destroy(&ce->pin_mutex);
     i915_active_fini(&ce->active);
+    i915_sw_fence_fini(&ce->guc_blocked);
 }

 void i915_context_module_exit(void)
...
@@ -4,8 +4,6 @@
  */

 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/mailbox_controller.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
@@ -52,11 +50,8 @@ struct mtk_drm_crtc {
     bool pending_async_planes;

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-    struct mbox_client cmdq_cl;
-    struct mbox_chan *cmdq_chan;
-    struct cmdq_pkt cmdq_handle;
+    struct cmdq_client *cmdq_client;
     u32 cmdq_event;
-    u32 cmdq_vblank_cnt;
 #endif

     struct device *mmsys_dev;
@@ -227,79 +222,9 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 }

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
-                                   size_t size)
-{
-    struct device *dev;
-    dma_addr_t dma_addr;
-
-    pkt->va_base = kzalloc(size, GFP_KERNEL);
-    if (!pkt->va_base) {
-        kfree(pkt);
-        return -ENOMEM;
-    }
-    pkt->buf_size = size;
-
-    dev = chan->mbox->dev;
-    dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
-                              DMA_TO_DEVICE);
-    if (dma_mapping_error(dev, dma_addr)) {
-        dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
-        kfree(pkt->va_base);
-        kfree(pkt);
-        return -ENOMEM;
-    }
-
-    pkt->pa_base = dma_addr;
-
-    return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
-{
-    dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
-                     DMA_TO_DEVICE);
-    kfree(pkt->va_base);
-    kfree(pkt);
-}
-
-static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
-{
-    struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
-    struct cmdq_cb_data *data = mssg;
-    struct mtk_crtc_state *state;
-    unsigned int i;
-
-    state = to_mtk_crtc_state(mtk_crtc->base.state);
-
-    state->pending_config = false;
-
-    if (mtk_crtc->pending_planes) {
-        for (i = 0; i < mtk_crtc->layer_nr; i++) {
-            struct drm_plane *plane = &mtk_crtc->planes[i];
-            struct mtk_plane_state *plane_state;
-
-            plane_state = to_mtk_plane_state(plane->state);
-
-            plane_state->pending.config = false;
-        }
-        mtk_crtc->pending_planes = false;
-    }
-
-    if (mtk_crtc->pending_async_planes) {
-        for (i = 0; i < mtk_crtc->layer_nr; i++) {
-            struct drm_plane *plane = &mtk_crtc->planes[i];
-            struct mtk_plane_state *plane_state;
-
-            plane_state = to_mtk_plane_state(plane->state);
-
-            plane_state->pending.async_config = false;
-        }
-        mtk_crtc->pending_async_planes = false;
-    }
-
-    mtk_crtc->cmdq_vblank_cnt = 0;
-    mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+    cmdq_pkt_destroy(data.data);
 }
 #endif
@@ -453,7 +378,6 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                 state->pending_vrefresh, 0,
                                 cmdq_handle);

-        if (!cmdq_handle)
         state->pending_config = false;
     }
@@ -474,11 +398,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
             mtk_ddp_comp_layer_config(comp, local_layer,
                                       plane_state,
                                       cmdq_handle);
-            if (!cmdq_handle)
             plane_state->pending.config = false;
         }
-
-        if (!cmdq_handle)
         mtk_crtc->pending_planes = false;
     }
@@ -499,11 +420,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
             mtk_ddp_comp_layer_config(comp, local_layer,
                                       plane_state,
                                       cmdq_handle);
-            if (!cmdq_handle)
             plane_state->pending.async_config = false;
         }
-
-        if (!cmdq_handle)
         mtk_crtc->pending_async_planes = false;
     }
 }
@@ -512,7 +430,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
                                        bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-    struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
+    struct cmdq_pkt *cmdq_handle;
 #endif
     struct drm_crtc *crtc = &mtk_crtc->base;
     struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -550,24 +468,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
         mtk_mutex_release(mtk_crtc->mutex);
     }
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-    if (mtk_crtc->cmdq_chan) {
-        mbox_flush(mtk_crtc->cmdq_chan, 2000);
-        cmdq_handle->cmd_buf_size = 0;
+    if (mtk_crtc->cmdq_client) {
+        mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
+        cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
         cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
         cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
         mtk_crtc_ddp_config(crtc, cmdq_handle);
         cmdq_pkt_finalize(cmdq_handle);
-        dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
-                                   cmdq_handle->pa_base,
-                                   cmdq_handle->cmd_buf_size,
-                                   DMA_TO_DEVICE);
-        /*
-         * CMDQ command should execute in next vblank,
-         * If it fail to execute in next 2 vblank, timeout happen.
-         */
-        mtk_crtc->cmdq_vblank_cnt = 2;
-        mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
-        mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
+        cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
     }
 #endif
     mtk_crtc->config_updating = false;
@@ -581,15 +489,12 @@ static void mtk_crtc_ddp_irq(void *data)
     struct mtk_drm_private *priv = crtc->dev->dev_private;

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-    if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
-        mtk_crtc_ddp_config(crtc, NULL);
-    else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
-        DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
-                  drm_crtc_index(&mtk_crtc->base));
+    if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
 #else
     if (!priv->data->shadow_register)
-        mtk_crtc_ddp_config(crtc, NULL);
 #endif
+        mtk_crtc_ddp_config(crtc, NULL);

     mtk_drm_finish_page_flip(mtk_crtc);
 }
@@ -924,20 +829,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
     mutex_init(&mtk_crtc->hw_lock);

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-    mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
-    mtk_crtc->cmdq_cl.tx_block = false;
-    mtk_crtc->cmdq_cl.knows_txdone = true;
-    mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
-    mtk_crtc->cmdq_chan =
-            mbox_request_channel(&mtk_crtc->cmdq_cl,
-                                 drm_crtc_index(&mtk_crtc->base));
-    if (IS_ERR(mtk_crtc->cmdq_chan)) {
+    mtk_crtc->cmdq_client =
+            cmdq_mbox_create(mtk_crtc->mmsys_dev,
+                             drm_crtc_index(&mtk_crtc->base));
+    if (IS_ERR(mtk_crtc->cmdq_client)) {
         dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
             drm_crtc_index(&mtk_crtc->base));
-        mtk_crtc->cmdq_chan = NULL;
+        mtk_crtc->cmdq_client = NULL;
     }

-    if (mtk_crtc->cmdq_chan) {
+    if (mtk_crtc->cmdq_client) {
         ret = of_property_read_u32_index(priv->mutex_node,
                                          "mediatek,gce-events",
                                          drm_crtc_index(&mtk_crtc->base),
@@ -945,18 +846,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
         if (ret) {
             dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
                 drm_crtc_index(&mtk_crtc->base));
-            mbox_free_channel(mtk_crtc->cmdq_chan);
-            mtk_crtc->cmdq_chan = NULL;
-        } else {
-            ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
-                                          &mtk_crtc->cmdq_handle,
-                                          PAGE_SIZE);
-            if (ret) {
-                dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
-                    drm_crtc_index(&mtk_crtc->base));
-                mbox_free_channel(mtk_crtc->cmdq_chan);
-                mtk_crtc->cmdq_chan = NULL;
-            }
+            cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+            mtk_crtc->cmdq_client = NULL;
         }
     }
 #endif
...
@@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
     }

     icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-    ret = IS_ERR(icc_path);
-    if (ret)
+    if (IS_ERR(icc_path)) {
+        ret = PTR_ERR(icc_path);
         goto fail;
+    }

     ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-    ret = IS_ERR(ocmem_icc_path);
-    if (ret) {
+    if (IS_ERR(ocmem_icc_path)) {
+        ret = PTR_ERR(ocmem_icc_path);
         /* allow -ENODATA, ocmem icc is optional */
         if (ret != -ENODATA)
             goto fail;
...
@@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
     }

     icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-    ret = IS_ERR(icc_path);
-    if (ret)
+    if (IS_ERR(icc_path)) {
+        ret = PTR_ERR(icc_path);
         goto fail;
+    }

     ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-    ret = IS_ERR(ocmem_icc_path);
-    if (ret) {
+    if (IS_ERR(ocmem_icc_path)) {
+        ret = PTR_ERR(ocmem_icc_path);
         /* allow -ENODATA, ocmem icc is optional */
         if (ret != -ENODATA)
             goto fail;
...
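Both the a3xx and a4xx hunks above restore the standard IS_ERR()/PTR_ERR() idiom: IS_ERR() only reports *that* a pointer encodes an error (it evaluates to 1 or 0), while PTR_ERR() recovers *which* negative errno it encodes, so the old `ret = IS_ERR(...)` propagated 1 instead of values like -EPROBE_DEFER or -ENODATA that callers test for. A minimal sketch of the idiom (the wrapper function is hypothetical, not driver code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

/* Hypothetical wrapper showing the error-handling idiom the fixes restore. */
static int example_get_icc_path(struct device *dev, struct icc_path **out)
{
    struct icc_path *path = devm_of_icc_get(dev, "gfx-mem");

    if (IS_ERR(path))
        return PTR_ERR(path);    /* a real -errno, not the value 1 */

    *out = path;
    return 0;
}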
@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
     u32 val;
     int request, ack;

+    WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
     if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
         return -EINVAL;
@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
     int bit;

+    WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
     if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
         return;
@@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
     if (!pdev)
         return -ENODEV;

+    mutex_init(&gmu->lock);
+
     gmu->dev = &pdev->dev;

     of_dma_configure(gmu->dev, node, true);
...
@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
 struct a6xx_gmu {
     struct device *dev;

+    /* For serializing communication with the GMU: */
+    struct mutex lock;
+
     struct msm_gem_address_space *aspace;

     void * __iomem mmio;
...
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
     u32 asid;
     u64 memptr = rbmemptr(ring, ttbr0);

-    if (ctx == a6xx_gpu->cur_ctx)
+    if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
         return;

     if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
     OUT_PKT7(ring, CP_EVENT_WRITE, 1);
     OUT_RING(ring, 0x31);

-    a6xx_gpu->cur_ctx = ctx;
+    a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }

 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
       A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
       A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)

-static int a6xx_hw_init(struct msm_gpu *gpu)
+static int hw_init(struct msm_gpu *gpu)
 {
     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
     /* Always come up on rb 0 */
     a6xx_gpu->cur_ring = gpu->rb[0];

-    a6xx_gpu->cur_ctx = NULL;
+    a6xx_gpu->cur_ctx_seqno = 0;

     /* Enable the SQE_to start the CP engine */
     gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1135,6 +1135,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
     return ret;
 }

+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+    struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+    int ret;
+
+    mutex_lock(&a6xx_gpu->gmu.lock);
+    ret = hw_init(gpu);
+    mutex_unlock(&a6xx_gpu->gmu.lock);
+
+    return ret;
+}
+
 static void a6xx_dump(struct msm_gpu *gpu)
 {
     DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
@@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)

     trace_msm_gpu_resume(0);

+    mutex_lock(&a6xx_gpu->gmu.lock);
     ret = a6xx_gmu_resume(a6xx_gpu);
+    mutex_unlock(&a6xx_gpu->gmu.lock);
     if (ret)
         return ret;
@@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)

     msm_devfreq_suspend(gpu);

+    mutex_lock(&a6xx_gpu->gmu.lock);
     ret = a6xx_gmu_stop(a6xx_gpu);
+    mutex_unlock(&a6xx_gpu->gmu.lock);
     if (ret)
         return ret;
@@ -1547,9 +1564,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-    static DEFINE_MUTEX(perfcounter_oob);

-    mutex_lock(&perfcounter_oob);
+    mutex_lock(&a6xx_gpu->gmu.lock);

     /* Force the GPU power on so we can read this register */
     a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
@@ -1558,7 +1574,9 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
         REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);

     a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-    mutex_unlock(&perfcounter_oob);
+
+    mutex_unlock(&a6xx_gpu->gmu.lock);

     return 0;
 }
@@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
     return (unsigned long)busy_time;
 }

+void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+{
+    struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+    struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+    mutex_lock(&a6xx_gpu->gmu.lock);
+    a6xx_gmu_set_freq(gpu, opp);
+    mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
 static struct msm_gem_address_space *
 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 {
@@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
         .gpu_busy = a6xx_gpu_busy,
         .gpu_get_freq = a6xx_gmu_get_freq,
-        .gpu_set_freq = a6xx_gmu_set_freq,
+        .gpu_set_freq = a6xx_gpu_set_freq,
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
         .gpu_state_get = a6xx_gpu_state_get,
         .gpu_state_put = a6xx_gpu_state_put,
...
@@ -19,7 +19,16 @@ struct a6xx_gpu {
     uint64_t sqe_iova;

     struct msm_ringbuffer *cur_ring;
-    struct msm_file_private *cur_ctx;
+
+    /**
+     * cur_ctx_seqno:
+     *
+     * The ctx->seqno value of the context with current pgtables
+     * installed. Tracked by seqno rather than pointer value to
+     * avoid dangling pointers, and cases where a ctx can be freed
+     * and a new one created with the same address.
+     */
+    int cur_ctx_seqno;

     struct a6xx_gmu gmu;
...
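The kernel-doc above describes the ABA-style hazard; as a toy illustration (names here are hypothetical, not the driver code), compare the two ways of asking "are this context's pagetables already installed?":

struct ctx {
    int seqno;    /* assigned once from a monotonic counter, never reused */
};

static int cur_ctx_seqno;

static _Bool pagetables_current(const struct ctx *c)
{
    /* Comparing raw pointers ("c == cur_ctx") can spuriously match:
     * kfree() of the old context followed by kzalloc() can hand the
     * new context the same address, so a brand-new context whose
     * pagetables were never installed would be skipped. A seqno is
     * never recycled, so a stale match is impossible.
     */
    return c->seqno == cur_ctx_seqno;
}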
@@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
             DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
             -1),
     PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
-            DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+            DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
             -1),
 };
...
@@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
     __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
 }

+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+    .set_config = drm_atomic_helper_set_config,
+    .destroy = mdp5_crtc_destroy,
+    .page_flip = drm_atomic_helper_page_flip,
+    .reset = mdp5_crtc_reset,
+    .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+    .atomic_destroy_state = mdp5_crtc_destroy_state,
+    .atomic_print_state = mdp5_crtc_atomic_print_state,
+    .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+    .enable_vblank = msm_crtc_enable_vblank,
+    .disable_vblank = msm_crtc_disable_vblank,
+    .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
     .set_config = drm_atomic_helper_set_config,
     .destroy = mdp5_crtc_destroy,
@@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
     mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

     drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+                              cursor_plane ?
+                              &mdp5_crtc_no_lm_cursor_funcs :
                               &mdp5_crtc_funcs, NULL);

     drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
...
@@ -1309,14 +1309,14 @@ static int dp_pm_resume(struct device *dev)
      * can not declared display is connected unless
      * HDMI cable is plugged in and sink_count of
      * dongle become 1
+     * also only signal audio when disconnected
      */
-    if (dp->link->sink_count)
+    if (dp->link->sink_count) {
         dp->dp_display.is_connected = true;
-    else
+    } else {
         dp->dp_display.is_connected = false;
-
-    dp_display_handle_plugged_change(g_dp_display,
-            dp->dp_display.is_connected);
+        dp_display_handle_plugged_change(g_dp_display, false);
+    }

     DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
         dp->link->sink_count, dp->dp_display.is_connected,
...
@@ -215,8 +215,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
         goto fail;
     }

-    if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+    if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+        ret = -EINVAL;
         goto fail;
+    }

     msm_dsi->encoder = encoder;
...
@@ -451,7 +451,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
     return 0;
 err:
-    for (; i > 0; i--)
+    while (--i >= 0)
         clk_disable_unprepare(msm_host->bus_clks[i]);

     return ret;
...
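A sketch of the unwind off-by-one fixed here (illustrative array rather than the real msm_host fields): if clk_prepare_enable() fails at index i, clocks 0..i-1 are enabled while clks[i] is not, so the cleanup must walk i-1 down to 0. The old `for (; i > 0; i--)` disabled the never-enabled clks[i] and skipped clks[0].

#include <linux/clk.h>

static int enable_all(struct clk **clks, int n)
{
    int i, ret;

    for (i = 0; i < n; i++) {
        ret = clk_prepare_enable(clks[i]);
        if (ret)
            goto err;
    }
    return 0;

err:
    /* i is the index that failed: disable i-1 .. 0, nothing else. */
    while (--i >= 0)
        clk_disable_unprepare(clks[i]);
    return ret;
}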
@@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
 static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
                                     u32 nb_tries, u32 timeout_us)
 {
-    bool pll_locked = false;
+    bool pll_locked = false, pll_ready = false;
     void __iomem *base = pll_14nm->phy->pll_base;
     u32 tries, val;

     tries = nb_tries;
     while (tries--) {
-        val = dsi_phy_read(base +
-                   REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+        val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
         pll_locked = !!(val & BIT(5));

         if (pll_locked)
@@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
         udelay(timeout_us);
     }

-    if (!pll_locked) {
-        tries = nb_tries;
-        while (tries--) {
-            val = dsi_phy_read(base +
-                       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-            pll_locked = !!(val & BIT(0));
+    if (!pll_locked)
+        goto out;

-            if (pll_locked)
-                break;
+    tries = nb_tries;
+    while (tries--) {
+        val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+        pll_ready = !!(val & BIT(0));

-            udelay(timeout_us);
-        }
-    }
+        if (pll_ready)
+            break;

-    DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+        udelay(timeout_us);
+    }

-    return pll_locked;
+out:
+    DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
+
+    return pll_locked && pll_ready;
 }

 static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
...
@@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
     bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;

     snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
-    snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+    snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);

     bytediv_init.name = clk_name;
     bytediv_init.ops = &clk_bytediv_ops;
@@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
         return ret;
     provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;

-    snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+    snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
     /* DIV3 */
     hw = devm_clk_hw_register_divider(dev, clk_name,
                 parent_name, 0, pll_28nm->phy->pll_base +
...
@@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
 int msm_edp_ctrl_init(struct msm_edp *edp)
 {
     struct edp_ctrl *ctrl = NULL;
-    struct device *dev = &edp->pdev->dev;
+    struct device *dev;
     int ret;

     if (!edp) {
@@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
         return -EINVAL;
     }

+    dev = &edp->pdev->dev;
     ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
     if (!ctrl)
         return -ENOMEM;
...
@@ -630,10 +630,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
     if (ret)
         goto err_msm_uninit;

-    ret = msm_disp_snapshot_init(ddev);
-    if (ret)
-        DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+    if (kms) {
+        ret = msm_disp_snapshot_init(ddev);
+        if (ret)
+            DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+    }
     drm_mode_config_reset(ddev);

 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -682,6 +683,7 @@ static void load_gpu(struct drm_device *dev)

 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+    static atomic_t ident = ATOMIC_INIT(0);
     struct msm_drm_private *priv = dev->dev_private;
     struct msm_file_private *ctx;

@@ -689,12 +691,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
     if (!ctx)
         return -ENOMEM;

+    INIT_LIST_HEAD(&ctx->submitqueues);
+    rwlock_init(&ctx->queuelock);
+
     kref_init(&ctx->ref);
     msm_submitqueue_init(dev, ctx);

     ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
     file->driver_priv = ctx;

+    ctx->seqno = atomic_inc_return(&ident);
+
     return 0;
 }
...
@@ -53,14 +53,6 @@ struct msm_disp_state;

 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))

-struct msm_file_private {
-    rwlock_t queuelock;
-    struct list_head submitqueues;
-    int queueid;
-    struct msm_gem_address_space *aspace;
-    struct kref ref;
-};
-
 enum msm_mdp_plane_property {
     PLANE_PROP_ZPOS,
     PLANE_PROP_ALPHA,
@@ -488,41 +480,6 @@ void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 void msm_rmw(void __iomem *addr, u32 mask, u32 or);

-struct msm_gpu_submitqueue;
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
-        u32 id);
-int msm_submitqueue_create(struct drm_device *drm,
-        struct msm_file_private *ctx,
-        u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
-        struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
-void msm_submitqueue_destroy(struct kref *kref);
-
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
-    struct msm_file_private *ctx = container_of(kref,
-        struct msm_file_private, ref);
-
-    msm_gem_address_space_put(ctx->aspace);
-    kfree(ctx);
-}
-
-static inline void msm_file_private_put(struct msm_file_private *ctx)
-{
-    kref_put(&ctx->ref, __msm_file_private_destroy);
-}
-
-static inline struct msm_file_private *msm_file_private_get(
-    struct msm_file_private *ctx)
-{
-    kref_get(&ctx->ref);
-    return ctx;
-}
-
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
@@ -547,7 +504,7 @@ static inline int align_pitch(int width, int bpp)
 static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 {
     ktime_t now = ktime_get();
-    unsigned long remaining_jiffies;
+    s64 remaining_jiffies;

     if (ktime_compare(*timeout, now) < 0) {
         remaining_jiffies = 0;
@@ -556,7 +513,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
         remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
     }

-    return remaining_jiffies;
+    return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
 }

 #endif /* __MSM_DRV_H__ */
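A worked example of the truncation fixed in timeout_to_jiffies() above (the numbers are assumptions for illustration): userspace supplies an absolute timeout, so the remaining time can be arbitrarily large, and on a 32-bit unsigned long the old code silently wrapped it.

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/minmax.h>

static unsigned long demo_remaining_jiffies(s64 rem_ns)
{
    /* Say rem_ns is ~73 years: 2^61 ns. With HZ=100 one jiffy is
     * 10^7 ns, so this comes to ~2.3e11 jiffies -- far beyond the
     * ~4.3e9 a 32-bit unsigned long can hold, which is where the
     * old code truncated.
     */
    s64 remaining_jiffies = div64_s64(rem_ns, NSEC_PER_SEC / HZ);

    /* The fix keeps the math in s64 and saturates at INT_MAX, a
     * value every downstream wait primitive can accept.
     */
    return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
}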
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
     if (!submit)
         return ERR_PTR(-ENOMEM);

-    ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+    ret = drm_sched_job_init(&submit->base, queue->entity, queue);
     if (ret) {
         kfree(submit);
         return ERR_PTR(ret);
@@ -171,7 +171,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 static int submit_lookup_cmds(struct msm_gem_submit *submit,
         struct drm_msm_gem_submit *args, struct drm_file *file)
 {
-    unsigned i, sz;
+    unsigned i;
+    size_t sz;
     int ret = 0;

     for (i = 0; i < args->nr_cmds; i++) {
@@ -907,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
     /* The scheduler owns a ref now: */
     msm_gem_submit_get(submit);

-    drm_sched_entity_push_job(&submit->base, &queue->entity);
+    drm_sched_entity_push_job(&submit->base, queue->entity);

     args->fence = submit->fence_id;
...
@@ -257,6 +257,39 @@ struct msm_gpu_perfcntr {
  */
 #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

+/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock:    synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid:      counter incremented each time a submitqueue is created,
+ *                used to assign &msm_gpu_submitqueue.id
+ * @aspace:       the per-process GPU address-space
+ * @ref:          reference count
+ * @seqno:        unique per process seqno
+ */
+struct msm_file_private {
+    rwlock_t queuelock;
+    struct list_head submitqueues;
+    int queueid;
+    struct msm_gem_address_space *aspace;
+    struct kref ref;
+    int seqno;
+
+    /**
+     * entities:
+     *
+     * Table of per-priority-level sched entities used by submitqueues
+     * associated with this &drm_file. Because some userspace apps
+     * make assumptions about rendering from multiple gl contexts
+     * (of the same priority) within the process happening in FIFO
+     * order without requiring any fencing beyond MakeCurrent(), we
+     * create at most one &drm_sched_entity per-process per-priority-
+     * level.
+     */
+    struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -304,6 +337,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
 }

 /**
+ * struct msm_gpu_submitqueues - Userspace created context.
+ *
  * A submitqueue is associated with a gl context or vk queue (or equiv)
  * in userspace.
  *
@@ -333,7 +368,7 @@ struct msm_gpu_submitqueue {
     struct idr fence_idr;
     struct mutex lock;
     struct kref ref;
-    struct drm_sched_entity entity;
+    struct drm_sched_entity *entity;
 };

 struct msm_gpu_state_bo {
@@ -421,6 +456,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);

+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+        u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+        struct msm_file_private *ctx,
+        u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+        struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+void msm_submitqueue_destroy(struct kref *kref);
+
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+    kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+    struct msm_file_private *ctx)
+{
+    kref_get(&ctx->ref);
+    return ctx;
+}
+
 void msm_devfreq_init(struct msm_gpu *gpu);
 void msm_devfreq_cleanup(struct msm_gpu *gpu);
 void msm_devfreq_resume(struct msm_gpu *gpu);
...
@@ -151,6 +151,9 @@ void msm_devfreq_active(struct msm_gpu *gpu)
     unsigned int idle_time;
     unsigned long target_freq = df->idle_freq;

+    if (!df->devfreq)
+        return;
+
     /*
      * Hold devfreq lock to synchronize with get_dev_status()/
      * target() callbacks
@@ -186,6 +189,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
     struct msm_gpu_devfreq *df = &gpu->devfreq;
     unsigned long idle_freq, target_freq = 0;

+    if (!df->devfreq)
+        return;
+
     /*
      * Hold devfreq lock to synchronize with get_dev_status()/
      * target() callbacks
...
@@ -7,6 +7,24 @@

 #include "msm_gpu.h"

+void __msm_file_private_destroy(struct kref *kref)
+{
+    struct msm_file_private *ctx = container_of(kref,
+        struct msm_file_private, ref);
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+        if (!ctx->entities[i])
+            continue;
+
+        drm_sched_entity_destroy(ctx->entities[i]);
+        kfree(ctx->entities[i]);
+    }
+
+    msm_gem_address_space_put(ctx->aspace);
+    kfree(ctx);
+}
+
 void msm_submitqueue_destroy(struct kref *kref)
 {
     struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)

     idr_destroy(&queue->fence_idr);

-    drm_sched_entity_destroy(&queue->entity);
-
     msm_file_private_put(queue->ctx);

     kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
     }
 }

+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+                 unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+    static DEFINE_MUTEX(entity_lock);
+    unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+    /* We should have already validated that the requested priority is
+     * valid by the time we get here.
+     */
+    if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+        return ERR_PTR(-EINVAL);
+
+    mutex_lock(&entity_lock);
+
+    if (!ctx->entities[idx]) {
+        struct drm_sched_entity *entity;
+        struct drm_gpu_scheduler *sched = &ring->sched;
+        int ret;
+
+        entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+
+        ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+        if (ret) {
+            kfree(entity);
+            return ERR_PTR(ret);
+        }
+
+        ctx->entities[idx] = entity;
+    }
+
+    mutex_unlock(&entity_lock);
+
+    return ctx->entities[idx];
+}
+
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
         u32 prio, u32 flags, u32 *id)
 {
     struct msm_drm_private *priv = drm->dev_private;
     struct msm_gpu_submitqueue *queue;
-    struct msm_ringbuffer *ring;
-    struct drm_gpu_scheduler *sched;
     enum drm_sched_priority sched_prio;
     unsigned ring_nr;
     int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
     queue->flags = flags;
     queue->ring_nr = ring_nr;

-    ring = priv->gpu->rb[ring_nr];
-    sched = &ring->sched;
-
-    ret = drm_sched_entity_init(&queue->entity,
-            sched_prio, &sched, 1, NULL);
-    if (ret) {
+    queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+                                     ring_nr, sched_prio);
+    if (IS_ERR(queue->entity)) {
+        ret = PTR_ERR(queue->entity);
         kfree(queue);
         return ret;
     }
@@ -140,10 +188,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
     */
     default_prio = DIV_ROUND_UP(max_priority, 2);

-    INIT_LIST_HEAD(&ctx->submitqueues);
-    rwlock_init(&ctx->queuelock);
-
     return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
 }
...
@@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
     if (offset < 0)
         return 0;

-    engn = fifo->base.func->engine_id(&fifo->base, engine);
+    engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
     save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
     nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
     done = nvkm_msec(device, 2000,
...
@@ -295,6 +295,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
     depends on OF
     depends on I2C
     depends on BACKLIGHT_CLASS_DEVICE
+    select CRC32
     help
       The panel is used with different sizes LCDs, from 480x272 to
       1280x800, and 24 bit per pixel.
...
@@ -214,7 +214,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
     }
     ret = 0;

-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_X86
     wbinvd();
 #else
     mb();
...
@@ -86,12 +86,20 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
     }

     /*
-     * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+     * Create and initialize the encoder. On Gen3, skip the LVDS1 output if
      * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
-     * mode.
+     * mode, or any LVDS output if it isn't connected. The latter may happen
+     * on D3 or E3 as the LVDS encoders are needed to provide the pixel
+     * clock to the DU, even when the LVDS outputs are not used.
      */
-    if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
-        if (rcar_lvds_dual_link(bridge))
+    if (rcdu->info->gen >= 3) {
+        if (output == RCAR_DU_OUTPUT_LVDS1 &&
+            rcar_lvds_dual_link(bridge))
+            return -ENOLINK;
+
+        if ((output == RCAR_DU_OUTPUT_LVDS0 ||
+             output == RCAR_DU_OUTPUT_LVDS1) &&
+            !rcar_lvds_is_connected(bridge))
             return -ENOLINK;
     }
...
@@ -576,6 +576,9 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
 {
     struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);

+    if (!lvds->next_bridge)
+        return 0;
+
     return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
                              flags);
 }
@@ -598,6 +601,14 @@ bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);

+bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+    struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+    return lvds->next_bridge != NULL;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_is_connected);
+
 /* -----------------------------------------------------------------------------
  * Probe & Remove
  */
...
@@ -16,6 +16,7 @@ struct drm_bridge;
 int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
 void rcar_lvds_clk_disable(struct drm_bridge *bridge);
 bool rcar_lvds_dual_link(struct drm_bridge *bridge);
+bool rcar_lvds_is_connected(struct drm_bridge *bridge);
 #else
 static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
                                        unsigned long freq)
@@ -27,6 +28,10 @@ static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 {
     return false;
 }
+static inline bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+    return false;
+}
 #endif /* CONFIG_DRM_RCAR_LVDS */

 #endif /* __RCAR_LVDS_H__ */