Commit 311ac00d authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.13-rc1' of git://people.freedesktop.org/~airlied/linux

Pull more drm updates from Dave Airlie:
 "i915, amd and some core fixes + mediatek color support.

  Some fixes trees came in since the main pull request for rc1, primarily
  i915 and drm-misc and one amd fix. The drm core vblank regression fix
  is probably the most important thing.

  I've also added the mediatek feature pull, it wasn't that big and
  didn't look like it would have any impact outside of mediatek, in fact
  it looks to just be a single feature, and some cleanups"

* tag 'drm-fixes-for-v4.13-rc1' of git://people.freedesktop.org/~airlied/linux: (31 commits)
  drm/i915: Make DP-MST connector info work
  drm/i915/gvt: Use fence error from GVT request for workload status
  drm/i915/gvt: remove scheduler_mutex in per-engine workload_thread
  drm/i915/gvt: Revert "drm/i915/gvt: Fix possible recursive locking issue"
  drm/i915/gvt: Audit the command buffer address
  drm/i915/gvt: Fix a memory leak in intel_gvt_init_gtt()
  drm/rockchip: fix NULL check on devm_kzalloc() return value
  drm/i915/fbdev: Check for existence of ifbdev->vma before operations
  drm/radeon: Fix eDP for single-display iMac10,1 (v2)
  drm/i915: Hold RPM wakelock while initializing OA buffer
  drm/i915/cnl: Fix the CURSOR_COEFF_MASK used in DDI Vswing Programming
  drm/i915/cfl: Fix Workarounds.
  drm/i915: Avoid undefined behaviour of "u32 >> 32"
  drm/i915: reintroduce VLV/CHV PFI programming power domain workaround
  drm/i915: Fix an error checking test
  drm/i915: Disable MSI for all pre-gen5
  drm/atomic: Add missing drm_atomic_state_clear to atomic_remove_fb
  drm: vblank: Fix vblank timestamp update
  drm/i915/gvt: Make function dpy_reg_mmio_readx safe
  drm/mediatek: separate color module to fixup error memory reallocation
  ...
parents 266530b3 6419ec78
@@ -161,7 +161,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
 	int ret;
 
 	if (!panel)
-		return ERR_PTR(EINVAL);
+		return ERR_PTR(-EINVAL);
 
 	panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
				    GFP_KERNEL);
...
@@ -832,6 +832,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
 	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
 		drm_modeset_backoff(&ctx);
 		goto retry;
 	}
...
@@ -242,7 +242,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 	 * Otherwise reinitialize delayed at next vblank interrupt and assign 0
 	 * for now, to mark the vblanktimestamp as invalid.
 	 */
-	if (!rc && in_vblank_irq)
+	if (!rc && !in_vblank_irq)
 		t_vblank = (struct timeval) {0, 0};
 
 	store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
...
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 			gma_head == gma_tail)
 		return 0;
 
+	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	ret = ip_gma_set(&s, gma_head);
 	if (ret)
 		goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
 
+	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	ret = ip_gma_set(&s, gma_head);
 	if (ret)
 		goto out;
...
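Both hunks above add the same guard: before the command scanner walks a guest-supplied ring buffer, the ring's start and size are checked against the address range the vGPU is allowed to use. As a minimal standalone sketch of that kind of bounds check (plain C with illustrative names only; this is not the GVT-g helper itself):

    #include <stdbool.h>
    #include <stdint.h>

    /* Reject a guest ring that is not fully contained in the permitted
     * aperture [base, base + limit_size). Illustrative only. */
    static bool range_is_valid(uint64_t start, uint64_t size,
                               uint64_t base, uint64_t limit_size)
    {
            if (size == 0 || start < base)
                    return false;
            if (start + size < start)        /* wrap-around guard */
                    return false;
            return start + size <= base + limit_size;
    }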
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_B << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
+		if (IS_BROADWELL(dev_priv)) {
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+				~PORT_CLK_SEL_MASK;
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+				PORT_CLK_SEL_LCPLL_810;
+		}
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
+		if (IS_BROADWELL(dev_priv)) {
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+				~PORT_CLK_SEL_MASK;
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+				PORT_CLK_SEL_LCPLL_810;
+		}
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
+		if (IS_BROADWELL(dev_priv)) {
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+				~PORT_CLK_SEL_MASK;
+			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+				PORT_CLK_SEL_LCPLL_810;
+		}
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
+
+	/* Clear host CRT status, so guest couldn't detect this host CRT. */
+	if (IS_BROADWELL(dev_priv))
+		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
...
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		ret = setup_spt_oos(gvt);
 		if (ret) {
 			gvt_err("fail to initialize SPT oos\n");
+			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+			__free_page(gvt->gtt.scratch_ggtt_page);
 			return ret;
 		}
 	}
...
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	*(u32 *)p_data = (1 << 17);
-	return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	*(u32 *)p_data = 3;
-	return 0;
-}
-
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	*(u32 *)p_data = (0x2f << 16);
+	switch (offset) {
+	case 0xe651c:
+	case 0xe661c:
+	case 0xe671c:
+	case 0xe681c:
+		vgpu_vreg(vgpu, offset) = 1 << 17;
+		break;
+	case 0xe6c04:
+		vgpu_vreg(vgpu, offset) = 0x3;
+		break;
+	case 0xe6e1c:
+		vgpu_vreg(vgpu, offset) = 0x2f << 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	read_vreg(vgpu, offset, p_data, bytes);
 	return 0;
 }
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
 		dp_aux_ch_ctl_mmio_write);
 
-	MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
 
 	MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
 	MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
 	MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
 	MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
-	MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
-	MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+	MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+	MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
 
 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
 		PORTA_HOTPLUG_STATUS_MASK
...
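For context on why the rewrite above is described as "safe": the old helpers stored a full u32 into p_data regardless of the requested access width, while the consolidated handler writes the emulated value into the vGPU's virtual register file and lets read_vreg() copy back only the bytes the guest actually asked for (and rejects unknown offsets with -EINVAL). A rough userspace sketch of that width-honouring copy, with hypothetical names rather than the GVT-g code:

    #include <stdint.h>
    #include <string.h>

    /* vreg[] stands in for the per-vGPU virtual register file. */
    static uint8_t vreg[0x1000];

    /* Copy only 'bytes' bytes starting at 'offset', instead of always
     * writing a full 32-bit value into the caller's buffer. */
    static void emu_reg_read(unsigned int offset, void *p_data,
                             unsigned int bytes)
    {
            memcpy(p_data, &vreg[offset], bytes);
    }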
@@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	unsigned long gfn;
 
-	mutex_lock(&vgpu->vdev.cache_lock);
-	while ((node = rb_first(&vgpu->vdev.cache))) {
+	for (;;) {
+		mutex_lock(&vgpu->vdev.cache_lock);
+		node = rb_first(&vgpu->vdev.cache);
+		if (!node) {
+			mutex_unlock(&vgpu->vdev.cache_lock);
+			break;
+		}
 		dma = rb_entry(node, struct gvt_dma, node);
 		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
-		vfio_unpin_pages(dev, &gfn, 1);
 		__gvt_cache_remove_entry(vgpu, dma);
+		mutex_unlock(&vgpu->vdev.cache_lock);
+		vfio_unpin_pages(dev, &gfn, 1);
 	}
-	mutex_unlock(&vgpu->vdev.cache_lock);
 }
 
 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
...
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		/* If the status is -EINPROGRESS means this workload
-		 * doesn't meet any issue during dispatching so when
-		 * get the SCHEDULE_OUT set the status to be zero for
-		 * good. If the status is NOT -EINPROGRESS means there
-		 * is something wrong happened during dispatching and
-		 * the status should not be set to zero
-		 */
-		if (workload->status == -EINPROGRESS)
-			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
+		/* If this request caused GPU hang, req->fence.error will
+		 * be set to -EIO. Use -EIO to set workload status so
+		 * that when this request caused GPU hang, didn't trigger
+		 * context switch interrupt to guest.
+		 */
+		if (likely(workload->status == -EINPROGRESS)) {
+			if (workload->req->fence.error == -EIO)
+				workload->status = -EIO;
+			else
+				workload->status = 0;
+		}
+
 		i915_gem_request_put(fetch_and_zero(&workload->req));
 
 		if (!workload->status && !vgpu->resetting) {
@@ -464,8 +467,6 @@ struct workload_thread_param {
 	int ring_id;
 };
 
-static DEFINE_MUTEX(scheduler_mutex);
-
 static int workload_thread(void *priv)
 {
 	struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
 		if (!workload)
 			break;
 
-		mutex_lock(&scheduler_mutex);
-
 		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
 			workload->ring_id, workload,
 			workload->vgpu->id);
@@ -537,9 +536,6 @@ static int workload_thread(void *priv)
 				FORCEWAKE_ALL);
 
 		intel_runtime_pm_put(gvt->dev_priv);
-
-		mutex_unlock(&scheduler_mutex);
 	}
 	return 0;
 }
...
@@ -3087,7 +3087,7 @@ static void intel_connector_info(struct seq_file *m,
 			   connector->display_info.cea_rev);
 	}
 
-	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+	if (!intel_encoder)
 		return;
 
 	switch (connector->connector_type) {
...
@@ -1132,10 +1132,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * and the registers being closely associated.
 	 *
 	 * According to chipset errata, on the 965GM, MSI interrupts may
-	 * be lost or delayed, but we use them anyways to avoid
-	 * stuck interrupts on some machines.
+	 * be lost or delayed, and was defeatured. MSI interrupts seem to
+	 * get lost on g4x as well, and interrupt delivery seems to stay
+	 * properly dead afterwards. So we'll just disable them for all
+	 * pre-gen5 chipsets.
 	 */
-	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 5) {
 		if (pci_enable_msi(pdev) < 0)
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
...
@@ -288,20 +288,26 @@ static int eb_create(struct i915_execbuffer *eb)
 		 * direct lookup.
 		 */
 		do {
+			unsigned int flags;
+
+			/* While we can still reduce the allocation size, don't
+			 * raise a warning and allow the allocation to fail.
+			 * On the last pass though, we want to try as hard
+			 * as possible to perform the allocation and warn
+			 * if it fails.
+			 */
+			flags = GFP_TEMPORARY;
+			if (size > 1)
+				flags |= __GFP_NORETRY | __GFP_NOWARN;
+
 			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
-					      GFP_TEMPORARY |
-					      __GFP_NORETRY |
-					      __GFP_NOWARN);
+					      flags);
 			if (eb->buckets)
 				break;
 		} while (--size);
 
-		if (unlikely(!eb->buckets)) {
-			eb->buckets = kzalloc(sizeof(struct hlist_head),
-					      GFP_TEMPORARY);
-			if (unlikely(!eb->buckets))
-				return -ENOMEM;
-		}
+		if (unlikely(!size))
+			return -ENOMEM;
 
 		eb->lut_size = size;
 	} else {
@@ -452,7 +458,7 @@ eb_add_vma(struct i915_execbuffer *eb,
 		return err;
 	}
 
-	if (eb->lut_size >= 0) {
+	if (eb->lut_size > 0) {
 		vma->exec_handle = entry->handle;
 		hlist_add_head(&vma->exec_node,
 			       &eb->buckets[hash_32(entry->handle,
@@ -894,7 +900,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
 static void eb_reset_vmas(const struct i915_execbuffer *eb)
 {
 	eb_release_vmas(eb);
-	if (eb->lut_size >= 0)
+	if (eb->lut_size > 0)
 		memset(eb->buckets, 0,
 		       sizeof(struct hlist_head) << eb->lut_size);
 }
@@ -903,7 +909,7 @@ static void eb_destroy(const struct i915_execbuffer *eb)
 {
 	GEM_BUG_ON(eb->reloc_cache.rq);
 
-	if (eb->lut_size >= 0)
+	if (eb->lut_size > 0)
 		kfree(eb->buckets);
 }
@@ -2180,8 +2186,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		}
 	}
 
-	if (eb_create(&eb))
-		return -ENOMEM;
+	err = eb_create(&eb);
+	if (err)
+		goto err_out_fence;
+
+	GEM_BUG_ON(!eb.lut_size);
 
 	/*
 	 * Take a local wakeref for preparing to dispatch the execbuf as
@@ -2340,6 +2349,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_rpm:
 	intel_runtime_pm_put(eb.i915);
 	eb_destroy(&eb);
+err_out_fence:
 	if (out_fence_fd != -1)
 		put_unused_fd(out_fence_fd);
 err_in_fence:
...
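The eb_create() change above folds the old single-bucket fallback into one loop: each attempt uses __GFP_NORETRY | __GFP_NOWARN while the table can still shrink, and only the final, smallest attempt is allowed to try hard and warn. A userspace sketch of the same shrink-on-failure sizing pattern (illustrative names; GFP flags have no userspace equivalent, so only the sizing logic is shown):

    #include <stdlib.h>

    struct bucket { void *head; };

    /* Try 2^size buckets, shrinking the table on allocation failure;
     * report failure only when even the smallest table cannot be
     * allocated. */
    static struct bucket *alloc_buckets(unsigned int *size_out)
    {
            unsigned int size = 12;          /* start at 4096 buckets */

            do {
                    struct bucket *b = calloc(1u << size, sizeof(*b));
                    if (b) {
                            *size_out = size;
                            return b;
                    }
            } while (--size);

            return NULL;
    }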
@@ -2067,10 +2067,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		return ret;
 	}
 
-	ret = alloc_oa_buffer(dev_priv);
-	if (ret)
-		goto err_oa_buf_alloc;
-
 	/* PRM - observability performance counters:
 	 *
 	 * OACONTROL, performance counter enable, note:
@@ -2086,6 +2082,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
+	ret = alloc_oa_buffer(dev_priv);
+	if (ret)
+		goto err_oa_buf_alloc;
+
 	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
 	if (ret)
 		goto err_enable;
@@ -2097,11 +2097,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	return 0;
 
 err_enable:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 	free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
-
 	if (stream->ctx)
 		oa_put_render_ctx_id(stream);
...
@@ -1802,7 +1802,7 @@ enum skl_disp_power_wells {
 #define   POST_CURSOR_2(x)		((x) << 6)
 #define   POST_CURSOR_2_MASK		(0x3F << 6)
 #define   CURSOR_COEFF(x)		((x) << 0)
-#define   CURSOR_COEFF_MASK		(0x3F << 6)
+#define   CURSOR_COEFF_MASK		(0x3F << 0)
 
 #define _CNL_PORT_TX_DW5_GRP_AE		0x162354
 #define _CNL_PORT_TX_DW5_GRP_B		0x1623D4
...
@@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 	int cdclk = cdclk_state->cdclk;
 	u32 val, cmd;
 
+	/* There are cases where we can end up here with power domains
+	 * off and a CDCLK frequency other than the minimum, like when
+	 * issuing a modeset without actually changing any display after
+	 * a system suspend. So grab the PIPE-A domain, which covers
+	 * the HW blocks needed for the following programming.
+	 */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
 		cmd = 2;
 	else if (cdclk == 266667)
@@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 	intel_update_cdclk(dev_priv);
 
 	vlv_program_pfi_credits(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 		return;
 	}
 
+	/* There are cases where we can end up here with power domains
+	 * off and a CDCLK frequency other than the minimum, like when
+	 * issuing a modeset without actually changing any display after
+	 * a system suspend. So grab the PIPE-A domain, which covers
+	 * the HW blocks needed for the following programming.
+	 */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
 	/*
 	 * Specs are full of misinformation, but testing on actual
 	 * hardware has shown that we just need to write the desired
@@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 	intel_update_cdclk(dev_priv);
 
 	vlv_program_pfi_credits(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static int bdw_calc_cdclk(int max_pixclk)
...
@@ -821,9 +821,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
 		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-	/* WaDisableKillLogic:bxt,skl,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   ECOCHK_DIS_TLB);
+	/* WaDisableKillLogic:bxt,skl,kbl */
+	if (!IS_COFFEELAKE(dev_priv))
+		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+			   ECOCHK_DIS_TLB);
 
 	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
 	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
@@ -894,10 +895,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
 			  HDC_FORCE_NON_COHERENT);
 
-	/* WaDisableHDCInvalidation:skl,bxt,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   BDW_DISABLE_HDC_INVALIDATION);
+	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   BDW_DISABLE_HDC_INVALIDATION);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
 	if (IS_SKYLAKE(dev_priv) ||
...
@@ -535,13 +535,14 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 	drm_fb_helper_fini(&ifbdev->helper);
 
-	if (ifbdev->fb) {
+	if (ifbdev->vma) {
 		mutex_lock(&ifbdev->helper.dev->struct_mutex);
 		intel_unpin_fb_vma(ifbdev->vma);
 		mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+	}
 
+	if (ifbdev->fb)
 		drm_framebuffer_remove(&ifbdev->fb->base);
-	}
 
 	kfree(ifbdev);
 }
@@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 	struct intel_fbdev *ifbdev = dev_priv->fbdev;
 	struct fb_info *info;
 
-	if (!ifbdev || !ifbdev->fb)
+	if (!ifbdev || !ifbdev->vma)
 		return;
 
 	info = ifbdev->helper.fbdev;
@@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-	if (ifbdev && ifbdev->fb)
+	if (ifbdev && ifbdev->vma)
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
@@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
 		return;
 
 	intel_fbdev_sync(ifbdev);
-	if (!ifbdev->fb)
+	if (!ifbdev->vma)
 		return;
 
 	if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
...
@@ -246,9 +246,9 @@ static int igt_dmabuf_export_vmap(void *arg)
 	i915_gem_object_put(obj);
 
 	ptr = dma_buf_vmap(dmabuf);
-	if (IS_ERR(ptr)) {
-		err = PTR_ERR(ptr);
-		pr_err("dma_buf_vmap failed with err=%d\n", err);
+	if (!ptr) {
+		pr_err("dma_buf_vmap failed\n");
+		err = -ENOMEM;
 		goto out;
 	}
...
-mediatek-drm-y := mtk_disp_ovl.o \
+mediatek-drm-y := mtk_disp_color.o \
+		  mtk_disp_ovl.o \
 		  mtk_disp_rdma.o \
 		  mtk_drm_crtc.o \
 		  mtk_drm_ddp.o \
...
/*
* Copyright (c) 2017 MediaTek Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#define DISP_COLOR_CFG_MAIN 0x0400
#define DISP_COLOR_START_MT2701 0x0f00
#define DISP_COLOR_START_MT8173 0x0c00
#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
#define COLOR_BYPASS_ALL BIT(7)
#define COLOR_SEQ_SEL BIT(13)
struct mtk_disp_color_data {
unsigned int color_offset;
};
/**
* struct mtk_disp_color - DISP_COLOR driver structure
* @ddp_comp - structure containing type enum and hardware resources
* @crtc - associated crtc to report irq events to
*/
struct mtk_disp_color {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
const struct mtk_disp_color_data *data;
};
static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
{
return container_of(comp, struct mtk_disp_color, ddp_comp);
}
static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
{
struct mtk_disp_color *color = comp_to_color(comp);
writel(w, comp->regs + DISP_COLOR_WIDTH(color));
writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
{
struct mtk_disp_color *color = comp_to_color(comp);
writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
comp->regs + DISP_COLOR_CFG_MAIN);
writel(0x1, comp->regs + DISP_COLOR_START(color));
}
static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = {
.config = mtk_color_config,
.start = mtk_color_start,
};
static int mtk_disp_color_bind(struct device *dev, struct device *master,
void *data)
{
struct mtk_disp_color *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
int ret;
ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
if (ret < 0) {
dev_err(dev, "Failed to register component %s: %d\n",
dev->of_node->full_name, ret);
return ret;
}
return 0;
}
static void mtk_disp_color_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_disp_color *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_color_component_ops = {
.bind = mtk_disp_color_bind,
.unbind = mtk_disp_color_unbind,
};
static int mtk_disp_color_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_color *priv;
int comp_id;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
return comp_id;
}
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_color_funcs);
if (ret) {
dev_err(dev, "Failed to initialize component: %d\n", ret);
return ret;
}
priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_color_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static int mtk_disp_color_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_color_component_ops);
return 0;
}
static const struct mtk_disp_color_data mt2701_color_driver_data = {
.color_offset = DISP_COLOR_START_MT2701,
};
static const struct mtk_disp_color_data mt8173_color_driver_data = {
.color_offset = DISP_COLOR_START_MT8173,
};
static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-color",
.data = &mt2701_color_driver_data},
{ .compatible = "mediatek,mt8173-disp-color",
.data = &mt8173_color_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
struct platform_driver mtk_disp_color_driver = {
.probe = mtk_disp_color_probe,
.remove = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_color_driver_dt_match,
},
};
@@ -42,9 +42,12 @@
 #define OVL_RDMA_MEM_GMC	0x40402020
 
 #define OVL_CON_BYTE_SWAP	BIT(24)
+#define OVL_CON_MTX_YUV_TO_RGB	(6 << 16)
 #define OVL_CON_CLRFMT_RGB	(1 << 12)
 #define OVL_CON_CLRFMT_RGBA8888	(2 << 12)
 #define OVL_CON_CLRFMT_ARGB8888	(3 << 12)
+#define OVL_CON_CLRFMT_UYVY	(4 << 12)
+#define OVL_CON_CLRFMT_YUYV	(5 << 12)
 #define OVL_CON_CLRFMT_RGB565(ovl)	((ovl)->data->fmt_rgb565_is_0 ? \
 					0 : OVL_CON_CLRFMT_RGB)
 #define OVL_CON_CLRFMT_RGB888(ovl)	((ovl)->data->fmt_rgb565_is_0 ? \
@@ -176,6 +179,10 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 	case DRM_FORMAT_XBGR8888:
 	case DRM_FORMAT_ABGR8888:
 		return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP;
+	case DRM_FORMAT_UYVY:
+		return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
+	case DRM_FORMAT_YUYV:
+		return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
 	}
 }
...
@@ -559,6 +559,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
 						sizeof(*mtk_crtc->ddp_comp),
 						GFP_KERNEL);
+	if (!mtk_crtc->ddp_comp)
+		return -ENOMEM;
 
 	mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
 	if (IS_ERR(mtk_crtc->mutex)) {
...
@@ -38,13 +38,6 @@
 #define DISP_REG_UFO_START		0x0000
 
-#define DISP_COLOR_CFG_MAIN		0x0400
-#define DISP_COLOR_START_MT2701		0x0f00
-#define DISP_COLOR_START_MT8173		0x0c00
-#define DISP_COLOR_START(comp)		((comp)->data->color_offset)
-#define DISP_COLOR_WIDTH(comp)		(DISP_COLOR_START(comp) + 0x50)
-#define DISP_COLOR_HEIGHT(comp)		(DISP_COLOR_START(comp) + 0x54)
-
 #define DISP_AAL_EN			0x0000
 #define DISP_AAL_SIZE			0x0030
@@ -55,9 +48,6 @@
 #define LUT_10BIT_MASK			0x03ff
 
-#define COLOR_BYPASS_ALL		BIT(7)
-#define COLOR_SEQ_SEL			BIT(13)
-
 #define OD_RELAYMODE			BIT(0)
 
 #define UFO_BYPASS			BIT(2)
@@ -82,20 +72,6 @@
 #define DITHER_ADD_LSHIFT_G(x)		(((x) & 0x7) << 4)
 #define DITHER_ADD_RSHIFT_G(x)		(((x) & 0x7) << 0)
 
-struct mtk_disp_color_data {
-	unsigned int color_offset;
-};
-
-struct mtk_disp_color {
-	struct mtk_ddp_comp ddp_comp;
-	const struct mtk_disp_color_data *data;
-};
-
-static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
-{
-	return container_of(comp, struct mtk_disp_color, ddp_comp);
-}
-
 void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
 		    unsigned int CFG)
 {
@@ -119,25 +95,6 @@ void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
 	}
 }
 
-static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
-			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc)
-{
-	struct mtk_disp_color *color = comp_to_color(comp);
-
-	writel(w, comp->regs + DISP_COLOR_WIDTH(color));
-	writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
-}
-
-static void mtk_color_start(struct mtk_ddp_comp *comp)
-{
-	struct mtk_disp_color *color = comp_to_color(comp);
-
-	writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
-	       comp->regs + DISP_COLOR_CFG_MAIN);
-	writel(0x1, comp->regs + DISP_COLOR_START(color));
-}
-
 static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
 			  unsigned int h, unsigned int vrefresh,
 			  unsigned int bpc)
@@ -229,11 +186,6 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
 	.stop = mtk_gamma_stop,
 };
 
-static const struct mtk_ddp_comp_funcs ddp_color = {
-	.config = mtk_color_config,
-	.start = mtk_color_start,
-};
-
 static const struct mtk_ddp_comp_funcs ddp_od = {
 	.config = mtk_od_config,
 	.start = mtk_od_start,
@@ -268,8 +220,8 @@ struct mtk_ddp_comp_match {
 static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
 	[DDP_COMPONENT_AAL]	= { MTK_DISP_AAL,	0, &ddp_aal },
 	[DDP_COMPONENT_BLS]	= { MTK_DISP_BLS,	0, NULL },
-	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, &ddp_color },
-	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, &ddp_color },
+	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, NULL },
+	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, NULL },
 	[DDP_COMPONENT_DPI0]	= { MTK_DPI,		0, NULL },
 	[DDP_COMPONENT_DSI0]	= { MTK_DSI,		0, NULL },
 	[DDP_COMPONENT_DSI1]	= { MTK_DSI,		1, NULL },
@@ -286,22 +238,6 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
 	[DDP_COMPONENT_WDMA1]	= { MTK_DISP_WDMA,	1, NULL },
 };
 
-static const struct mtk_disp_color_data mt2701_color_driver_data = {
-	.color_offset = DISP_COLOR_START_MT2701,
-};
-
-static const struct mtk_disp_color_data mt8173_color_driver_data = {
-	.color_offset = DISP_COLOR_START_MT8173,
-};
-
-static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
-	{ .compatible = "mediatek,mt2701-disp-color",
-	  .data = &mt2701_color_driver_data},
-	{ .compatible = "mediatek,mt8173-disp-color",
-	  .data = &mt8173_color_driver_data},
-	{},
-};
-
 int mtk_ddp_comp_get_id(struct device_node *node,
 			enum mtk_ddp_comp_type comp_type)
 {
@@ -324,23 +260,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 	enum mtk_ddp_comp_type type;
 	struct device_node *larb_node;
 	struct platform_device *larb_pdev;
-	const struct of_device_id *match;
-	struct mtk_disp_color *color;
 
 	if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
 		return -EINVAL;
 
 	type = mtk_ddp_matches[comp_id].type;
-	if (type == MTK_DISP_COLOR) {
-		devm_kfree(dev, comp);
-		color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
-		if (!color)
-			return -ENOMEM;
-
-		match = of_match_node(mtk_disp_color_driver_dt_match, node);
-		color->data = match->data;
-		comp = &color->ddp_comp;
-	}
 
 	comp->id = comp_id;
 	comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
...
@@ -439,11 +439,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
 		private->comp_node[comp_id] = of_node_get(node);
 
 		/*
-		 * Currently only the OVL, RDMA, DSI, and DPI blocks have
+		 * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have
 		 * separate component platform drivers and initialize their own
 		 * DDP component structure. The others are initialized here.
 		 */
-		if (comp_type == MTK_DISP_OVL ||
+		if (comp_type == MTK_DISP_COLOR ||
+		    comp_type == MTK_DISP_OVL ||
 		    comp_type == MTK_DISP_RDMA ||
 		    comp_type == MTK_DSI ||
 		    comp_type == MTK_DPI) {
@@ -566,6 +567,7 @@ static struct platform_driver mtk_drm_platform_driver = {
 static struct platform_driver * const mtk_drm_drivers[] = {
 	&mtk_ddp_driver,
+	&mtk_disp_color_driver,
 	&mtk_disp_ovl_driver,
 	&mtk_disp_rdma_driver,
 	&mtk_dpi_driver,
@@ -576,33 +578,14 @@ static struct platform_driver * const mtk_drm_drivers[] = {
 static int __init mtk_drm_init(void)
 {
-	int ret;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) {
-		ret = platform_driver_register(mtk_drm_drivers[i]);
-		if (ret < 0) {
-			pr_err("Failed to register %s driver: %d\n",
-			       mtk_drm_drivers[i]->driver.name, ret);
-			goto err;
-		}
-	}
-
-	return 0;
-
-err:
-	while (--i >= 0)
-		platform_driver_unregister(mtk_drm_drivers[i]);
-
-	return ret;
+	return platform_register_drivers(mtk_drm_drivers,
+					 ARRAY_SIZE(mtk_drm_drivers));
 }
 
 static void __exit mtk_drm_exit(void)
 {
-	int i;
-
-	for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--)
-		platform_driver_unregister(mtk_drm_drivers[i]);
+	platform_unregister_drivers(mtk_drm_drivers,
+				    ARRAY_SIZE(mtk_drm_drivers));
 }
 
 module_init(mtk_drm_init);
...
@@ -59,6 +59,7 @@ struct mtk_drm_private {
 };
 
 extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_color_driver;
 extern struct platform_driver mtk_disp_ovl_driver;
 extern struct platform_driver mtk_disp_rdma_driver;
 extern struct platform_driver mtk_dpi_driver;
...
@@ -28,6 +28,8 @@ static const u32 formats[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
 	DRM_FORMAT_RGB565,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUYV,
 };
 
 static void mtk_plane_reset(struct drm_plane *plane)
...
@@ -930,7 +930,7 @@ static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
 		DRM_INFO("type is 0x02, try again\n");
 		break;
 	default:
-		DRM_INFO("type(0x%x) cannot be non-recognite\n", type);
+		DRM_INFO("type(0x%x) not recognized\n", type);
 		break;
 	}
...
@@ -1778,33 +1778,14 @@ static struct platform_driver * const mtk_hdmi_drivers[] = {
 static int __init mtk_hdmitx_init(void)
 {
-	int ret;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
-		ret = platform_driver_register(mtk_hdmi_drivers[i]);
-		if (ret < 0) {
-			pr_err("Failed to register %s driver: %d\n",
-			       mtk_hdmi_drivers[i]->driver.name, ret);
-			goto err;
-		}
-	}
-
-	return 0;
-
-err:
-	while (--i >= 0)
-		platform_driver_unregister(mtk_hdmi_drivers[i]);
-
-	return ret;
+	return platform_register_drivers(mtk_hdmi_drivers,
+					 ARRAY_SIZE(mtk_hdmi_drivers));
 }
 
 static void __exit mtk_hdmitx_exit(void)
 {
-	int i;
-
-	for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
-		platform_driver_unregister(mtk_hdmi_drivers[i]);
+	platform_unregister_drivers(mtk_hdmi_drivers,
+				    ARRAY_SIZE(mtk_hdmi_drivers));
 }
 
 module_init(mtk_hdmitx_init);
...
@@ -31,6 +31,7 @@
 #include "radeon_asic.h"
 #include "atom.h"
 #include <linux/backlight.h>
+#include <linux/dmi.h>
 
 extern int atom_debug;
@@ -2184,9 +2185,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
 			goto assigned;
 	}
 
-	/* on DCE32 and encoder can driver any block so just crtc id */
+	/*
+	 * On DCE32 any encoder can drive any block so usually just use crtc id,
+	 * but Apple thinks different at least on iMac10,1, so there use linkb,
+	 * otherwise the internal eDP panel will stay dark.
+	 */
 	if (ASIC_IS_DCE32(rdev)) {
-		enc_idx = radeon_crtc->crtc_id;
+		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+			enc_idx = (dig->linkb) ? 1 : 0;
+		else
+			enc_idx = radeon_crtc->crtc_id;
+
 		goto assigned;
 	}
...
@@ -1195,7 +1195,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
 			continue;
 
 		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
-		if (!dp)
+		if (!port)
 			return -ENOMEM;
 
 		port->extcon = extcon;
...
@@ -45,13 +45,13 @@ struct rockchip_crtc_state {
  *
  * @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc.
  * @num_pipe: number of pipes for this device.
- * @mm_lock: protect drm_mm on multi-threads.
  */
 struct rockchip_drm_private {
 	struct drm_fb_helper fbdev_helper;
 	struct drm_gem_object *fbdev_bo;
 	struct drm_atomic_state *state;
 	struct iommu_domain *domain;
+	/* protect drm_mm on multi-threads */
 	struct mutex mm_lock;
 	struct drm_mm mm;
 	struct list_head psr_list;
...
@@ -29,12 +29,11 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
 	ssize_t ret;
 
 	mutex_lock(&private->mm_lock);
 	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
 	mutex_unlock(&private->mm_lock);
 	if (ret < 0) {
 		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
 		return ret;
@@ -56,7 +55,9 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
 	return 0;
 
 err_remove_node:
+	mutex_lock(&private->mm_lock);
 	drm_mm_remove_node(&rk_obj->mm);
+	mutex_unlock(&private->mm_lock);
 
 	return ret;
 }
...
@@ -22,56 +22,56 @@ struct dw_hdmi;
  * 48bit bus.
  *
  * +----------------------+----------------------------------+------------------------------+
- * + Format Name          + Format Code                      + Encodings                    +
+ * | Format Name          | Format Code                      | Encodings                    |
 * +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 8bit       + ``MEDIA_BUS_FMT_RGB888_1X24``    + ``V4L2_YCBCR_ENC_DEFAULT``   +
+ * | RGB 4:4:4 8bit       | ``MEDIA_BUS_FMT_RGB888_1X24``    | ``V4L2_YCBCR_ENC_DEFAULT``   |
 * +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 10bits     + ``MEDIA_BUS_FMT_RGB101010_1X30`` + ``V4L2_YCBCR_ENC_DEFAULT``   +
+ * | RGB 4:4:4 10bits     | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT``   |
 * +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 12bits     + ``MEDIA_BUS_FMT_RGB121212_1X36`` + ``V4L2_YCBCR_ENC_DEFAULT``   +
+ * | RGB 4:4:4 12bits     | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT``   |
 * +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 16bits     + ``MEDIA_BUS_FMT_RGB161616_1X48`` + ``V4L2_YCBCR_ENC_DEFAULT``   +
+ * | RGB 4:4:4 16bits     | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT``   |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 8bit     + ``MEDIA_BUS_FMT_YUV8_1X24``      + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV601``  +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV709``  +
+ * | YCbCr 4:4:4 8bit     | ``MEDIA_BUS_FMT_YUV8_1X24``      | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV601``  |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV709``  |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 10bits   + ``MEDIA_BUS_FMT_YUV10_1X30``     + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV601``  +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV709``  +
+ * | YCbCr 4:4:4 10bits   | ``MEDIA_BUS_FMT_YUV10_1X30``     | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV601``  |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV709``  |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 12bits   + ``MEDIA_BUS_FMT_YUV12_1X36``     + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV601``  +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV709``  +
+ * | YCbCr 4:4:4 12bits   | ``MEDIA_BUS_FMT_YUV12_1X36``     | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV601``  |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV709``  |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 16bits   + ``MEDIA_BUS_FMT_YUV16_1X48``     + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV601``  +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_XV709``  +
+ * | YCbCr 4:4:4 16bits   | ``MEDIA_BUS_FMT_YUV16_1X48``     | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV601``  |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_XV709``  |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 8bit     + ``MEDIA_BUS_FMT_UYVY8_1X16``     + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:2 8bit     | ``MEDIA_BUS_FMT_UYVY8_1X16``     | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 10bits   + ``MEDIA_BUS_FMT_UYVY10_1X20``    + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:2 10bits   | ``MEDIA_BUS_FMT_UYVY10_1X20``    | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 12bits   + ``MEDIA_BUS_FMT_UYVY12_1X24``    + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:2 12bits   | ``MEDIA_BUS_FMT_UYVY12_1X24``    | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 8bit     + ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` + ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:0 8bit     | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 10bits   + ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``+ ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:0 10bits   | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 12bits   + ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``+ ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:0 12bits   | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 16bits   + ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``+ ``V4L2_YCBCR_ENC_601``       +
- * +                      +                                  + or ``V4L2_YCBCR_ENC_709``    +
+ * | YCbCr 4:2:0 16bits   | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601``       |
+ * |                      |                                  | or ``V4L2_YCBCR_ENC_709``    |
 * +----------------------+----------------------------------+------------------------------+
 */
...