Commit 0f95ee9a authored by Daniel Vetter

Merge tag 'drm-misc-next-2022-06-08' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.20:

UAPI Changes:

 * connector: export bpc limits in debugfs

 * dma-buf: Print buffer name in debugfs

Cross-subsystem Changes:

 * dma-buf: Improve dma-fence handling; Cleanups

 * fbdev: Device-unregistering fixes

Core Changes:

 * client: Only use driver-validated modes to avoid blank screen

 * dp-aux: Make probing more reliable; Small fixes

 * edid: CEA data-block iterators; Introduce struct drm_edid; Many cleanups

 * gem: Don't use framebuffer format's non-existing color planes

 * probe-helper: Use 640x480 as DisplayPort fallback; Refactoring

 * scheduler: Don't kill jobs in interrupt context

Driver Changes:

 * amdgpu: Use atomic fence helpers in DM; Fix VRAM address calculation;
   Export CRTC bpc settings via debugfs

 * bridge: Add TI-DLPC3433;  anx7625: Fixes;  fy07024di26a30d: Optional
   GPIO reset;  icn6211: Cleanups;  ldb: Add reg and reg-name properties
   to bindings, Kconfig fixes;  lt9611: Fix display sensing;  lt9611uxc:
   Fixes;  nwl-dsi: Fixes;  ps8640: Cleanups;  st7735r: Fixes;  tc358767:
   DSI/DPI refactoring and DSI-to-eDP support, Fixes; ti-sn65dsi83:
   Fixes;

 * gma500: Cleanup connector I2C handling

 * hyperv: Unify VRAM allocation of Gen1 and Gen2

 * i915: export CRTC bpc settings via debugfs

 * meson: Support YUV422 output; Refcount fixes

 * mgag200: Support damage clipping; Support gamma handling; Protect
   concurrent HW access; Fixes to connector; Store model-specific limits
   in device-info structure; Cleanups

 * nouveau: Fixes and Cleanups

 * panel: Kconfig fixes

 * panfrost: Valhall support

 * r128: Fix bit-shift overflow

 * rockchip: Locking fixes in error path; Minor cleanups

 * ssd130x: Fix built-in linkage

 * ttm: Cleanups

 * udl: Always advertise VGA connector

 * fbdev/vesa: Support COMPILE_TEST
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YqBtumw05JZDEZE2@linux-uq9g
parents b13baccc dfa687bf
......@@ -24,6 +24,15 @@ properties:
clock-names:
const: ldb
reg:
minItems: 2
maxItems: 2
reg-names:
items:
- const: ldb
- const: lvds
ports:
$ref: /schemas/graph.yaml#/properties/ports
......@@ -56,10 +65,15 @@ examples:
#include <dt-bindings/clock/imx8mp-clock.h>
blk-ctrl {
bridge {
#address-cells = <1>;
#size-cells = <1>;
bridge@5c {
compatible = "fsl,imx8mp-ldb";
clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
clock-names = "ldb";
reg = <0x5c 0x4>, <0x128 0x4>;
reg-names = "ldb", "lvds";
ports {
#address-cells = <1>;
......
......@@ -55,7 +55,6 @@ examples:
compatible = "ingenic,jz4780-dw-hdmi";
reg = <0x10180000 0x8000>;
reg-io-width = <4>;
ddc-i2c-bus = <&i2c4>;
interrupt-parent = <&intc>;
interrupts = <3>;
clocks = <&cgu JZ4780_CLK_AHB0>, <&cgu JZ4780_CLK_HDMI>;
......
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/ti,dlpc3433.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI DLPC3433 MIPI DSI to DMD bridge
maintainers:
- Jagan Teki <jagan@amarulasolutions.com>
- Christopher Vollo <chris@renewoutreach.org>
description: |
TI DLPC3433 is a MIPI DSI based display controller bridge
for processing high resolution DMD based projectors.
It has a flexible configuration of MIPI DSI and DPI signal
input that produces a DMD output in RGB565, RGB666, RGB888
formats.
It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
properties:
compatible:
const: ti,dlpc3433
reg:
enum:
- 0x1b
- 0x1d
enable-gpios:
description: PROJ_ON pin; the chip powers up when PROJ_ON is high.
vcc_intf-supply:
description: A 1.8V/3.3V supply that powers the Host I/O.
vcc_flsh-supply:
description: A 1.8V/3.3V supply that powers the Flash I/O.
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: Video port for MIPI DSI input.
properties:
endpoint:
$ref: /schemas/media/video-interfaces.yaml#
unevaluatedProperties: false
properties:
data-lanes:
description: array of physical DSI data lane indexes.
minItems: 1
items:
- const: 1
- const: 2
- const: 3
- const: 4
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: Video port for DMD output.
required:
- port@0
- port@1
required:
- compatible
- reg
- enable-gpios
- ports
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
i2c1 {
#address-cells = <1>;
#size-cells = <0>;
bridge@1b {
compatible = "ti,dlpc3433";
reg = <0x1b>;
enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
bridge_in_dsi: endpoint {
remote-endpoint = <&dsi_out_bridge>;
data-lanes = <1 2 3 4>;
};
};
port@1 {
reg = <1>;
bridge_out_panel: endpoint {
remote-endpoint = <&panel_out_bridge>;
};
};
};
};
};
......@@ -35,7 +35,6 @@ required:
- reg
- avdd-supply
- dvdd-supply
- reset-gpios
additionalProperties: false
......
......@@ -14,16 +14,21 @@ properties:
pattern: '^gpu@[a-f0-9]+$'
compatible:
items:
- enum:
- amlogic,meson-g12a-mali
- mediatek,mt8183-mali
- realtek,rtd1619-mali
- renesas,r9a07g044-mali
- renesas,r9a07g054-mali
- rockchip,px30-mali
- rockchip,rk3568-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
oneOf:
- items:
- enum:
- amlogic,meson-g12a-mali
- mediatek,mt8183-mali
- realtek,rtd1619-mali
- renesas,r9a07g044-mali
- renesas,r9a07g054-mali
- rockchip,px30-mali
- rockchip,rk3568-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
- items:
- enum:
- mediatek,mt8192-mali
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable
reg:
maxItems: 1
......
......@@ -617,6 +617,17 @@ Contact: Javier Martinez Canillas <javierm@redhat.com>
Level: Intermediate
Convert Kernel Selftests (kselftest) to KUnit tests when appropriate
--------------------------------------------------------------------
Many of the `Kselftest <https://www.kernel.org/doc/html/latest/dev-tools/kselftest.html>`_
tests in DRM could be converted to KUnit tests instead, since that framework
is more suitable for unit testing.
Contact: Javier Martinez Canillas <javierm@redhat.com>
Level: Starter
Enable trinity for DRM
----------------------
......
......@@ -6462,6 +6462,7 @@ F: include/uapi/drm/savage_drm.h
DRM DRIVER FOR SIMPLE FRAMEBUFFERS
M: Thomas Zimmermann <tzimmermann@suse.de>
M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
......@@ -6503,6 +6504,12 @@ DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/
DRM DRIVER FOR TI DLPC3433 MIPI DSI TO DMD BRIDGE
M: Jagan Teki <jagan@amarulasolutions.com>
S: Maintained
F: Documentation/devicetree/bindings/display/bridge/ti,dlpc3433.yaml
F: drivers/gpu/drm/bridge/ti-dlpc3433.c
DRM DRIVER FOR TI SN65DSI86 BRIDGE CHIP
R: Douglas Anderson <dianders@chromium.org>
F: Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
......@@ -6791,6 +6798,7 @@ F: drivers/gpu/drm/omapdrm/
DRM DRIVERS FOR V3D
M: Emma Anholt <emma@anholt.net>
M: Melissa Wen <mwen@igalia.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
......
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
dma-resv.o
dma-fence-unwrap.o dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
......
......@@ -1359,7 +1359,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
return ret;
seq_puts(s, "\nDma-buf Objects:\n");
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
......@@ -1376,7 +1376,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
file_count(buf_obj->file),
buf_obj->exp_name,
file_inode(buf_obj->file)->i_ino,
buf_obj->name ?: "");
buf_obj->name ?: "<none>");
spin_unlock(&buf_obj->name_lock);
dma_resv_describe(buf_obj->resv, s);
......
......@@ -62,8 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
replacement = NULL;
}
tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
prev, replacement);
tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev),
RCU_INITIALIZER(replacement)));
if (tmp == prev)
dma_fence_put(tmp);
else
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* dma-fence-util: misc functions for dma_fence objects
*
* Copyright (C) 2022 Advanced Micro Devices, Inc.
* Authors:
* Christian König <christian.koenig@amd.com>
*/
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
cursor->array = dma_fence_chain_contained(cursor->chain);
cursor->index = 0;
return dma_fence_array_first(cursor->array);
}
/**
* dma_fence_unwrap_first - return the first fence from fence containers
* @head: the entrypoint into the containers
* @cursor: current position inside the containers
*
* Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
* first fence.
*/
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
struct dma_fence_unwrap *cursor)
{
cursor->chain = dma_fence_get(head);
return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);
/**
* dma_fence_unwrap_next - return the next fence from the fence containers
* @cursor: current position inside the containers
*
* Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
* the next fence from them.
*/
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
struct dma_fence *tmp;
++cursor->index;
tmp = dma_fence_array_next(cursor->array, cursor->index);
if (tmp)
return tmp;
cursor->chain = dma_fence_chain_walk(cursor->chain);
return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
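/*
 * Illustration only, not part of this patch: callers are expected to use the
 * dma_fence_unwrap_for_each() iterator from <linux/dma-fence-unwrap.h>, which
 * is built on dma_fence_unwrap_first()/dma_fence_unwrap_next() (the selftests
 * below use it the same way). A minimal, hypothetical helper that visits every
 * leaf fence behind a chain/array container could look like this:
 *
 *	static unsigned int count_unsignaled(struct dma_fence *head)
 *	{
 *		struct dma_fence_unwrap cursor;
 *		struct dma_fence *f;
 *		unsigned int count = 0;
 *
 *		dma_fence_unwrap_for_each(f, &cursor, head)
 *			if (!dma_fence_is_signaled(f))
 *				count++;
 *
 *		return count;
 *	}
 */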
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *iter)
{
struct dma_fence_array *result;
struct dma_fence *tmp, **array;
unsigned int i;
size_t count;
count = 0;
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
++count;
}
if (count == 0)
return dma_fence_get_stub();
array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
if (!array)
return NULL;
/*
* This trashes the input fence array and uses it as position for the
* following merge loop. This works because the dma_fence_merge()
* wrapper macro is creating this temporary array on the stack together
* with the iterators.
*/
for (i = 0; i < num_fences; ++i)
fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
count = 0;
do {
unsigned int sel;
restart:
tmp = NULL;
for (i = 0; i < num_fences; ++i) {
struct dma_fence *next;
while (fences[i] && dma_fence_is_signaled(fences[i]))
fences[i] = dma_fence_unwrap_next(&iter[i]);
next = fences[i];
if (!next)
continue;
/*
* We can't guarantee that input fences are ordered by
* context, but it is still quite likely when this
* function is used multiple times. So attempt to order
* the fences by context as we pass over them and merge
* fences with the same context.
*/
if (!tmp || tmp->context > next->context) {
tmp = next;
sel = i;
} else if (tmp->context < next->context) {
continue;
} else if (dma_fence_is_later(tmp, next)) {
fences[i] = dma_fence_unwrap_next(&iter[i]);
goto restart;
} else {
fences[sel] = dma_fence_unwrap_next(&iter[sel]);
goto restart;
}
}
if (tmp) {
array[count++] = dma_fence_get(tmp);
fences[sel] = dma_fence_unwrap_next(&iter[sel]);
}
} while (tmp);
if (count == 0) {
tmp = dma_fence_get_stub();
goto return_tmp;
}
if (count == 1) {
tmp = array[0];
goto return_tmp;
}
result = dma_fence_array_create(count, array,
dma_fence_context_alloc(1),
1, false);
if (!result) {
tmp = NULL;
goto return_tmp;
}
return &result->base;
return_tmp:
kfree(array);
return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
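/*
 * Illustration only, not part of this patch: drivers should go through the
 * dma_fence_unwrap_merge() wrapper (as sync_file_merge() does later in this
 * series) rather than call __dma_fence_unwrap_merge() directly. A minimal,
 * hypothetical caller; it owns a reference to the returned fence and gets
 * NULL back on allocation failure:
 *
 *	static long merge_and_wait(struct dma_fence *a, struct dma_fence *b)
 *	{
 *		struct dma_fence *merged;
 *		long ret;
 *
 *		merged = dma_fence_unwrap_merge(a, b);
 *		if (!merged)
 *			return -ENOMEM;
 *
 *		ret = dma_fence_wait(merged, true);
 *		dma_fence_put(merged);
 *		return ret;
 *	}
 */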
......@@ -4,27 +4,19 @@
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*/
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#if 0
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#endif
#include "selftest.h"
#define CHAIN_SZ (4 << 10)
static inline struct mock_fence {
struct mock_fence {
struct dma_fence base;
spinlock_t lock;
} *to_mock_fence(struct dma_fence *f) {
return container_of(f, struct mock_fence, base);
}
};
static const char *mock_name(struct dma_fence *f)
{
......@@ -45,7 +37,8 @@ static struct dma_fence *mock_fence(void)
return NULL;
spin_lock_init(&f->lock);
dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
dma_fence_init(&f->base, &mock_ops, &f->lock,
dma_fence_context_alloc(1), 1);
return &f->base;
}
......@@ -59,7 +52,7 @@ static struct dma_fence *mock_array(unsigned int num_fences, ...)
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences)
return NULL;
goto error_put;
va_start(valist, num_fences);
for (i = 0; i < num_fences; ++i)
......@@ -70,13 +63,17 @@ static struct dma_fence *mock_array(unsigned int num_fences, ...)
dma_fence_context_alloc(1),
1, false);
if (!array)
goto cleanup;
goto error_free;
return &array->base;
cleanup:
for (i = 0; i < num_fences; ++i)
dma_fence_put(fences[i]);
error_free:
kfree(fences);
error_put:
va_start(valist, num_fences);
for (i = 0; i < num_fences; ++i)
dma_fence_put(va_arg(valist, typeof(*fences)));
va_end(valist);
return NULL;
}
......@@ -113,7 +110,6 @@ static int sanitycheck(void *arg)
if (!chain)
return -ENOMEM;
dma_fence_signal(f);
dma_fence_put(chain);
return err;
}
......@@ -154,10 +150,8 @@ static int unwrap_array(void *arg)
err = -EINVAL;
}
dma_fence_signal(f1);
dma_fence_signal(f2);
dma_fence_put(array);
return 0;
return err;
}
static int unwrap_chain(void *arg)
......@@ -196,10 +190,8 @@ static int unwrap_chain(void *arg)
err = -EINVAL;
}
dma_fence_signal(f1);
dma_fence_signal(f2);
dma_fence_put(chain);
return 0;
return err;
}
static int unwrap_chain_array(void *arg)
......@@ -242,10 +234,115 @@ static int unwrap_chain_array(void *arg)
err = -EINVAL;
}
dma_fence_signal(f1);
dma_fence_signal(f2);
dma_fence_put(chain);
return 0;
return err;
}
static int unwrap_merge(void *arg)
{
struct dma_fence *fence, *f1, *f2, *f3;
struct dma_fence_unwrap iter;
int err = 0;
f1 = mock_fence();
if (!f1)
return -ENOMEM;
f2 = mock_fence();
if (!f2) {
err = -ENOMEM;
goto error_put_f1;
}
f3 = dma_fence_unwrap_merge(f1, f2);
if (!f3) {
err = -ENOMEM;
goto error_put_f2;
}
dma_fence_unwrap_for_each(fence, &iter, f3) {
if (fence == f1) {
dma_fence_put(f1);
f1 = NULL;
} else if (fence == f2) {
dma_fence_put(f2);
f2 = NULL;
} else {
pr_err("Unexpected fence!\n");
err = -EINVAL;
}
}
if (f1 || f2) {
pr_err("Not all fences seen!\n");
err = -EINVAL;
}
dma_fence_put(f3);
error_put_f2:
dma_fence_put(f2);
error_put_f1:
dma_fence_put(f1);
return err;
}
static int unwrap_merge_complex(void *arg)
{
struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
struct dma_fence_unwrap iter;
int err = -ENOMEM;
f1 = mock_fence();
if (!f1)
return -ENOMEM;
f2 = mock_fence();
if (!f2)
goto error_put_f1;
f3 = dma_fence_unwrap_merge(f1, f2);
if (!f3)
goto error_put_f2;
/* The resulting array has the fences in reverse */
f4 = dma_fence_unwrap_merge(f2, f1);
if (!f4)
goto error_put_f3;
/* Signaled fences should be filtered, the two arrays merged. */
f5 = dma_fence_unwrap_merge(f3, f4, dma_fence_get_stub());
if (!f5)
goto error_put_f4;
err = 0;
dma_fence_unwrap_for_each(fence, &iter, f5) {
if (fence == f1) {
dma_fence_put(f1);
f1 = NULL;
} else if (fence == f2) {
dma_fence_put(f2);
f2 = NULL;
} else {
pr_err("Unexpected fence!\n");
err = -EINVAL;
}
}
if (f1 || f2) {
pr_err("Not all fences seen!\n");
err = -EINVAL;
}
dma_fence_put(f5);
error_put_f4:
dma_fence_put(f4);
error_put_f3:
dma_fence_put(f3);
error_put_f2:
dma_fence_put(f2);
error_put_f1:
dma_fence_put(f1);
return err;
}
int dma_fence_unwrap(void)
......@@ -255,6 +352,8 @@ int dma_fence_unwrap(void)
SUBTEST(unwrap_array),
SUBTEST(unwrap_chain),
SUBTEST(unwrap_chain_array),
SUBTEST(unwrap_merge),
SUBTEST(unwrap_merge_complex),
};
return subtests(tests, NULL);
......
......@@ -146,50 +146,6 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
return buf;
}
static int sync_file_set_fence(struct sync_file *sync_file,
struct dma_fence **fences, int num_fences)
{
struct dma_fence_array *array;
/*
* The reference for the fences in the new sync_file and held
* in add_fence() during the merge procedure, so for num_fences == 1
* we already own a new reference to the fence. For num_fence > 1
* we own the reference of the dma_fence_array creation.
*/
if (num_fences == 0) {
sync_file->fence = dma_fence_get_stub();
kfree(fences);
} else if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
array = dma_fence_array_create(num_fences, fences,
dma_fence_context_alloc(1),
1, false);
if (!array)
return -ENOMEM;
sync_file->fence = &array->base;
}
return 0;
}
static void add_fence(struct dma_fence **fences,
int *i, struct dma_fence *fence)
{
fences[*i] = fence;
if (!dma_fence_is_signaled(fence)) {
dma_fence_get(fence);
(*i)++;
}
}
/**
* sync_file_merge() - merge two sync_files
* @name: name of new fence
......@@ -203,84 +159,21 @@ static void add_fence(struct dma_fence **fences,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct dma_fence *a_fence, *b_fence, **fences;
struct dma_fence_unwrap a_iter, b_iter;
unsigned int index, num_fences;
struct sync_file *sync_file;
struct dma_fence *fence;
sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
num_fences = 0;
dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
++num_fences;
dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
++num_fences;
if (num_fences > INT_MAX)
goto err_free_sync_file;
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences)
goto err_free_sync_file;
/*
* We can't guarantee that fences in both a and b are ordered, but it is
* still quite likely.
*
* So attempt to order the fences as we pass over them and merge fences
* with the same context.
*/
index = 0;
for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
a_fence || b_fence; ) {
if (!b_fence) {
add_fence(fences, &index, a_fence);
a_fence = dma_fence_unwrap_next(&a_iter);
} else if (!a_fence) {
add_fence(fences, &index, b_fence);
b_fence = dma_fence_unwrap_next(&b_iter);
} else if (a_fence->context < b_fence->context) {
add_fence(fences, &index, a_fence);
a_fence = dma_fence_unwrap_next(&a_iter);
} else if (b_fence->context < a_fence->context) {
add_fence(fences, &index, b_fence);
b_fence = dma_fence_unwrap_next(&b_iter);
} else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
a_fence->ops)) {
add_fence(fences, &index, a_fence);
a_fence = dma_fence_unwrap_next(&a_iter);
b_fence = dma_fence_unwrap_next(&b_iter);
} else {
add_fence(fences, &index, b_fence);
a_fence = dma_fence_unwrap_next(&a_iter);
b_fence = dma_fence_unwrap_next(&b_iter);
}
fence = dma_fence_unwrap_merge(a->fence, b->fence);
if (!fence) {
fput(sync_file->file);
return NULL;
}
if (sync_file_set_fence(sync_file, fences, index) < 0)
goto err_put_fences;
sync_file->fence = fence;
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
err_put_fences:
while (index)
dma_fence_put(fences[--index]);
kfree(fences);
err_free_sync_file:
fput(sync_file->file);
return NULL;
}
static int sync_file_release(struct inode *inode, struct file *file)
......
......@@ -50,6 +50,35 @@ to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}
static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
return list_first_entry_or_null(list, struct drm_buddy_block, link);
}
static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
struct drm_buddy_block *block;
u64 start, size;
block = amdgpu_vram_mgr_first_block(head);
if (!block)
return false;
while (head != block->link.next) {
start = amdgpu_vram_mgr_block_start(block);
size = amdgpu_vram_mgr_block_size(block);
block = list_entry(block->link.next, struct drm_buddy_block, link);
if (start + size != amdgpu_vram_mgr_block_start(block))
return false;
}
return true;
}
/**
* DOC: mem_info_vram_total
*
......@@ -496,16 +525,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
list_splice_tail(trim_list, &vres->blocks);
}
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
vres->base.start = 0;
list_for_each_entry(block, &vres->blocks, link) {
unsigned long start;
block = amdgpu_vram_mgr_first_block(&vres->blocks);
if (!block) {
r = -EINVAL;
goto error_fini;
}
start = amdgpu_vram_mgr_block_start(block) +
amdgpu_vram_mgr_block_size(block);
start >>= PAGE_SHIFT;
if (start > vres->base.num_pages)
start -= vres->base.num_pages;
else
start = 0;
vres->base.start = max(vres->base.start, start);
vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
}
if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
......
......@@ -53,33 +53,6 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
return PAGE_SIZE << drm_buddy_block_order(block);
}
static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
return list_first_entry_or_null(list, struct drm_buddy_block, link);
}
static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
struct drm_buddy_block *block;
u64 start, size;
block = amdgpu_vram_mgr_first_block(head);
if (!block)
return false;
while (head != block->link.next) {
start = amdgpu_vram_mgr_block_start(block);
size = amdgpu_vram_mgr_block_size(block);
block = list_entry(block->link.next, struct drm_buddy_block, link);
if (start + size != amdgpu_vram_mgr_block_start(block))
return false;
}
return true;
}
static inline struct amdgpu_vram_mgr_resource *
to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
{
......
......@@ -83,6 +83,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
......@@ -6591,14 +6592,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
crtc_debugfs_init(crtc);
return 0;
}
#endif
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
......@@ -6692,9 +6691,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
.late_register = amdgpu_dm_crtc_late_register,
#endif
};
static enum drm_connector_status
......@@ -7598,6 +7595,10 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
goto error_unpin;
}
r = drm_gem_plane_helper_prepare_fb(plane, new_state);
if (unlikely(r != 0))
goto error_unpin;
amdgpu_bo_unreserve(rbo);
afb->address = amdgpu_bo_gpu_offset(rbo);
......@@ -9132,7 +9133,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
long r;
unsigned long flags;
struct amdgpu_bo *abo;
uint32_t target_vblank, last_flip_vblank;
......@@ -9207,18 +9207,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
abo = gem_to_amdgpu_bo(fb->obj[0]);
/*
* Wait for all fences on this FB. Do limited wait to avoid
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
r = dma_resv_wait_timeout(abo->tbo.base.resv,
DMA_RESV_USAGE_WRITE, false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state,
afb->tiling_flags,
......@@ -9561,9 +9549,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
int crtc_disable_count = 0;
bool mode_set_reset_required = false;
int r;
trace_amdgpu_dm_atomic_commit_tail_begin(state);
r = drm_atomic_helper_wait_for_fences(dev, state, false);
if (unlikely(r))
DRM_ERROR("Waiting for fences timed out!");
drm_atomic_helper_update_legacy_modeset_state(dev, state);
dm_state = dm_atomic_get_new_state(state);
......
......@@ -871,28 +871,18 @@ static int psr_capability_show(struct seq_file *m, void *data)
}
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
* Returns the current bpc for the crtc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc
*/
static int output_bpc_show(struct seq_file *m, void *data)
static int amdgpu_current_bpc_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_device *dev = connector->dev;
struct drm_crtc *crtc = NULL;
struct drm_crtc *crtc = m->private;
struct drm_device *dev = crtc->dev;
struct dm_crtc_state *dm_crtc_state = NULL;
int res = -ENODEV;
unsigned int bpc;
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if (connector->state == NULL)
goto unlock;
crtc = connector->state->crtc;
if (crtc == NULL)
goto unlock;
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state == NULL)
goto unlock;
......@@ -922,18 +912,15 @@ static int output_bpc_show(struct seq_file *m, void *data)
}
seq_printf(m, "Current: %u\n", bpc);
seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
res = 0;
unlock:
if (crtc)
drm_modeset_unlock(&crtc->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_modeset_unlock(&crtc->mutex);
mutex_unlock(&dev->mode_config.mutex);
return res;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
/*
* Example usage:
......@@ -2539,7 +2526,6 @@ static int target_backlight_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
......@@ -2786,7 +2772,6 @@ static const struct {
const struct file_operations *fops;
} connector_debugfs_entries[] = {
{"force_yuv420_output", &force_yuv420_output_fops},
{"output_bpc", &output_bpc_fops},
{"trigger_hotplug", &trigger_hotplug_debugfs_fops},
{"internal_display", &internal_display_fops}
};
......@@ -3170,9 +3155,10 @@ static int crc_win_update_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
crc_win_update_set, "%llu\n");
#endif
void crtc_debugfs_init(struct drm_crtc *crtc)
{
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
if (!dir)
......@@ -3188,9 +3174,11 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
}
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
}
/*
* Writes DTN log state to the user supplied buffer.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
......
......@@ -31,8 +31,6 @@
void connector_debugfs_init(struct amdgpu_dm_connector *connector);
void dtn_debugfs_init(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void crtc_debugfs_init(struct drm_crtc *crtc);
#endif
#endif
......@@ -78,6 +78,7 @@ config DRM_DISPLAY_CONNECTOR
config DRM_FSL_LDB
tristate "Freescale i.MX8MP LDB bridge"
depends on OF
depends on ARCH_MXC || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
......@@ -321,6 +322,22 @@ config DRM_TOSHIBA_TC358775
help
Toshiba TC358775 DSI/LVDS bridge chip driver.
config DRM_TI_DLPC3433
tristate "TI DLPC3433 Display controller"
depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
help
TI DLPC3433 is a MIPI DSI based display controller bridge
for processing high resolution DMD based projectors.
It has a flexible configuration of MIPI DSI and DPI signal
input that produces a DMD output in RGB565, RGB666, RGB888
formats.
It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
......
......@@ -26,6 +26,7 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_DLPC3433) += ti-dlpc3433.o
obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
......
......@@ -226,18 +226,6 @@
#define ADV7511_REG_CEC_CLK_DIV 0x4e
#define ADV7511_REG_CEC_SOFT_RESET 0x50
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
ADV7511_REG_CEC_RX1_FRAME_HDR,
ADV7511_REG_CEC_RX2_FRAME_HDR,
ADV7511_REG_CEC_RX3_FRAME_HDR,
};
static const u8 ADV7511_REG_CEC_RX_FRAME_LEN[] = {
ADV7511_REG_CEC_RX1_FRAME_LEN,
ADV7511_REG_CEC_RX2_FRAME_LEN,
ADV7511_REG_CEC_RX3_FRAME_LEN,
};
#define ADV7533_REG_CEC_OFFSET 0x70
enum adv7511_input_clock {
......
......@@ -15,6 +15,18 @@
#include "adv7511.h"
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
ADV7511_REG_CEC_RX1_FRAME_HDR,
ADV7511_REG_CEC_RX2_FRAME_HDR,
ADV7511_REG_CEC_RX3_FRAME_HDR,
};
static const u8 ADV7511_REG_CEC_RX_FRAME_LEN[] = {
ADV7511_REG_CEC_RX1_FRAME_LEN,
ADV7511_REG_CEC_RX2_FRAME_LEN,
ADV7511_REG_CEC_RX3_FRAME_LEN,
};
#define ADV7511_INT1_CEC_MASK \
(ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \
ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1 | \
......
......@@ -1638,6 +1638,7 @@ static int anx7625_parse_dt(struct device *dev,
bus_type = 0;
mipi_lanes = of_property_count_u32_elems(ep0, "data-lanes");
of_node_put(ep0);
}
if (bus_type == V4L2_FWNODE_BUS_TYPE_PARALLEL) /* bus type is Parallel(DSI) */
......
......@@ -462,6 +462,7 @@ struct cdns_dsi {
struct reset_control *dsi_p_rst;
struct clk *dsi_sys_clk;
bool link_initialized;
bool phy_initialized;
struct phy *dphy;
};
......@@ -711,11 +712,21 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
pm_runtime_put(dsi->base.dev);
}
static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
{
struct cdns_dsi_output *output = &dsi->output;
u32 status;
if (dsi->phy_initialized)
return;
/*
* Power all internal DPHY blocks down and maintain their reset line
* asserted before changing the DPHY config.
......@@ -739,6 +750,7 @@ static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
DPHY_D_RSTB(output->dev->lanes) | DPHY_C_RSTB,
dsi->regs + MCTL_DPHY_CFG0);
dsi->phy_initialized = true;
}
static void cdns_dsi_init_link(struct cdns_dsi *dsi)
......@@ -914,11 +926,25 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
cdns_dsi_init_link(dsi);
cdns_dsi_hs_init(dsi);
}
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
.disable = cdns_dsi_bridge_disable,
.pre_enable = cdns_dsi_bridge_pre_enable,
.enable = cdns_dsi_bridge_enable,
.post_disable = cdns_dsi_bridge_post_disable,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
......
......@@ -9,6 +9,8 @@
#include <drm/drm_print.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
......@@ -26,6 +28,11 @@
#define PD_CTRL(n) (0x0a + ((n) & 0x3)) /* 0..3 */
#define RST_CTRL(n) (0x0e + ((n) & 0x1)) /* 0..1 */
#define SYS_CTRL(n) (0x10 + ((n) & 0x7)) /* 0..4 */
#define SYS_CTRL_1_CLK_PHASE_MSK GENMASK(5, 4)
#define CLK_PHASE_0 0
#define CLK_PHASE_1_4 1
#define CLK_PHASE_1_2 2
#define CLK_PHASE_3_4 3
#define RGB_DRV(n) (0x18 + ((n) & 0x3)) /* 0..3 */
#define RGB_DLY(n) (0x1c + ((n) & 0x1)) /* 0..1 */
#define RGB_TEST_CTRL 0x1e
......@@ -100,7 +107,7 @@
#define MIPI_PN_SWAP 0x87
#define MIPI_PN_SWAP_CLK BIT(4)
#define MIPI_PN_SWAP_D(n) BIT((n) & 0x3)
#define MIPI_SOT_SYNC_BIT_(n) (0x88 + ((n) & 0x1)) /* 0..1 */
#define MIPI_SOT_SYNC_BIT(n) (0x88 + ((n) & 0x1)) /* 0..1 */
#define MIPI_ULPS_CTRL 0x8a
#define MIPI_CLK_CHK_VAR 0x8e
#define MIPI_CLK_CHK_INI 0x8f
......@@ -115,7 +122,7 @@
#define MIPI_T_CLK_SETTLE 0x9a
#define MIPI_TO_HS_RX_L 0x9e
#define MIPI_TO_HS_RX_H 0x9f
#define MIPI_PHY_(n) (0xa0 + ((n) & 0x7)) /* 0..5 */
#define MIPI_PHY(n) (0xa0 + ((n) & 0x7)) /* 0..5 */
#define MIPI_PD_RX 0xb0
#define MIPI_PD_TERM 0xb1
#define MIPI_PD_HSRX 0xb2
......@@ -125,13 +132,11 @@
#define MIPI_FORCE_0 0xb6
#define MIPI_RST_CTRL 0xb7
#define MIPI_RST_NUM 0xb8
#define MIPI_DBG_SET_(n) (0xc0 + ((n) & 0xf)) /* 0..9 */
#define MIPI_DBG_SET(n) (0xc0 + ((n) & 0xf)) /* 0..9 */
#define MIPI_DBG_SEL 0xe0
#define MIPI_DBG_DATA 0xe1
#define MIPI_ATE_TEST_SEL 0xe2
#define MIPI_ATE_STATUS_(n) (0xe3 + ((n) & 0x1)) /* 0..1 */
#define MIPI_ATE_STATUS_1 0xe4
#define ICN6211_MAX_REGISTER MIPI_ATE_STATUS(1)
#define MIPI_ATE_STATUS(n) (0xe3 + ((n) & 0x1)) /* 0..1 */
struct chipone {
struct device *dev;
......@@ -155,10 +160,10 @@ static const struct regmap_range chipone_dsi_readable_ranges[] = {
regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY(5)),
regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
regmap_reg_range(MIPI_DBG_SET(0), MIPI_DBG_SET(9)),
regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS(1)),
};
static const struct regmap_access_table chipone_dsi_readable_table = {
......@@ -172,10 +177,10 @@ static const struct regmap_range chipone_dsi_writeable_ranges[] = {
regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY(5)),
regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
regmap_reg_range(MIPI_DBG_SET(0), MIPI_DBG_SET(9)),
regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS(1)),
};
static const struct regmap_access_table chipone_dsi_writeable_table = {
......@@ -189,7 +194,7 @@ static const struct regmap_config chipone_regmap_config = {
.rd_table = &chipone_dsi_readable_table,
.wr_table = &chipone_dsi_writeable_table,
.cache_type = REGCACHE_RBTREE,
.max_register = MIPI_ATE_STATUS_(1),
.max_register = MIPI_ATE_STATUS(1),
};
static int chipone_dsi_read(void *context,
......@@ -336,7 +341,7 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
const struct drm_bridge_state *bridge_state;
u16 hfp, hbp, hsync;
u32 bus_flags;
u8 pol, id[4];
u8 pol, sys_ctrl_1, id[4];
chipone_readb(icn, VENDOR_ID, id);
chipone_readb(icn, DEVICE_ID_H, id + 1);
......@@ -414,7 +419,14 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
chipone_configure_pll(icn, mode);
chipone_writeb(icn, SYS_CTRL(0), 0x40);
chipone_writeb(icn, SYS_CTRL(1), 0x88);
sys_ctrl_1 = 0x88;
if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
sys_ctrl_1 |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK, CLK_PHASE_0);
else
sys_ctrl_1 |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK, CLK_PHASE_1_2);
chipone_writeb(icn, SYS_CTRL(1), sys_ctrl_1);
/* icn6211 specific sequence */
chipone_writeb(icn, MIPI_FORCE_0, 0x20);
......
......@@ -578,15 +578,13 @@ static struct lt9611_mode *lt9611_find_mode(const struct drm_display_mode *mode)
}
/* connector funcs */
static enum drm_connector_status
lt9611_connector_detect(struct drm_connector *connector, bool force)
static enum drm_connector_status __lt9611_detect(struct lt9611 *lt9611)
{
struct lt9611 *lt9611 = connector_to_lt9611(connector);
unsigned int reg_val = 0;
int connected = 0;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
connected = (reg_val & BIT(0));
connected = (reg_val & (BIT(2) | BIT(0)));
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
......@@ -594,6 +592,12 @@ lt9611_connector_detect(struct drm_connector *connector, bool force)
return lt9611->status;
}
static enum drm_connector_status
lt9611_connector_detect(struct drm_connector *connector, bool force)
{
return __lt9611_detect(connector_to_lt9611(connector));
}
static int lt9611_read_edid(struct lt9611 *lt9611)
{
unsigned int temp;
......@@ -893,17 +897,7 @@ static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
int connected;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
connected = reg_val & BIT(0);
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
return lt9611->status;
return __lt9611_detect(bridge_to_lt9611(bridge));
}
static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
......
......@@ -982,7 +982,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
flush_scheduled_work();
cancel_work_sync(&lt9611uxc->work);
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
......
......@@ -665,6 +665,12 @@ static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
return ret;
}
ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to set DSI phy mode: %d\n", ret);
goto uninit_phy;
}
ret = phy_configure(dsi->phy, phy_cfg);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
......
......@@ -537,12 +537,11 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.pre_enable = ps8640_pre_enable,
};
static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridge)
static int ps8640_bridge_get_dsi_resources(struct device *dev, struct ps8640 *ps_bridge)
{
struct device_node *in_ep, *dsi_node;
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
int ret;
const struct mipi_dsi_device_info info = { .type = "ps8640",
.channel = 0,
.node = NULL,
......@@ -577,17 +576,40 @@ static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridg
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = NUM_MIPI_LANES;
ret = devm_mipi_dsi_attach(dev, dsi);
return 0;
}
static int ps8640_bridge_link_panel(struct drm_dp_aux *aux)
{
struct ps8640 *ps_bridge = aux_to_ps8640(aux);
struct device *dev = aux->dev;
struct device_node *np = dev->of_node;
int ret;
/*
* NOTE about returning -EPROBE_DEFER from this function: if we
* return an error (most relevant to -EPROBE_DEFER) it will only
* be passed out to ps8640_probe() if it called this directly (AKA the
* panel isn't under the "aux-bus" node). That should be fine because
* if the panel is under "aux-bus" it's guaranteed to have probed by
* the time this function has been called.
*/
/* port@1 is ps8640 output port */
ps_bridge->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);
ret = devm_drm_bridge_add(dev, &ps_bridge->bridge);
if (ret)
return ret;
return 0;
return devm_mipi_dsi_attach(dev, ps_bridge->dsi);
}
static int ps8640_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
struct ps8640 *ps_bridge;
int ret;
u32 i;
......@@ -628,6 +650,14 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps8640_of_panel_on_aux_bus(&client->dev))
ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
/*
* Get MIPI DSI resources early. These can return -EPROBE_DEFER so
* we want to get them out of the way sooner.
*/
ret = ps8640_bridge_get_dsi_resources(&client->dev, ps_bridge);
if (ret)
return ret;
ps_bridge->page[PAGE0_DP_CNTL] = client;
ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config);
......@@ -670,35 +700,19 @@ static int ps8640_probe(struct i2c_client *client)
if (ret)
return ret;
devm_of_dp_aux_populate_ep_devices(&ps_bridge->aux);
/* port@1 is ps8640 output port */
ps_bridge->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);
drm_bridge_add(&ps_bridge->bridge);
ret = ps8640_bridge_host_attach(dev, ps_bridge);
if (ret)
goto err_bridge_remove;
ret = devm_of_dp_aux_populate_bus(&ps_bridge->aux, ps8640_bridge_link_panel);
return 0;
/*
* If devm_of_dp_aux_populate_bus() returns -ENODEV then it's up to
* us to call ps8640_bridge_link_panel() directly. NOTE: in this case
* the function is allowed to -EPROBE_DEFER.
*/
if (ret == -ENODEV)
return ps8640_bridge_link_panel(&ps_bridge->aux);
err_bridge_remove:
drm_bridge_remove(&ps_bridge->bridge);
return ret;
}
static int ps8640_remove(struct i2c_client *client)
{
struct ps8640 *ps_bridge = i2c_get_clientdata(client);
drm_bridge_remove(&ps_bridge->bridge);
return 0;
}
static const struct of_device_id ps8640_match[] = {
{ .compatible = "parade,ps8640" },
{ }
......@@ -707,7 +721,6 @@ MODULE_DEVICE_TABLE(of, ps8640_match);
static struct i2c_driver ps8640_driver = {
.probe_new = ps8640_probe,
.remove = ps8640_remove,
.driver = {
.name = "ps8640",
.of_match_table = ps8640_match,
......
......@@ -3,10 +3,7 @@
* TC358767/TC358867/TC9595 DSI/DPI-to-DPI/(e)DP bridge driver
*
* The TC358767/TC358867/TC9595 can operate in multiple modes.
* The following modes are supported:
* DPI->(e)DP -- supported
* DSI->DPI .... supported
* DSI->(e)DP .. NOT supported
* All modes are supported -- DPI->(e)DP / DSI->DPI / DSI->(e)DP .
*
* Copyright (C) 2016 CogentEmbedded Inc
* Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
......@@ -309,6 +306,9 @@ struct tc_data {
/* do we have IRQ */
bool have_irq;
/* Input connector type, DSI and not DPI. */
bool input_connector_dsi;
/* HPD pin number (0 or 1) or -ENODEV */
int hpd_pin;
};
......@@ -1247,11 +1247,60 @@ static int tc_main_link_disable(struct tc_data *tc)
return regmap_write(tc->regmap, DP0CTL, 0);
}
static int tc_dpi_stream_enable(struct tc_data *tc)
static int tc_dsi_rx_enable(struct tc_data *tc)
{
u32 value;
int ret;
regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
regmap_write(tc->regmap, PPI_LPTXTIMECNT, LPX_PERIOD);
value = ((LANEENABLE_L0EN << tc->dsi_lanes) - LANEENABLE_L0EN) |
LANEENABLE_CLEN;
regmap_write(tc->regmap, PPI_LANEENABLE, value);
regmap_write(tc->regmap, DSI_LANEENABLE, value);
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
else
value |= DP0_VIDSRC_DSI_RX;
ret = regmap_write(tc->regmap, SYSCTRL, value);
if (ret)
return ret;
usleep_range(120, 150);
regmap_write(tc->regmap, PPI_STARTPPI, PPI_START_FUNCTION);
regmap_write(tc->regmap, DSI_STARTDSI, DSI_RX_START);
return 0;
}
static int tc_dpi_rx_enable(struct tc_data *tc)
{
u32 value;
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
else
value |= DP0_VIDSRC_DPI_RX;
return regmap_write(tc->regmap, SYSCTRL, value);
}
static int tc_dpi_stream_enable(struct tc_data *tc)
{
int ret;
dev_dbg(tc->dev, "enable video stream\n");
/* Setup PLL */
......@@ -1277,20 +1326,6 @@ static int tc_dpi_stream_enable(struct tc_data *tc)
if (ret)
return ret;
regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
regmap_write(tc->regmap, PPI_LPTXTIMECNT, LPX_PERIOD);
value = ((LANEENABLE_L0EN << tc->dsi_lanes) - LANEENABLE_L0EN) |
LANEENABLE_CLEN;
regmap_write(tc->regmap, PPI_LANEENABLE, value);
regmap_write(tc->regmap, DSI_LANEENABLE, value);
ret = tc_set_common_video_mode(tc, &tc->mode);
if (ret)
return ret;
......@@ -1299,22 +1334,7 @@ static int tc_dpi_stream_enable(struct tc_data *tc)
if (ret)
return ret;
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
else
value |= DP0_VIDSRC_DSI_RX;
ret = regmap_write(tc->regmap, SYSCTRL, value);
if (ret)
return ret;
usleep_range(120, 150);
regmap_write(tc->regmap, PPI_STARTPPI, PPI_START_FUNCTION);
regmap_write(tc->regmap, DSI_STARTDSI, DSI_RX_START);
return 0;
return tc_dsi_rx_enable(tc);
}
static int tc_dpi_stream_disable(struct tc_data *tc)
......@@ -1333,8 +1353,18 @@ static int tc_edp_stream_enable(struct tc_data *tc)
dev_dbg(tc->dev, "enable video stream\n");
/* PXL PLL setup */
if (tc_test_pattern) {
/*
* Pixel PLL must be enabled for DSI input mode and test pattern.
*
* Per TC9595XBG datasheet Revision 0.1 2018-12-27 Figure 4.18
* "Clock Mode Selection and Clock Sources", either Pixel PLL
* or DPI_PCLK supplies StrmClk. DPI_PCLK is only available in
* case valid Pixel Clock are supplied to the chip DPI input.
* In case built-in test pattern is desired OR DSI input mode
* is used, DPI_PCLK is not available and thus Pixel PLL must
* be used instead.
*/
if (tc->input_connector_dsi || tc_test_pattern) {
ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
1000 * tc->mode.clock);
if (ret)
......@@ -1372,17 +1402,12 @@ static int tc_edp_stream_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0CTL, value);
if (ret)
return ret;
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
if (tc->input_connector_dsi)
return tc_dsi_rx_enable(tc);
else
value |= DP0_VIDSRC_DPI_RX;
ret = regmap_write(tc->regmap, SYSCTRL, value);
if (ret)
return ret;
return 0;
return tc_dpi_rx_enable(tc);
}
static int tc_edp_stream_disable(struct tc_data *tc)
......@@ -1871,7 +1896,7 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
of_node_put(host_node);
of_node_put(endpoint);
if (dsi_lanes < 0 || dsi_lanes > 4)
if (dsi_lanes <= 0 || dsi_lanes > 4)
return -EINVAL;
if (!host)
......@@ -1992,18 +2017,29 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
mode |= BIT(endpoint.port);
}
if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp)
if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) {
tc->input_connector_dsi = false;
return tc_probe_edp_bridge_endpoint(tc);
else if (mode == mode_dsi_to_dpi)
} else if (mode == mode_dsi_to_dpi) {
tc->input_connector_dsi = true;
return tc_probe_dpi_bridge_endpoint(tc);
else if (mode == mode_dsi_to_edp || mode == mode_dsi_to_dp)
dev_warn(dev, "The mode DSI-to-(e)DP is not supported!\n");
else
dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
} else if (mode == mode_dsi_to_edp || mode == mode_dsi_to_dp) {
tc->input_connector_dsi = true;
return tc_probe_edp_bridge_endpoint(tc);
}
dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
return -EINVAL;
}
static void tc_clk_disable(void *data)
{
struct clk *refclk = data;
clk_disable_unprepare(refclk);
}
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
......@@ -2020,6 +2056,24 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (ret)
return ret;
tc->refclk = devm_clk_get(dev, "ref");
if (IS_ERR(tc->refclk)) {
ret = PTR_ERR(tc->refclk);
dev_err(dev, "Failed to get refclk: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(tc->refclk);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, tc_clk_disable, tc->refclk);
if (ret)
return ret;
/* tRSTW = 100 cycles , at 13 MHz that is ~7.69 us */
usleep_range(10, 15);
/* Shut down GPIO is optional */
tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
if (IS_ERR(tc->sd_gpio))
......@@ -2040,13 +2094,6 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
usleep_range(5000, 10000);
}
tc->refclk = devm_clk_get(dev, "ref");
if (IS_ERR(tc->refclk)) {
ret = PTR_ERR(tc->refclk);
dev_err(dev, "Failed to get refclk: %d\n", ret);
return ret;
}
tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
if (IS_ERR(tc->regmap)) {
ret = PTR_ERR(tc->regmap);
......@@ -2137,7 +2184,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tc);
if (tc->bridge.type == DRM_MODE_CONNECTOR_DPI) { /* DPI output */
if (tc->input_connector_dsi) { /* DSI input */
ret = tc_mipi_dsi_host_attach(tc);
if (ret) {
drm_bridge_remove(&tc->bridge);
......
This diff is collapsed.
This diff is collapsed.
......@@ -170,6 +170,29 @@ void drm_bridge_add(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_add);
static void drm_bridge_remove_void(void *bridge)
{
drm_bridge_remove(bridge);
}
/**
* devm_drm_bridge_add - devm managed version of drm_bridge_add()
*
* @dev: device to tie the bridge lifetime to
* @bridge: bridge control structure
*
* This is the managed version of drm_bridge_add() which automatically
* calls drm_bridge_remove() when @dev is unbound.
*
* Return: 0 if no error or negative error code.
*/
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
drm_bridge_add(bridge);
return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);
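/*
 * Illustration only, not part of this patch: with the devm_ variant a bridge
 * driver no longer needs an explicit drm_bridge_remove() in its remove path
 * (compare the ps8640 conversion in this series). Hypothetical probe sketch;
 * struct foo_bridge (which embeds a struct drm_bridge as ctx->bridge) and
 * foo_bridge_funcs are made-up names:
 *
 *	static int foo_bridge_probe(struct platform_device *pdev)
 *	{
 *		struct foo_bridge *ctx;
 *
 *		ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *
 *		ctx->bridge.funcs = &foo_bridge_funcs;
 *		ctx->bridge.of_node = pdev->dev.of_node;
 *
 *		return devm_drm_bridge_add(&pdev->dev, &ctx->bridge);
 *	}
 */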
/**
* drm_bridge_remove - remove the given bridge from the global bridge list
*
......
......@@ -158,22 +158,31 @@ drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int
return NULL;
}
static struct drm_display_mode *
drm_connector_pick_cmdline_mode(struct drm_connector *connector)
static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
* Find a user-defined mode. If the user gave us a valid
* mode on the kernel command line, it will show up in this
* list.
*/
list_for_each_entry(mode, &connector->modes, head) {
if (mode->type & DRM_MODE_TYPE_USERDEF)
return mode;
}
cmdline_mode = &connector->cmdline_mode;
if (cmdline_mode->specified == false)
return NULL;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
/*
* Attempt to find a matching mode in the list of modes we
* have gotten so far.
*/
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
prefer_non_interlace = !cmdline_mode->interlace;
again:
......@@ -207,12 +216,7 @@ drm_connector_pick_cmdline_mode(struct drm_connector *connector)
goto again;
}
create_mode:
mode = drm_mode_create_from_cmdline_mode(connector->dev, cmdline_mode);
if (mode)
list_add(&mode->head, &connector->modes);
return mode;
return NULL;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
......
......@@ -395,6 +395,23 @@ static int vrr_range_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(vrr_range);
/*
* Returns Connector's max supported bpc through debugfs file.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
*/
static int output_bpc_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
if (connector->status != connector_status_connected)
return -ENODEV;
seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(output_bpc);
static const struct file_operations drm_edid_fops = {
.owner = THIS_MODULE,
.open = edid_open,
......@@ -437,6 +454,10 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
debugfs_create_file("vrr_range", S_IRUGO, root, connector,
&vrr_range_fops);
/* max bpc */
debugfs_create_file("output_bpc", 0444, root, connector,
&output_bpc_fops);
if (connector->funcs->debugfs_init)
connector->funcs->debugfs_init(connector, root);
}
......
......@@ -33,11 +33,11 @@ static int validate_displayid(const u8 *displayid, int length, int idx)
return 0;
}
static const u8 *drm_find_displayid_extension(const struct edid *edid,
static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
int *length, int *idx,
int *ext_index)
{
const u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT, ext_index);
const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
int ret;
......@@ -58,12 +58,12 @@ static const u8 *drm_find_displayid_extension(const struct edid *edid,
return displayid;
}
void displayid_iter_edid_begin(const struct edid *edid,
void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
iter->edid = edid;
iter->drm_edid = drm_edid;
}
static const struct displayid_block *
......@@ -88,7 +88,7 @@ __displayid_iter_next(struct displayid_iter *iter)
{
const struct displayid_block *block;
if (!iter->edid)
if (!iter->drm_edid)
return NULL;
if (iter->section) {
......@@ -96,7 +96,7 @@ __displayid_iter_next(struct displayid_iter *iter)
block = displayid_iter_block(iter);
if (WARN_ON(!block)) {
iter->section = NULL;
iter->edid = NULL;
iter->drm_edid = NULL;
return NULL;
}
......@@ -109,12 +109,12 @@ __displayid_iter_next(struct displayid_iter *iter)
}
for (;;) {
iter->section = drm_find_displayid_extension(iter->edid,
iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
&iter->ext_index);
if (!iter->section) {
iter->edid = NULL;
iter->drm_edid = NULL;
return NULL;
}
......
This diff is collapsed.
......@@ -226,7 +226,7 @@ void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
return ERR_PTR(-EINVAL);
return ERR_PTR(-ENOMEM);
encoder = container + offset;
......
......@@ -169,8 +169,10 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
struct dma_fence *new;
if (WARN_ON_ONCE(!obj))
continue;
if (!obj) {
ret = -EINVAL;
goto error;
}
ret = dma_resv_get_singleton(obj->resv, usage, &new);
if (ret)
......
......@@ -53,7 +53,11 @@ MODULE_IMPORT_NS(DMA_BUF);
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
if (plane >= ARRAY_SIZE(fb->obj))
struct drm_device *dev = fb->dev;
if (drm_WARN_ON_ONCE(dev, plane >= ARRAY_SIZE(fb->obj)))
return NULL;
else if (drm_WARN_ON_ONCE(dev, !fb->obj[plane]))
return NULL;
return fb->obj[plane];
......@@ -92,9 +96,9 @@ drm_gem_fb_init(struct drm_device *dev,
*/
void drm_gem_fb_destroy(struct drm_framebuffer *fb)
{
size_t i;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(fb->obj); i++)
for (i = 0; i < fb->format->num_planes; i++)
drm_gem_object_put(fb->obj[i]);
drm_framebuffer_cleanup(fb);
......@@ -329,24 +333,26 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
* The argument returns the addresses of the data stored in each BO. This
* is different from @map if the framebuffer's offsets field is non-zero.
*
* Both @map and @data must refer to arrays with at least
* fb->format->num_planes elements.
*
* See drm_gem_fb_vunmap() for unmapping.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_fb_vmap(struct drm_framebuffer *fb,
struct iosys_map map[static DRM_FORMAT_MAX_PLANES],
struct iosys_map data[DRM_FORMAT_MAX_PLANES])
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
struct iosys_map *data)
{
struct drm_gem_object *obj;
unsigned int i;
int ret;
for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) {
for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj) {
iosys_map_clear(&map[i]);
continue;
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
ret = drm_gem_vmap(obj, &map[i]);
if (ret)
......@@ -354,7 +360,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb,
}
if (data) {
for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) {
for (i = 0; i < fb->format->num_planes; ++i) {
memcpy(&data[i], &map[i], sizeof(data[i]));
if (iosys_map_is_null(&data[i]))
continue;
......@@ -385,10 +391,9 @@ EXPORT_SYMBOL(drm_gem_fb_vmap);
*
* See drm_gem_fb_vmap() for more information.
*/
void drm_gem_fb_vunmap(struct drm_framebuffer *fb,
struct iosys_map map[static DRM_FORMAT_MAX_PLANES])
void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
{
unsigned int i = DRM_FORMAT_MAX_PLANES;
unsigned int i = fb->format->num_planes;
struct drm_gem_object *obj;
while (i) {
......@@ -403,6 +408,28 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
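A hedged usage sketch for the reworked drm_gem_fb_vmap()/drm_gem_fb_vunmap() pair: callers still pass DRM_FORMAT_MAX_PLANES-sized arrays, but only the format's planes are mapped now; example_access_fb() is an illustrative name.

#include <linux/iosys-map.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

static int example_access_fb(struct drm_framebuffer *fb)
{
	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
	struct iosys_map data[DRM_FORMAT_MAX_PLANES];
	int ret;

	ret = drm_gem_fb_vmap(fb, map, data);
	if (ret)
		return ret;

	/* data[i] addresses plane i's pixels, offsets already applied,
	 * for i < fb->format->num_planes */

	drm_gem_fb_vunmap(fb, map);
	return 0;
}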
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
while (num_planes) {
--num_planes;
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
num_planes, dir, ret);
}
}
/**
* drm_gem_fb_begin_cpu_access - prepares GEM buffer objects for CPU access
* @fb: the framebuffer
......@@ -421,40 +448,27 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
{
struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
size_t i;
int ret, ret2;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(fb->obj); ++i) {
for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
if (!obj) {
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
goto err_dma_buf_end_cpu_access;
goto err___drm_gem_fb_end_cpu_access;
}
return 0;
err_dma_buf_end_cpu_access:
while (i) {
--i;
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret2 = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret2) {
drm_err(fb->dev,
"dma_buf_end_cpu_access() failed during error handling: %d\n",
ret2);
}
}
err___drm_gem_fb_end_cpu_access:
__drm_gem_fb_end_cpu_access(fb, dir, i);
return ret;
}
EXPORT_SYMBOL(drm_gem_fb_begin_cpu_access);
......@@ -472,23 +486,7 @@ EXPORT_SYMBOL(drm_gem_fb_begin_cpu_access);
*/
void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
size_t i = ARRAY_SIZE(fb->obj);
struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
while (i) {
--i;
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
import_attach = obj->import_attach;
if (!import_attach)
continue;
ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access() failed: %d\n", ret);
}
__drm_gem_fb_end_cpu_access(fb, dir, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
......
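A hedged usage sketch of the begin/end pair refactored above; the bracketing only has an effect for planes backed by imported dma-bufs, and example_read_fb() is an illustrative name.

#include <linux/dma-direction.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

static int example_read_fb(struct drm_framebuffer *fb)
{
	int ret;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* CPU reads of the framebuffer's backing storage go here */

	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
	return 0;
}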
......@@ -9,6 +9,7 @@
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
......@@ -630,6 +631,24 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
* Helpers for struct drm_plane_helper_funcs
*/
static void __drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state,
unsigned int num_planes)
{
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo;
struct drm_framebuffer *fb = state->fb;
while (num_planes) {
--num_planes;
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
gbo = drm_gem_vram_of_gem(obj);
drm_gem_vram_unpin(gbo);
}
}
/**
* drm_gem_vram_plane_helper_prepare_fb() - \
* Implements &struct drm_plane_helper_funcs.prepare_fb
......@@ -648,17 +667,22 @@ int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
size_t i;
struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_vram_object *gbo;
struct drm_gem_object *obj;
unsigned int i;
int ret;
if (!new_state->fb)
if (!fb)
return 0;
for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
if (!new_state->fb->obj[i])
continue;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
for (i = 0; i < fb->format->num_planes; ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj) {
ret = -EINVAL;
goto err_drm_gem_vram_unpin;
}
gbo = drm_gem_vram_of_gem(obj);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto err_drm_gem_vram_unpin;
......@@ -671,11 +695,7 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
return 0;
err_drm_gem_vram_unpin:
while (i) {
--i;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
drm_gem_vram_unpin(gbo);
}
__drm_gem_vram_plane_helper_cleanup_fb(plane, new_state, i);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
......@@ -694,18 +714,12 @@ void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
size_t i;
struct drm_gem_vram_object *gbo;
struct drm_framebuffer *fb = old_state->fb;
if (!old_state->fb)
if (!fb)
return;
for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
if (!old_state->fb->obj[i])
continue;
gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
drm_gem_vram_unpin(gbo);
}
__drm_gem_vram_plane_helper_cleanup_fb(plane, old_state, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
......
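For context, a hedged sketch of how a VRAM-backed driver typically wires these helpers into its primary plane; the struct name is illustrative, and drivers can equally use the DRM_GEM_VRAM_PLANE_HELPER_FUNCS convenience macro.

#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_helper_funcs example_primary_plane_helpers = {
	.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
	.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
	/* .atomic_check and .atomic_update are supplied by the driver */
};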
......@@ -1199,6 +1199,13 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
size_t chunk;
int ret;
/* In __spi_validate(), there's a check that rejects partial transfers
* (xfer->len % w_size must be zero).
* Align max_chunk down to a multiple of 2 (16 bits)
* to prevent transfers from being rejected.
*/
max_chunk = ALIGN_DOWN(max_chunk, 2);
spi_message_init_with_transfers(&m, &tr, 1);
while (len) {
......
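A quick worked example of the alignment above (numbers are illustrative): with 16-bit words, w_size is 2, so an odd controller limit such as max_chunk == 65535 would produce a transfer with xfer->len % w_size != 0 and be rejected by __spi_validate(); ALIGN_DOWN(65535, 2) == 65534 keeps every chunk a whole number of 16-bit words.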
......@@ -1328,6 +1328,10 @@ void drm_mode_prune_invalid(struct drm_device *dev,
list_for_each_entry_safe(mode, t, mode_list, head) {
if (mode->status != MODE_OK) {
list_del(&mode->head);
if (mode->type & DRM_MODE_TYPE_USERDEF) {
drm_warn(dev, "User-defined mode not supported: "
DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG_KMS("Not using %s mode: %s\n",
......
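For reference, DRM_MODE_TYPE_USERDEF marks modes that originate from the video= kernel command line, so the new warning fires when such a user-requested mode fails driver validation and gets pruned; an illustrative command-line entry would be:

video=HDMI-A-1:1920x1080@60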