Commit 535f6f5d authored by Dave Airlie

Merge tag 'du-next-20190318' of git://linuxtv.org/pinchartl/media into drm-next

Renesas display driver changes for v5.2:

- Display writeback (includes VSP changes and DRM/KMS API changes)
(All v4l patches acked by Mauro)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190318153613.GE12707@pendragon.ideasonboard.com
parents b9e687fc 12e32f55
......@@ -252,8 +252,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
&mw_state->addrs[0],
mw_state->format);
drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
conn_state->writeback_job = NULL;
drm_writeback_queue_job(mw_conn, conn_state);
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
fb->width, fb->height, mw_state->format,
......
......@@ -2261,10 +2261,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int ret, i, j;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
if (!new_conn_state->writeback_job)
continue;
ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
if (ret < 0)
return ret;
}
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
......
......@@ -30,6 +30,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
......@@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
if (state->commit)
drm_crtc_commit_put(state->commit);
if (state->writeback_job)
drm_writeback_cleanup_job(state->writeback_job);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
......
......@@ -647,28 +647,15 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
static struct drm_writeback_job *
drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
{
WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
if (!conn_state->writeback_job)
conn_state->writeback_job =
kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
return conn_state->writeback_job;
}
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
struct drm_writeback_job *job =
drm_atomic_get_writeback_job(conn_state);
if (!job)
return -ENOMEM;
int ret;
drm_framebuffer_assign(&job->fb, fb);
ret = drm_writeback_set_fb(conn_state, fb);
if (ret < 0)
return ret;
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
......@@ -1158,19 +1145,17 @@ static int prepare_signaling(struct drm_device *dev,
for_each_new_connector_in_state(state, conn, conn_state, i) {
struct drm_writeback_connector *wb_conn;
struct drm_writeback_job *job;
struct drm_out_fence_state *f;
struct dma_fence *fence;
s32 __user *fence_ptr;
if (!conn_state->writeback_job)
continue;
fence_ptr = get_out_fence_for_connector(state, conn);
if (!fence_ptr)
continue;
job = drm_atomic_get_writeback_job(conn_state);
if (!job)
return -ENOMEM;
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
......@@ -1192,7 +1177,7 @@ static int prepare_signaling(struct drm_device *dev,
return ret;
}
job->out_fence = fence;
conn_state->writeback_job->out_fence = fence;
}
/*
......
......@@ -239,14 +239,52 @@ int drm_writeback_connector_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_writeback_connector_init);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
if (!conn_state->writeback_job) {
conn_state->writeback_job =
kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
if (!conn_state->writeback_job)
return -ENOMEM;
conn_state->writeback_job->connector =
drm_connector_to_writeback(conn_state->connector);
}
drm_framebuffer_assign(&conn_state->writeback_job->fb, fb);
return 0;
}
int drm_writeback_prepare_job(struct drm_writeback_job *job)
{
struct drm_writeback_connector *connector = job->connector;
const struct drm_connector_helper_funcs *funcs =
connector->base.helper_private;
int ret;
if (funcs->prepare_writeback_job) {
ret = funcs->prepare_writeback_job(connector, job);
if (ret < 0)
return ret;
}
job->prepared = true;
return 0;
}
EXPORT_SYMBOL(drm_writeback_prepare_job);
/**
* drm_writeback_queue_job - Queue a writeback job for later signalling
* @wb_connector: The writeback connector to queue a job on
* @job: The job to queue
* @conn_state: The connector state containing the job to queue
*
* This function adds a job to the job_queue for a writeback connector. It
* should be considered to take ownership of the writeback job, and so any other
* references to the job must be cleared after calling this function.
* This function adds the job contained in @conn_state to the job_queue for a
* writeback connector. It takes ownership of the writeback job and sets the
* @conn_state->writeback_job to NULL, and so no access to the job may be
* performed by the caller after this function returns.
*
* Drivers must ensure that for a given writeback connector, jobs are queued in
* exactly the same order as they will be completed by the hardware (and
......@@ -258,16 +296,36 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* See also: drm_writeback_signal_completion()
*/
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
struct drm_writeback_job *job)
struct drm_connector_state *conn_state)
{
struct drm_writeback_job *job;
unsigned long flags;
job = conn_state->writeback_job;
conn_state->writeback_job = NULL;
spin_lock_irqsave(&wb_connector->job_lock, flags);
list_add_tail(&job->list_entry, &wb_connector->job_queue);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
}
EXPORT_SYMBOL(drm_writeback_queue_job);
void drm_writeback_cleanup_job(struct drm_writeback_job *job)
{
struct drm_writeback_connector *connector = job->connector;
const struct drm_connector_helper_funcs *funcs =
connector->base.helper_private;
if (job->prepared && funcs->cleanup_writeback_job)
funcs->cleanup_writeback_job(connector, job);
if (job->fb)
drm_framebuffer_put(job->fb);
kfree(job);
}
EXPORT_SYMBOL(drm_writeback_cleanup_job);
/*
* @cleanup_work: deferred cleanup of a writeback job
*
......@@ -280,10 +338,9 @@ static void cleanup_work(struct work_struct *work)
struct drm_writeback_job *job = container_of(work,
struct drm_writeback_job,
cleanup_work);
drm_framebuffer_put(job->fb);
kfree(job);
}
drm_writeback_cleanup_job(job);
}
/**
* drm_writeback_signal_completion - Signal the completion of a writeback job
......
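For reference, a minimal sketch of how a driver's writeback connector commit path uses the updated drm_writeback_queue_job() signature (mirroring the malidp and vc4 changes in this series; the foo_* names are hypothetical):

static void foo_wb_connector_atomic_commit(struct drm_connector *conn,
					   struct drm_connector_state *conn_state)
{
	struct drm_writeback_connector *wb_conn =
		drm_connector_to_writeback(conn);

	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
		return;

	/* Program the capture hardware from conn_state->writeback_job->fb here. */

	/*
	 * Hand the job to the core. The helper takes ownership of the job and
	 * clears conn_state->writeback_job, so the job must not be accessed
	 * after this call.
	 */
	drm_writeback_queue_job(wb_conn, conn_state);
}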
......@@ -36,3 +36,7 @@ config DRM_RCAR_VSP
depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
help
Enable support to expose the R-Car VSP Compositor as KMS planes.
config DRM_RCAR_WRITEBACK
bool
default y if ARM64
......@@ -4,7 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_encoder.o \
rcar_du_group.o \
rcar_du_kms.o \
rcar_du_plane.o
rcar_du_plane.o \
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7790.dtb.o \
......@@ -13,6 +13,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7795.dtb.o \
rcar_du_of_lvds_r8a7796.dtb.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
......
......@@ -648,8 +648,13 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
rstate->outputs = 0;
drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
struct rcar_du_encoder *renc;
/* Skip the writeback encoder. */
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
continue;
renc = to_rcar_encoder(encoder);
rstate->outputs |= BIT(renc->output);
}
......
......@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include <drm/drm_crtc.h>
#include <drm/drm_writeback.h>
#include <media/vsp1.h>
......@@ -27,7 +28,7 @@ struct rcar_du_vsp;
* @clock: the CRTC functional clock
* @extclock: external pixel dot clock (optional)
* @mmio_offset: offset of the CRTC registers in the DU MMIO block
* @index: CRTC software and hardware index
* @index: CRTC hardware index
* @initialized: whether the CRTC has been initialized and clocks enabled
* @dsysr: cached value of the DSYSR register
* @vblank_enable: whether vblank events are enabled on this CRTC
......@@ -39,6 +40,7 @@ struct rcar_du_vsp;
* @group: CRTC group this CRTC belongs to
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
* @writeback: the writeback connector
*/
struct rcar_du_crtc {
struct drm_crtc crtc;
......@@ -65,9 +67,12 @@ struct rcar_du_crtc {
const char *const *sources;
unsigned int sources_count;
struct drm_writeback_connector writeback;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
#define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback)
/**
* struct rcar_du_crtc_state - Driver-specific CRTC state
......
......@@ -26,6 +26,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_du_writeback.h"
/* -----------------------------------------------------------------------------
* Format helpers
......@@ -34,60 +35,70 @@
static const struct rcar_du_format_info rcar_du_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
.v4l2 = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_ARGB1555,
.v4l2 = V4L2_PIX_FMT_ARGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB1555,
.v4l2 = V4L2_PIX_FMT_XRGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
.v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_RGB888,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
.v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_ARGB8888,
}, {
.fourcc = DRM_FORMAT_UYVY,
.v4l2 = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_YUYV,
.v4l2 = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV12,
.v4l2 = V4L2_PIX_FMT_NV12M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV21,
.v4l2 = V4L2_PIX_FMT_NV21M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV16,
.v4l2 = V4L2_PIX_FMT_NV16M,
.bpp = 16,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
......@@ -99,62 +110,77 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
*/
{
.fourcc = DRM_FORMAT_RGB332,
.v4l2 = V4L2_PIX_FMT_RGB332,
.bpp = 8,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_ARGB4444,
.v4l2 = V4L2_PIX_FMT_ARGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_XRGB4444,
.v4l2 = V4L2_PIX_FMT_XRGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGR888,
.v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRA8888,
.v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRX8888,
.v4l2 = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_YVYU,
.v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_NV61,
.v4l2 = V4L2_PIX_FMT_NV61M,
.bpp = 16,
.planes = 2,
}, {
.fourcc = DRM_FORMAT_YUV420,
.v4l2 = V4L2_PIX_FMT_YUV420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU420,
.v4l2 = V4L2_PIX_FMT_YVU420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV422,
.v4l2 = V4L2_PIX_FMT_YUV422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU422,
.v4l2 = V4L2_PIX_FMT_YVU422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV444,
.v4l2 = V4L2_PIX_FMT_YUV444M,
.bpp = 24,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU444,
.v4l2 = V4L2_PIX_FMT_YVU444M,
.bpp = 24,
.planes = 3,
},
......@@ -639,6 +665,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
encoder->possible_clones = (1 << num_encoders) - 1;
}
/* Create the writeback connectors. */
if (rcdu->info->gen >= 3) {
for (i = 0; i < rcdu->num_crtcs; ++i) {
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i];
ret = rcar_du_writeback_init(rcdu, rcrtc);
if (ret < 0)
return ret;
}
}
/*
* Initialize the default DPAD0 source to the index of the first DU
* channel that can be connected to DPAD0. The exact value doesn't
......
......@@ -19,6 +19,7 @@ struct rcar_du_device;
struct rcar_du_format_info {
u32 fourcc;
u32 v4l2;
unsigned int bpp;
unsigned int planes;
unsigned int pnmr;
......
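A short illustration of what the new v4l2 field is for (the lookup mirrors rcar_du_vsp_plane_setup() later in this series; cfg is assumed to be a struct vsp1_du_atomic_config):

const struct rcar_du_format_info *info;

info = rcar_du_format_info(DRM_FORMAT_ARGB8888);
if (info)
	/* The v4l2 field gives the matching V4L2 4CC for the VSP. */
	cfg.pixelformat = info->v4l2;	/* V4L2_PIX_FMT_ABGR32 */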
......@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
......@@ -26,16 +27,19 @@
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
#include "rcar_du_writeback.h"
static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc)
{
struct rcar_du_crtc *crtc = private;
if (crtc->vblank_enable)
drm_crtc_handle_vblank(&crtc->crtc);
if (completed)
if (status & VSP1_DU_STATUS_COMPLETE)
rcar_du_crtc_finish_page_flip(crtc);
if (status & VSP1_DU_STATUS_WRITEBACK)
rcar_du_writeback_complete(crtc);
drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
}
......@@ -107,11 +111,12 @@ void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
state = to_rcar_crtc_state(crtc->crtc.state);
cfg.crc = state->crc;
rcar_du_writeback_setup(crtc, &cfg.writeback);
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
/* Keep the two tables in sync. */
static const u32 formats_kms[] = {
static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
......@@ -139,40 +144,13 @@ static const u32 formats_kms[] = {
DRM_FORMAT_YVU444,
};
static const u32 formats_v4l2[] = {
V4L2_PIX_FMT_RGB332,
V4L2_PIX_FMT_ARGB444,
V4L2_PIX_FMT_XRGB444,
V4L2_PIX_FMT_ARGB555,
V4L2_PIX_FMT_XRGB555,
V4L2_PIX_FMT_RGB565,
V4L2_PIX_FMT_RGB24,
V4L2_PIX_FMT_BGR24,
V4L2_PIX_FMT_ARGB32,
V4L2_PIX_FMT_XRGB32,
V4L2_PIX_FMT_ABGR32,
V4L2_PIX_FMT_XBGR32,
V4L2_PIX_FMT_UYVY,
V4L2_PIX_FMT_YUYV,
V4L2_PIX_FMT_YVYU,
V4L2_PIX_FMT_NV12M,
V4L2_PIX_FMT_NV21M,
V4L2_PIX_FMT_NV16M,
V4L2_PIX_FMT_NV61M,
V4L2_PIX_FMT_YUV420M,
V4L2_PIX_FMT_YVU420M,
V4L2_PIX_FMT_YUV422M,
V4L2_PIX_FMT_YVU422M,
V4L2_PIX_FMT_YUV444M,
V4L2_PIX_FMT_YVU444M,
};
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
const struct rcar_du_format_info *format;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
.pitch = fb->pitches[0],
......@@ -195,37 +173,23 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
if (formats_kms[i] == state->format->fourcc) {
cfg.pixelformat = formats_v4l2[i];
break;
}
}
format = rcar_du_format_info(state->format->fourcc);
cfg.pixelformat = format->v4l2;
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
struct rcar_du_device *rcdu = vsp->dev;
unsigned int i;
int ret;
/*
* There's no need to prepare (and unprepare) the framebuffer when the
* plane is not visible, as it will not be displayed.
*/
if (!state->visible)
return 0;
for (i = 0; i < rstate->format->planes; ++i) {
struct drm_gem_cma_object *gem =
drm_fb_cma_get_gem_obj(state->fb, i);
struct sg_table *sgt = &rstate->sg_tables[i];
for (i = 0; i < fb->format->num_planes; ++i) {
struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
gem->base.size);
......@@ -240,15 +204,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
}
}
ret = drm_gem_fb_prepare_fb(plane, state);
if (ret)
goto fail;
return 0;
fail:
while (i--) {
struct sg_table *sgt = &rstate->sg_tables[i];
struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
......@@ -257,24 +217,52 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
return ret;
}
static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
unsigned int i;
int ret;
/*
* There's no need to prepare (and unprepare) the framebuffer when the
* plane is not visible, as it will not be displayed.
*/
if (!state->visible)
return;
return 0;
ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables);
if (ret < 0)
return ret;
return drm_gem_fb_prepare_fb(plane, state);
}
void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
unsigned int i;
for (i = 0; i < rstate->format->planes; ++i) {
struct sg_table *sgt = &rstate->sg_tables[i];
for (i = 0; i < fb->format->num_planes; ++i) {
struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
}
static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
if (!state->visible)
return;
rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables);
}
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
......@@ -395,8 +383,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_vsp_plane_funcs,
formats_kms,
ARRAY_SIZE(formats_kms),
rcar_du_vsp_formats,
ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
if (ret < 0)
return ret;
......
......@@ -12,8 +12,10 @@
#include <drm/drm_plane.h>
struct drm_framebuffer;
struct rcar_du_format_info;
struct rcar_du_vsp;
struct sg_table;
struct rcar_du_vsp_plane {
struct drm_plane plane;
......@@ -60,6 +62,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3]);
void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3]);
#else
static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp,
struct device_node *np,
......@@ -71,6 +77,17 @@ static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { };
static inline int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp,
struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
return -ENXIO;
}
static inline void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp,
struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
}
#endif
#endif /* __RCAR_DU_VSP_H__ */
// SPDX-License-Identifier: GPL-2.0
/*
* rcar_du_writeback.c -- R-Car Display Unit Writeback Support
*
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
/**
* struct rcar_du_wb_conn_state - Driver-specific writeback connector state
* @state: base DRM connector state
* @format: format of the writeback framebuffer
*/
struct rcar_du_wb_conn_state {
struct drm_connector_state state;
const struct rcar_du_format_info *format;
};
#define to_rcar_wb_conn_state(s) \
container_of(s, struct rcar_du_wb_conn_state, state)
/**
* struct rcar_du_wb_job - Driver-private data for writeback jobs
* @sg_tables: scatter-gather tables for the framebuffer memory
*/
struct rcar_du_wb_job {
struct sg_table sg_tables[3];
};
static int rcar_du_wb_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
}
static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
struct rcar_du_wb_job *rjob;
int ret;
if (!job->fb)
return 0;
rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
if (!rjob)
return -ENOMEM;
/* Map the framebuffer to the VSP. */
ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
if (ret < 0) {
kfree(rjob);
return ret;
}
job->priv = rjob;
return 0;
}
static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
struct rcar_du_wb_job *rjob = job->priv;
if (!job->fb)
return;
rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
kfree(rjob);
}
static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = {
.get_modes = rcar_du_wb_conn_get_modes,
.prepare_writeback_job = rcar_du_wb_prepare_job,
.cleanup_writeback_job = rcar_du_wb_cleanup_job,
};
static struct drm_connector_state *
rcar_du_wb_conn_duplicate_state(struct drm_connector *connector)
{
struct rcar_du_wb_conn_state *copy;
if (WARN_ON(!connector->state))
return NULL;
copy = kzalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
__drm_atomic_helper_connector_duplicate_state(connector, &copy->state);
return &copy->state;
}
static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
__drm_atomic_helper_connector_destroy_state(state);
kfree(to_rcar_wb_conn_state(state));
}
static void rcar_du_wb_conn_reset(struct drm_connector *connector)
{
struct rcar_du_wb_conn_state *state;
if (connector->state) {
rcar_du_wb_conn_destroy_state(connector, connector->state);
connector->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return;
__drm_atomic_helper_connector_reset(connector, &state->state);
}
static const struct drm_connector_funcs rcar_du_wb_conn_funcs = {
.reset = rcar_du_wb_conn_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = rcar_du_wb_conn_duplicate_state,
.atomic_destroy_state = rcar_du_wb_conn_destroy_state,
};
static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct rcar_du_wb_conn_state *wb_state =
to_rcar_wb_conn_state(conn_state);
const struct drm_display_mode *mode = &crtc_state->mode;
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
fb = conn_state->writeback_job->fb;
/*
* Verify that the framebuffer format is supported and that its size
* matches the current mode.
*/
if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n",
__func__, fb->width, fb->height);
return -EINVAL;
}
wb_state->format = rcar_du_format_info(fb->format->format);
if (wb_state->format == NULL) {
dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
fb->format->format);
return -EINVAL;
}
return 0;
}
static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = {
.atomic_check = rcar_du_wb_enc_atomic_check,
};
/*
* Only RGB formats are currently supported as the VSP outputs RGB to the DU
* and can't convert to YUV separately for writeback.
*/
static const u32 writeback_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
};
int rcar_du_writeback_init(struct rcar_du_device *rcdu,
struct rcar_du_crtc *rcrtc)
{
struct drm_writeback_connector *wb_conn = &rcrtc->writeback;
wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc);
drm_connector_helper_add(&wb_conn->base,
&rcar_du_wb_conn_helper_funcs);
return drm_writeback_connector_init(rcdu->ddev, wb_conn,
&rcar_du_wb_conn_funcs,
&rcar_du_wb_enc_helper_funcs,
writeback_formats,
ARRAY_SIZE(writeback_formats));
}
void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
struct vsp1_du_writeback_config *cfg)
{
struct rcar_du_wb_conn_state *wb_state;
struct drm_connector_state *state;
struct rcar_du_wb_job *rjob;
struct drm_framebuffer *fb;
unsigned int i;
state = rcrtc->writeback.base.state;
if (!state || !state->writeback_job || !state->writeback_job->fb)
return;
fb = state->writeback_job->fb;
rjob = state->writeback_job->priv;
wb_state = to_rcar_wb_conn_state(state);
cfg->pixelformat = wb_state->format->v4l2;
cfg->pitch = fb->pitches[0];
for (i = 0; i < wb_state->format->planes; ++i)
cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl)
+ fb->offsets[i];
drm_writeback_queue_job(&rcrtc->writeback, state);
}
void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
{
drm_writeback_signal_completion(&rcrtc->writeback, 0);
}
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_writeback.h -- R-Car Display Unit Writeback Support
*
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
#ifndef __RCAR_DU_WRITEBACK_H__
#define __RCAR_DU_WRITEBACK_H__
#include <drm/drm_plane.h>
struct rcar_du_crtc;
struct rcar_du_device;
struct vsp1_du_atomic_pipe_config;
#ifdef CONFIG_DRM_RCAR_WRITEBACK
int rcar_du_writeback_init(struct rcar_du_device *rcdu,
struct rcar_du_crtc *rcrtc);
void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
struct vsp1_du_writeback_config *cfg);
void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc);
#else
static inline int rcar_du_writeback_init(struct rcar_du_device *rcdu,
struct rcar_du_crtc *rcrtc)
{
return -ENXIO;
}
static inline void
rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
struct vsp1_du_writeback_config *cfg)
{
}
static inline void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
{
}
#endif
#endif /* __RCAR_DU_WRITEBACK_H__ */
......@@ -327,7 +327,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
drm_writeback_queue_job(&txp->connector, conn_state);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
......
......@@ -283,6 +283,7 @@ static const struct v4l2_subdev_ops brx_ops = {
static void brx_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_brx *brx = to_brx(&entity->subdev);
......
......@@ -171,6 +171,7 @@ static const struct v4l2_subdev_ops clu_ops = {
static void clu_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_clu *clu = to_clu(&entity->subdev);
......
......@@ -178,7 +178,7 @@ struct vsp1_dl_cmd_pool {
* @post_cmd: post command to be issued through extended dl header
* @has_chain: if true, indicates that there's a partition chain
* @chain: entry in the display list partition chain
* @internal: whether the display list is used for internal purpose
* @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
*/
struct vsp1_dl_list {
struct list_head list;
......@@ -197,7 +197,7 @@ struct vsp1_dl_list {
bool has_chain;
struct list_head chain;
bool internal;
unsigned int flags;
};
/**
......@@ -699,8 +699,8 @@ struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
* which bodies are added.
*
* Adding a body to a display list passes ownership of the body to the list. The
* caller retains its reference to the fragment when adding it to the display
* list, but is not allowed to add new entries to the body.
* caller retains its reference to the body when adding it to the display list,
* but is not allowed to add new entries to the body.
*
* The reference must be explicitly released by a call to vsp1_dl_body_put()
* when the body isn't needed anymore.
......@@ -770,17 +770,35 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
}
dl->header->num_lists = num_lists;
dl->header->flags = 0;
if (!list_empty(&dl->chain) && !is_last) {
/*
* Enable the interrupt for the end of each frame. In continuous mode
* chained lists are used with one list per frame, so enable the
* interrupt for each list. In singleshot mode chained lists are used
* to partition a single frame, so enable the interrupt for the last
* list only.
*/
if (!dlm->singleshot || is_last)
dl->header->flags |= VSP1_DLH_INT_ENABLE;
/*
* In continuous mode enable auto-start for all lists, as the VSP must
* loop on the same list until a new one is queued. In singleshot mode
* enable auto-start for all lists but the last to chain processing of
* partitions without software intervention.
*/
if (!dlm->singleshot || !is_last)
dl->header->flags |= VSP1_DLH_AUTO_START;
if (!is_last) {
/*
* If this display list's chain is not empty, we are on a list,
* and the next item is the display list that we must queue for
* automatic processing by the hardware.
* If this is not the last display list in the chain, queue the
* next item for automatic processing by the hardware.
*/
struct vsp1_dl_list *next = list_next_entry(dl, chain);
dl->header->next_header = next->dma;
dl->header->flags = VSP1_DLH_AUTO_START;
} else if (!dlm->singleshot) {
/*
* if the display list manager works in continuous mode, the VSP
......@@ -788,13 +806,6 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
* instructed to do otherwise.
*/
dl->header->next_header = dl->dma;
dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
} else {
/*
* Otherwise, in mem-to-mem mode, we work in single-shot mode
* and the next display list must not be started automatically.
*/
dl->header->flags = VSP1_DLH_INT_ENABLE;
}
if (!dl->extension)
......@@ -861,13 +872,15 @@ static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
*
* If a display list is already pending we simply drop it as the new
* display list is assumed to contain a more recent configuration. It is
* an error if the already pending list has the internal flag set, as
* there is then a process waiting for that list to complete. This
* shouldn't happen as the waiting process should perform proper
* locking, but warn just in case.
* an error if the already pending list has the
* VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
* waiting for that list to complete. This shouldn't happen as the
* waiting process should perform proper locking, but warn just in
* case.
*/
if (vsp1_dl_list_hw_update_pending(dlm)) {
WARN_ON(dlm->pending && dlm->pending->internal);
WARN_ON(dlm->pending &&
(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
__vsp1_dl_list_put(dlm->pending);
dlm->pending = dl;
return;
......@@ -897,7 +910,7 @@ static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
dlm->active = dl;
}
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_dl_list *dl_next;
......@@ -912,7 +925,7 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
vsp1_dl_list_fill_header(dl_next, last);
}
dl->internal = internal;
dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;
spin_lock_irqsave(&dlm->lock, flags);
......@@ -941,9 +954,13 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
* set in single-shot mode as display list processing is then not continuous and
* races never occur.
*
* The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
* has completed and had been queued with the internal notification flag.
* Internal notification is only supported for continuous mode.
* The following flags are only supported for continuous mode.
*
* The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that just
* became active had been queued with the internal notification flag.
*
* The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
* display list had been queued with the writeback flag.
*/
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
......@@ -981,14 +998,25 @@ unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
if (status & VI6_STATUS_FLD_STD(dlm->index))
goto done;
/*
* If the active display list has the writeback flag set, the frame
* completion marks the end of the writeback capture. Return the
* VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
* writeback flag.
*/
if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
flags |= VSP1_DL_FRAME_END_WRITEBACK;
dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
}
/*
* The device starts processing the queued display list right after the
* frame end interrupt. The display list thus becomes active.
*/
if (dlm->queued) {
if (dlm->queued->internal)
if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
flags |= VSP1_DL_FRAME_END_INTERNAL;
dlm->queued->internal = false;
dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;
__vsp1_dl_list_put(dlm->active);
dlm->active = dlm->queued;
......
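To summarise the interrupt and auto-start rules spelled out in the comments of vsp1_dl_list_fill_header() above:

/*
 * Header flags set by vsp1_dl_list_fill_header():
 *
 *   mode         list        VSP1_DLH_INT_ENABLE   VSP1_DLH_AUTO_START
 *   continuous   any         yes                   yes
 *   singleshot   not last    no                    yes
 *   singleshot   last        yes                   no
 */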
......@@ -17,8 +17,10 @@ struct vsp1_dl_body_pool;
struct vsp1_dl_list;
struct vsp1_dl_manager;
/* Keep these flags in sync with VSP1_DU_STATUS_* in include/media/vsp1.h. */
#define VSP1_DL_FRAME_END_COMPLETED BIT(0)
#define VSP1_DL_FRAME_END_INTERNAL BIT(1)
#define VSP1_DL_FRAME_END_WRITEBACK BIT(1)
#define VSP1_DL_FRAME_END_INTERNAL BIT(2)
/**
* struct vsp1_dl_ext_cmd - Extended Display command
......@@ -61,7 +63,7 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
void vsp1_dl_list_put(struct vsp1_dl_list *dl);
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl);
struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl);
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal);
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags);
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
......
......@@ -34,14 +34,16 @@ static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe,
unsigned int completion)
{
struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
bool complete = completion == VSP1_DL_FRAME_END_COMPLETED;
if (drm_pipe->du_complete) {
struct vsp1_entity *uif = drm_pipe->uif;
unsigned int status = completion
& (VSP1_DU_STATUS_COMPLETE |
VSP1_DU_STATUS_WRITEBACK);
u32 crc;
crc = uif ? vsp1_uif_get_crc(to_uif(&uif->subdev)) : 0;
drm_pipe->du_complete(drm_pipe->du_private, complete, crc);
drm_pipe->du_complete(drm_pipe->du_private, status, crc);
}
if (completion & VSP1_DL_FRAME_END_INTERNAL) {
......@@ -537,6 +539,12 @@ static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe)
struct vsp1_entity *next;
struct vsp1_dl_list *dl;
struct vsp1_dl_body *dlb;
unsigned int dl_flags = 0;
if (drm_pipe->force_brx_release)
dl_flags |= VSP1_DL_FRAME_END_INTERNAL;
if (pipe->output->writeback)
dl_flags |= VSP1_DL_FRAME_END_WRITEBACK;
dl = vsp1_dl_list_get(pipe->output->dlm);
dlb = vsp1_dl_list_get_body0(dl);
......@@ -554,12 +562,42 @@ static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe)
}
vsp1_entity_route_setup(entity, pipe, dlb);
vsp1_entity_configure_stream(entity, pipe, dlb);
vsp1_entity_configure_stream(entity, pipe, dl, dlb);
vsp1_entity_configure_frame(entity, pipe, dl, dlb);
vsp1_entity_configure_partition(entity, pipe, dl, dlb);
}
vsp1_dl_list_commit(dl, drm_pipe->force_brx_release);
vsp1_dl_list_commit(dl, dl_flags);
}
static int vsp1_du_pipeline_set_rwpf_format(struct vsp1_device *vsp1,
struct vsp1_rwpf *rwpf,
u32 pixelformat, unsigned int pitch)
{
const struct vsp1_format_info *fmtinfo;
unsigned int chroma_hsub;
fmtinfo = vsp1_get_format_info(vsp1, pixelformat);
if (!fmtinfo) {
dev_dbg(vsp1->dev, "Unsupported pixel format %08x\n",
pixelformat);
return -EINVAL;
}
/*
* Only formats with three planes can affect the chroma planes pitch.
* All formats with two planes have a horizontal subsampling value of 2,
* but combine U and V in a single chroma plane, which thus results in
* the luma plane and chroma plane having the same pitch.
*/
chroma_hsub = (fmtinfo->planes == 3) ? fmtinfo->hsub : 1;
rwpf->fmtinfo = fmtinfo;
rwpf->format.num_planes = fmtinfo->planes;
rwpf->format.plane_fmt[0].bytesperline = pitch;
rwpf->format.plane_fmt[1].bytesperline = pitch / chroma_hsub;
return 0;
}
/* -----------------------------------------------------------------------------
......@@ -700,8 +738,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
drm_pipe->du_private = cfg->callback_data;
/* Disable the display interrupts. */
vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
vsp1_write(vsp1, VI6_DISP_IRQ_STA(pipe_index), 0);
vsp1_write(vsp1, VI6_DISP_IRQ_ENB(pipe_index), 0);
/* Configure all entities in the pipeline. */
vsp1_du_pipeline_configure(pipe);
......@@ -769,9 +807,8 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
const struct vsp1_format_info *fmtinfo;
unsigned int chroma_hsub;
struct vsp1_rwpf *rpf;
int ret;
if (rpf_index >= vsp1->info->rpf_count)
return -EINVAL;
......@@ -804,25 +841,11 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
* Store the format, stride, memory buffer address, crop and compose
* rectangles and Z-order position and for the input.
*/
fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
if (!fmtinfo) {
dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
cfg->pixelformat);
return -EINVAL;
}
/*
* Only formats with three planes can affect the chroma planes pitch.
* All formats with two planes have a horizontal subsampling value of 2,
* but combine U and V in a single chroma plane, which thus results in
* the luma plane and chroma plane having the same pitch.
*/
chroma_hsub = (fmtinfo->planes == 3) ? fmtinfo->hsub : 1;
ret = vsp1_du_pipeline_set_rwpf_format(vsp1, rpf, cfg->pixelformat,
cfg->pitch);
if (ret < 0)
return ret;
rpf->fmtinfo = fmtinfo;
rpf->format.num_planes = fmtinfo->planes;
rpf->format.plane_fmt[0].bytesperline = cfg->pitch;
rpf->format.plane_fmt[1].bytesperline = cfg->pitch / chroma_hsub;
rpf->alpha = cfg->alpha;
rpf->mem.addr[0] = cfg->mem[0];
......@@ -851,12 +874,31 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
struct vsp1_pipeline *pipe = &drm_pipe->pipe;
int ret;
drm_pipe->crc = cfg->crc;
mutex_lock(&vsp1->drm->lock);
if (cfg->writeback.pixelformat) {
const struct vsp1_du_writeback_config *wb_cfg = &cfg->writeback;
ret = vsp1_du_pipeline_set_rwpf_format(vsp1, pipe->output,
wb_cfg->pixelformat,
wb_cfg->pitch);
if (WARN_ON(ret < 0))
goto done;
pipe->output->mem.addr[0] = wb_cfg->mem[0];
pipe->output->mem.addr[1] = wb_cfg->mem[1];
pipe->output->mem.addr[2] = wb_cfg->mem[2];
pipe->output->writeback = true;
}
vsp1_du_pipeline_setup_inputs(vsp1, pipe);
vsp1_du_pipeline_configure(pipe);
done:
mutex_unlock(&vsp1->drm->lock);
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush);
......
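A sketch of the DU-side sequence this enables (mirroring rcar_du_vsp_atomic_flush() and rcar_du_writeback_setup() earlier in the series; vsp_dev, pipe_index, fb and dma_addr are placeholders). Writeback is requested for a single frame by filling cfg.writeback before flushing:

	struct vsp1_du_atomic_pipe_config cfg = {
		.writeback.pixelformat = V4L2_PIX_FMT_ABGR32,
		.writeback.pitch = fb->pitches[0],
		.writeback.mem[0] = dma_addr,
	};

	/* The output WPF captures this frame to memory, then self-disables. */
	vsp1_du_atomic_flush(vsp_dev, pipe_index, &cfg);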
......@@ -42,7 +42,7 @@ struct vsp1_drm_pipeline {
struct vsp1_du_crc_config crc;
/* Frame synchronisation */
void (*du_complete)(void *data, bool completed, u32 crc);
void (*du_complete)(void *data, unsigned int status, u32 crc);
void *du_private;
};
......
......@@ -71,10 +71,11 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
void vsp1_entity_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
if (entity->ops->configure_stream)
entity->ops->configure_stream(entity, pipe, dlb);
entity->ops->configure_stream(entity, pipe, dl, dlb);
}
void vsp1_entity_configure_frame(struct vsp1_entity *entity,
......
......@@ -67,7 +67,9 @@ struct vsp1_route {
* struct vsp1_entity_operations - Entity operations
* @destroy: Destroy the entity.
* @configure_stream: Setup the hardware parameters for the stream which do
* not vary between frames (pipeline, formats).
* not vary between frames (pipeline, formats). Note that
* the vsp1_dl_list argument is only valid for display
* pipelines and will be NULL for mem-to-mem pipelines.
* @configure_frame: Configure the runtime parameters for each frame.
* @configure_partition: Configure partition specific parameters.
* @max_width: Return the max supported width of data that the entity can
......@@ -78,7 +80,7 @@ struct vsp1_route {
struct vsp1_entity_operations {
void (*destroy)(struct vsp1_entity *);
void (*configure_stream)(struct vsp1_entity *, struct vsp1_pipeline *,
struct vsp1_dl_body *);
struct vsp1_dl_list *, struct vsp1_dl_body *);
void (*configure_frame)(struct vsp1_entity *, struct vsp1_pipeline *,
struct vsp1_dl_list *, struct vsp1_dl_body *);
void (*configure_partition)(struct vsp1_entity *,
......@@ -155,6 +157,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
void vsp1_entity_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb);
void vsp1_entity_configure_frame(struct vsp1_entity *entity,
......
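A hypothetical entity sketch showing the consequence of the note above: the dl argument is NULL for mem-to-mem pipelines, so an entity that chains extra display lists (as the WPF does for writeback) must only do so when dl is valid. FOO_REG, FOO_VALUE and foo_chain_extra_list() are placeholders:

static void foo_configure_stream(struct vsp1_entity *entity,
				 struct vsp1_pipeline *pipe,
				 struct vsp1_dl_list *dl,
				 struct vsp1_dl_body *dlb)
{
	/* Per-stream register setup still goes to the body. */
	vsp1_dl_body_write(dlb, FOO_REG, FOO_VALUE);

	/* dl is only provided for display pipelines. */
	if (dl)
		foo_chain_extra_list(entity, dl);
}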
......@@ -131,6 +131,7 @@ static const struct v4l2_ctrl_config hgo_num_bins_control = {
static void hgo_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
......
......@@ -127,6 +127,7 @@ static const struct v4l2_ctrl_config hgt_hue_areas = {
static void hgt_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
......
......@@ -129,6 +129,7 @@ static const struct v4l2_subdev_ops hsit_ops = {
static void hsit_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
......
......@@ -84,6 +84,7 @@ static const struct v4l2_subdev_ops lif_ops = {
static void lif_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
const struct v4l2_mbus_framefmt *format;
......
......@@ -147,6 +147,7 @@ static const struct v4l2_subdev_ops lut_ops = {
static void lut_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_lut *lut = to_lut(&entity->subdev);
......
......@@ -39,12 +39,12 @@
#define VI6_WFP_IRQ_STA_DFE (1 << 1)
#define VI6_WFP_IRQ_STA_FRE (1 << 0)
#define VI6_DISP_IRQ_ENB 0x0078
#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
#define VI6_DISP_IRQ_STA 0x007c
#define VI6_DISP_IRQ_STA(n) (0x007c + (n) * 60)
#define VI6_DISP_IRQ_STA_DST (1 << 8)
#define VI6_DISP_IRQ_STA_MAE (1 << 5)
#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
......@@ -307,7 +307,7 @@
#define VI6_WPF_DSTM_ADDR_C0 0x1028
#define VI6_WPF_DSTM_ADDR_C1 0x102c
#define VI6_WPF_WRBCK_CTRL 0x1034
#define VI6_WPF_WRBCK_CTRL(n) (0x1034 + (n) * 0x100)
#define VI6_WPF_WRBCK_CTRL_WBMD (1 << 0)
/* -----------------------------------------------------------------------------
......
......@@ -57,6 +57,7 @@ static const struct v4l2_subdev_ops rpf_ops = {
static void rpf_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
......
......@@ -61,6 +61,7 @@ struct vsp1_rwpf {
} flip;
struct vsp1_rwpf_memory mem;
bool writeback;
struct vsp1_dl_manager *dlm;
};
......
......@@ -269,6 +269,7 @@ static const struct v4l2_subdev_ops sru_ops = {
static void sru_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
const struct vsp1_sru_param *param;
......
......@@ -257,6 +257,7 @@ static const struct v4l2_subdev_ops uds_ops = {
static void uds_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_uds *uds = to_uds(&entity->subdev);
......
......@@ -192,6 +192,7 @@ static const struct v4l2_subdev_ops uif_ops = {
static void uif_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_uif *uif = to_uif(&entity->subdev);
......
......@@ -307,11 +307,6 @@ static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
* This function completes the current buffer by filling its sequence number,
* time stamp and payload size, and hands it back to the videobuf core.
*
* When operating in DU output mode (deep pipeline to the DU through the LIF),
* the VSP1 needs to constantly supply frames to the display. In that case, if
* no other buffer is queued, reuse the one that has just been processed instead
* of handing it back to the videobuf core.
*
* Return the next queued buffer or NULL if the queue is empty.
*/
static struct vsp1_vb2_buffer *
......@@ -333,12 +328,6 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
done = list_first_entry(&video->irqqueue,
struct vsp1_vb2_buffer, queue);
/* In DU output mode reuse the buffer if the list is singular. */
if (pipe->lif && list_is_singular(&video->irqqueue)) {
spin_unlock_irqrestore(&video->irqlock, flags);
return done;
}
list_del(&done->queue);
if (!list_empty(&video->irqqueue))
......@@ -432,7 +421,7 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
}
/* Complete, and commit the head display list. */
vsp1_dl_list_commit(dl, false);
vsp1_dl_list_commit(dl, 0);
pipe->configured = true;
vsp1_pipeline_run(pipe);
......@@ -836,7 +825,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
list_for_each_entry(entity, &pipe->entities, list_pipe) {
vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
vsp1_entity_configure_stream(entity, pipe, pipe->stream_config);
vsp1_entity_configure_stream(entity, pipe, NULL,
pipe->stream_config);
}
return 0;
......
......@@ -232,17 +232,41 @@ static void vsp1_wpf_destroy(struct vsp1_entity *entity)
vsp1_dlm_destroy(wpf->dlm);
}
static int wpf_configure_writeback_chain(struct vsp1_rwpf *wpf,
struct vsp1_dl_list *dl)
{
unsigned int index = wpf->entity.index;
struct vsp1_dl_list *dl_next;
struct vsp1_dl_body *dlb;
dl_next = vsp1_dl_list_get(wpf->dlm);
if (!dl_next) {
dev_err(wpf->entity.vsp1->dev,
"Failed to obtain a dl list, disabling writeback\n");
return -ENOMEM;
}
dlb = vsp1_dl_list_get_body0(dl_next);
vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index), 0);
vsp1_dl_list_add_chain(dl, dl_next);
return 0;
}
static void wpf_configure_stream(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb)
{
struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
const struct v4l2_mbus_framefmt *source_format;
const struct v4l2_mbus_framefmt *sink_format;
unsigned int index = wpf->entity.index;
unsigned int i;
u32 outfmt = 0;
u32 srcrpf = 0;
int ret;
sink_format = vsp1_entity_get_pad_format(&wpf->entity,
wpf->entity.config,
......@@ -250,8 +274,9 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
source_format = vsp1_entity_get_pad_format(&wpf->entity,
wpf->entity.config,
RWPF_PAD_SOURCE);
/* Format */
if (!pipe->lif) {
if (!pipe->lif || wpf->writeback) {
const struct v4l2_pix_format_mplane *format = &wpf->format;
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
......@@ -276,8 +301,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);
if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) &&
wpf->entity.index == 0)
if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) && index == 0)
vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
VI6_WPF_ROT_CTRL_LN16 |
(256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
......@@ -288,11 +312,9 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
wpf->outfmt = outfmt;
vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(wpf->entity.index),
vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(index),
VI6_DPR_WPF_FPORCH_FP_WPFN);
vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL, 0);
/*
* Sources. If the pipeline has a single input and BRx is not used,
* configure it as the master layer. Otherwise configure all
......@@ -318,9 +340,26 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_SRCRPF, srcrpf);
/* Enable interrupts. */
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(wpf->entity.index),
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
VI6_WFP_IRQ_ENB_DFEE);
/*
* Configure writeback for display pipelines (the wpf writeback flag is
* never set for memory-to-memory pipelines). Start by adding a chained
* display list to disable writeback after a single frame, and proceed
* to enable writeback. If the display list allocation fails don't
* enable writeback as we wouldn't be able to safely disable it,
* resulting in possible memory corruption.
*/
if (wpf->writeback) {
ret = wpf_configure_writeback_chain(wpf, dl);
if (ret < 0)
wpf->writeback = false;
}
vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index),
wpf->writeback ? VI6_WPF_WRBCK_CTRL_WBMD : 0);
}
static void wpf_configure_frame(struct vsp1_entity *entity,
......@@ -362,6 +401,7 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
unsigned int width;
unsigned int height;
unsigned int left;
unsigned int offset;
unsigned int flip;
unsigned int i;
......@@ -371,13 +411,16 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
RWPF_PAD_SINK);
width = sink_format->width;
height = sink_format->height;
left = 0;
/*
* Cropping. The partition algorithm can split the image into
* multiple slices.
*/
if (pipe->partitions > 1)
if (pipe->partitions > 1) {
width = pipe->partition->wpf.width;
left = pipe->partition->wpf.left;
}
vsp1_wpf_write(wpf, dlb, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
(0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
......@@ -386,7 +429,11 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
(0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
(height << VI6_WPF_SZCLIP_SIZE_SHIFT));
if (pipe->lif)
/*
* For display pipelines without writeback enabled there's no memory
* address to configure, return now.
*/
if (pipe->lif && !wpf->writeback)
return;
/*
......@@ -408,13 +455,11 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
flip = wpf->flip.active;
if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate)
offset = format->width - pipe->partition->wpf.left
- pipe->partition->wpf.width;
offset = format->width - left - width;
else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate)
offset = format->height - pipe->partition->wpf.left
- pipe->partition->wpf.width;
offset = format->height - left - width;
else
offset = pipe->partition->wpf.left;
offset = left;
for (i = 0; i < format->num_planes; ++i) {
unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
......@@ -436,7 +481,7 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
* image height.
*/
if (wpf->flip.rotate)
height = pipe->partition->wpf.width;
height = width;
else
height = format->height;
......@@ -477,6 +522,12 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
/*
* Writeback operates in single-shot mode and lasts for a single frame, so
* reset the writeback flag to false for the next frame.
*/
wpf->writeback = false;
}
static unsigned int wpf_max_width(struct vsp1_entity *entity,
......
......@@ -49,6 +49,8 @@
*/
enum mode_set_atomic;
struct drm_writeback_connector;
struct drm_writeback_job;
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
......@@ -989,6 +991,11 @@ struct drm_connector_helper_funcs {
*/
void (*atomic_commit)(struct drm_connector *connector,
struct drm_connector_state *state);
int (*prepare_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
};
/**
......
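A hypothetical driver sketch wiring up the new optional hooks (the R-Car DU does the equivalent in this series): prepare_writeback_job() is called from drm_atomic_helper_prepare_planes(), cleanup_writeback_job() from drm_writeback_cleanup_job(). The other connector helper hooks are omitted here:

static int foo_prepare_writeback_job(struct drm_writeback_connector *connector,
				     struct drm_writeback_job *job)
{
	/* Map job->fb for the capture hardware, stash driver state in job->priv. */
	return 0;
}

static void foo_cleanup_writeback_job(struct drm_writeback_connector *connector,
				      struct drm_writeback_job *job)
{
	/* Undo whatever foo_prepare_writeback_job() set up in job->priv. */
}

static const struct drm_connector_helper_funcs foo_wb_conn_helper_funcs = {
	.prepare_writeback_job = foo_prepare_writeback_job,
	.cleanup_writeback_job = foo_cleanup_writeback_job,
};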
......@@ -79,6 +79,20 @@ struct drm_writeback_connector {
};
struct drm_writeback_job {
/**
* @connector:
*
* Back-pointer to the writeback connector associated with the job
*/
struct drm_writeback_connector *connector;
/**
* @prepared:
*
* Set when the job has been prepared with drm_writeback_prepare_job()
*/
bool prepared;
/**
* @cleanup_work:
*
......@@ -98,7 +112,7 @@ struct drm_writeback_job {
* @fb:
*
* Framebuffer to be written to by the writeback connector. Do not set
* directly, use drm_atomic_set_writeback_fb_for_connector()
* directly, use drm_writeback_set_fb()
*/
struct drm_framebuffer *fb;
......@@ -108,6 +122,13 @@ struct drm_writeback_job {
* Fence which will signal once the writeback has completed
*/
struct dma_fence *out_fence;
/**
* @priv:
*
* Driver-private data
*/
void *priv;
};
static inline struct drm_writeback_connector *
......@@ -122,8 +143,13 @@ int drm_writeback_connector_init(struct drm_device *dev,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
const u32 *formats, int n_formats);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb);
int drm_writeback_prepare_job(struct drm_writeback_job *job);
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
struct drm_writeback_job *job);
struct drm_connector_state *conn_state);
void drm_writeback_cleanup_job(struct drm_writeback_job *job);
......
......@@ -17,6 +17,9 @@ struct device;
int vsp1_du_init(struct device *dev);
#define VSP1_DU_STATUS_COMPLETE BIT(0)
#define VSP1_DU_STATUS_WRITEBACK BIT(1)
/**
* struct vsp1_du_lif_config - VSP LIF configuration
* @width: output frame width
......@@ -32,7 +35,7 @@ struct vsp1_du_lif_config {
unsigned int height;
bool interlaced;
void (*callback)(void *data, bool completed, u32 crc);
void (*callback)(void *data, unsigned int status, u32 crc);
void *callback_data;
};
......@@ -81,12 +84,26 @@ struct vsp1_du_crc_config {
unsigned int index;
};
/**
* struct vsp1_du_writeback_config - VSP writeback configuration parameters
* @pixelformat: plane pixel format (V4L2 4CC)
* @pitch: line pitch in bytes for the first plane
* @mem: DMA memory address for each plane of the frame buffer
*/
struct vsp1_du_writeback_config {
u32 pixelformat;
unsigned int pitch;
dma_addr_t mem[3];
};
/**
* struct vsp1_du_atomic_pipe_config - VSP atomic pipe configuration parameters
* @crc: CRC computation configuration
* @writeback: writeback configuration
*/
struct vsp1_du_atomic_pipe_config {
struct vsp1_du_crc_config crc;
struct vsp1_du_writeback_config writeback;
};
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index);
......