Commit 351bbf99 authored by Laurent Pinchart, committed by Mauro Carvalho Chehab

[media] v4l: vsp1: Use display lists with the userspace API

Don't restrict display list usage to the DRM pipeline, use them
unconditionally. This prepares the driver to support the request API.
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
parent 12161989
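
For context, a minimal sketch of the display list usage pattern that this change extends from the DRM pipeline to all pipelines. It only uses calls that appear in the diff below (vsp1_dl_list_get(), vsp1_dl_list_write(), vsp1_dl_list_commit()); the function name and the register/value pair are placeholders, not part of the patch.

/* Illustrative sketch only: obtain a display list from the output WPF's
 * display list manager, stage register writes into it, and commit it so
 * the hardware applies the whole configuration when the list is processed.
 */
static int example_queue_frame(struct vsp1_pipeline *pipe)
{
        struct vsp1_dl_list *dl;

        dl = vsp1_dl_list_get(pipe->output->dlm);
        if (!dl)
                return -ENOMEM;

        /* Placeholder write: program the RPF luma plane address. */
        vsp1_dl_list_write(dl, VI6_RPF_SRCM_ADDR_Y, 0x12345678);

        vsp1_dl_list_commit(dl);
        return 0;
}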
@@ -311,14 +311,15 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
 /* Hardware Setup */
 void vsp1_dlm_setup(struct vsp1_device *vsp1)
 {
-        u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT);
+        u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
+                 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
+                 | VI6_DL_CTRL_DLE;
 
-        /* The DRM pipeline operates with header-less display lists in
-         * Continuous Frame Mode.
+        /* The DRM pipeline operates with display lists in Continuous Frame
+         * Mode, all other pipelines use manual start.
          */
         if (vsp1->drm)
-                ctrl |= VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
-                     |  VI6_DL_CTRL_DLE | VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
+                ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
 
         vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
         vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
...
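Reassembled from the hunk above, the function reads roughly as follows after this change (a sketch of the result, not additional code from the patch): the DC0-DC2 and DLE control bits are now set unconditionally, and only the continuous-frame-mode bits stay tied to the DRM pipeline.

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
        u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
                 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
                 | VI6_DL_CTRL_DLE;

        /* The DRM pipeline operates with display lists in Continuous Frame
         * Mode, all other pipelines use manual start.
         */
        if (vsp1->drm)
                ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

        vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
        vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}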
@@ -36,11 +36,6 @@ void vsp1_drm_display_start(struct vsp1_device *vsp1)
         vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm);
 }
 
-static void vsp1_drm_frame_end(struct vsp1_pipeline *pipe)
-{
-        vsp1_dlm_irq_frame_end(pipe->output->dlm);
-}
-
 /* -----------------------------------------------------------------------------
  * DU Driver API
  */
@@ -280,7 +275,6 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
         const struct vsp1_format_info *fmtinfo;
         struct v4l2_subdev_selection sel;
         struct v4l2_subdev_format format;
-        struct vsp1_rwpf_memory memory;
         struct vsp1_rwpf *rpf;
         unsigned long flags;
         int ret;
@@ -420,15 +414,12 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
         rpf->location.left = dst->left;
         rpf->location.top = dst->top;
 
-        /* Set the memory buffer address but don't apply the values to the
+        /* Cache the memory buffer address but don't apply the values to the
          * hardware as the crop offsets haven't been computed yet.
          */
-        memory.num_planes = fmtinfo->planes;
-        memory.addr[0] = mem[0];
-        memory.addr[1] = mem[1];
-        memory.addr[2] = 0;
-
-        vsp1_rwpf_set_memory(rpf, &memory, false);
+        rpf->mem.addr[0] = mem[0];
+        rpf->mem.addr[1] = mem[1];
+        rpf->mem.addr[2] = 0;
 
         spin_lock_irqsave(&pipe->irqlock, flags);
@@ -482,14 +473,17 @@ void vsp1_du_atomic_flush(struct device *dev)
                                 entity->subdev.name);
                         return;
                 }
+
+                if (entity->type == VSP1_ENTITY_RPF)
+                        vsp1_rwpf_set_memory(to_rwpf(&entity->subdev));
         }
 
         vsp1_dl_list_commit(pipe->dl);
         pipe->dl = NULL;
 
+        /* Start or stop the pipeline if needed. */
         spin_lock_irqsave(&pipe->irqlock, flags);
 
-        /* Start or stop the pipeline if needed. */
         if (!vsp1->drm->num_inputs && pipe->num_inputs) {
                 vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
                 vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE);
@@ -569,7 +563,6 @@ int vsp1_drm_init(struct vsp1_device *vsp1)
 
         pipe = &vsp1->drm->pipe;
         vsp1_pipeline_init(pipe);
-        pipe->frame_end = vsp1_drm_frame_end;
 
         /* The DRM pipeline is static, add entities manually. */
         for (i = 0; i < vsp1->info->rpf_count; ++i) {
...
@@ -27,10 +27,7 @@ void vsp1_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
 {
         struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity);
 
-        if (pipe->dl)
-                vsp1_dl_list_write(pipe->dl, reg, data);
-        else
-                vsp1_write(e->vsp1, reg, data);
+        vsp1_dl_list_write(pipe->dl, reg, data);
 }
 
 void vsp1_entity_route_setup(struct vsp1_entity *source)
...
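For readability, the helper as it reads after the hunk above (a reassembled sketch): register writes issued through vsp1_mod_write() now always target the pipeline's current display list, with no fallback to direct MMIO.

void vsp1_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
{
        struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity);

        /* Every pipeline now owns a display list, so stage the write there. */
        vsp1_dl_list_write(pipe->dl, reg, data);
}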
@@ -273,42 +273,13 @@ bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
 
 void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
 {
-        enum vsp1_pipeline_state state;
-        unsigned long flags;
-
         if (pipe == NULL)
                 return;
 
-        /* Signal frame end to the pipeline handler. */
+        vsp1_dlm_irq_frame_end(pipe->output->dlm);
+
         if (pipe->frame_end)
                 pipe->frame_end(pipe);
-
-        spin_lock_irqsave(&pipe->irqlock, flags);
-
-        state = pipe->state;
-
-        /* When using display lists in continuous frame mode the pipeline is
-         * automatically restarted by the hardware.
-         */
-        if (pipe->lif)
-                goto done;
-
-        pipe->state = VSP1_PIPELINE_STOPPED;
-
-        /* If a stop has been requested, mark the pipeline as stopped and
-         * return.
-         */
-        if (state == VSP1_PIPELINE_STOPPING) {
-                wake_up(&pipe->wq);
-                goto done;
-        }
-
-        /* Restart the pipeline if ready. */
-        if (vsp1_pipeline_ready(pipe))
-                vsp1_pipeline_run(pipe);
-
-done:
-        spin_unlock_irqrestore(&pipe->irqlock, flags);
 }
 
 /*
...
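Reassembled from the hunk above, the shared frame-end handler is reduced to display list completion plus the per-pipeline callback; the state tracking and restart logic removed here reappears in the video pipeline code later in this diff.

void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
        if (pipe == NULL)
                return;

        /* Let the display list manager recycle the completed list. */
        vsp1_dlm_irq_frame_end(pipe->output->dlm);

        if (pipe->frame_end)
                pipe->frame_end(pipe);
}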
@@ -78,9 +78,6 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
         vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
 
-        /* Now that the offsets have been computed program the DMA addresses. */
-        rpf->ops->set_memory(rpf);
-
         /* Format */
         infmt = VI6_RPF_INFMT_CIPM
               | (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
@@ -150,11 +147,11 @@ static struct v4l2_subdev_ops rpf_ops = {
 static void rpf_set_memory(struct vsp1_rwpf *rpf)
 {
         vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
-                       rpf->buf_addr[0] + rpf->offsets[0]);
+                       rpf->mem.addr[0] + rpf->offsets[0]);
         vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
-                       rpf->buf_addr[1] + rpf->offsets[1]);
+                       rpf->mem.addr[1] + rpf->offsets[1]);
         vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
-                       rpf->buf_addr[2] + rpf->offsets[1]);
+                       rpf->mem.addr[2] + rpf->offsets[1]);
 }
 
 static const struct vsp1_rwpf_operations rpf_vdev_ops = {
...
@@ -269,29 +269,3 @@ int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf)
 
         return rwpf->ctrls.error;
 }
-
-/* -----------------------------------------------------------------------------
- * Buffers
- */
-
-/**
- * vsp1_rwpf_set_memory - Configure DMA addresses for a [RW]PF
- * @rwpf: the [RW]PF instance
- * @mem: DMA memory addresses
- * @apply: whether to apply the configuration to the hardware
- *
- * This function stores the DMA addresses for all planes in the rwpf instance
- * and optionally applies the configuration to hardware registers if the apply
- * argument is set to true.
- */
-void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf, struct vsp1_rwpf_memory *mem,
-                          bool apply)
-{
-        unsigned int i;
-
-        for (i = 0; i < 3; ++i)
-                rwpf->buf_addr[i] = mem->addr[i];
-
-        if (apply)
-                rwpf->ops->set_memory(rwpf);
-}
@@ -29,15 +29,13 @@ struct vsp1_rwpf;
 struct vsp1_video;
 
 struct vsp1_rwpf_memory {
-        unsigned int num_planes;
         dma_addr_t addr[3];
-        unsigned int length[3];
 };
 
 /**
  * struct vsp1_rwpf_operations - RPF and WPF operations
  * @set_memory: Setup memory buffer access. This operation applies the settings
- *              stored in the rwpf buf_addr field to the hardware.
+ *              stored in the rwpf mem field to the hardware.
  */
 struct vsp1_rwpf_operations {
         void (*set_memory)(struct vsp1_rwpf *rwpf);
@@ -65,7 +63,7 @@ struct vsp1_rwpf {
         unsigned int alpha;
 
         unsigned int offsets[2];
-        dma_addr_t buf_addr[3];
+        struct vsp1_rwpf_memory mem;
 
         struct vsp1_dl_manager *dlm;
 };
@@ -99,7 +97,15 @@ int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
                             struct v4l2_subdev_pad_config *cfg,
                             struct v4l2_subdev_selection *sel);
 
-void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf, struct vsp1_rwpf_memory *mem,
-                          bool apply);
+/**
+ * vsp1_rwpf_set_memory - Configure DMA addresses for a [RW]PF
+ * @rwpf: the [RW]PF instance
+ *
+ * This function applies the cached memory buffer address to the hardware.
+ */
+static inline void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf)
+{
+        rwpf->ops->set_memory(rwpf);
+}
 
 #endif /* __VSP1_RWPF_H__ */
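A short sketch of the resulting memory programming flow, using only names introduced by this patch (the surrounding function and the single-plane address are illustrative, not code from the patch): buffer addresses are cached in the [RW]PF's mem field first and only written to the hardware once the display list and crop offsets are ready.

static void example_apply_addresses(struct vsp1_rwpf *rpf, dma_addr_t y_addr)
{
        /* Cache only, no register access yet. */
        rpf->mem.addr[0] = y_addr;
        rpf->mem.addr[1] = 0;
        rpf->mem.addr[2] = 0;

        /* Later, once offsets are known: program the cached addresses. */
        vsp1_rwpf_set_memory(rpf);      /* calls rpf->ops->set_memory() */
}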
@@ -29,6 +29,7 @@
 
 #include "vsp1.h"
 #include "vsp1_bru.h"
+#include "vsp1_dl.h"
 #include "vsp1_entity.h"
 #include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
@@ -424,7 +425,7 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
         done->buf.vb2_buf.timestamp = ktime_get_ns();
         for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
                 vb2_set_plane_payload(&done->buf.vb2_buf, i,
-                                      done->mem.length[i]);
+                                      vb2_plane_size(&done->buf.vb2_buf, i));
         vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
 
         return next;
@@ -443,15 +444,41 @@ static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
 
         spin_lock_irqsave(&pipe->irqlock, flags);
 
-        vsp1_rwpf_set_memory(video->rwpf, &buf->mem, true);
+        video->rwpf->mem = buf->mem;
         pipe->buffers_ready |= 1 << video->pipe_index;
 
         spin_unlock_irqrestore(&pipe->irqlock, flags);
 }
 
+static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
+{
+        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+        unsigned int i;
+
+        if (!pipe->dl)
+                pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+
+        for (i = 0; i < vsp1->info->rpf_count; ++i) {
+                struct vsp1_rwpf *rwpf = pipe->inputs[i];
+
+                if (rwpf)
+                        vsp1_rwpf_set_memory(rwpf);
+        }
+
+        if (!pipe->lif)
+                vsp1_rwpf_set_memory(pipe->output);
+
+        vsp1_dl_list_commit(pipe->dl);
+        pipe->dl = NULL;
+
+        vsp1_pipeline_run(pipe);
+}
+
 static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
 {
         struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+        enum vsp1_pipeline_state state;
+        unsigned long flags;
         unsigned int i;
 
         /* Complete buffers on all video nodes. */
@@ -462,8 +489,22 @@ static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
                 vsp1_video_frame_end(pipe, pipe->inputs[i]);
         }
 
-        vsp1_video_frame_end(pipe, pipe->output);
+        if (!pipe->lif)
+                vsp1_video_frame_end(pipe, pipe->output);
+
+        spin_lock_irqsave(&pipe->irqlock, flags);
+
+        state = pipe->state;
+        pipe->state = VSP1_PIPELINE_STOPPED;
+
+        /* If a stop has been requested, mark the pipeline as stopped and
+         * return. Otherwise restart the pipeline if ready.
+         */
+        if (state == VSP1_PIPELINE_STOPPING)
+                wake_up(&pipe->wq);
+        else if (vsp1_pipeline_ready(pipe))
+                vsp1_video_pipeline_run(pipe);
+
+        spin_unlock_irqrestore(&pipe->irqlock, flags);
 }
 
 /* -----------------------------------------------------------------------------
@@ -512,20 +553,15 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
         if (vb->num_planes < format->num_planes)
                 return -EINVAL;
 
-        buf->mem.num_planes = vb->num_planes;
-
         for (i = 0; i < vb->num_planes; ++i) {
                 buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
-                buf->mem.length[i] = vb2_plane_size(vb, i);
 
-                if (buf->mem.length[i] < format->plane_fmt[i].sizeimage)
+                if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
                         return -EINVAL;
         }
 
-        for ( ; i < 3; ++i) {
+        for ( ; i < 3; ++i)
                 buf->mem.addr[i] = 0;
-                buf->mem.length[i] = 0;
-        }
 
         return 0;
 }
@@ -549,34 +585,33 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
 
         spin_lock_irqsave(&pipe->irqlock, flags);
 
-        vsp1_rwpf_set_memory(video->rwpf, &buf->mem, true);
+        video->rwpf->mem = buf->mem;
         pipe->buffers_ready |= 1 << video->pipe_index;
 
         if (vb2_is_streaming(&video->queue) &&
             vsp1_pipeline_ready(pipe))
-                vsp1_pipeline_run(pipe);
+                vsp1_video_pipeline_run(pipe);
 
         spin_unlock_irqrestore(&pipe->irqlock, flags);
 }
 
-static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
 {
-        struct vsp1_video *video = vb2_get_drv_priv(vq);
-        struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
         struct vsp1_entity *entity;
-        unsigned long flags;
         int ret;
 
-        mutex_lock(&pipe->lock);
-        if (pipe->stream_count == pipe->num_inputs) {
+        /* Prepare the display list. */
+        pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+        if (!pipe->dl)
+                return -ENOMEM;
+
         if (pipe->uds) {
                 struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
 
-                /* If a BRU is present in the pipeline before the UDS,
-                 * the alpha component doesn't need to be scaled as the
-                 * BRU output alpha value is fixed to 255. Otherwise we
-                 * need to scale the alpha component only when available
-                 * at the input RPF.
+                /* If a BRU is present in the pipeline before the UDS, the alpha
+                 * component doesn't need to be scaled as the BRU output alpha
+                 * value is fixed to 255. Otherwise we need to scale the alpha
+                 * component only when available at the input RPF.
                  */
                 if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
                         uds->scale_alpha = false;
@@ -591,21 +626,42 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
         list_for_each_entry(entity, &pipe->entities, list_pipe) {
                 vsp1_entity_route_setup(entity);
 
-                ret = v4l2_subdev_call(&entity->subdev, video,
-                                       s_stream, 1);
-                if (ret < 0) {
-                        mutex_unlock(&pipe->lock);
-                        return ret;
-                }
-        }
-        }
+                ret = v4l2_subdev_call(&entity->subdev, video, s_stream, 1);
+                if (ret < 0)
+                        goto error;
+        }
+
+        return 0;
+
+error:
+        vsp1_dl_list_put(pipe->dl);
+        pipe->dl = NULL;
+        return ret;
+}
+
+static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+        struct vsp1_video *video = vb2_get_drv_priv(vq);
+        struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+        unsigned long flags;
+        int ret;
+
+        mutex_lock(&pipe->lock);
+        if (pipe->stream_count == pipe->num_inputs) {
+                ret = vsp1_video_setup_pipeline(pipe);
+                if (ret < 0) {
+                        mutex_unlock(&pipe->lock);
+                        return ret;
+                }
+        }
 
         pipe->stream_count++;
         mutex_unlock(&pipe->lock);
 
         spin_lock_irqsave(&pipe->irqlock, flags);
         if (vsp1_pipeline_ready(pipe))
-                vsp1_pipeline_run(pipe);
+                vsp1_video_pipeline_run(pipe);
         spin_unlock_irqrestore(&pipe->irqlock, flags);
 
         return 0;
@@ -625,6 +681,9 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
                 ret = vsp1_pipeline_stop(pipe);
                 if (ret == -ETIMEDOUT)
                         dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+
+                vsp1_dl_list_put(pipe->dl);
+                pipe->dl = NULL;
         }
         mutex_unlock(&pipe->lock);
...
@@ -157,9 +157,9 @@ static struct v4l2_subdev_ops wpf_ops = {
 
 static void wpf_set_memory(struct vsp1_rwpf *wpf)
 {
-        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, wpf->buf_addr[0]);
-        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, wpf->buf_addr[1]);
-        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, wpf->buf_addr[2]);
+        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, wpf->mem.addr[0]);
+        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, wpf->mem.addr[1]);
+        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, wpf->mem.addr[2]);
 }
 
 static const struct vsp1_rwpf_operations wpf_vdev_ops = {
@@ -200,14 +200,12 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
         if (ret < 0)
                 return ERR_PTR(ret);
 
-        /* Initialize the display list manager if the WPF is used for display */
-        if ((vsp1->info->features & VSP1_HAS_LIF) && index == 0) {
-                wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
-                if (!wpf->dlm) {
-                        ret = -ENOMEM;
-                        goto error;
-                }
+        /* Initialize the display list manager. */
+        wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
+        if (!wpf->dlm) {
+                ret = -ENOMEM;
+                goto error;
         }
 
         /* Initialize the V4L2 subdev. */
         subdev = &wpf->entity.subdev;
...