Commit b4e52199 authored by Moudy Ho, committed by Hans Verkuil

media: platform: mtk-mdp3: reconfigure shared memory

For performance and multi-chip support, use a dynamic layout instead of
statically configured pools.
Divide the shared memory into the three 64-byte-aligned regions listed
below:

    vpu->param_addr  -> +-----------------------------------------+
                        |                                         |
                        | To SCP : Input frame parameters         |
                        |          (struct img_ipi_frameparam)    |
                        |                                         |
                        +-----------------------------------------+

    vpu->work_addr   -> +-----------------------------------------+
                        |                                         |
                        | In SCP : Reserve for SCP calculation    |
                        |                                         |
                        +-----------------------------------------+

    vpu->config_addr -> +-----------------------------------------+
                        |                                         |
                        | From SCP : Output component config      |
                        |            (struct img_config)          |
                        |                                         |
                        +-----------------------------------------+
Signed-off-by: Moudy Ho <moudy.ho@mediatek.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
parent 09e694f1
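
Each of the three regions is sized up to a 64-byte boundary before it is
allocated (vpu->param_size, vpu->work_size and vpu->config_size in the hunks
below). A minimal userspace sketch of that rounding is shown here; ALIGN()
mirrors the kernel macro for power-of-two alignments, and the byte counts are
placeholders rather than the real img_ipi_frameparam / img_config sizes:

    /* Sketch only: 64-byte size rounding as applied to the three regions. */
    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t param_size  = ALIGN(1000, 64); /* stand-in for sizeof(struct img_ipi_frameparam) */
            size_t work_size   = ALIGN(4096, 64); /* work size reported by the SCP in the init ack */
            size_t config_size = ALIGN(2000, 64); /* stand-in for sizeof(struct img_config) */

            printf("param:  %zu\n", param_size);  /* 1024 */
            printf("work:   %zu\n", work_size);   /* 4096 */
            printf("config: %zu\n", config_size); /* 2048 */
            return 0;
    }

Allocating the three buffers separately is also what lets the driver drop the
fixed vpu_alloc_size pool and the mdp_config_pool bookkeeping removed in the
diff below.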
@@ -11,8 +11,8 @@
 /*
  * ISP-MDP generic output information
- * MD5 of the target SCP blob:
- * 6da52bdcf4bf76a0983b313e1d4745d6
+ * MD5 of the target SCP prebuild:
+ * 2d995ddb5c3b0cf26e96d6a823481886
  */
 
 #define IMG_MAX_SUBFRAMES_8183 14
 
@@ -87,14 +87,14 @@ static void mdp_m2m_device_run(void *priv)
         dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
         mdp_set_dst_config(&param.outputs[0], frame, &dst_vb->vb2_buf);
 
-        ret = mdp_vpu_process(&ctx->vpu, &param);
+        ret = mdp_vpu_process(&ctx->mdp_dev->vpu, &param);
         if (ret) {
                 dev_err(&ctx->mdp_dev->pdev->dev,
                         "VPU MDP process failed: %d\n", ret);
                 goto worker_end;
         }
 
-        task.config = ctx->vpu.config;
+        task.config = ctx->mdp_dev->vpu.config;
         task.param = &param;
         task.composes[0] = &frame->compose;
         task.cmdq_cb = NULL;
@@ -150,11 +150,6 @@ static int mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
         if (!mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT)) {
                 ret = mdp_vpu_get_locked(ctx->mdp_dev);
-                if (ret)
-                        return ret;
-
-                ret = mdp_vpu_ctx_init(&ctx->vpu, &ctx->mdp_dev->vpu,
-                                       MDP_DEV_M2M);
                 if (ret) {
                         dev_err(&ctx->mdp_dev->pdev->dev,
                                 "VPU init failed %d\n", ret);
@@ -641,10 +636,8 @@ static int mdp_m2m_release(struct file *file)
         mutex_lock(&mdp->m2m_lock);
         v4l2_m2m_ctx_release(ctx->m2m_ctx);
-        if (mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT)) {
-                mdp_vpu_ctx_deinit(&ctx->vpu);
+        if (mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT))
                 mdp_vpu_put_locked(mdp);
-        }
 
         v4l2_ctrl_handler_free(&ctx->ctrl_handler);
         v4l2_fh_del(&ctx->fh);
@@ -33,7 +33,6 @@ struct mdp_m2m_ctx {
         struct v4l2_ctrl_handler ctrl_handler;
         struct mdp_m2m_ctrls ctrls;
         struct v4l2_m2m_ctx *m2m_ctx;
-        struct mdp_vpu_ctx vpu;
         u32 frame_count[MDP_M2M_MAX];
 
         struct mdp_frameparam curr_param;
@@ -10,7 +10,6 @@
 #include "mtk-mdp3-core.h"
 
 #define MDP_VPU_MESSAGE_TIMEOUT 500U
-#define vpu_alloc_size 0x600000
 
 static inline struct mdp_dev *vpu_to_mdp(struct mdp_vpu_dev *vpu)
 {
@@ -19,23 +18,63 @@ static inline struct mdp_dev *vpu_to_mdp(struct mdp_vpu_dev *vpu)
 static int mdp_vpu_shared_mem_alloc(struct mdp_vpu_dev *vpu)
 {
-        if (vpu->work && vpu->work_addr)
-                return 0;
+        struct device *dev;
 
-        vpu->work = dma_alloc_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
-                                       &vpu->work_addr, GFP_KERNEL);
-        if (!vpu->work)
-                return -ENOMEM;
-        else
-                return 0;
+        if (IS_ERR_OR_NULL(vpu))
+                goto err_return;
+
+        dev = scp_get_device(vpu->scp);
+
+        if (!vpu->param) {
+                vpu->param = dma_alloc_wc(dev, vpu->param_size,
+                                          &vpu->param_addr, GFP_KERNEL);
+                if (!vpu->param)
+                        goto err_return;
+        }
+
+        if (!vpu->work) {
+                vpu->work = dma_alloc_wc(dev, vpu->work_size,
+                                         &vpu->work_addr, GFP_KERNEL);
+                if (!vpu->work)
+                        goto err_free_param;
+        }
+
+        if (!vpu->config) {
+                vpu->config = dma_alloc_wc(dev, vpu->config_size,
+                                           &vpu->config_addr, GFP_KERNEL);
+                if (!vpu->config)
+                        goto err_free_work;
+        }
+
+        return 0;
+
+err_free_work:
+        dma_free_wc(dev, vpu->work_size, vpu->work, vpu->work_addr);
+        vpu->work = NULL;
+err_free_param:
+        dma_free_wc(dev, vpu->param_size, vpu->param, vpu->param_addr);
+        vpu->param = NULL;
+err_return:
+        return -ENOMEM;
 }
 
 void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu)
 {
+        struct device *dev;
+
+        if (IS_ERR_OR_NULL(vpu))
+                return;
+
+        dev = scp_get_device(vpu->scp);
+
+        if (vpu->param && vpu->param_addr)
+                dma_free_wc(dev, vpu->param_size, vpu->param, vpu->param_addr);
+
         if (vpu->work && vpu->work_addr)
-                dma_free_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
-                                  vpu->work, vpu->work_addr);
+                dma_free_wc(dev, vpu->work_size, vpu->work, vpu->work_addr);
+
+        if (vpu->config && vpu->config_addr)
+                dma_free_wc(dev, vpu->config_size, vpu->config, vpu->config_addr);
 }
 
 static void mdp_vpu_ipi_handle_init_ack(void *data, unsigned int len,
@@ -69,16 +108,16 @@ static void mdp_vpu_ipi_handle_frame_ack(void *data, unsigned int len,
         struct img_sw_addr *addr = (struct img_sw_addr *)data;
         struct img_ipi_frameparam *param =
                 (struct img_ipi_frameparam *)(unsigned long)addr->va;
-        struct mdp_vpu_ctx *ctx =
-                (struct mdp_vpu_ctx *)(unsigned long)param->drv_data;
+        struct mdp_vpu_dev *vpu =
+                (struct mdp_vpu_dev *)(unsigned long)param->drv_data;
 
         if (param->state) {
-                struct mdp_dev *mdp = vpu_to_mdp(ctx->vpu_dev);
+                struct mdp_dev *mdp = vpu_to_mdp(vpu);
 
                 dev_err(&mdp->pdev->dev, "VPU MDP failure:%d\n", param->state);
         }
-        ctx->vpu_dev->status = param->state;
-        complete(&ctx->vpu_dev->ipi_acked);
+        vpu->status = param->state;
+        complete(&vpu->ipi_acked);
 }
 
 int mdp_vpu_register(struct mdp_dev *mdp)
@@ -157,9 +196,6 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
         struct mdp_ipi_init_msg msg = {
                 .drv_data = (unsigned long)vpu,
         };
-        size_t mem_size;
-        phys_addr_t pool;
-        const size_t pool_size = sizeof(struct mdp_config_pool);
         struct mdp_dev *mdp = vpu_to_mdp(vpu);
         int err;
 
@@ -172,34 +208,29 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
                 goto err_work_size;
 
         /* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */
-        mem_size = vpu_alloc_size;
+        mutex_lock(vpu->lock);
+        vpu->work_size = ALIGN(vpu->work_size, 64);
+        vpu->param_size = ALIGN(sizeof(struct img_ipi_frameparam), 64);
+        vpu->config_size = ALIGN(sizeof(struct img_config), 64);
         err = mdp_vpu_shared_mem_alloc(vpu);
+        mutex_unlock(vpu->lock);
         if (err) {
                 dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
                 goto err_mem_alloc;
         }
-        pool = ALIGN((uintptr_t)vpu->work + vpu->work_size, 8);
-        if (pool + pool_size - (uintptr_t)vpu->work > mem_size) {
-                dev_err(&mdp->pdev->dev,
-                        "VPU memory insufficient: %zx + %zx > %zx",
-                        vpu->work_size, pool_size, mem_size);
-                err = -ENOMEM;
-                goto err_mem_size;
-        }
 
         dev_dbg(&mdp->pdev->dev,
-                "VPU work:%pK pa:%pad sz:%zx pool:%pa sz:%zx (mem sz:%zx)",
+                "VPU param:%pK pa:%pad sz:%zx, work:%pK pa:%pad sz:%zx, config:%pK pa:%pad sz:%zx",
+                vpu->param, &vpu->param_addr, vpu->param_size,
                 vpu->work, &vpu->work_addr, vpu->work_size,
-                &pool, pool_size, mem_size);
-        vpu->pool = (struct mdp_config_pool *)(uintptr_t)pool;
+                vpu->config, &vpu->config_addr, vpu->config_size);
 
         msg.work_addr = vpu->work_addr;
         msg.work_size = vpu->work_size;
         err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
         if (err)
                 goto err_work_size;
-        memset(vpu->pool, 0, sizeof(*vpu->pool));
 
         return 0;
 
 err_work_size:
@@ -212,7 +243,6 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
                 break;
         }
         return err;
-err_mem_size:
 err_mem_alloc:
         return err;
 }
@@ -227,88 +257,31 @@ int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu)
         return mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_DEINIT, &msg, sizeof(msg));
 }
 
-static struct img_config *mdp_config_get(struct mdp_vpu_dev *vpu,
-                                         enum mdp_config_id id, uint32_t *addr)
-{
-        struct img_config *config;
-
-        if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
-                return ERR_PTR(-EINVAL);
-
-        mutex_lock(vpu->lock);
-        vpu->pool->cfg_count[id]++;
-        config = &vpu->pool->configs[id];
-        *addr = vpu->work_addr + ((uintptr_t)config - (uintptr_t)vpu->work);
-        mutex_unlock(vpu->lock);
-
-        return config;
-}
-
-static int mdp_config_put(struct mdp_vpu_dev *vpu,
-                          enum mdp_config_id id,
-                          const struct img_config *config)
-{
-        int err = 0;
-
-        if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
-                return -EINVAL;
-        if (vpu->lock)
-                mutex_lock(vpu->lock);
-        if (!vpu->pool->cfg_count[id] || config != &vpu->pool->configs[id])
-                err = -EINVAL;
-        else
-                vpu->pool->cfg_count[id]--;
-        if (vpu->lock)
-                mutex_unlock(vpu->lock);
-        return err;
-}
-
-int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
-                     enum mdp_config_id id)
-{
-        ctx->config = mdp_config_get(vpu, id, &ctx->inst_addr);
-        if (IS_ERR(ctx->config)) {
-                int err = PTR_ERR(ctx->config);
-
-                ctx->config = NULL;
-                return err;
-        }
-        ctx->config_id = id;
-        ctx->vpu_dev = vpu;
-        return 0;
-}
-
-int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx)
-{
-        int err = mdp_config_put(ctx->vpu_dev, ctx->config_id, ctx->config);
-
-        ctx->config_id = 0;
-        ctx->config = NULL;
-        ctx->inst_addr = 0;
-        return err;
-}
-
-int mdp_vpu_process(struct mdp_vpu_ctx *ctx, struct img_ipi_frameparam *param)
-{
-        struct mdp_vpu_dev *vpu = ctx->vpu_dev;
-        struct mdp_dev *mdp = vpu_to_mdp(vpu);
-        struct img_sw_addr addr;
-
-        if (!ctx->vpu_dev->work || !ctx->vpu_dev->work_addr) {
-                if (mdp_vpu_shared_mem_alloc(vpu)) {
-                        dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
-                        return -ENOMEM;
-                }
-        }
-        memset((void *)ctx->vpu_dev->work, 0, ctx->vpu_dev->work_size);
-        memset(ctx->config, 0, sizeof(*ctx->config));
-        param->config_data.va = (unsigned long)ctx->config;
-        param->config_data.pa = ctx->inst_addr;
-        param->drv_data = (unsigned long)ctx;
-        memcpy((void *)ctx->vpu_dev->work, param, sizeof(*param));
-        addr.pa = ctx->vpu_dev->work_addr;
-        addr.va = (uintptr_t)ctx->vpu_dev->work;
-        return mdp_vpu_sendmsg(ctx->vpu_dev, SCP_IPI_MDP_FRAME,
-                               &addr, sizeof(addr));
+int mdp_vpu_process(struct mdp_vpu_dev *vpu, struct img_ipi_frameparam *param)
+{
+        struct mdp_dev *mdp = vpu_to_mdp(vpu);
+        struct img_sw_addr addr;
+
+        mutex_lock(vpu->lock);
+        if (mdp_vpu_shared_mem_alloc(vpu)) {
+                dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
+                mutex_unlock(vpu->lock);
+                return -ENOMEM;
+        }
+
+        memset(vpu->param, 0, vpu->param_size);
+        memset(vpu->work, 0, vpu->work_size);
+        memset(vpu->config, 0, vpu->config_size);
+        param->self_data.va = (unsigned long)vpu->work;
+        param->self_data.pa = vpu->work_addr;
+        param->config_data.va = (unsigned long)vpu->config;
+        param->config_data.pa = vpu->config_addr;
+        param->drv_data = (unsigned long)vpu;
+        memcpy(vpu->param, param, sizeof(*param));
+        addr.pa = vpu->param_addr;
+        addr.va = (unsigned long)vpu->param;
+        mutex_unlock(vpu->lock);
+        return mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_FRAME, &addr, sizeof(addr));
 }
@@ -37,42 +37,27 @@ struct mdp_ipi_deinit_msg {
         u32 work_addr;
 } __packed;
 
-enum mdp_config_id {
-        MDP_DEV_M2M = 0,
-        MDP_CONFIG_POOL_SIZE /* ALWAYS keep at the end */
-};
-
-struct mdp_config_pool {
-        u64 cfg_count[MDP_CONFIG_POOL_SIZE];
-        struct img_config configs[MDP_CONFIG_POOL_SIZE];
-};
-
 struct mdp_vpu_dev {
         /* synchronization protect for accessing vpu working buffer info */
         struct mutex *lock;
         struct mtk_scp *scp;
         struct completion ipi_acked;
+        void *param;
+        dma_addr_t param_addr;
+        size_t param_size;
         void *work;
         dma_addr_t work_addr;
         size_t work_size;
-        struct mdp_config_pool *pool;
+        void *config;
+        dma_addr_t config_addr;
+        size_t config_size;
         u32 status;
 };
 
-struct mdp_vpu_ctx {
-        struct mdp_vpu_dev *vpu_dev;
-        u32 config_id;
-        struct img_config *config;
-        u32 inst_addr;
-};
-
 void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu);
 int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
                      struct mutex *lock /* for sync */);
 int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu);
-int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
-                     enum mdp_config_id id);
-int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx);
-int mdp_vpu_process(struct mdp_vpu_ctx *vpu, struct img_ipi_frameparam *param);
+int mdp_vpu_process(struct mdp_vpu_dev *vpu, struct img_ipi_frameparam *param);
 
 #endif /* __MTK_MDP3_VPU_H__ */