Commit d455937e authored by Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Noteworthy changes this time:
1) 4k support for newer chips (ganging up hwpipes and mixers)
2) using OPP bindings for gpu
3) more prep work towards per-process pagetables

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (47 commits)
  msm/drm: gpu: Dynamically locate the clocks from the device tree
  drm/msm: gpu: Use OPP tables if we can
  drm/msm: Hard code the GPU "slow frequency"
  drm/msm: Add MSM_PARAM_GMEM_BASE
  drm/msm: Reference count address spaces
  drm/msm: Make sure to detach the MMU during GPU cleanup
  drm/msm/mdp5: Enable 3D mux in mdp5_ctl
  drm/msm/mdp5: Reset CTL blend registers before configuring them
  drm/msm/mdp5: Assign 'right' mixer to CRTC state
  drm/msm/mdp5: Stage border out on base stage if CRTC has 2 LMs
  drm/msm/mdp5: Stage right side hwpipes on Right-side Layer Mixer
  drm/msm/mdp5: Prepare Layer Mixers for source split
  drm/msm/mdp5: Configure 'right' hwpipe
  drm/msm/mdp5: Assign a 'right hwpipe' to plane state
  drm/msm/mdp5: Create mdp5_hwpipe_mode_set
  drm/msm/mdp5: Add optional 'right' Layer Mixer in CRTC state
  drm/msm/mdp5: Add a CAP for Source Split
  drm/msm/mdp5: Remove mixer/intf pointers from mdp5_ctl
  drm/msm/mdp5: Start using parameters from CRTC state
  drm/msm/mdp5: Add more stuff to CRTC state
  ...
parents df45eaca 98db803f
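
The recurring pattern across these patches is the switch from the driver's hand-rolled inactivity timer to runtime PM: every path that touches GPU registers now brackets the access with a runtime-PM reference. A minimal sketch of the idiom, assuming a struct msm_gpu with a valid pdev (the helper name is illustrative, not the driver's code):

static void example_touch_gpu_hw(struct msm_gpu *gpu)
{
	/* powers the GPU up (via the new adreno_resume()) if suspended */
	pm_runtime_get_sync(&gpu->pdev->dev);

	/* ... read or write GPU registers here ... */

	/* let the core autosuspend it after the inactive period */
	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
}
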
......@@ -40,6 +40,7 @@ msm-y := \
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_pipe.o \
mdp/mdp5/mdp5_mixer.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
......
......@@ -412,10 +412,8 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
#endif
......
......@@ -456,12 +456,8 @@ static const unsigned int a4xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
......
......@@ -638,10 +638,8 @@ static void a5xx_cp_err_irq(struct msm_gpu *gpu)
}
}
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
......@@ -653,6 +651,10 @@ static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
/* Clear the error */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
/* Clear the interrupt */
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
}
if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
......@@ -704,10 +706,16 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
/*
* Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
* before the source is cleared the interrupt will storm.
*/
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
a5xx_rbbm_err_irq(gpu);
a5xx_rbbm_err_irq(gpu, status);
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
......@@ -837,12 +845,8 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
#endif
......
......@@ -2,7 +2,7 @@
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
......@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#define ANY_ID 0xff
......@@ -155,21 +156,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
pm_runtime_get_sync(&pdev->dev);
ret = msm_gpu_hw_init(gpu);
pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
}
......@@ -220,10 +214,71 @@ static int find_chipid(struct device *dev, u32 *chipid)
return 0;
}
/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
struct device_node *child, *node;
int ret;
node = of_find_compatible_node(dev->of_node, NULL,
"qcom,gpu-pwrlevels");
if (!node) {
dev_err(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
for_each_child_of_node(node, child) {
unsigned int val;
ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
if (ret)
continue;
/*
* Skip the intentionally bogus clock value found at the bottom
* of most legacy frequency tables
*/
if (val != 27000000)
dev_pm_opp_add(dev, val, 0);
}
return 0;
}
static int adreno_get_pwrlevels(struct device *dev,
struct adreno_platform_config *config)
{
unsigned long freq = ULONG_MAX;
struct dev_pm_opp *opp;
int ret;
/* You down with OPP? */
if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
ret = adreno_get_legacy_pwrlevels(dev);
else
ret = dev_pm_opp_of_add_table(dev);
if (ret)
return ret;
/* Find the fastest defined rate */
opp = dev_pm_opp_find_freq_floor(dev, &freq);
if (!IS_ERR(opp))
config->fast_rate = dev_pm_opp_get_freq(opp);
if (!config->fast_rate) {
DRM_DEV_INFO(dev,
"Could not find clock rate. Using default\n");
/* Pick a suitably safe clock speed for any target */
config->fast_rate = 200000000;
}
return 0;
}
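Either path leaves the device with a populated OPP table, so other rates can be queried with the same API. For example, a hypothetical helper (not part of this patch) to find the slowest defined rate would search upward from zero, mirroring the floor search above:

static unsigned long example_slowest_rate(struct device *dev)
{
	unsigned long freq = 0;
	struct dev_pm_opp *opp;

	/* a ceil search starting at 0 returns the lowest OPP */
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return 0;

	return dev_pm_opp_get_freq(opp);
}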
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
struct device_node *child, *node = dev->of_node;
u32 val;
int ret;
......@@ -238,28 +293,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
/* find clock rates: */
config.fast_rate = 0;
config.slow_rate = ~0;
for_each_child_of_node(node, child) {
if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
struct device_node *pwrlvl;
for_each_child_of_node(child, pwrlvl) {
ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
if (ret) {
dev_err(dev, "could not find gpu-freq: %d\n", ret);
return ret;
}
config.fast_rate = max(config.fast_rate, val);
config.slow_rate = min(config.slow_rate, val);
}
}
}
if (!config.fast_rate) {
dev_warn(dev, "could not find clk rates\n");
/* This is a safe low speed for all devices: */
config.fast_rate = 200000000;
config.slow_rate = 27000000;
}
ret = adreno_get_pwrlevels(dev, &config);
if (ret)
return ret;
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
......@@ -296,12 +333,35 @@ static const struct of_device_id dt_match[] = {
{}
};
#ifdef CONFIG_PM
static int adreno_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_gpu *gpu = platform_get_drvdata(pdev);
return gpu->funcs->pm_resume(gpu);
}
static int adreno_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_gpu *gpu = platform_get_drvdata(pdev);
return gpu->funcs->pm_suspend(gpu);
}
#endif
static const struct dev_pm_ops adreno_pm_ops = {
SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
};
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
.pm = &adreno_pm_ops,
},
};
......
......@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
*value = 0x100000;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
......@@ -68,6 +71,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
return ret;
}
/* reset ringbuffer: */
gpu->rb->cur = gpu->rb->start;
/* reset completed fence seqno: */
adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
adreno_gpu->memptrs->rptr = 0;
adreno_gpu->memptrs->wptr = 0;
/* Setup REG_CP_RB_CNTL: */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
......@@ -111,29 +122,20 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu)
void adreno_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
int ret;
gpu->funcs->pm_suspend(gpu);
/* reset ringbuffer: */
gpu->rb->cur = gpu->rb->start;
/* reset completed fence seqno: */
adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
adreno_gpu->memptrs->rptr = 0;
adreno_gpu->memptrs->wptr = 0;
// XXX pm-runtime?? we *need* the device to be off after this
// so maybe continuing to call ->pm_suspend/resume() is better?
gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
......@@ -259,8 +261,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
gpu->funcs->pm_resume(gpu);
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
......@@ -273,8 +273,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
gpu->funcs->pm_suspend(gpu);
}
#endif
......@@ -354,14 +352,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->rev = config->rev;
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
gpu->bus_scale_table = config->bus_scale_table;
#endif
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
gpu->fast_rate, gpu->bus_freq);
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
......@@ -369,6 +366,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
......@@ -439,6 +440,6 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
if (gpu->aspace) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
msm_gem_address_space_destroy(gpu->aspace);
msm_gem_address_space_put(gpu->aspace);
}
}
......@@ -123,7 +123,7 @@ struct adreno_gpu {
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
uint32_t fast_rate, slow_rate, bus_freq;
uint32_t fast_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
......
......@@ -114,15 +114,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp4_crtc->event;
if (event) {
/* if regular vblank case (!file) or if cancel-flip from
* preclose on file that requested flip, then send the
* event:
*/
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
......
......@@ -169,7 +169,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
msm_gem_address_space_destroy(aspace);
msm_gem_address_space_put(aspace);
}
if (mdp4_kms->rpm_enabled)
......
......@@ -70,6 +70,18 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 5,
},
.dspp = {
......@@ -134,6 +146,18 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
......@@ -167,6 +191,7 @@ const struct mdp5_cfg_hw apq8084_config = {
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
......@@ -211,6 +236,22 @@ const struct mdp5_cfg_hw apq8084_config = {
.lm = {
.count = 6,
.base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = 3,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
......@@ -282,6 +323,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
......@@ -306,6 +353,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
......@@ -350,6 +398,22 @@ const struct mdp5_cfg_hw msm8x94_config = {
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = 3,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
......@@ -385,6 +449,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
.count = 1,
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
......@@ -434,6 +499,22 @@ const struct mdp5_cfg_hw msm8x96_config = {
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
......
......@@ -39,8 +39,16 @@ struct mdp5_sub_block {
MDP5_SUB_BLOCK_DEFINITION;
};
struct mdp5_lm_instance {
int id;
int pp;
int dspp;
uint32_t caps;
};
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
struct mdp5_lm_instance instances[MAX_BASES];
uint32_t nb_stages; /* number of stages per blender */
uint32_t max_width; /* Maximum output resolution */
uint32_t max_height;
......
......@@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
struct device *dev = encoder->dev->dev;
u32 total_lines_x100, vclks_line, cfg;
long vsync_clk_speed;
int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
dev_err(dev, "vsync_clk is not initialized\n");
......@@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
int ret;
ret = clk_set_rate(mdp5_kms->vsync_clk,
......@@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
clk_disable_unprepare(mdp5_kms->vsync_clk);
......@@ -129,8 +132,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
mode = adjusted_mode;
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
......@@ -142,23 +143,23 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
pingpong_tearcheck_setup(encoder, mode);
mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
mdp5_cmd_enc->ctl);
mdp5_crtc_set_pipeline(encoder->crtc);
}
void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
struct mdp5_interface *intf = mdp5_cmd_enc->intf;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(!mdp5_cmd_enc->enabled))
return;
pingpong_tearcheck_disable(encoder);
mdp5_ctl_set_encoder_state(ctl, false);
mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
bs_set(mdp5_cmd_enc, 0);
......@@ -169,7 +170,8 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
struct mdp5_interface *intf = mdp5_cmd_enc->intf;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(mdp5_cmd_enc->enabled))
return;
......@@ -178,9 +180,9 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (pingpong_tearcheck_enable(encoder))
return;
mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
mdp5_ctl_set_encoder_state(ctl, true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_cmd_enc->enabled = true;
}
......@@ -197,7 +199,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return -EINVAL;
mdp5_kms = get_kms(encoder);
intf_num = mdp5_cmd_enc->intf.num;
intf_num = mdp5_cmd_enc->intf->num;
/* Switch slave encoder's trigger MUX, to use the master's
* start signal for the slave encoder
......
......@@ -37,13 +37,17 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
struct mdp5_interface;
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
int lm);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
struct mdp5_pipeline;
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
bool enabled);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
int cursor_id, bool enable);
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
#define MAX_PIPE_STAGE 2
/*
* mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
*
......@@ -56,8 +60,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
u32 ctl_blend_op_flags);
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
enum mdp5_pipe stage[][MAX_PIPE_STAGE],
enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
u32 stage_cnt, u32 ctl_blend_op_flags);
/**
* mdp_ctl_flush_mask...() - Register FLUSH masks
......@@ -71,7 +77,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
/* @flush_mask: see CTL flush masks definitions below */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
u32 flush_mask);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
......
......@@ -109,7 +109,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
int intf = mdp5_encoder->intf.num;
int intf = mdp5_encoder->intf->num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
......@@ -130,7 +130,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
ctrl_pol = 0;
/* DSI controller cannot handle active-low sync signals. */
if (mdp5_encoder->intf.type != INTF_DSI) {
if (mdp5_encoder->intf->type != INTF_DSI) {
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
......@@ -175,7 +175,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
if (mdp5_encoder->intf.type == INTF_eDP) {
if (mdp5_encoder->intf->type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
......@@ -206,8 +206,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
mdp5_encoder->ctl);
mdp5_crtc_set_pipeline(encoder->crtc);
}
static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
......@@ -215,20 +214,21 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
int lm = mdp5_crtc_get_lm(encoder->crtc);
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
struct mdp5_interface *intf = mdp5_encoder->intf;
int intfn = mdp5_encoder->intf->num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
mdp5_ctl_set_encoder_state(ctl, false);
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
/*
* Wait for a vsync so we know the ENABLE=0 latched before
......@@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
bs_set(mdp5_encoder, 0);
......@@ -250,8 +250,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
struct mdp5_interface *intf = mdp5_encoder->intf;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
int intfn = intf->num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
......@@ -261,9 +262,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
mdp5_ctl_set_encoder_state(ctl, true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_encoder->enabled = true;
}
......@@ -273,7 +274,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
......@@ -284,7 +285,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
......@@ -295,7 +296,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
......@@ -303,17 +304,33 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
mdp5_vid_encoder_enable(encoder);
}
static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
struct mdp5_interface *intf = mdp5_encoder->intf;
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
return 0;
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
.atomic_check = mdp5_encoder_atomic_check,
};
int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf.num;
int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
}
......@@ -322,7 +339,7 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf.num;
int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
......@@ -340,7 +357,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
return -EINVAL;
mdp5_kms = get_kms(encoder);
intf_num = mdp5_encoder->intf.num;
intf_num = mdp5_encoder->intf->num;
/* Switch slave encoder's TimingGen Sync mode,
* to use the master's enable signal for the slave encoder.
......@@ -369,7 +386,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
struct mdp5_interface *intf = mdp5_encoder->intf;
/* TODO: Expand this to set writeback modes too */
if (cmd_mode) {
......@@ -385,7 +402,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl)
struct mdp5_interface *intf,
struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
......@@ -399,9 +417,9 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
goto fail;
}
memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
mdp5_encoder->intf = intf;
spin_lock_init(&mdp5_encoder->intf_lock);
......
......@@ -93,6 +93,7 @@ struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
/* Copy state: */
new_state->hwpipe = mdp5_kms->state->hwpipe;
new_state->hwmixer = mdp5_kms->state->hwmixer;
if (mdp5_kms->smp)
new_state->smp = mdp5_kms->state->smp;
......@@ -165,13 +166,16 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = mdp5_kms->aspace;
int i;
for (i = 0; i < mdp5_kms->num_hwmixers; i++)
mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
for (i = 0; i < mdp5_kms->num_hwpipes; i++)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
msm_gem_address_space_destroy(aspace);
msm_gem_address_space_put(aspace);
}
}
......@@ -268,19 +272,14 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
}
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
enum mdp5_intf_type intf_type, int intf_num,
struct mdp5_ctl *ctl)
struct mdp5_interface *intf,
struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct mdp5_interface intf = {
.num = intf_num,
.type = intf_type,
.mode = MDP5_INTF_MODE_NONE,
};
encoder = mdp5_encoder_init(dev, &intf, ctl);
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
return encoder;
......@@ -309,32 +308,28 @@ static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
return -EINVAL;
}
static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
struct mdp5_interface *intf)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg =
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
struct mdp5_ctl *ctl;
struct drm_encoder *encoder;
int ret = 0;
switch (intf_type) {
case INTF_DISABLED:
break;
switch (intf->type) {
case INTF_eDP:
if (!priv->edp)
break;
ctl = mdp5_ctlm_request(ctlm, intf_num);
ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
......@@ -346,13 +341,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->hdmi)
break;
ctl = mdp5_ctlm_request(ctlm, intf_num);
ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
......@@ -362,11 +357,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
case INTF_DSI:
{
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
const struct mdp5_cfg_hw *hw_cfg =
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
dev_err(dev->dev, "failed to find dsi from intf %d\n",
intf_num);
intf->num);
ret = -EINVAL;
break;
}
......@@ -374,13 +371,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->dsi[dsi_id])
break;
ctl = mdp5_ctlm_request(ctlm, intf_num);
ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
......@@ -390,7 +387,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
}
default:
dev_err(dev->dev, "unknown intf: %d\n", intf_type);
dev_err(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
......@@ -414,8 +411,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
ret = modeset_init_intf(mdp5_kms, i);
for (i = 0; i < mdp5_kms->num_intfs; i++) {
ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
if (ret)
goto fail;
}
......@@ -425,7 +422,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* the MDP5 interfaces) than the number of layer mixers present in HW,
* but let's be safe here anyway
*/
num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
......@@ -744,6 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
static void mdp5_destroy(struct platform_device *pdev)
{
struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
int i;
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
......@@ -752,6 +750,9 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
for (i = 0; i < mdp5_kms->num_intfs; i++)
kfree(mdp5_kms->intfs[i]);
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
......@@ -829,6 +830,64 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
return 0;
}
static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
for (i = 0; i < hw_cfg->lm.count; i++) {
struct mdp5_hw_mixer *mixer;
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
dev_err(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
mixer->idx = mdp5_kms->num_hwmixers;
mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
}
return 0;
}
static int interface_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
const struct mdp5_cfg_hw *hw_cfg;
const enum mdp5_intf_type *intf_types;
int i;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
intf_types = hw_cfg->intf.connect;
for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
struct mdp5_interface *intf;
if (intf_types[i] == INTF_DISABLED)
continue;
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
dev_err(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}
intf->num = i;
intf->type = intf_types[i];
intf->mode = MDP5_INTF_MODE_NONE;
intf->idx = mdp5_kms->num_intfs;
mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
}
return 0;
}
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
......@@ -929,6 +988,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
if (ret)
goto fail;
ret = hwmixer_init(mdp5_kms);
if (ret)
goto fail;
ret = interface_init(mdp5_kms);
if (ret)
goto fail;
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
......
......@@ -23,8 +23,9 @@
#include "mdp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_ctl.h"
#include "mdp5_pipe.h"
#include "mdp5_mixer.h"
#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_state;
......@@ -39,6 +40,12 @@ struct mdp5_kms {
unsigned num_hwpipes;
struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
unsigned num_hwmixers;
struct mdp5_hw_mixer *hwmixers[8];
unsigned num_intfs;
struct mdp5_interface *intfs[5];
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
......@@ -83,6 +90,7 @@ struct mdp5_kms {
*/
struct mdp5_state {
struct mdp5_hw_pipe_state hwpipe;
struct mdp5_hw_mixer_state hwmixer;
struct mdp5_smp_state smp;
};
......@@ -96,6 +104,7 @@ struct mdp5_plane_state {
struct drm_plane_state base;
struct mdp5_hw_pipe *hwpipe;
struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
/* aligned with property */
uint8_t premultiplied;
......@@ -108,6 +117,28 @@ struct mdp5_plane_state {
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
struct mdp5_pipeline {
struct mdp5_interface *intf;
struct mdp5_hw_mixer *mixer;
struct mdp5_hw_mixer *r_mixer; /* right mixer */
};
struct mdp5_crtc_state {
struct drm_crtc_state base;
struct mdp5_ctl *ctl;
struct mdp5_pipeline pipeline;
/* these are derivatives of intf/mixer state in mdp5_pipeline */
u32 vblank_irqmask;
u32 err_irqmask;
u32 pp_done_irqmask;
bool cmd_mode;
};
#define to_mdp5_crtc_state(x) \
container_of(x, struct mdp5_crtc_state, base)
enum mdp5_intf_mode {
MDP5_INTF_MODE_NONE = 0,
......@@ -121,6 +152,7 @@ enum mdp5_intf_mode {
};
struct mdp5_interface {
int idx;
int num; /* display interface number */
enum mdp5_intf_type type;
enum mdp5_intf_mode mode;
......@@ -128,11 +160,11 @@ struct mdp5_interface {
struct mdp5_encoder {
struct drm_encoder base;
struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
struct mdp5_interface *intf;
struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
......@@ -197,8 +229,8 @@ static inline uint32_t intf2err(int intf_num)
}
}
#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer)
static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
struct mdp5_interface *intf)
{
/*
* In case of DSI Command Mode, the Ping Pong's read pointer IRQ
......@@ -208,7 +240,7 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
if ((intf->type == INTF_DSI) &&
(intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
if (intf->type == INTF_WB)
return MDP5_IRQ_WB_2_DONE;
......@@ -222,9 +254,9 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
}
}
static inline uint32_t lm2ppdone(int lm)
static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
{
return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}
int mdp5_disable(struct mdp5_kms *mdp5_kms);
......@@ -243,15 +275,16 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type);
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane,
......
/*
* Copyright (C) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
/*
* As of now, there are only 2 combinations possible for source split:
*
* Left | Right
* -----|------
* LM0 | LM1
* LM2 | LM5
*
*/
static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
{
int i;
int pair_lm;
pair_lm = lm_right_pair[lm];
if (pair_lm < 0)
return -EINVAL;
for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
if (mixer->lm == pair_lm)
return mixer->idx;
}
return -1;
}
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_state *state = mdp5_get_state(s);
struct mdp5_hw_mixer_state *new_state;
int i;
if (IS_ERR(state))
return PTR_ERR(state);
new_state = &state->hwmixer;
for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
/*
* skip if already in-use by a different CRTC. If there is a
* mixer already assigned to this CRTC, it means this call is
* a request to get an additional right mixer. Assume that the
* existing mixer is the 'left' one, and try to see if we can
* get its corresponding 'right' pair.
*/
if (new_state->hwmixer_to_crtc[cur->idx] &&
new_state->hwmixer_to_crtc[cur->idx] != crtc)
continue;
/* skip if doesn't support some required caps: */
if (caps & ~cur->caps)
continue;
if (r_mixer) {
int pair_idx;
pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
if (pair_idx < 0)
return -EINVAL;
if (new_state->hwmixer_to_crtc[pair_idx])
continue;
*r_mixer = mdp5_kms->hwmixers[pair_idx];
}
/*
* prefer a pair-able LM over an unpairable one. We can
* switch the CRTC from Normal mode to Source Split mode
* without requiring a full modeset if we had already
* assigned this CRTC a pair-able LM.
*
* TODO: There will be assignment sequences which would
* result in the CRTC requiring a full modeset, even
* if we have the LM resources to prevent it. For a platform
* with a few displays, we don't run out of pair-able LMs
* so easily. For now, ignore the possibility of requiring
* a full modeset.
*/
if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
*mixer = cur;
}
if (!(*mixer))
return -ENOMEM;
if (r_mixer && !(*r_mixer))
return -ENOMEM;
DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
if (r_mixer) {
DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
crtc->name);
new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
}
return 0;
}
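For reference, a hypothetical caller, modelled on what a CRTC's atomic_check would do ('need_right_mixer' is illustrative), asks for a pair-capable left mixer plus the matching right one only when source split is in play:

struct mdp5_hw_mixer *mixer = NULL, *r_mixer = NULL;
uint32_t caps = MDP_LM_CAP_DISPLAY;
int ret;

/* the left LM must itself be pair-capable for source split */
if (need_right_mixer)
	caps |= MDP_LM_CAP_PAIR;

ret = mdp5_mixer_assign(state, crtc, caps, &mixer,
			need_right_mixer ? &r_mixer : NULL);
if (ret)
	return ret;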
void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_state *state = mdp5_get_state(s);
struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
if (!mixer)
return;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
return;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
}
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
{
kfree(mixer);
}
static const char * const mixer_names[] = {
"LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
};
struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
{
struct mdp5_hw_mixer *mixer;
mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return ERR_PTR(-ENOMEM);
mixer->name = mixer_names[lm->id];
mixer->lm = lm->id;
mixer->caps = lm->caps;
mixer->pp = lm->pp;
mixer->dspp = lm->dspp;
mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
return mixer;
}
/*
* Copyright (C) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_LM_H__
#define __MDP5_LM_H__
/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */
struct mdp5_hw_mixer {
int idx;
const char *name;
int lm; /* the LM instance # */
uint32_t caps;
int pp;
int dspp;
uint32_t flush_mask; /* used to commit LM registers */
};
/* global atomic state of assignment between CRTCs and Layer Mixers: */
struct mdp5_hw_mixer_state {
struct drm_crtc *hwmixer_to_crtc[8];
};
struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
void mdp5_mixer_release(struct drm_atomic_state *s,
struct mdp5_hw_mixer *mixer);
#endif /* __MDP5_LM_H__ */
......@@ -135,7 +135,5 @@ struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
hwpipe->caps = caps;
hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
spin_lock_init(&hwpipe->pipe_lock);
return hwpipe;
}
......@@ -28,7 +28,6 @@ struct mdp5_hw_pipe {
const char *name;
enum mdp5_pipe pipe;
spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
uint32_t reg_offset;
uint32_t caps;
......
......@@ -104,6 +104,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
/* MDP pipe capabilities */
#define MDP_PIPE_CAP_HFLIP BIT(0)
......@@ -114,6 +115,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
#define MDP_PIPE_CAP_CURSOR BIT(6)
/* MDP layer mixer caps */
#define MDP_LM_CAP_DISPLAY BIT(0)
#define MDP_LM_CAP_WB BIT(1)
#define MDP_LM_CAP_PAIR BIT(2)
static inline bool pipe_supports_yuv(uint32_t pipe_caps)
{
return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
......
......@@ -28,7 +28,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->show(gpu, m);
pm_runtime_put_sync(&gpu->pdev->dev);
}
return 0;
......
......@@ -55,14 +55,13 @@ int msm_register_address_space(struct drm_device *dev,
struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
int idx = priv->num_aspaces++;
if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
priv->aspace[idx] = aspace;
priv->aspace[priv->num_aspaces] = aspace;
return idx;
return priv->num_aspaces++;
}
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
......@@ -265,6 +264,8 @@ static int msm_drm_uninit(struct device *dev)
if (gpu) {
mutex_lock(&ddev->struct_mutex);
// XXX what do we do here?
//pm_runtime_enable(&pdev->dev);
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
......@@ -539,7 +540,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
......@@ -812,7 +813,7 @@ static struct drm_driver msm_driver = {
DRIVER_ATOMIC |
DRIVER_MODESET,
.open = msm_open,
.preclose = msm_preclose,
.postclose = msm_postclose,
.lastclose = msm_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
......
......@@ -191,7 +191,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
......
......@@ -18,6 +18,7 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
......@@ -31,6 +32,7 @@ struct msm_gem_address_space {
*/
struct drm_mm mm;
struct msm_mmu *mmu;
struct kref kref;
};
struct msm_gem_vma {
......
......@@ -404,6 +404,24 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
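/* The explicit in-fence is now waited on before struct_mutex is taken,
 * presumably so a potentially long dma_fence_wait() on a foreign fence
 * does not block other submitters holding the lock:
 */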
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence)
return -EINVAL;
/* TODO if we get an array-fence due to userspace merging multiple
* fences, we need a way to determine if all the backing fences
* are from our own context..
*/
if (in_fence->context != gpu->fctx->context) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
}
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
......@@ -431,27 +449,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
ret = -EINVAL;
goto out;
}
/* TODO if we get an array-fence due to userspace merging multiple
* fences, we need a way to determine if all the backing fences
* are from our own context..
*/
if (in_fence->context != gpu->fctx->context) {
ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
}
if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
......
......@@ -19,6 +19,25 @@
#include "msm_gem.h"
#include "msm_mmu.h"
static void
msm_gem_address_space_destroy(struct kref *kref)
{
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
}
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
if (aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
......@@ -34,6 +53,8 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
drm_mm_remove_node(&vma->node);
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
int
......@@ -57,16 +78,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
size, IOMMU_READ | IOMMU_WRITE);
}
return ret;
}
/* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
void
msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
{
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
return ret;
}
struct msm_gem_address_space *
......@@ -85,5 +100,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
(domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
kref_init(&aspace->kref);
return aspace;
}
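Taken together, the new contract is: the creator holds the initial reference from kref_init(), every successfully mapped VMA takes one more, and unmap drops it again. A minimal illustrative user (not from this patch; assumes the vma was set up as usual):

static void example_aspace_user(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	if (msm_gem_map_vma(aspace, vma, sgt, npages) == 0) {
		/* the mapping now holds its own reference on aspace */
		/* ... use vma->iova ... */
		msm_gem_unmap_vma(aspace, vma, sgt);	/* drops that ref */
	}

	/* drop the creator's reference; the aspace is freed only when
	 * the last reference goes away, so teardown order is safe */
	msm_gem_address_space_put(aspace);
}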
......@@ -93,18 +93,18 @@ static int enable_clk(struct msm_gpu *gpu)
{
int i;
if (gpu->grp_clks[0] && gpu->fast_rate)
clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
if (gpu->core_clk && gpu->fast_rate)
clk_set_rate(gpu->core_clk, gpu->fast_rate);
/* Set the RBBM timer rate to 19.2Mhz */
if (gpu->grp_clks[2])
clk_set_rate(gpu->grp_clks[2], 19200000);
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);
for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_prepare(gpu->grp_clks[i]);
for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
......@@ -115,19 +115,24 @@ static int disable_clk(struct msm_gpu *gpu)
{
int i;
for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
if (gpu->grp_clks[0] && gpu->slow_rate)
clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
/*
* Set the clock to a deliberately low rate. On older targets the clock
* speed had to be non zero to avoid problems. On newer targets this
* will be rounded down to zero anyway so it all works out.
*/
if (gpu->core_clk)
clk_set_rate(gpu->core_clk, 27000000);
if (gpu->grp_clks[2])
clk_set_rate(gpu->grp_clks[2], 0);
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 0);
return 0;
}
......@@ -152,18 +157,9 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (gpu->active_cnt++ > 0)
return 0;
if (WARN_ON(gpu->active_cnt <= 0))
return -EINVAL;
DBG("%s", gpu->name);
ret = enable_pwrrail(gpu);
if (ret)
......@@ -177,23 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
gpu->needs_hw_init = true;
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (--gpu->active_cnt > 0)
return 0;
if (WARN_ON(gpu->active_cnt < 0))
return -EINVAL;
DBG("%s", gpu->name);
ret = disable_axi(gpu);
if (ret)
......@@ -210,53 +199,20 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
/*
* Inactivity detection (for suspend):
*/
static void inactive_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
struct drm_device *dev = gpu->dev;
if (gpu->inactive)
return;
DBG("%s: inactive!\n", gpu->name);
mutex_lock(&dev->struct_mutex);
if (!(msm_gpu_active(gpu) || gpu->inactive)) {
disable_axi(gpu);
disable_clk(gpu);
gpu->inactive = true;
}
mutex_unlock(&dev->struct_mutex);
}
static void inactive_handler(unsigned long data)
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct msm_drm_private *priv = gpu->dev->dev_private;
int ret;
queue_work(priv->wq, &gpu->inactive_work);
}
if (!gpu->needs_hw_init)
return 0;
/* cancel inactive timer and make sure we are awake: */
static void inactive_cancel(struct msm_gpu *gpu)
{
DBG("%s", gpu->name);
del_timer(&gpu->inactive_timer);
if (gpu->inactive) {
enable_clk(gpu);
enable_axi(gpu);
gpu->inactive = false;
}
}
disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (!ret)
gpu->needs_hw_init = false;
enable_irq(gpu->irq);
static void inactive_start(struct msm_gpu *gpu)
{
DBG("%s", gpu->name);
mod_timer(&gpu->inactive_timer,
round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
return ret;
}
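msm_gpu_hw_init() above is the lazy re-initialization hook that replaces the inactivity machinery: needs_hw_init is set on every resume and cleared only after hw_init() succeeds, with the IRQ masked so a half-initialized GPU cannot interrupt. Callers elsewhere in this series bracket it with a runtime-PM reference, roughly:

	/* typical call site, as in the submit path later in this diff */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_hw_init(gpu);	/* no-op unless the GPU was powered down */
	/* ... queue work; the reference is dropped in retire_submit() ... */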
/*
......@@ -296,8 +252,9 @@ static void recover_worker(struct work_struct *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
inactive_cancel(gpu);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
/* replay the remaining submits after the one that hung: */
list_for_each_entry(submit, &gpu->submit_list, node) {
......@@ -400,6 +357,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
unsigned long flags;
pm_runtime_get_sync(&gpu->pdev->dev);
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
......@@ -413,6 +372,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
......@@ -458,6 +418,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
drm_gem_object_unreference(&msm_obj->base);
}
pm_runtime_mark_last_busy(&gpu->pdev->dev);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
msm_gem_submit_free(submit);
}
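pm_runtime_put_autosuspend() defers the actual suspend by the device's autosuspend delay, so back-to-back submits do not bounce the power state. That only works if the probe path opted in; the usual setup looks like the sketch below (assumed configuration reusing the existing DRM_MSM_INACTIVE_PERIOD define; not part of this hunk):

	/* opt in to autosuspend with a ~66ms grace period before power-off */
	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);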
......@@ -492,9 +454,6 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
if (!msm_gpu_active(gpu))
inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
......@@ -515,7 +474,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
inactive_cancel(gpu);
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
list_add_tail(&submit->node, &gpu->submit_list);
......@@ -559,16 +520,52 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
static const char *clk_names[] = {
"core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
};
static struct clk *get_clock(struct device *dev, const char *name)
{
struct clk *clk = devm_clk_get(dev, name);
return IS_ERR(clk) ? NULL : clk;
}
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
struct device *dev = &pdev->dev;
struct property *prop;
const char *name;
int i = 0;
gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
if (gpu->nr_clocks < 1) {
gpu->nr_clocks = 0;
return 0;
}
gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
GFP_KERNEL);
if (!gpu->grp_clks)
return -ENOMEM;
of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
gpu->grp_clks[i] = get_clock(dev, name);
/* Remember the key clocks that we need to control later */
if (!strcmp(name, "core"))
gpu->core_clk = gpu->grp_clks[i];
else if (!strcmp(name, "rbbmtimer"))
gpu->rbbmtimer_clk = gpu->grp_clks[i];
++i;
}
return 0;
}
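get_clocks() above discovers the clock list entirely from the standard clock-names property instead of a hard-coded table, so per-SoC differences live in the device tree. A hypothetical GPU node illustrating the names the code keys on ("core" and "rbbmtimer" get cached; everything else is only bulk prepared/enabled); the phandles and node address here are placeholders, not from this commit:

	gpu@fdb00000 {
		compatible = "qcom,adreno-330.2", "qcom,adreno";
		/* illustrative bindings only */
		clocks = <&mmcc 0>, <&mmcc 1>, <&mmcc 2>;
		clock-names = "core", "iface", "rbbmtimer";
	};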
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz)
{
struct iommu_domain *iommu;
int i, ret;
int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
......@@ -576,7 +573,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
gpu->inactive = true;
gpu->fctx = msm_fence_context_alloc(drm, name);
if (IS_ERR(gpu->fctx)) {
ret = PTR_ERR(gpu->fctx);
......@@ -586,19 +582,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
INIT_LIST_HEAD(&gpu->submit_list);
setup_timer(&gpu->inactive_timer, inactive_handler,
(unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
spin_lock_init(&gpu->perf_lock);
BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
gpu->mmio = msm_ioremap(pdev, ioname, name);
......@@ -622,13 +614,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
/* Acquire clocks: */
for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
if (IS_ERR(gpu->grp_clks[i]))
gpu->grp_clks[i] = NULL;
}
ret = get_clocks(pdev, gpu);
if (ret)
goto fail;
gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
......@@ -684,6 +672,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
gpu->pdev = pdev;
platform_set_drvdata(pdev, gpu);
bs_init(gpu);
return 0;
......
......@@ -64,6 +64,7 @@ struct msm_gpu_funcs {
struct msm_gpu {
const char *name;
struct drm_device *dev;
struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
/* performance counters (hw & sw): */
......@@ -88,9 +89,8 @@ struct msm_gpu {
/* fencing: */
struct msm_fence_context *fctx;
/* is gpu powered/active? */
int active_cnt;
bool inactive;
/* does gpu need hw_init? */
bool needs_hw_init;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
......@@ -103,8 +103,10 @@ struct msm_gpu {
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
struct clk *ebi1_clk, *grp_clks[6];
uint32_t fast_rate, slow_rate, bus_freq;
struct clk **grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
......@@ -114,9 +116,7 @@ struct msm_gpu {
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
struct timer_list inactive_timer;
struct work_struct inactive_work;
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
......@@ -196,6 +196,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
int msm_gpu_hw_init(struct msm_gpu *gpu);
void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
......
......@@ -38,78 +38,47 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
int ret;
pm_runtime_get_sync(mmu->dev);
ret = iommu_attach_device(iommu->domain, mmu->dev);
pm_runtime_put_sync(mmu->dev);
return ret;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned long da = iova;
unsigned int i, j;
int ret;
if (!domain || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
size_t ret;
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
WARN_ON(ret < len);
da += bytes;
}
return 0;
fail:
da = iova;
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
iommu_unmap(domain, da, bytes);
da += bytes;
}
return ret;
return (ret == len) ? 0 : -EINVAL;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned long da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg->length + sg->offset;
size_t unmapped;
unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
return unmapped;
VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
da += bytes;
}
pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
pm_runtime_put_sync(mmu->dev);
return 0;
}
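iommu_map_sg() walks the scatterlist inside the IOMMU driver and returns the number of bytes actually mapped, which is why a single length comparison now replaces the old per-entry loop and its manual rollback. A caller that wanted to unwind a partial mapping (this hunk just returns -EINVAL) could still do so in one call; a sketch of the map body under that stricter policy:

	size_t mapped = iommu_map_sg(iommu->domain, iova, sgt->sgl,
			sgt->nents, prot);

	if (mapped < len) {
		/* unmap whatever landed before the failure, then report it */
		iommu_unmap(iommu->domain, iova, mapped);
		return -EINVAL;
	}
	return 0;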
......
......@@ -322,7 +322,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
}
for (i = 0; i < submit->nr_cmds; i++) {
uint32_t iova = submit->cmd[i].iova;
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
......@@ -341,7 +341,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
(uint32_t[2]){ iova, szd }, 8);
(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
break;
}
}
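The cmdstream record grows from two to three 32-bit words: the low half of the now-64-bit iova, the size in dwords, and the high half appended last so existing parsers still find the first two fields where they expect them. The consuming side (userspace rd tooling) would reassemble it like this sketch:

	/* given uint32_t buf[3] read from one RD_CMDSTREAM_ADDR record: */
	uint64_t iova = ((uint64_t)buf[2] << 32) | buf[0];
	uint32_t size_dwords = buf[1];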
......
......@@ -72,6 +72,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CHIP_ID 0x03
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
......
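MSM_PARAM_GMEM_BASE lets userspace query where GMEM sits in the GPU's address space. A minimal userspace sketch using libdrm's command helpers (illustrative; error handling trimmed):

	#include <stdint.h>
	#include <xf86drm.h>
	#include "msm_drm.h"

	/* query the GMEM base address for the 3D pipe on an open DRM fd */
	static int get_gmem_base(int fd, uint64_t *base)
	{
		struct drm_msm_param req = {
			.pipe  = MSM_PIPE_3D0,
			.param = MSM_PARAM_GMEM_BASE,
		};
		int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM,
				&req, sizeof(req));

		if (ret)
			return ret;

		*base = req.value;
		return 0;
	}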