Commit d7ca2d19 authored by Dave Airlie

Merge tag 'drm-msm-next-2020-01-14' of https://gitlab.freedesktop.org/drm/msm into drm-next

+ sc7180 display + DSI support
+ a618 (sc7180) support
+ more UBWC (bandwidth compression) support
+ various cleanups to handle devices that use vs don't
  use zap fw, etc
+ usual random cleanups and fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvv03ifuP0tp7-dmqZtr1iS=s8Vc=az8BNGtEoSMD-dkw@mail.gmail.com
parents 3d474313 5f9935f5
......@@ -8,7 +8,7 @@ The DPU display controller is found in SDM845 SoC.
MDSS:
Required properties:
- compatible: "qcom,sdm845-mdss"
- compatible: "qcom,sdm845-mdss", "qcom,sc7180-mdss"
- reg: physical base address and length of controller's registers.
- reg-names: register region names. The following region is required:
* "mdss"
......@@ -41,7 +41,7 @@ Optional properties:
MDP:
Required properties:
- compatible: "qcom,sdm845-dpu"
- compatible: "qcom,sdm845-dpu", "qcom,sc7180-dpu"
- reg: physical base address and length of controller's registers.
- reg-names : register region names. The following region is required:
* "mdp"
......
......@@ -23,13 +23,18 @@ Required properties:
- iommus: optional phandle to an adreno iommu instance
- operating-points-v2: optional phandle to the OPP operating points
- interconnects: optional phandle to an interconnect provider. See
../interconnect/interconnect.txt for details.
../interconnect/interconnect.txt for details. Some A3xx and all A4xx platforms
will have two paths; all others will have one path.
- interconnect-names: The names of the interconnect paths that correspond to the
interconnects property. Values must be gfx-mem and ocmem (a sketch of the
two-path case follows this property list).
- qcom,gmu: For GMU attached devices a phandle to the GMU device that will
control the power for the GPU. Applicable targets:
- qcom,adreno-630.2
- zap-shader: For a5xx and a6xx devices this node contains a memory-region that
points to reserved memory to store the zap shader that can be used to help
bring the GPU out of secure mode.
- firmware-name: optional property of the 'zap-shader' node, listing the
relative path of the device specific zap firmware.
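For example, the two-path A3xx/A4xx case might look like the sketch below
(the interconnect provider phandles and endpoint macros here are illustrative
assumptions, not taken from this binding):

	interconnects = <&mmssnoc MASTER_GRAPHICS_3D &bimc SLAVE_EBI_CH0>,
			<&mmssnoc MASTER_GRAPHICS_3D &ocmemnoc SLAVE_OCMEM>;
	interconnect-names = "gfx-mem", "ocmem";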
Example 3xx/4xx/a5xx:
......@@ -76,11 +81,13 @@ Example a6xx (with GMU):
operating-points-v2 = <&gpu_opp_table>;
interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>;
interconnect-names = "gfx-mem";
qcom,gmu = <&gmu>;
zap-shader {
memory-region = <&zap_shader_region>;
firmware-name = "qcom/LENOVO/81JL/qcdxkmsuc850.mbn";
};
};
};
......@@ -506,6 +506,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
/*
* Set the ICC path to maximum speed for now by multiplying the fastest
* frequency by the bus width (8). We'll want to scale this later on to
* improve battery life.
*/
icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
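/*
* Worked example (illustrative, not from this commit): Bps_to_icc()
* divides by 1000 to convert to the kBps units the interconnect
* framework expects, so a hypothetical fast_rate of 550000000 (550 MHz)
* votes each path at 550000000 / 1000 * 8 = 4400000 kBps, i.e. ~4.4 GB/s.
*/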
return gpu;
fail:
......
......@@ -591,6 +591,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
goto fail;
}
/*
* Set the ICC path to maximum speed for now by multiplying the fastest
* frequency by the bus width (8). We'll want to scale this later on to
* improve battery life.
*/
icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
return gpu;
fail:
......
......@@ -753,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else {
/* Print a warning so if we die, we know why */
} else if (ret == -ENODEV) {
/*
* This device does not use zap shader (but print a warning
* just in case someone got their dt wrong.. hopefully they
* have a debug UART to realize the error of their ways...
* if you mess this up you are about to crash horribly)
*/
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
} else {
return ret;
}
/* Last step - yield the ringbuffer */
......
......@@ -16,11 +16,11 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
......@@ -2519,6 +2519,54 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
#define REG_A6XX_GBIF_HALT 0x00003c45
#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/interconnect.h>
......@@ -149,6 +149,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
if (freq == gmu->gpu_freqs[perf_index])
break;
gmu->current_perf_index = perf_index;
__a6xx_gmu_set_freq(gmu, perf_index);
}
......@@ -433,6 +435,8 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
......@@ -480,20 +484,34 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
if (adreno_is_a618(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
if (adreno_is_a618(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
if (adreno_is_a618(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
......@@ -741,8 +759,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
enable_irq(gmu->hfi_irq);
/* Set the GPU to the highest power frequency */
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
/* Set the GPU to the current freq */
__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
......@@ -1166,6 +1184,8 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
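/*
* Start from the highest available freq: a6xx_gmu_set_freq() updates
* this index as devfreq changes levels, and a6xx_gmu_resume() restores
* it after slumber instead of forcing the maximum.
*/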
gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
......
......@@ -63,6 +63,9 @@ struct a6xx_gmu {
struct clk_bulk_data *clocks;
struct clk *core_clk;
/* current performance index set externally */
int current_perf_index;
int nr_gpu_freqs;
unsigned long gpu_freqs[16];
u32 gx_arc_votes[16];
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include "msm_gem.h"
......@@ -378,6 +378,18 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
/*
* During a previous slumber, GBIF halt is asserted to ensure
* no further transactions can go through the GPU before the
* GPU headswitch is turned off.
*
* This halt is deasserted once the headswitch goes off, but in
* case the headswitch doesn't go off, clear the GBIF halt here
* to ensure GPU wake-up doesn't fail because of halted GPU
* transactions.
*/
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
......@@ -406,12 +418,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
/*
* Enable hardware clock gating.
* For now, enable it only for a630.
*/
if (adreno_is_a630(adreno_gpu))
a6xx_set_hwcg(gpu, true);
/* VBIF start */
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* VBIF/GBIF start */
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
......@@ -537,12 +554,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_flush(gpu, gpu->rb[0]);
if (!a6xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else {
/* Print a warning so if we die, we know why */
} else if (ret == -ENODEV) {
/*
* This device does not use zap shader (but print a warning
* just in case someone got their dt wrong.. hopefully they
* have a debug UART to realize the error of their ways...
* if you mess this up you are about to crash horribly)
*/
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
ret = 0;
} else {
return ret;
}
out:
......@@ -724,6 +748,39 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
if (!a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
0xf) == 0xf);
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
return;
}
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
/* Halt all AXI requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
/*
* GMU needs DDR access in slumber path. Deassert GBIF halt now
* to allow for GMU to access system memory.
*/
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
......@@ -748,6 +805,16 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
/*
* Make sure the GMU is idle before continuing (because some transitions
* may use VBIF)
*/
a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
/* Clear the VBIF pipe before shutting down */
/* FIXME: This accesses the GPU - do we need to make sure it is on? */
a6xx_bus_clear_pending_transactions(adreno_gpu);
return a6xx_gmu_stop(a6xx_gpu);
}
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__
......@@ -42,6 +42,13 @@ struct a6xx_gpu {
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
if (adreno_is_a630(gpu))
return false;
return true;
}
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#include <linux/ascii85.h>
#include "msm_gem.h"
......@@ -320,6 +320,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
{
struct resource *res;
void __iomem *cxdbg = NULL;
int nr_debugbus_blocks;
/* Set up the GX debug bus */
......@@ -374,9 +375,11 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
a6xx_state->debugbus = state_kcalloc(a6xx_state,
ARRAY_SIZE(a6xx_debugbus_blocks),
sizeof(*a6xx_state->debugbus));
nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
(a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
sizeof(*a6xx_state->debugbus));
if (a6xx_state->debugbus) {
int i;
......@@ -388,15 +391,31 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
/*
* GBIF has the same debugbus as the other GPU blocks, so fall
* back to the default path if the GPU uses GBIF; GBIF also uses
* exactly the same ID as VBIF.
*/
if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
a6xx_get_debugbus_block(gpu, a6xx_state,
&a6xx_gbif_debugbus_block,
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus += 1;
}
}
a6xx_state->vbif_debugbus =
state_kcalloc(a6xx_state, 1,
sizeof(*a6xx_state->vbif_debugbus));
/* Dump the VBIF debugbus on applicable targets */
if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
a6xx_state->vbif_debugbus =
state_kcalloc(a6xx_state, 1,
sizeof(*a6xx_state->vbif_debugbus));
if (a6xx_state->vbif_debugbus)
a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
a6xx_state->vbif_debugbus);
if (a6xx_state->vbif_debugbus)
a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
a6xx_state->vbif_debugbus);
}
if (cxdbg) {
a6xx_state->cx_debugbus =
......@@ -770,14 +789,16 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[1]);
}
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
ARRAY_SIZE(a6xx_reglist) +
ARRAY_SIZE(a6xx_hlsq_reglist);
ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
a6xx_state->registers = state_kcalloc(a6xx_state,
count, sizeof(*a6xx_state->registers));
......@@ -792,6 +813,15 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state, &a6xx_ahb_reglist[i],
&a6xx_state->registers[index++]);
if (a6xx_has_gbif(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_gbif_reglist,
&a6xx_state->registers[index++]);
else
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_vbif_reglist,
&a6xx_state->registers[index++]);
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_crashdumper_registers(gpu,
a6xx_state, &a6xx_reglist[i],
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#ifndef _A6XX_CRASH_DUMP_H_
#define _A6XX_CRASH_DUMP_H_
......@@ -307,11 +307,20 @@ static const u32 a6xx_vbif_registers[] = {
0x3410, 0x3410, 0x3800, 0x3801,
};
static const u32 a6xx_gbif_registers[] = {
0x3c00, 0x3c0b, 0x3c40, 0x3c47, 0x3cc0, 0x3cd1, 0xe3a, 0xe3a,
};
static const struct a6xx_registers a6xx_ahb_reglist[] = {
REGS(a6xx_ahb_registers, 0, 0),
REGS(a6xx_vbif_registers, 0, 0),
};
static const struct a6xx_registers a6xx_vbif_reglist =
REGS(a6xx_vbif_registers, 0, 0);
static const struct a6xx_registers a6xx_gbif_reglist =
REGS(a6xx_gbif_registers, 0, 0);
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
......@@ -422,6 +431,9 @@ static const struct a6xx_debugbus_block {
DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
};
static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
......
......@@ -166,6 +166,17 @@ static const struct adreno_info gpulist[] = {
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
.rev = ADRENO_REV(6, 1, 8, ANY_ID),
.revn = 618,
.name = "A618",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
}, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
......
......@@ -26,6 +26,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
{
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
const char *signed_fwname = NULL;
struct device_node *np, *mem_np;
struct resource r;
phys_addr_t mem_phys;
......@@ -58,8 +59,43 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
mem_phys = r.start;
/* Request the MDT file for the firmware */
fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
/*
* Check for a firmware-name property. This is the new scheme
* to handle firmware that may be signed with device specific
* keys, allowing us to have a different zap fw path for different
* devices.
*
* If the firmware-name property is found, we bypass the
* adreno_request_fw() mechanism, because we don't need to handle
* the /lib/firmware/qcom/... vs /lib/firmware/... case.
*
* If the firmware-name property is not found, for backwards
* compatibility we fall back to the fwname from the gpulist
* table.
*/
of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
if (signed_fwname) {
fwname = signed_fwname;
ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
if (ret)
fw = ERR_PTR(ret);
} else if (fwname) {
/* Request the MDT file from the default location: */
fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
} else {
/*
* For new targets, we require the firmware-name property,
* if a zap-shader is required, rather than falling back
* to a firmware name specified in gpulist.
*
* Because the firmware is signed with a (potentially)
* device specific key, having the name come from gpulist
* was a bad idea, and is only provided for backwards
* compatibility for older targets.
*/
return -ENODEV;
}
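/*
* For example, with the binding doc's firmware-name =
* "qcom/LENOVO/81JL/qcdxkmsuc850.mbn", the direct request above
* typically resolves to /lib/firmware/qcom/LENOVO/81JL/qcdxkmsuc850.mbn
* (the exact location depends on the distro's firmware search path).
*/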
if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
return PTR_ERR(fw);
......@@ -95,7 +131,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
* not. But since we've already gotten through adreno_request_fw()
* we know which of the two cases it is:
*/
if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
ret = qcom_mdt_load(dev, fw, fwname, pasid,
mem_region, mem_phys, mem_size, NULL);
} else {
......@@ -146,14 +182,6 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return -EPROBE_DEFER;
}
/* Each GPU has a target specific zap shader firmware name to use */
if (!adreno_gpu->info->zapfw) {
zap_available = false;
DRM_DEV_ERROR(&pdev->dev,
"Zap shader firmware file not specified for this target\n");
return -ENODEV;
}
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
......@@ -826,7 +854,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
......@@ -887,10 +915,21 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
/* Check for an interconnect path for the bus */
gpu->icc_path = of_icc_get(dev, NULL);
gpu->icc_path = of_icc_get(dev, "gfx-mem");
if (!gpu->icc_path) {
/*
* Keep compatibility with device trees that don't have an
* interconnect-names property.
*/
gpu->icc_path = of_icc_get(dev, NULL);
}
if (IS_ERR(gpu->icc_path))
gpu->icc_path = NULL;
gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
if (IS_ERR(gpu->ocmem_icc_path))
gpu->ocmem_icc_path = NULL;
return 0;
}
......@@ -977,6 +1016,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->fw[i]);
icc_put(gpu->icc_path);
icc_put(gpu->ocmem_icc_path);
msm_gpu_cleanup(&adreno_gpu->base);
}
......@@ -3,7 +3,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
*/
#ifndef __ADRENO_GPU_H__
......@@ -227,6 +227,16 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
}
static inline int adreno_is_a630(struct adreno_gpu *gpu)
{
return gpu->revn == 630;
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
......@@ -330,10 +340,7 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
if (offset_name >= REG_ADRENO_REGISTER_MAX ||
!gpu->reg_offsets[offset_name]) {
BUG();
}
BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
/*
* REG_SKIP is a special value that tell us that the register in
......
......@@ -197,10 +197,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
DPU_DEBUG("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
DPU_ERROR("invalid lm or ctl assigned to mixer\n");
return;
}
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
......@@ -1113,14 +1109,9 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
for (i = 0; i < cstate->num_mixers; ++i) {
m = &cstate->mixers[i];
if (!m->hw_lm)
seq_printf(s, "\tmixer[%d] has no lm\n", i);
else if (!m->lm_ctl)
seq_printf(s, "\tmixer[%d] has no ctl\n", i);
else
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
out_width, mode->vdisplay);
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
out_width, mode->vdisplay);
}
seq_puts(s, "\n");
......
......@@ -45,8 +45,7 @@ static bool dpu_encoder_phys_cmd_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
if (phys_enc)
DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
return true;
}
......@@ -58,11 +57,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc)
return;
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.setup_intf_cfg)
if (!ctl->ops.setup_intf_cfg)
return;
intf_cfg.intf = phys_enc->intf_idx;
......@@ -79,7 +75,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
int new_cnt;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
if (!phys_enc || !phys_enc->hw_pp)
if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("pp_done_irq");
......@@ -106,7 +102,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc || !phys_enc->hw_pp)
if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("rd_ptr_irq");
......@@ -125,9 +121,6 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
if (!phys_enc || !phys_enc->hw_ctl)
return;
DPU_ATRACE_BEGIN("ctl_start_irq");
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
......@@ -141,9 +134,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
if (!phys_enc)
return;
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
......@@ -179,7 +169,7 @@ static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !mode || !adj_mode) {
if (!mode || !adj_mode) {
DPU_ERROR("invalid args\n");
return;
}
......@@ -198,7 +188,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
if (!phys_enc->hw_pp)
return -EINVAL;
cmd_enc->pp_timeout_report_cnt++;
......@@ -247,11 +237,6 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
struct dpu_encoder_wait_info wait_info;
int ret;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
......@@ -273,7 +258,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
int ret = 0;
int refcount;
if (!phys_enc || !phys_enc->hw_pp) {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
......@@ -314,9 +299,6 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
if (!phys_enc)
return;
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
......@@ -351,7 +333,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
u32 vsync_hz;
struct dpu_kms *dpu_kms;
if (!phys_enc || !phys_enc->hw_pp) {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
......@@ -428,8 +410,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
......@@ -458,7 +439,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
......@@ -480,7 +461,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !phys_enc->hw_pp) {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid phys encoder\n");
return;
}
......@@ -499,8 +480,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
static void _dpu_encoder_phys_cmd_connect_te(
struct dpu_encoder_phys *phys_enc, bool enable)
{
if (!phys_enc || !phys_enc->hw_pp ||
!phys_enc->hw_pp->ops.connect_external_te)
if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
......@@ -518,7 +498,7 @@ static int dpu_encoder_phys_cmd_get_line_count(
{
struct dpu_hw_pingpong *hw_pp;
if (!phys_enc || !phys_enc->hw_pp)
if (!phys_enc->hw_pp)
return -EINVAL;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
......@@ -536,7 +516,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !phys_enc->hw_pp) {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
......@@ -559,10 +539,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
kfree(cmd_enc);
}
......@@ -580,7 +556,7 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
to_dpu_encoder_phys_cmd(phys_enc);
int ret;
if (!phys_enc || !phys_enc->hw_pp) {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
......@@ -614,11 +590,6 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_wait_info wait_info;
int ret;
if (!phys_enc || !phys_enc->hw_ctl) {
DPU_ERROR("invalid argument(s)\n");
return -EINVAL;
}
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
......@@ -639,9 +610,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
{
int rc;
if (!phys_enc)
return -EINVAL;
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
......@@ -658,9 +626,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return -EINVAL;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
......@@ -681,9 +646,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
struct dpu_encoder_phys_cmd *cmd_enc;
struct dpu_encoder_wait_info wait_info;
if (!phys_enc)
return -EINVAL;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
......@@ -715,9 +677,6 @@ static void dpu_encoder_phys_cmd_handle_post_kickoff(
static void dpu_encoder_phys_cmd_trigger_start(
struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return;
dpu_encoder_helper_trigger_start(phys_enc);
}
......@@ -816,6 +775,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
return phys_enc;
return ERR_PTR(ret);
}
......@@ -220,8 +220,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
if (phys_enc)
DPU_DEBUG_VIDENC(phys_enc, "\n");
DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
......@@ -239,7 +238,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
return;
}
......@@ -280,6 +279,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
if (phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
true,
phys_enc->hw_pp->idx);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
......@@ -293,12 +300,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
if (!phys_enc)
return;
hw_ctl = phys_enc->hw_ctl;
if (!hw_ctl)
return;
DPU_ATRACE_BEGIN("vblank_irq");
......@@ -314,7 +316,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
if (hw_ctl && hw_ctl->ops.get_flush_register)
if (hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
......@@ -335,9 +337,6 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
if (!phys_enc)
return;
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
......@@ -374,11 +373,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
if (!phys_enc) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
......@@ -395,11 +389,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
int ret = 0;
int refcount;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
......@@ -435,6 +424,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
u32 intf_flush_mask = 0;
ctl = phys_enc->hw_ctl;
......@@ -459,10 +449,18 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
if (ctl->ops.get_bitmask_active_intf)
ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask,
phys_enc->hw_intf->idx);
if (ctl->ops.update_pending_intf_flush)
ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
"update pending flush ctl %d flush_mask %x\n",
ctl->idx - CTL_0, flush_mask);
"update pending flush ctl %d flush_mask 0x%x intf_mask 0x%x\n",
ctl->idx - CTL_0, flush_mask, intf_flush_mask);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
......@@ -471,11 +469,6 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
DPU_DEBUG_VIDENC(phys_enc, "\n");
kfree(phys_enc);
}
......@@ -493,11 +486,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_wait_info wait_info;
int ret;
if (!phys_enc) {
pr_err("invalid encoder\n");
return -EINVAL;
}
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
......@@ -543,13 +531,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_hw_ctl *ctl;
int rc;
if (!phys_enc) {
DPU_ERROR("invalid encoder/parameters\n");
return;
}
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.wait_reset_status)
if (!ctl->ops.wait_reset_status)
return;
/*
......@@ -569,12 +552,12 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
unsigned long lock_flags;
int ret;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
if (!phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
......@@ -639,9 +622,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
{
int ret;
if (!phys_enc)
return;
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
enable,
......@@ -662,9 +642,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return -EINVAL;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
......
......@@ -489,12 +489,28 @@ static const struct dpu_format dpu_format_map_ubwc[] = {
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
/* ARGB8888 and ABGR8888 purposely have the same color
* ordering. The hardware only supports ABGR8888 UBWC
* natively.
*/
INTERLEAVED_RGB_FMT_TILED(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
......@@ -550,7 +566,9 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
{
static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
......
......@@ -38,6 +38,7 @@
#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
......@@ -45,6 +46,7 @@
#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
#define DPU_HW_BLK_NAME_LEN 16
......@@ -92,6 +94,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
* @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of Color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
......@@ -110,6 +113,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
......@@ -166,6 +170,7 @@ enum {
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
DPU_CTL_ACTIVE_CFG,
DPU_CTL_MAX
};
......@@ -269,7 +274,7 @@ struct dpu_qos_lut_entry {
*/
struct dpu_qos_lut_tbl {
u32 nentry;
struct dpu_qos_lut_entry *entries;
const struct dpu_qos_lut_entry *entries;
};
/**
......@@ -283,6 +288,7 @@ struct dpu_qos_lut_tbl {
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
* @has_3d_merge indicate if 3D merge is supported
*/
struct dpu_caps {
u32 max_mixer_width;
......@@ -293,6 +299,7 @@ struct dpu_caps {
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
bool has_3d_merge;
};
/**
......@@ -320,6 +327,7 @@ struct dpu_sspp_blks_common {
* @maxupscale: maxupscale ratio supported
* @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @qseed_ver: qseed version
* @src_blk:
* @scaler_blk:
* @csc_blk:
......@@ -340,6 +348,7 @@ struct dpu_sspp_sub_blks {
u32 maxupscale;
u32 smart_dma_priority;
u32 max_per_pipe_bw;
u32 qseed_ver;
struct dpu_src_blk src_blk;
struct dpu_scaler_blk scaler_blk;
struct dpu_pp_blk csc_blk;
......@@ -511,7 +520,7 @@ struct dpu_vbif_dynamic_ot_cfg {
*/
struct dpu_vbif_dynamic_ot_tbl {
u32 count;
struct dpu_vbif_dynamic_ot_cfg *cfg;
const struct dpu_vbif_dynamic_ot_cfg *cfg;
};
/**
......@@ -521,7 +530,7 @@ struct dpu_vbif_dynamic_ot_tbl {
*/
struct dpu_vbif_qos_tbl {
u32 npriority_lvl;
u32 *priority_lvl;
const u32 *priority_lvl;
};
/**
......@@ -646,6 +655,7 @@ struct dpu_perf_cfg {
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
* @mdss_irqs: Bitmap with the irqs supported by the target
*/
struct dpu_mdss_cfg {
u32 hwversion;
......@@ -653,25 +663,25 @@ struct dpu_mdss_cfg {
const struct dpu_caps *caps;
u32 mdp_count;
struct dpu_mdp_cfg *mdp;
const struct dpu_mdp_cfg *mdp;
u32 ctl_count;
struct dpu_ctl_cfg *ctl;
const struct dpu_ctl_cfg *ctl;
u32 sspp_count;
struct dpu_sspp_cfg *sspp;
const struct dpu_sspp_cfg *sspp;
u32 mixer_count;
struct dpu_lm_cfg *mixer;
const struct dpu_lm_cfg *mixer;
u32 pingpong_count;
struct dpu_pingpong_cfg *pingpong;
const struct dpu_pingpong_cfg *pingpong;
u32 intf_count;
struct dpu_intf_cfg *intf;
const struct dpu_intf_cfg *intf;
u32 vbif_count;
struct dpu_vbif_cfg *vbif;
const struct dpu_vbif_cfg *vbif;
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
......@@ -681,9 +691,11 @@ struct dpu_mdss_cfg {
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
struct dpu_format_extended *dma_formats;
struct dpu_format_extended *cursor_formats;
struct dpu_format_extended *vig_formats;
const struct dpu_format_extended *dma_formats;
const struct dpu_format_extended *cursor_formats;
const struct dpu_format_extended *vig_formats;
unsigned long mdss_irqs;
};
struct dpu_mdss_hw_cfg_handler {
......
......@@ -6,8 +6,12 @@
static const uint32_t qcom_compressed_supported_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
};
static const uint32_t plane_formats[] = {
......
......@@ -22,14 +22,18 @@
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
#define INTF_IDX 31
static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
struct dpu_mdss_cfg *m,
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
......@@ -100,11 +104,27 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= flushbits;
}
static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
ctx->pending_intf_flush_mask |= flushbits;
}
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
return ctx->pending_flush_mask;
}
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
......@@ -222,6 +242,36 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
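/*
* On active-CTL (v1) hardware, CTL_FLUSH carries a single interface
* bit (INTF_IDX = 31); the per-interface bits are accumulated via
* get_bitmask_active_intf()/update_pending_intf_flush() and written
* to CTL_INTF_FLUSH by dpu_hw_ctl_trigger_flush_v1().
*/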
switch (intf) {
case INTF_0:
case INTF_1:
*flushbits |= BIT(31);
break;
default:
return 0;
}
return 0;
}
static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
case INTF_0:
*flushbits |= BIT(0);
break;
case INTF_1:
*flushbits |= BIT(1);
break;
default:
return 0;
}
return 0;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
......@@ -422,6 +472,24 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 mode_sel = 0;
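/* BIT(17) in CTL_TOP selects command mode, mirroring the legacy intf_cfg path */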
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active |= BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
......@@ -455,31 +523,41 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
ops->get_bitmask_active_intf =
dpu_hw_ctl_active_get_bitmask_intf;
ops->update_pending_intf_flush =
dpu_hw_ctl_update_pending_intf_flush;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
const struct dpu_ctl_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
......
......@@ -90,6 +90,15 @@ struct dpu_hw_ctl_ops {
void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
u32 flushbits);
/**
* OR in the given flushbits to the cached pending_intf_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @flushbits : module flushmask
*/
void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx,
u32 flushbits);
/**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
......@@ -130,10 +139,23 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
/**
* Query the value of the intf flush mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_intf blk);
/**
* Query the value of the intf active flush mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf blk);
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
......@@ -159,6 +181,7 @@ struct dpu_hw_ctl_ops {
* @mixer_count: number of mixers
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
* @ops: operation list
*/
struct dpu_hw_ctl {
......@@ -171,6 +194,7 @@ struct dpu_hw_ctl {
int mixer_count;
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
......@@ -195,7 +219,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_ctl_destroy(): Destroys ctl driver context
......
......@@ -800,8 +800,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
start_idx = reg_idx * 32;
end_idx = start_idx + 32;
if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
end_idx > ARRAY_SIZE(dpu_irq_map))
if (!test_bit(reg_idx, &intr->irq_mask) ||
start_idx >= ARRAY_SIZE(dpu_irq_map))
continue;
/*
......@@ -955,8 +955,11 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (test_bit(i, &intr->irq_mask))
DPU_REG_WRITE(&intr->hw,
dpu_intr_set[i].clr_off, 0xffffffff);
}
/* ensure register writes go through */
wmb();
......@@ -971,8 +974,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (test_bit(i, &intr->irq_mask))
DPU_REG_WRITE(&intr->hw,
dpu_intr_set[i].en_off, 0x00000000);
}
/* ensure register writes go through */
wmb();
......@@ -991,6 +997,9 @@ static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (!test_bit(i, &intr->irq_mask))
continue;
/* Read interrupt status */
intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
dpu_intr_set[i].status_off);
......@@ -1115,6 +1124,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
}
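/*
* Record which interrupt register sets exist on this target so the
* clear/disable/status loops above can skip the absent ones.
*/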
intr->irq_mask = m->mdss_irqs;
spin_lock_init(&intr->irq_lock);
return intr;
......
......@@ -187,6 +187,7 @@ struct dpu_hw_intr {
u32 *save_irq_status;
u32 irq_idx_tbl_size;
spinlock_t irq_lock;
unsigned long irq_mask;
};
/**
......
......@@ -56,8 +56,10 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
struct dpu_mdss_cfg *m,
#define INTF_MUX 0x25C
static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
......@@ -218,6 +220,30 @@ static void dpu_hw_intf_setup_prg_fetch(
DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
}
static void dpu_hw_intf_bind_pingpong_blk(
struct dpu_hw_intf *intf,
bool enable,
const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *c;
u32 mux_cfg;
if (!intf)
return;
c = &intf->hw;
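/*
* The low nibble of INTF_MUX selects which pingpong block feeds this
* interface; writing 0xf disconnects it.
*/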
mux_cfg = DPU_REG_READ(c, INTF_MUX);
mux_cfg &= ~0xf;
if (enable)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
}
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
struct intf_status *s)
......@@ -254,16 +280,18 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
if (cap & BIT(DPU_CTL_ACTIVE_CFG))
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
}
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
const struct dpu_intf_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
......
......@@ -52,6 +52,8 @@ struct intf_status {
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
* @ get_line_count: reads current vertical line counter
* @bind_pingpong_blk: enable/disable the connection with the pingpong block
* that will feed pixels to this interface
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
......@@ -68,6 +70,10 @@ struct dpu_hw_intf_ops {
struct intf_status *status);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
bool enable,
const enum dpu_pingpong pp);
};
struct dpu_hw_intf {
......@@ -92,7 +98,7 @@ struct dpu_hw_intf {
*/
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_intf_destroy(): Destroys INTF driver context
......
......@@ -24,8 +24,8 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
struct dpu_mdss_cfg *m,
static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
......@@ -147,12 +147,13 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)
|| IS_SC7180_TARGET(m->hwversion))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
......@@ -164,10 +165,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
const struct dpu_lm_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
......
......@@ -91,7 +91,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
*/
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_lm_destroy(): Destroys layer mixer driver context
......
......@@ -28,8 +28,8 @@
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
struct dpu_mdss_cfg *m,
static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
......@@ -195,10 +195,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
const struct dpu_pingpong_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
......
......@@ -106,7 +106,7 @@ struct dpu_hw_pingpong {
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context
......
......@@ -132,6 +132,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
......@@ -657,7 +658,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
}
......@@ -666,7 +668,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
struct dpu_hw_blk_reg_map *b)
......@@ -696,7 +698,7 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
bool is_virtual_pipe)
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
const struct dpu_sspp_cfg *cfg;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
......
......@@ -27,7 +27,8 @@ struct dpu_hw_pipe;
*/
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
(1UL << DPU_SSPP_SCALER_QSEED3))
(1UL << DPU_SSPP_SCALER_QSEED3) | \
(1UL << DPU_SSPP_SCALER_QSEED4))
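With QSEED4 folded into the mask, one test covers every scaler generation; this is the same pattern the yuv check in dpu_plane_atomic_check relies on further down. A minimal sketch:

/* Sketch: "does this pipe have any scaler at all?", regardless of
 * whether it is RGB, QSEED2, QSEED3 or QSEED4 hardware.
 */
static bool example_pipe_can_scale(unsigned long features)
{
	return (features & DPU_SSPP_SCALER) != 0;
}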
/**
* Component indices
......@@ -373,7 +374,7 @@ struct dpu_hw_pipe {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
struct dpu_mdss_cfg *catalog;
struct dpu_mdp_cfg *mdp;
const struct dpu_mdp_cfg *mdp;
/* Pipe */
enum dpu_sspp idx;
......
......@@ -93,19 +93,12 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
DEV_DBG("%pS->%s: enable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
if (clk_arry[i].clk) {
rc = clk_prepare_enable(clk_arry[i].clk);
if (rc)
DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
__builtin_return_address(0),
__func__,
clk_arry[i].clk_name, rc);
} else {
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
rc = -EPERM;
}
rc = clk_prepare_enable(clk_arry[i].clk);
if (rc)
DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
__builtin_return_address(0),
__func__,
clk_arry[i].clk_name, rc);
if (rc && i) {
msm_dss_enable_clk(&clk_arry[i - 1],
......@@ -119,12 +112,7 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
if (clk_arry[i].clk)
clk_disable_unprepare(clk_arry[i].clk);
else
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
clk_disable_unprepare(clk_arry[i].clk);
}
}
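One detail worth keeping in mind when reading this hunk: the common clock framework already treats a NULL struct clk as a valid dummy clock, so the dropped NULL checks were redundant regardless of this commit's motivation. A minimal sketch of that contract:

#include <linux/clk.h>

static int example_clk_toggle(struct clk *clk, bool on)
{
	if (on)
		return clk_prepare_enable(clk);	/* returns 0 for clk == NULL */

	clk_disable_unprepare(clk);		/* no-op for clk == NULL */
	return 0;
}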
......@@ -187,6 +175,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
continue;
mp->clk_config[i].rate = rate;
mp->clk_config[i].type = DSS_CLK_PCLK;
mp->clk_config[i].max_rate = rate;
}
mp->num_clk = num_clk;
......
......@@ -1059,6 +1059,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
{ .compatible = "qcom,sc7180-dpu", },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
......
......@@ -53,8 +53,13 @@ enum {
R_MAX
};
/*
* Default Preload Values
*/
#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
#define DEFAULT_REFRESH_RATE 60
......@@ -477,8 +482,16 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->src_width[i] /= chroma_subsmpl_h;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
if (pdpu->pipe_hw->cap->features &
BIT(DPU_SSPP_SCALER_QSEED4)) {
scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
} else {
scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
pstate->pixel_ext.num_ext_pxls_top[i] =
scale_cfg->src_height[i];
pstate->pixel_ext.num_ext_pxls_left[i] =
......@@ -738,7 +751,7 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
} else {
pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
};
}
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
......@@ -858,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
pdpu->pipe_sblk->maxupscale << 16,
true, true);
if (ret) {
DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
if (!state->visible)
......@@ -884,13 +897,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(!(pdpu->features & DPU_SSPP_SCALER) ||
!(pdpu->features & (BIT(DPU_SSPP_CSC)
| BIT(DPU_SSPP_CSC_10BIT))))) {
DPU_ERROR_PLANE(pdpu,
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -E2BIG;
......@@ -899,19 +912,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(src.x1 & 0x1 || src.y1 & 0x1 ||
drm_rect_width(&src) & 0x1 ||
drm_rect_height(&src) & 0x1)) {
DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
return -E2BIG;
}
......@@ -1337,7 +1350,8 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
sblk->scaler_blk.base + cfg->base,
sblk->scaler_blk.len,
......
......@@ -141,11 +141,11 @@ int dpu_rm_destroy(struct dpu_rm *rm)
static int _dpu_rm_hw_blk_create(
struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
const struct dpu_mdss_cfg *cat,
void __iomem *mmio,
enum dpu_hw_blk_type type,
uint32_t id,
void *hw_catalog_info)
const void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
void *hw;
......@@ -215,7 +215,7 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_lm_cfg *lm = &cat->mixer[i];
const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
......
......@@ -299,7 +299,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
entry = debugfs_create_dir("vbif", debugfs_root);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
......@@ -318,7 +318,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&vbif->default_ot_wr_limit);
for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
struct dpu_vbif_dynamic_ot_cfg *cfg =
const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_rd_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
......@@ -332,7 +332,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
}
for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
struct dpu_vbif_dynamic_ot_cfg *cfg =
const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_wr_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
......
......@@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
if (mdp4_dsi_encoder->enabled)
return;
mdp4_crtc_set_config(encoder->crtc,
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN |
......
......@@ -902,7 +902,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
major, minor);
ret = -ENXIO;
goto fail;
};
}
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < num_handlers; i++) {
......
......@@ -178,6 +178,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
......
......@@ -153,6 +153,10 @@ static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
static const char * const dsi_sc7180_bus_clk_names[] = {
"iface", "bus",
};
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
......@@ -167,7 +171,22 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
static const struct msm_dsi_config sc7180_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 21800, 4 }, /* 1.2 V */
},
},
.bus_clk_names = dsi_sc7180_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
.io_start = { 0xae94000 },
.num_dsi = 1,
};
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
......@@ -179,6 +198,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
......@@ -190,6 +210,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
......@@ -223,6 +244,9 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
......
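The body of msm_dsi_cfg_get() is elided above; a sketch of the usual shape of such a lookup (the loop body and the major/minor field names are assumptions, only the table entries appear in this diff):

/* Assumed shape: linear scan of the handler table keyed on the
 * (major, minor) version pair read back from the controller.
 */
const struct msm_dsi_cfg_handler *example_cfg_get(u32 major, u32 minor)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsi_cfg_handlers); i++)
		if (dsi_cfg_handlers[i].major == major &&
		    dsi_cfg_handlers[i].minor == minor)
			return &dsi_cfg_handlers[i];

	return NULL;
}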
......@@ -20,6 +20,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
......@@ -35,6 +36,7 @@ struct msm_dsi_config {
};
struct msm_dsi_host_cfg_ops {
int (*link_clk_set_rate)(struct msm_dsi_host *msm_host);
int (*link_clk_enable)(struct msm_dsi_host *msm_host);
void (*link_clk_disable)(struct msm_dsi_host *msm_host);
int (*clk_init_ver)(struct msm_dsi_host *msm_host);
......
......@@ -505,7 +505,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
......@@ -515,13 +515,13 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
goto error;
return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
return ret;
}
if (msm_host->byte_intf_clk) {
......@@ -530,10 +530,18 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
goto error;
return ret;
}
}
return 0;
}
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
......@@ -573,7 +581,7 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
return ret;
}
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
......@@ -584,27 +592,34 @@ int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
goto error;
return ret;
}
ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
goto error;
return ret;
}
ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
goto error;
return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
return ret;
}
return 0;
}
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
......@@ -818,7 +833,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u32 data = 0;
u32 data = 0, lane_ctrl = 0;
if (!enable) {
dsi_write(msm_host, REG_DSI_CTRL, 0);
......@@ -906,9 +921,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
dsi_write(msm_host, REG_DSI_LANE_CTRL,
DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
}
data |= DSI_CTRL_ENABLE;
......@@ -1996,6 +2013,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
cfg_hnd->ops->link_clk_set_rate(msm_host);
cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
......@@ -2344,7 +2362,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
ret = cfg_hnd->ops->link_clk_enable(msm_host);
ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
if (!ret)
ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
......
......@@ -432,20 +432,8 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
if (panel) {
ret = drm_panel_enable(panel);
if (ret) {
pr_err("%s: enable panel %d failed, %d\n", __func__, id,
ret);
goto panel_en_fail;
}
}
return;
panel_en_fail:
if (is_dual_dsi && msm_dsi1)
msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
......@@ -464,12 +452,51 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
DBG("");
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/* Do nothing with the host if it is slave-DSI in case of dual DSI */
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
ret = drm_panel_enable(panel);
if (ret) {
pr_err("%s: enable panel %d failed, %d\n", __func__, id,
ret);
}
}
}
static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
{
DBG("");
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/* Do nothing with the host if it is slave-DSI in case of dual DSI */
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
ret = drm_panel_disable(panel);
if (ret)
pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
ret);
}
}
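The funcs-table hookup for the new enable/disable callbacks is not part of the hunks shown here; a sketch of how the four stages would typically be wired into the stock drm_bridge_funcs members:

/* Sketch only: member names are the standard DRM bridge hooks, the
 * table itself is not visible in this diff.
 */
static const struct drm_bridge_funcs example_dsi_mgr_bridge_funcs = {
	.pre_enable = dsi_mgr_bridge_pre_enable,
	.enable = dsi_mgr_bridge_enable,
	.disable = dsi_mgr_bridge_disable,
	.post_disable = dsi_mgr_bridge_post_disable,
};

The practical effect of the move is ordering: the panel is enabled only in the bridge enable stage, after pre_enable has brought the DSI host up, and disabled again before post_disable tears the host down.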
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
......@@ -495,13 +522,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
if (panel) {
ret = drm_panel_disable(panel);
if (ret)
pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
ret);
}
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
......
......@@ -101,7 +101,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpiod_set_value_cansleep(gpio.gpiod, value);
}
};
}
DBG("gpio off");
}
......
......@@ -1192,7 +1192,8 @@ static int add_display_components(struct device *dev,
* the interfaces to our components list.
*/
if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
......@@ -1317,6 +1318,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
......
......@@ -111,8 +111,15 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
/* The gfx-mem interconnect path that's used by all GPU types. */
struct icc_path *icc_path;
/*
* Second interconnect path for some A3xx and all A4xx GPUs to the
* On Chip MEMory (OCMEM).
*/
struct icc_path *ocmem_icc_path;
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
......
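For reference, a sketch of how the two paths described here might be acquired at probe time (the call-site placement is an assumption; of_icc_get() and icc_put() are the standard interconnect API):

#include <linux/interconnect.h>

static int example_get_gpu_icc_paths(struct msm_gpu *gpu, struct device *dev)
{
	/* of_icc_get() returns NULL when the DT carries no
	 * "interconnects" property, and icc_set_bw(NULL, ...) is a
	 * no-op, so platforms without an ocmem path keep working.
	 */
	gpu->icc_path = of_icc_get(dev, "gfx-mem");
	if (IS_ERR(gpu->icc_path))
		return PTR_ERR(gpu->icc_path);

	gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
	if (IS_ERR(gpu->ocmem_icc_path)) {
		icc_put(gpu->icc_path);
		return PTR_ERR(gpu->ocmem_icc_path);
	}

	return 0;
}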