Commit dc100bc8 authored by Dave Airlie

Merge tag 'drm-msm-next-2020-07-30' of https://gitlab.freedesktop.org/drm/msm into drm-next

Take 2 of the msm-next pull; this version drops the OPP patch due to [1],
so I'll send the gpu opp/bw scaling patch after the OPP patch lands.
Since I had to force-push, I took the opportunity to rebase on
drm-next, and since you already merged in 5.8-rc6, a few fixes from the
last cycle dropped out.

This time around:

* A bunch more a650/a640 (sm8150/sm8250) display and GPU enablement
  and fixes
* Enable dpu dither block for 6bpc panels
* dpu suspend fixes
* dpu fix for cursor on 2nd display
* dsi/mdp5 enablement for sdm630/sdm636/sdm660

I also regenerated the register headers, which accounts for a good
bit of the size this time, because we hadn't re-synced the register
headers since the early days of a6xx bringup.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs_eswoX-E0Ddg5DoEQy35x3GG+6SDXUAjPMrtAWFkqng@mail.gmail.com
parents 418eda8f 1041dee2
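For readers who don't live in these generated headers: rules-ng-ng headergen emits, for every register field, a __MASK/__SHIFT pair plus an inline packing helper, which is what most of the regenerated-header churn below consists of. A minimal sketch of the pattern, with a made-up register/field name purely for illustration:

#include <stdint.h>

/* Hypothetical 8-bit field FOO at bits 8..15 of some register; the real
 * names in the diff (A3XX_*, A6XX_*) follow exactly this shape. */
#define A6XX_EXAMPLE_REG_FOO__MASK	0x0000ff00
#define A6XX_EXAMPLE_REG_FOO__SHIFT	8
static inline uint32_t A6XX_EXAMPLE_REG_FOO(uint32_t val)
{
	/* shift the value into place, then clamp it to the field's bits */
	return ((val) << A6XX_EXAMPLE_REG_FOO__SHIFT) & A6XX_EXAMPLE_REG_FOO__MASK;
}

Several such helpers OR together into a single register word, which is why the driver-side changes below can replace magic constants with named fields.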
@@ -87,6 +87,7 @@ Required properties:
 		* "qcom,dsi-phy-20nm"
 		* "qcom,dsi-phy-28nm-8960"
 		* "qcom,dsi-phy-14nm"
+		* "qcom,dsi-phy-14nm-660"
 		* "qcom,dsi-phy-10nm"
 		* "qcom,dsi-phy-10nm-8998"
 - reg: Physical base address and length of the registers of PLL, PHY. Some
......
@@ -112,6 +112,34 @@ Example a6xx (with GMU):
 		interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>;
 		interconnect-names = "gfx-mem";

+		gpu_opp_table: opp-table {
+			compatible = "operating-points-v2";
+
+			opp-430000000 {
+				opp-hz = /bits/ 64 <430000000>;
+				opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+				opp-peak-kBps = <5412000>;
+			};
+
+			opp-355000000 {
+				opp-hz = /bits/ 64 <355000000>;
+				opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+				opp-peak-kBps = <3072000>;
+			};
+
+			opp-267000000 {
+				opp-hz = /bits/ 64 <267000000>;
+				opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+				opp-peak-kBps = <3072000>;
+			};
+
+			opp-180000000 {
+				opp-hz = /bits/ 64 <180000000>;
+				opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+				opp-peak-kBps = <1804000>;
+			};
+		};
+
 		qcom,gmu = <&gmu>;

 		zap-shader {
......
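As a rough illustration of how a driver consumes an opp-table like the one above — a hedged sketch using the generic OPP API, not code from this pull, and with an invented helper name:

#include <linux/pm_opp.h>

static int example_pick_max_opp(struct device *dev)
{
	unsigned long freq = ULONG_MAX;	/* ask for the highest available OPP */
	struct dev_pm_opp *opp;
	int ret;

	/* Parse the operating-points-v2 table out of the device's DT node */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		return ret;

	/* Highest OPP at or below 'freq'; 'freq' is rounded down in place */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);	/* drop the reference taken by the lookup */
	return 0;		/* 'freq' now holds e.g. 430000000 */
}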
@@ -8,19 +8,21 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
 git clone https://github.com/freedreno/envytools.git

 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)

-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -48,7 +50,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 enum a3xx_tile_mode {
 	LINEAR = 0,
+	TILE_4X4 = 1,
 	TILE_32X32 = 2,
+	TILE_4X2 = 3,
 };

 enum a3xx_state_block_id {
@@ -123,6 +127,7 @@ enum a3xx_vtx_fmt {
 	VFMT_2_10_10_10_UNORM = 61,
 	VFMT_2_10_10_10_SINT = 62,
 	VFMT_2_10_10_10_SNORM = 63,
+	VFMT_NONE = 255,
 };

 enum a3xx_tex_fmt {
@@ -206,15 +211,7 @@ enum a3xx_tex_fmt {
 	TFMT_ETC2_RGBA8 = 116,
 	TFMT_ETC2_RGB8A1 = 117,
 	TFMT_ETC2_RGB8 = 118,
+	TFMT_NONE = 255,
 };

-enum a3xx_tex_fetchsize {
-	TFETCH_DISABLE = 0,
-	TFETCH_1_BYTE = 1,
-	TFETCH_2_BYTE = 2,
-	TFETCH_4_BYTE = 3,
-	TFETCH_8_BYTE = 4,
-	TFETCH_16_BYTE = 5,
-};
-
 enum a3xx_color_fmt {
@@ -228,8 +225,8 @@ enum a3xx_color_fmt {
 	RB_R8G8B8A8_SINT = 11,
 	RB_R8G8_UNORM = 12,
 	RB_R8G8_SNORM = 13,
-	RB_R8_UINT = 14,
-	RB_R8_SINT = 15,
+	RB_R8G8_UINT = 14,
+	RB_R8G8_SINT = 15,
 	RB_R10G10B10A2_UNORM = 16,
 	RB_A2R10G10B10_UNORM = 17,
 	RB_R10G10B10A2_UINT = 18,

@@ -261,6 +258,7 @@ enum a3xx_color_fmt {
 	RB_R32_UINT = 56,
 	RB_R32G32_UINT = 57,
 	RB_R32G32B32A32_UINT = 59,
+	RB_NONE = 255,
 };

 enum a3xx_cp_perfcounter_select {
@@ -932,6 +930,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460

 #define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
 #define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000
 #define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
 #define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
 #define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
@@ -1170,10 +1171,12 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
 }
 #define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
 #define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
-#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000
-#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
-#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
-#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14
+static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val)
+{
+	return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK;
+}
 #define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000
 #define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000
 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
@@ -1755,11 +1758,29 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
 }

 #define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
-#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
-#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
 {
-	return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
 }

 #define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
@@ -1944,8 +1965,6 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)

 #define REG_A3XX_VFD_INDEX_OFFSET 0x00002245

-#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
-
 static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }

 static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -3107,7 +3126,12 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
 }

 #define REG_A3XX_TEX_CONST_0 0x00000000
-#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val)
+{
+	return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK;
+}
 #define A3XX_TEX_CONST_0_SRGB 0x00000004
 #define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
 #define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
@@ -3172,11 +3196,11 @@ static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
 {
 	return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
 }
-#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
-#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
-static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
+#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val)
 {
-	return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
+	return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK;
 }

 #define REG_A3XX_TEX_CONST_2 0x00000002
......
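A quick before/after of what the a3xx changes above mean at a call site, using only names defined in this diff (field values illustrative):

/* Old headers: tiling was a single TILED bit and the pitch came from an
 * a3xx_tex_fetchsize enum. New headers: a 2-bit TILE_MODE field and a
 * plain PITCHALIGN value. */
uint32_t texconst0 = A3XX_TEX_CONST_0_TILE_MODE(TILE_4X4) |	/* was: A3XX_TEX_CONST_0_TILED */
		     A3XX_TEX_CONST_0_SRGB;
uint32_t texconst1 = A3XX_TEX_CONST_1_PITCHALIGN(4);		/* value illustrative only */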
@@ -186,7 +186,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	 * timestamp is written to the memory and then triggers the interrupt
 	 */
 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
-	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+		CP_EVENT_WRITE_0_IRQ);
 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 	OUT_RING(ring, submit->seqno);
@@ -730,7 +731,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	 */
 	if (adreno_is_a530(adreno_gpu)) {
 		OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
-		OUT_RING(gpu->rb[0], 0x0F);
+		OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));

 		gpu->funcs->flush(gpu, gpu->rb[0]);
 		if (!a5xx_idle(gpu, gpu->rb[0]))
@@ -103,17 +103,45 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
 		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
 }

-static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
-	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
-	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-	struct msm_gpu *gpu = &adreno_gpu->base;
-	int ret;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	u32 perf_index;
+	unsigned long gpu_freq;
+	int ret = 0;
+
+	gpu_freq = dev_pm_opp_get_freq(opp);
+
+	if (gpu_freq == gmu->freq)
+		return;
+
+	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+		if (gpu_freq == gmu->gpu_freqs[perf_index])
+			break;
+
+	gmu->current_perf_index = perf_index;
+	gmu->freq = gmu->gpu_freqs[perf_index];
+
+	/*
+	 * This can get called from devfreq while the hardware is idle. Don't
+	 * bring up the power if it isn't already active
+	 */
+	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+		return;
+
+	if (!gmu->legacy) {
+		a6xx_hfi_set_freq(gmu, perf_index);
+		icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+		pm_runtime_put(gmu->dev);
+		return;
+	}

 	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

 	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
-			((3 & 0xf) << 28) | index);
+			((3 & 0xf) << 28) | perf_index);

 	/*
 	 * Send an invalid index as a vote for the bus bandwidth and let the

@@ -134,37 +162,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
 	 * for now leave it at max so that the performance is nominal.
 	 */
 	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
-}
-
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
-{
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-	u32 perf_index = 0;
-
-	if (freq == gmu->freq)
-		return;
-
-	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
-		if (freq == gmu->gpu_freqs[perf_index])
-			break;
-
-	gmu->current_perf_index = perf_index;
-	gmu->freq = gmu->gpu_freqs[perf_index];
-
-	/*
-	 * This can get called from devfreq while the hardware is idle. Don't
-	 * bring up the power if it isn't already active
-	 */
-	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
-		return;
-
-	if (gmu->legacy)
-		__a6xx_gmu_set_freq(gmu, perf_index);
-	else
-		a6xx_hfi_set_freq(gmu, perf_index);

 	pm_runtime_put(gmu->dev);
 }
@@ -839,6 +836,19 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 	a6xx_gmu_rpmh_off(gmu);
 }

+static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+	struct dev_pm_opp *gpu_opp;
+	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+	if (IS_ERR_OR_NULL(gpu_opp))
+		return;
+
+	a6xx_gmu_set_freq(gpu, gpu_opp);
+	dev_pm_opp_put(gpu_opp);
+}
+
 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -854,10 +864,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	/* Turn on the resources */
 	pm_runtime_get_sync(gmu->dev);

+	/*
+	 * "enable" the GX power domain which won't actually do anything but it
+	 * will make sure that the refcounting is correct in case we need to
+	 * bring down the GX after a GMU failure
+	 */
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
+		pm_runtime_get_sync(gmu->gxpd);
+
 	/* Use a known rate to bring up the GMU */
 	clk_set_rate(gmu->core_clk, 200000000);
 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
 	if (ret) {
+		pm_runtime_put(gmu->gxpd);
 		pm_runtime_put(gmu->dev);
 		return ret;
 	}
@@ -898,24 +917,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	enable_irq(gmu->hfi_irq);

 	/* Set the GPU to the current freq */
-	if (gmu->legacy)
-		__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
-	else
-		a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
-
-	/*
-	 * "enable" the GX power domain which won't actually do anything but it
-	 * will make sure that the refcounting is correct in case we need to
-	 * bring down the GX after a GMU failure
-	 */
-	if (!IS_ERR_OR_NULL(gmu->gxpd))
-		pm_runtime_get(gmu->gxpd);
+	a6xx_gmu_set_initial_freq(gpu, gmu);

 out:
 	/* On failure, shut down the GMU to leave it in a good state */
 	if (ret) {
 		disable_irq(gmu->gmu_irq);
 		a6xx_rpmh_stop(gmu);
+		pm_runtime_put(gmu->gxpd);
 		pm_runtime_put(gmu->dev);
 	}
......
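The other half of this interface change lives on the devfreq side (in msm_gpu.c, outside this diff): the governor's target callback resolves the requested rate to an OPP and hands the OPP itself to a6xx_gmu_set_freq(). A rough sketch, where the drvdata lookup is an assumption of this sketch rather than the actual msm code:

static int example_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = dev_get_drvdata(dev);	/* assumption for the sketch */
	struct dev_pm_opp *opp;

	/* Snap the requested rate to a discrete entry from the opp-table */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	a6xx_gmu_set_freq(gpu, opp);	/* new signature: takes the OPP */

	dev_pm_opp_put(opp);
	return 0;
}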
@@ -127,6 +127,11 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
 	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
 		interval, timeout)

+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
+{
+	return msm_readl(gmu->rscc + (offset << 2));
+}
+
 static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
 {
 	return msm_writel(value, gmu->rscc + (offset << 2));
......
@@ -8,19 +8,21 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
 git clone https://github.com/freedreno/envytools.git

 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)

-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -46,24 +48,109 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

-#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
-#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
-#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
-#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
-#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
-#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
-#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
-#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
-#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
-#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
-#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT 23
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val)
+{
+	return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK 0x40000000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val)
+{
+	return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT 22
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT 23
+static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_SET_MASK__MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18
+static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT 17
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK;
+}
 #define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
-#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
-#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
-#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
+#define A6XX_HFI_IRQ_DSGQ_MASK__MASK 0x00000002
+#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT 1
+static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val)
+{
+	return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK 0x00000004
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT 2
+static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val)
+{
+	return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK 0x00800000
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT 23
+static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val)
+{
+	return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK;
+}
 #define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
 #define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
 static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
......
@@ -74,7 +74,9 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
 		u64 iova)
 {
 	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
-	OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
+		CP_REG_TO_MEM_0_CNT(2) |
+		CP_REG_TO_MEM_0_64B);
 	OUT_RING(ring, lower_32_bits(iova));
 	OUT_RING(ring, upper_32_bits(iova));
 }
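The conversion above is purely cosmetic: with the generated adreno_pm4.xml.h encoding (REG in the low bits, CNT at bit 18, the 64-bit flag at bit 30), the symbolic form packs the identical dword:

/* Both expressions produce the same value for any counter that fits in
 * the REG field; only readability changes. */
uint32_t old_style = counter | (1 << 30) | (2 << 18);
uint32_t new_style = CP_REG_TO_MEM_0_REG(counter) |
		     CP_REG_TO_MEM_0_CNT(2) |
		     CP_REG_TO_MEM_0_64B;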
@@ -102,10 +104,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,

 	/* Invalidate CCU depth and color */
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
-	OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));

 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
-	OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));

 	/* Submit the commands */
 	for (i = 0; i < submit->nr_cmds; i++) {
@@ -139,7 +141,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	 * timestamp is written to the memory and then triggers the interrupt
 	 */
 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
-	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+		CP_EVENT_WRITE_0_IRQ);
 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 	OUT_RING(ring, submit->seqno);
@@ -151,10 +154,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	a6xx_flush(gpu, ring);
 }

-static const struct {
-	u32 offset;
-	u32 value;
-} a6xx_hwcg[] = {
+const struct adreno_reglist a630_hwcg[] = {
 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
 	{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
 	{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
@@ -259,7 +259,114 @@ static const struct {
 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+	{},
+};
+
+const struct adreno_reglist a640_hwcg[] = {
+	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+	{},
+};
+
+const struct adreno_reglist a650_hwcg[] = {
+	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+	{},
 };
 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)

@@ -267,26 +374,65 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	const struct adreno_reglist *reg;
 	unsigned int i;
-	u32 val;
+	u32 val, clock_cntl_on;
+
+	if (!adreno_gpu->info->hwcg)
+		return;
+
+	if (adreno_is_a630(adreno_gpu))
+		clock_cntl_on = 0x8aa8aa02;
+	else
+		clock_cntl_on = 0x8aa8aa82;

 	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

 	/* Don't re-program the registers if they are already correct */
-	if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+	if ((!state && !val) || (state && (val == clock_cntl_on)))
 		return;

 	/* Disable SP clock before programming HWCG registers */
 	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

-	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
-		gpu_write(gpu, a6xx_hwcg[i].offset,
-			state ? a6xx_hwcg[i].value : 0);
+	for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
+		gpu_write(gpu, reg->offset, state ? reg->value : 0);

 	/* Enable SP clock */
 	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

-	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+}
+
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	u32 lower_bit = 2;
+	u32 amsbc = 0;
+	u32 rgb565_predicator = 0;
+	u32 uavflagprd_inv = 0;
+
+	/* a618 is using the hw default values */
+	if (adreno_is_a618(adreno_gpu))
+		return;
+
+	if (adreno_is_a640(adreno_gpu))
+		amsbc = 1;
+
+	if (adreno_is_a650(adreno_gpu)) {
+		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
+		lower_bit = 3;
+		amsbc = 1;
+		rgb565_predicator = 1;
+		uavflagprd_inv = 2;
+	}
+
+	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
+	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
+	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+		uavflagprd_inv >> 4 | lower_bit << 1);
+	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }

 static int a6xx_cp_init(struct msm_gpu *gpu)
@@ -406,11 +552,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

-	/*
-	 * enable hardware clockgating
-	 * For now enable clock gating only for a630
-	 */
-	if (adreno_is_a630(adreno_gpu))
-		a6xx_set_hwcg(gpu, true);
+	/* enable hardware clockgating */
+	a6xx_set_hwcg(gpu, true);

 	/* VBIF/GBIF start*/
@@ -478,12 +620,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

-	if (adreno_is_a630(adreno_gpu)) {
-		gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
-		gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
-		gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
-		gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
-	}
+	a6xx_set_ubwc_config(gpu);

 	/* Enable fault detection */
 	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
......
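To make the UBWC knobs above concrete, here is the arithmetic for the a650 branch of a6xx_set_ubwc_config() — worked values derived from the code shown, not additional code from the patch:

/* a650: lower_bit = 3, amsbc = 1, rgb565_predicator = 1, uavflagprd_inv = 2 */
uint32_t rb_nc   = 1 << 11 | 1 << 4 | 3 << 1;	/* 0x00000816 -> RB_NC_MODE_CNTL */
uint32_t tpl1_nc = 3 << 1;			/* 0x00000006 -> TPL1_NC_MODE_CNTL */
uint32_t sp_nc   = 2 >> 4 | 3 << 1;		/* 0x00000006 -> SP_NC_MODE_CNTL */
uint32_t uche    = 3 << 21;			/* 0x00600000 -> UCHE_MODE_CNTL */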
@@ -63,7 +63,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);

-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp);
 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);

 void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
......
@@ -736,7 +736,8 @@ static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,

 static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
 		struct a6xx_gpu_state *a6xx_state,
 		const struct a6xx_registers *regs,
-		struct a6xx_gpu_state_obj *obj)
+		struct a6xx_gpu_state_obj *obj,
+		bool rscc)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

@@ -755,9 +756,17 @@ static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
 		u32 count = RANGE(regs->registers, i);
 		int j;

-		for (j = 0; j < count; j++)
-			obj->data[index++] = gmu_read(gmu,
-				regs->registers[i] + j);
+		for (j = 0; j < count; j++) {
+			u32 offset = regs->registers[i] + j;
+			u32 val;
+
+			if (rscc)
+				val = gmu_read_rscc(gmu, offset);
+			else
+				val = gmu_read(gmu, offset);
+
+			obj->data[index++] = val;
+		}
 	}
 }
@@ -777,7 +786,9 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,

 	/* Get the CX GMU registers from AHB */
 	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
-		&a6xx_state->gmu_registers[0]);
+		&a6xx_state->gmu_registers[0], false);
+	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+		&a6xx_state->gmu_registers[1], true);

 	if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
 		return;

@@ -785,8 +796,8 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
 	/* Set the fence to ALLOW mode so we can access the registers */
 	gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);

-	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
-		&a6xx_state->gmu_registers[1]);
+	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+		&a6xx_state->gmu_registers[2], false);
 }

 #define A6XX_GBIF_REGLIST_SIZE 1
......
@@ -341,10 +341,6 @@ static const u32 a6xx_gmu_cx_registers[] = {
 	0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
 	0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
 	0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
-	/* GPU RSCC */
-	0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
-	0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
-	0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
 	/* GMU AO */
 	0x9300, 0x9316, 0x9400, 0x9400,
 	/* GPU CC */

@@ -357,8 +353,16 @@ static const u32 a6xx_gmu_cx_registers[] = {
 	0xbc00, 0xbc16, 0xbc20, 0xbc27,
 };

+static const u32 a6xx_gmu_cx_rscc_registers[] = {
+	/* GPU RSCC */
+	0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
+	0x034c, 0x0387, 0x03ec, 0x03ef, 0x03f4, 0x042f, 0x0494, 0x0497,
+	0x049c, 0x04d7, 0x053c, 0x053f, 0x0544, 0x057f,
+};
+
 static const struct a6xx_registers a6xx_gmu_reglist[] = {
 	REGS(a6xx_gmu_cx_registers, 0, 0),
+	REGS(a6xx_gmu_cx_rscc_registers, 0, 0),
 	REGS(a6xx_gmu_gx_registers, 0, 0),
 };
......
@@ -281,6 +281,76 @@ static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 	msg->cnoc_cmds_data[1][0] = 0x60000001;
 }

+static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/*
+	 * Send a single "off" entry just to get things running
+	 * TODO: bus scaling
+	 */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5003c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
+
+	/*
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
+	 */
+	msg->cnoc_cmds_num = 3;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x50034;
+	msg->cnoc_cmds_addrs[1] = 0x5007c;
+	msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[0][1] = 0x00000000;
+	msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+	msg->cnoc_cmds_data[1][1] = 0x20000001;
+	msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/*
+	 * Send a single "off" entry just to get things running
+	 * TODO: bus scaling
+	 */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x50004;
+	msg->ddr_cmds_addrs[2] = 0x5007c;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
+
+	/*
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
+	 */
+	msg->cnoc_cmds_num = 1;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x500a4;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
 static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
 	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -327,6 +397,10 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)

 	if (adreno_is_a618(adreno_gpu))
 		a618_build_bw_table(&msg);
+	else if (adreno_is_a640(adreno_gpu))
+		a640_build_bw_table(&msg);
+	else if (adreno_is_a650(adreno_gpu))
+		a650_build_bw_table(&msg);
 	else
 		a6xx_build_bw_table(&msg);
......
@@ -200,6 +200,7 @@ static const struct adreno_info gpulist[] = {
 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
 		.init = a6xx_gpu_init,
 		.zapfw = "a630_zap.mdt",
+		.hwcg = a630_hwcg,
 	}, {
 		.rev = ADRENO_REV(6, 4, 0, ANY_ID),
 		.revn = 640,

@@ -212,6 +213,7 @@ static const struct adreno_info gpulist[] = {
 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
 		.init = a6xx_gpu_init,
 		.zapfw = "a640_zap.mdt",
+		.hwcg = a640_hwcg,
 	}, {
 		.rev = ADRENO_REV(6, 5, 0, ANY_ID),
 		.revn = 650,

@@ -224,6 +226,7 @@ static const struct adreno_info gpulist[] = {
 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
 		.init = a6xx_gpu_init,
 		.zapfw = "a650_zap.mdt",
+		.hwcg = a650_hwcg,
 	},
 };
......
@@ -895,7 +895,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
 	return 0;
 }

-static int adreno_get_pwrlevels(struct device *dev,
+static void adreno_get_pwrlevels(struct device *dev,
 		struct msm_gpu *gpu)
 {
 	unsigned long freq = ULONG_MAX;

@@ -930,24 +930,6 @@ static int adreno_get_pwrlevels(struct device *dev,
 	}

 	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

-	/* Check for an interconnect path for the bus */
-	gpu->icc_path = of_icc_get(dev, "gfx-mem");
-	if (!gpu->icc_path) {
-		/*
-		 * Keep compatbility with device trees that don't have an
-		 * interconnect-names property.
-		 */
-		gpu->icc_path = of_icc_get(dev, NULL);
-	}
-	if (IS_ERR(gpu->icc_path))
-		gpu->icc_path = NULL;
-
-	gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
-	if (IS_ERR(gpu->ocmem_icc_path))
-		gpu->ocmem_icc_path = NULL;
-
-	return 0;
 }

 int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
@@ -993,9 +975,11 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct adreno_gpu *adreno_gpu,
 		const struct adreno_gpu_funcs *funcs, int nr_rings)
 {
-	struct adreno_platform_config *config = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct adreno_platform_config *config = dev->platform_data;
 	struct msm_gpu_config adreno_gpu_config = { 0 };
 	struct msm_gpu *gpu = &adreno_gpu->base;
+	int ret;

 	adreno_gpu->funcs = funcs;
 	adreno_gpu->info = adreno_info(config->rev);

@@ -1007,27 +991,59 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,

 	adreno_gpu_config.nr_rings = nr_rings;

-	adreno_get_pwrlevels(&pdev->dev, gpu);
+	adreno_get_pwrlevels(dev, gpu);

-	pm_runtime_set_autosuspend_delay(&pdev->dev,
+	pm_runtime_set_autosuspend_delay(dev,
 		adreno_gpu->info->inactive_period);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_enable(dev);

-	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
 		adreno_gpu->info->name, &adreno_gpu_config);
+	if (ret)
+		return ret;
+
+	/*
+	 * The legacy case, before "interconnect-names", only has a
+	 * single interconnect path which is equivalent to "gfx-mem"
+	 */
+	if (!of_find_property(dev->of_node, "interconnect-names", NULL)) {
+		gpu->icc_path = of_icc_get(dev, NULL);
+	} else {
+		gpu->icc_path = of_icc_get(dev, "gfx-mem");
+		gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+	}
+
+	if (IS_ERR(gpu->icc_path)) {
+		ret = PTR_ERR(gpu->icc_path);
+		gpu->icc_path = NULL;
+		return ret;
+	}
+
+	if (IS_ERR(gpu->ocmem_icc_path)) {
+		ret = PTR_ERR(gpu->ocmem_icc_path);
+		gpu->ocmem_icc_path = NULL;
+		/* allow -ENODATA, ocmem icc is optional */
+		if (ret != -ENODATA)
+			return ret;
+	}
+
+	return 0;
 }

 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
 	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct msm_drm_private *priv = gpu->dev->dev_private;
 	unsigned int i;

 	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
 		release_firmware(adreno_gpu->fw[i]);

-	icc_put(gpu->icc_path);
-	icc_put(gpu->ocmem_icc_path);
+	pm_runtime_disable(&priv->gpu_pdev->dev);

 	msm_gpu_cleanup(&adreno_gpu->base);
+
+	icc_put(gpu->icc_path);
+	icc_put(gpu->ocmem_icc_path);
 }
@@ -68,6 +68,13 @@ struct adreno_gpu_funcs {
 	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
 };

+struct adreno_reglist {
+	u32 offset;
+	u32 value;
+};
+
+extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];
+
 struct adreno_info {
 	struct adreno_rev rev;
 	uint32_t revn;

@@ -78,6 +85,7 @@ struct adreno_info {
 	struct msm_gpu *(*init)(struct drm_device *dev);
 	const char *zapfw;
 	u32 inactive_period;
+	const struct adreno_reglist *hwcg;
 };

 const struct adreno_info *adreno_info(struct adreno_rev rev);
......
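The new adreno_reglist type is just an offset/value pair, and the per-GPU hwcg (hardware clock gating) tables it points at are applied by the GPU-specific setup code. A sketch of how such a table might be walked, assuming an entry with a zero offset terminates the list (names other than gpu_write() are illustrative):

/* Sketch only: apply a register list, assuming a sentinel entry with
 * offset == 0 marks the end of the table. gpu_write() is the existing
 * msm GPU register accessor. */
static void adreno_apply_reglist(struct msm_gpu *gpu,
				 const struct adreno_reglist *list)
{
	const struct adreno_reglist *reg;

	for (reg = list; reg->offset; reg++)
		gpu_write(gpu, reg->offset, reg->value);
}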
This diff is collapsed.
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/pm_opp.h>
 #include <linux/sort.h>
 #include <linux/clk.h>
 #include <linux/bitmap.h>
@@ -218,7 +219,7 @@ static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
 		rate = core_clk->max_rate;
 
 	core_clk->rate = rate;
-	return msm_dss_clk_set_rate(core_clk, 1);
+	return dev_pm_opp_set_rate(&kms->pdev->dev, core_clk->rate);
 }
 
 static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
......
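Unlike a bare clk_set_rate(), dev_pm_opp_set_rate() resolves the requested frequency against the device's OPP table (it ceils to the lowest OPP at or above the request) and applies whatever else that operating point carries before touching the clock, provided an OPP table has been registered for the device. A trivial usage sketch (the wrapper name is illustrative):

#include <linux/pm_opp.h>

/* Scale the DPU core clock through the OPP framework rather than the
 * raw clk API; the OPP entry's associated requirements come along. */
static int dpu_set_core_clk_via_opp(struct device *dev, unsigned long rate_hz)
{
	return dev_pm_opp_set_rate(dev, rate_hz);
}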
@@ -389,14 +389,14 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
 	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
 
 	if (!fevent) {
-		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
 		return;
 	}
 
 	fevent->event = event;
 	fevent->crtc = crtc;
 	fevent->ts = ktime_get();
-	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
 }
 
 void dpu_crtc_complete_commit(struct drm_crtc *crtc)
......
@@ -208,6 +208,36 @@ struct dpu_encoder_virt {
 
 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
 
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
+
+static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
+{
+	struct dpu_hw_dither_cfg dither_cfg = { 0 };
+
+	if (!hw_pp->ops.setup_dither)
+		return;
+
+	switch (bpc) {
+	case 6:
+		dither_cfg.c0_bitdepth = 6;
+		dither_cfg.c1_bitdepth = 6;
+		dither_cfg.c2_bitdepth = 6;
+		dither_cfg.c3_bitdepth = 6;
+		dither_cfg.temporal_en = 0;
+		break;
+	default:
+		hw_pp->ops.setup_dither(hw_pp, NULL);
+		return;
+	}
+
+	memcpy(&dither_cfg.matrix, dither_matrix,
+			sizeof(u32) * DITHER_MATRIX_SZ);
+
+	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
+}
+
 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
 		enum dpu_intr_idx intr_idx)
 {
@@ -1058,7 +1088,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	struct msm_drm_private *priv;
-	struct dpu_kms *dpu_kms;
+	int i;
 
 	if (!drm_enc || !drm_enc->dev) {
 		DPU_ERROR("invalid parameters\n");
@@ -1066,7 +1096,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 	}
 
 	priv = drm_enc->dev->dev_private;
-	dpu_kms = to_dpu_kms(priv->kms);
 
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
 	if (!dpu_enc || !dpu_enc->cur_master) {
@@ -1074,13 +1103,17 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 		return;
 	}
 
-	if (dpu_enc->cur_master->hw_mdptop &&
-			dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
-		dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
-				dpu_enc->cur_master->hw_mdptop,
-				dpu_kms->catalog);
-
 	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+
+	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
+		unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
+		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+			if (!dpu_enc->hw_pp[i])
+				continue;
+			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
+		}
+	}
 }
 
 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
......
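The dither_matrix table introduced above is a 4x4 ordered-dither (Bayer-style) threshold matrix flattened row-major; every value 0-15 appears exactly once, so quantization error is spread evenly over each 4x4 pixel tile. Viewed two-dimensionally:

/*
 * dither_matrix as its 4x4 threshold grid (row-major order of the
 * flat array above):
 *
 *	15   7  13   5
 *	 3  11   1   9
 *	12   4  14   6
 *	 0   8   2  10
 */

The bitdepth fields are only programmed for the 6bpc case, matching this pull's goal of enabling the dpu dither block for 6bpc panels; any other depth disables dithering via setup_dither(hw_pp, NULL).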
@@ -245,30 +245,14 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
 static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
 		u32 *flushbits, enum dpu_intf intf)
 {
-	switch (intf) {
-	case INTF_0:
-	case INTF_1:
-		*flushbits |= BIT(31);
-		break;
-	default:
-		return 0;
-	}
-
+	*flushbits |= BIT(31);
 	return 0;
 }
 
 static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
 		u32 *flushbits, enum dpu_intf intf)
 {
-	switch (intf) {
-	case INTF_0:
-		*flushbits |= BIT(0);
-		break;
-	case INTF_1:
-		*flushbits |= BIT(1);
-		break;
-	default:
-		return 0;
-	}
-
+	*flushbits |= BIT(intf - INTF_0);
 	return 0;
 }
......
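The rewritten active-CTL helper derives the flush bit directly from the interface number instead of special-casing INTF_0/INTF_1, which is what lets INTF_2 and above (used by the newly enabled sm8150/sm8250 displays) flush correctly. A standalone userspace illustration of the mapping, assuming only that the dpu_intf enum values are consecutive (the actual kernel enum base may be nonzero; the subtraction normalizes it either way):

#include <stdio.h>

/* Mirrors the kernel's BIT() macro for this illustration. */
#define BIT(nr) (1u << (nr))

/* Base value chosen arbitrarily here; only consecutiveness matters. */
enum dpu_intf { INTF_0 = 1, INTF_1, INTF_2, INTF_3 };

int main(void)
{
	enum dpu_intf i;

	for (i = INTF_0; i <= INTF_3; i++)
		printf("INTF_%d -> flush bit 0x%x\n",
		       i - INTF_0, BIT(i - INTF_0));
	return 0;
}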
This diff is collapsed.
...@@ -152,14 +152,13 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m, ...@@ -152,14 +152,13 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
unsigned long features) unsigned long features)
{ {
ops->setup_mixer_out = dpu_hw_lm_setup_out; ops->setup_mixer_out = dpu_hw_lm_setup_out;
if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) if (m->hwversion >= DPU_HW_VER_400)
|| IS_SC7180_TARGET(m->hwversion))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845; ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config; ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3; ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color; ops->setup_border_color = dpu_hw_lm_setup_border_color;
}; }
static struct dpu_hw_blk_ops dpu_hw_ops; static struct dpu_hw_blk_ops dpu_hw_ops;
......
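Replacing the growing list of per-SoC IS_*_TARGET() checks with a single version floor means every catalog at or above DPU_HW_VER_400 (sdm845 being the first 4.x part) takes the newer blend-config path, so future SoCs need no per-target edit; the stray semicolon after the function's closing brace is dropped in the same hunk. A schematic of the comparison, with the version encoding treated as an assumption here rather than quoted from the catalog header:

/* Assumed encoding (major version in the high nibble), illustrative
 * only; the driver's real DPU_HW_VER() macro lives in the catalog. */
#define DPU_HW_VER(major, minor, step) \
	((((major) & 0xf) << 28) | (((minor) & 0xfff) << 16) | ((step) & 0xffff))
#define DPU_HW_VER_400	DPU_HW_VER(4, 0, 0)	/* sdm845 */

/* Any DPU 4.x-or-newer catalog (sdm845, sdm670, sc7180, sm8150,
 * sm8250, ...) satisfies the floor with one unsigned comparison. */
static inline int has_sdm845_blend(unsigned int hwversion)
{
	return hwversion >= DPU_HW_VER_400;
}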
This diff is collapsed.