Commit 59e7a8cc authored by Dave Airlie

Merge tag 'drm-msm-next-2020-03-22' of https://gitlab.freedesktop.org/drm/msm into drm-next

A bit smaller this time around.. there are still a couple uabi
additions for vulkan waiting in the wings, but I punted on them this
cycle due to running low on time.  (They should be easy enough to
rebase, and if it is a problem for anyone I can push a next+uabi
branch so that tu work can proceed.)

The bigger change is refactoring dpu resource manager and moving dpu
to use atomic global state.  Other than that, it is mostly cleanups
and fixes.

From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuf1R4Xz-t9Z7_cwx9jD=b4wUvvwfqA5cHR8fCSXSd5XQ@mail.gmail.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
parents 5fc0df93 a5fb8b91
Qualcomm adreno/snapdragon GMU (Graphics management unit)
The GMU is a programmable power controller for the GPU. The CPU controls the
GMU, which in turn handles power control for the GPU.
Required properties:
- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu"
for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu"
Note that you need to list both the more specific identifier to identify the
specific device and the less specific "qcom,adreno-gmu" for generic matches.
- reg: Physical base address and length of the GMU registers.
- reg-names: Matching names for the register regions
* "gmu"
* "gmu_pdc"
* "gmu_pdc_seg"
- interrupts: The interrupt signals from the GMU.
- interrupt-names: Matching names for the interrupts
* "hfi"
* "gmu"
- clocks: phandles to the device clocks
- clock-names: Matching names for the clocks
* "gmu"
* "cxo"
* "axi"
* "mnoc"
- power-domains: should be:
<&clock_gpucc GPU_CX_GDSC>
<&clock_gpucc GPU_GX_GDSC>
- power-domain-names: Matching names for the power domains
- iommus: phandle to the adreno iommu
- operating-points-v2: phandle to the OPP operating points
Optional properties:
- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon
SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
Example:
/ {
...
gmu: gmu@506a000 {
compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
reg = <0x506a000 0x30000>,
<0xb280000 0x10000>,
<0xb480000 0x10000>;
reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hfi", "gmu";
clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
<&gpucc GPU_CC_CXO_CLK>,
<&gcc GCC_DDRSS_GPU_AXI_CLK>,
<&gcc GCC_GPU_MEMNOC_GFX_CLK>;
clock-names = "gmu", "cxo", "axi", "memnoc";
power-domains = <&gpucc GPU_CX_GDSC>,
<&gpucc GPU_GX_GDSC>;
power-domain-names = "cx", "gx";
iommus = <&adreno_smmu 5>;
operating-points-v2 = <&gmu_opp_table>;
};
};
a3xx example with OCMEM support:
/ {
...
gpu: adreno@fdb00000 {
compatible = "qcom,adreno-330.2",
"qcom,adreno";
reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "kgsl_3d0_irq";
clock-names = "core",
"iface",
"mem_iface";
clocks = <&mmcc OXILI_GFX3D_CLK>,
<&mmcc OXILICX_AHB_CLK>,
<&mmcc OXILICX_AXI_CLK>;
sram = <&gmu_sram>;
power-domains = <&mmcc OXILICX_GDSC>;
operating-points-v2 = <&gpu_opp_table>;
iommus = <&gpu_iommu 0>;
};
ocmem@fdd00000 {
compatible = "qcom,msm8974-ocmem";
reg = <0xfdd00000 0x2000>,
<0xfec00000 0x180000>;
reg-names = "ctrl",
"mem";
clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
<&mmcc OCMEMCX_OCMEMNOC_CLK>;
clock-names = "core",
"iface";
#address-cells = <1>;
#size-cells = <1>;
gmu_sram: gmu-sram@0 {
reg = <0x0 0x100000>;
ranges = <0 0 0xfec00000 0x100000>;
};
};
};
# SPDX-License-Identifier: GPL-2.0-only
# Copyright 2019-2020, The Linux Foundation, All Rights Reserved
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Devicetree bindings for the GMU attached to certain Adreno GPUs
maintainers:
- Rob Clark <robdclark@gmail.com>
description: |
These bindings describe the Graphics Management Unit (GMU) that is attached
to members of the Adreno A6xx GPU family. The GMU provides on-device power
management and support to improve power efficiency and reduce the load on
the CPU.
properties:
compatible:
items:
- enum:
- qcom,adreno-gmu-630.2
- const: qcom,adreno-gmu
reg:
items:
- description: Core GMU registers
- description: GMU PDC registers
- description: GMU PDC sequence registers
reg-names:
items:
- const: gmu
- const: gmu_pdc
- const: gmu_pdc_seq
clocks:
items:
- description: GMU clock
- description: GPU CX clock
- description: GPU AXI clock
- description: GPU MEMNOC clock
clock-names:
items:
- const: gmu
- const: cxo
- const: axi
- const: memnoc
interrupts:
items:
- description: GMU HFI interrupt
- description: GMU interrupt
interrupt-names:
items:
- const: hfi
- const: gmu
power-domains:
items:
- description: CX power domain
- description: GX power domain
power-domain-names:
items:
- const: cx
- const: gx
iommus:
maxItems: 1
operating-points-v2: true
required:
- compatible
- reg
- reg-names
- clocks
- clock-names
- interrupts
- interrupt-names
- power-domains
- power-domain-names
- iommus
- operating-points-v2
examples:
- |
#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
gmu: gmu@506a000 {
compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
reg = <0x506a000 0x30000>,
<0xb280000 0x10000>,
<0xb480000 0x10000>;
reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
<&gpucc GPU_CC_CXO_CLK>,
<&gcc GCC_DDRSS_GPU_AXI_CLK>,
<&gcc GCC_GPU_MEMNOC_GFX_CLK>;
clock-names = "gmu", "cxo", "axi", "memnoc";
interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hfi", "gmu";
power-domains = <&gpucc GPU_CX_GDSC>,
<&gpucc GPU_GX_GDSC>;
power-domain-names = "cx", "gx";
iommus = <&adreno_smmu 5>;
operating-points-v2 = <&gmu_opp_table>;
};
@@ -35,25 +35,54 @@ Required properties:
bring the GPU out of secure mode.
- firmware-name: optional property of the 'zap-shader' node, listing the
relative path of the device specific zap firmware.
- sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and
a4xx Snapdragon SoCs. See
Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
Example 3xx/4xx/a5xx:
Example 3xx/4xx:
/ {
...
gpu: qcom,kgsl-3d0@4300000 {
compatible = "qcom,adreno-320.2", "qcom,adreno";
reg = <0x04300000 0x20000>;
gpu: adreno@fdb00000 {
compatible = "qcom,adreno-330.2",
"qcom,adreno";
reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 80 0>;
clock-names =
"core",
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "kgsl_3d0_irq";
clock-names = "core",
"iface",
"mem_iface";
clocks =
<&mmcc GFX3D_CLK>,
<&mmcc GFX3D_AHB_CLK>,
<&mmcc MMSS_IMEM_AHB_CLK>;
clocks = <&mmcc OXILI_GFX3D_CLK>,
<&mmcc OXILICX_AHB_CLK>,
<&mmcc OXILICX_AXI_CLK>;
sram = <&gpu_sram>;
power-domains = <&mmcc OXILICX_GDSC>;
operating-points-v2 = <&gpu_opp_table>;
iommus = <&gpu_iommu 0>;
};
gpu_sram: ocmem@fdd00000 {
compatible = "qcom,msm8974-ocmem";
reg = <0xfdd00000 0x2000>,
<0xfec00000 0x180000>;
reg-names = "ctrl",
"mem";
clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
<&mmcc OCMEMCX_OCMEMNOC_CLK>;
clock-names = "core",
"iface";
#address-cells = <1>;
#size-cells = <1>;
gpu_sram: gpu-sram@0 {
reg = <0x0 0x100000>;
ranges = <0 0 0xfec00000 0x100000>;
};
};
};
@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = {
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
u32 bin, val;
u32 val;
/*
* If the OPP table specifies an opp-supported-hw property then we have
* to set something with dev_pm_opp_set_supported_hw() or the table
* doesn't get populated so pick an arbitrary value that should
* ensure the default frequencies are selected but not conflict with any
* actual bins
*/
val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
/* If a nvmem cell isn't defined, nothing to do */
if (IS_ERR(cell))
return;
if (!IS_ERR(cell)) {
void *buf = nvmem_cell_read(cell, NULL);
bin = *((u32 *) nvmem_cell_read(cell, NULL));
nvmem_cell_put(cell);
if (!IS_ERR(buf)) {
u8 bin = *((u8 *) buf);
val = (1 << bin);
kfree(buf);
}
nvmem_cell_put(cell);
}
dev_pm_opp_set_supported_hw(dev, &val, 1);
}
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
int count, i;
u64 iova;
if (IS_ERR_OR_NULL(bo))
return;
count = bo->size >> PAGE_SHIFT;
iova = bo->iova;
for (i = 0; i < count; i++, iova += PAGE_SIZE) {
iommu_unmap(gmu->domain, iova, PAGE_SIZE);
__free_pages(bo->pages[i], 0);
}
kfree(bo->pages);
dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
kfree(bo);
}
@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
size_t size)
{
struct a6xx_gmu_bo *bo;
int ret, count, i;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
bo->size = PAGE_ALIGN(size);
count = bo->size >> PAGE_SHIFT;
bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
if (!bo->pages) {
if (!bo->virt) {
kfree(bo);
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < count; i++) {
bo->pages[i] = alloc_page(GFP_KERNEL);
if (!bo->pages[i])
goto err;
}
bo->iova = gmu->uncached_iova_base;
for (i = 0; i < count; i++) {
ret = iommu_map(gmu->domain,
bo->iova + (PAGE_SIZE * i),
page_to_phys(bo->pages[i]), PAGE_SIZE,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1 ; i >= 0; i--)
iommu_unmap(gmu->domain,
bo->iova + (PAGE_SIZE * i),
PAGE_SIZE);
goto err;
}
}
bo->virt = vmap(bo->pages, count, VM_IOREMAP,
pgprot_writecombine(PAGE_KERNEL));
if (!bo->virt)
goto err;
/* Align future IOVA addresses on 1MB boundaries */
gmu->uncached_iova_base += ALIGN(size, SZ_1M);
return bo;
err:
for (i = 0; i < count; i++) {
if (bo->pages[i])
__free_pages(bo->pages[i], 0);
}
kfree(bo->pages);
kfree(bo);
return ERR_PTR(-ENOMEM);
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
int ret;
/*
* The GMU address space is hardcoded to treat the range
* 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
* between the GMU and the CPU will live in this space
*/
gmu->uncached_iova_base = 0x60000000;
gmu->domain = iommu_domain_alloc(&platform_bus_type);
if (!gmu->domain)
return -ENODEV;
ret = iommu_attach_device(gmu->domain, gmu->dev);
if (ret) {
iommu_domain_free(gmu->domain);
gmu->domain = NULL;
}
return ret;
}
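The net effect of the three hunks above is easier to see outside the interleaved diff: the hand-rolled IOMMU domain, page array and vmap are gone, and GMU buffers become plain write-combined DMA allocations. Below is a minimal illustrative sketch (not part of the diff), assuming only the struct a6xx_gmu_bo fields shown in the a6xx_gmu.h hunk further down (virt, size, iova as dma_addr_t); the sketch_* names are made up.

/* Illustrative sketch: GMU buffer management after the switch to the DMA API. */
static struct a6xx_gmu_bo *sketch_gmu_memory_alloc(struct a6xx_gmu *gmu,
						   size_t size)
{
	struct a6xx_gmu_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);
	/* dma_alloc_wc() hands back a CPU mapping and a device address; the
	 * DMA/IOMMU layer now does what the removed iommu_map() loop did. */
	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
	if (!bo->virt) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	return bo;
}

static void sketch_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
	kfree(bo);
}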
/* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_memory_free(gmu, gmu->hfi);
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
/* Pass force_dma false to require the DT to set the dma region */
ret = of_dma_configure(gmu->dev, node, false);
if (ret)
return ret;
/* Set the mask after the of_dma_configure() */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
if (ret)
return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
/* Set up the IOMMU context bank */
ret = a6xx_gmu_memory_probe(gmu);
if (ret)
goto err_put_device;
/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
if (gmu->domain) {
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
}
ret = -ENODEV;
err_put_device:
@@ -12,8 +12,7 @@
struct a6xx_gmu_bo {
void *virt;
size_t size;
u64 iova;
struct page **pages;
dma_addr_t iova;
};
/*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
struct iommu_domain *domain;
u64 uncached_iova_base;
struct device *gxpd;
int idle_level;
@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers {
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
};
@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
return NULL;
for (i = 0; i < l; i++)
buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out));
return buf;
@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
* clks and resources after IDLE_TIMEOUT time.
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @mode_set_complete: flag to indicate modeset completion
* @idle_timeout: idle timeout duration in milliseconds
*/
struct dpu_encoder_virt {
@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
bool mode_set_complete;
u32 idle_timeout;
};
@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config(
struct msm_display_info *disp_info;
if (!phys_enc->hw_mdptop || !phys_enc->parent) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
@@ -562,12 +560,13 @@ static int dpu_encoder_virt_atomic_check(
const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
drm_enc != 0, crtc_state != 0, conn_state != 0);
drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
return -EINVAL;
}
@@ -578,6 +577,7 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
/*
@@ -609,17 +609,15 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating AtomicTest phase */
/* Reserve dynamic resources now. */
if (!ret) {
/*
* Avoid reserving resources when mode set is pending. Topology
* info may not be available to complete reservation.
*/
if (drm_atomic_crtc_needs_modeset(crtc_state)
&& dpu_enc->mode_set_complete) {
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
topology, true);
dpu_enc->mode_set_complete = false;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
drm_enc, crtc_state, topology);
}
}
@@ -956,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate;
struct dpu_rm_hw_iter hw_iter;
struct dpu_global_state *global_state;
struct msm_display_topology topology;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
int num_lm = 0, num_ctl = 0;
int i, j, ret;
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
int num_lm, num_ctl, num_pp;
int i, j;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -975,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
DPU_ERROR("Failed to get global state");
return;
}
trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head)
@@ -995,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"failed to reserve hw resources, %d\n", ret);
return;
}
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
}
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
num_ctl++;
}
/* Query resource that have been reserved in atomic check step. */
num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
ARRAY_SIZE(hw_pp));
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
num_lm++;
}
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
cstate = to_dpu_crtc_state(drm_crtc->state);
for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
cstate->mixers[i].hw_lm = hw_lm[i];
cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
}
cstate->num_mixers = num_lm;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
int num_blk;
struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
goto error;
return;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
goto error;
return;
}
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = hw_ctl[i];
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
DPU_HW_BLK_INTF);
for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
hw_blk, ARRAY_SIZE(hw_blk));
for (j = 0; j < num_blk; j++) {
struct dpu_hw_intf *hw_intf;
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
hw_intf = to_dpu_hw_intf(hw_blk[j]);
if (hw_intf->idx == phys->intf_idx)
phys->hw_intf = hw_intf;
}
@@ -1073,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!phys->hw_intf) {
DPU_ERROR_ENC(dpu_enc,
"no intf block assigned at idx: %d\n", i);
goto error;
return;
}
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
dpu_enc->mode_set_complete = true;
error:
dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1181,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct dpu_global_state *global_state;
int i = 0;
if (!drm_enc) {
@@ -1199,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_disable(DRMID(drm_enc));
@@ -1228,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
dpu_rm_release(global_state, drm_enc);
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -1964,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
PTR_ERR(enc));
return enc == 0 ? -EINVAL : PTR_ERR(enc);
return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -1977,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc));
return enc == 0 ? -EINVAL : PTR_ERR(enc);
return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -2008,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
return -EINVAL;
}
@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
return;
}
@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
u32 flush_mask = 0;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
return;
}
@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
return;
}
@@ -89,6 +89,16 @@ struct dpu_hw_intf {
struct dpu_hw_intf_ops ops;
};
/**
* to_dpu_hw_intf - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_intf, base);
}
/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
@@ -96,6 +96,16 @@ struct dpu_hw_pingpong {
struct dpu_hw_pingpong_ops ops;
};
/**
* to_dpu_hw_pingpong - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_pingpong, base);
}
/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
if (!dpu_kms->mmio)
return 0;
dev = dpu_kms->dev;
priv = dev->dev_private;
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
}
#endif
/* Global/shared object state funcs */
/*
* This is a helper that returns the private state currently in operation.
* Note that this would return the "old_state" if called in the atomic check
* path, and the "new_state" after the atomic swap has been done.
*/
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
return to_dpu_global_state(dpu_kms->global_state.state);
}
/*
* This acquires the modeset lock set aside for global state, creates
* a new duplicated private object state.
*/
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_private_state *priv_state;
int ret;
ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
if (ret)
return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s,
&dpu_kms->global_state);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_dpu_global_state(priv_state);
}
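For orientation, here is a minimal, hypothetical sketch (not part of the diff) of how these two helpers pair up with the resource manager; the example_* names are invented, everything else is declared elsewhere in this commit.

/* Illustrative sketch only. In the atomic check path, take the global
 * modeset lock and work on the duplicated state that gets swapped in on
 * commit. */
static int example_reserve_in_atomic_check(struct dpu_kms *dpu_kms,
					   struct drm_atomic_state *state,
					   struct drm_encoder *drm_enc,
					   struct drm_crtc_state *crtc_state,
					   struct msm_display_topology topology)
{
	struct dpu_global_state *global_state;

	global_state = dpu_kms_get_global_state(state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	return dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
			      crtc_state, topology);
}

/* After the atomic swap (mode_set, disable, ...) only the state currently
 * in operation is read back; no new reservations are made here. */
static void example_query_after_swap(struct dpu_kms *dpu_kms,
				     struct drm_encoder *drm_enc)
{
	struct dpu_global_state *global_state =
		dpu_kms_get_existing_global_state(dpu_kms);
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	int num_pp;

	/* The first num_pp entries of hw_pp[] are valid on return. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					       drm_enc->base.id,
					       DPU_HW_BLK_PINGPONG,
					       hw_pp, ARRAY_SIZE(hw_pp));
}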
static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
struct dpu_global_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct dpu_global_state *dpu_state = to_dpu_global_state(state);
kfree(dpu_state);
}
static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
.atomic_duplicate_state = dpu_kms_global_duplicate_state,
.atomic_destroy_state = dpu_kms_global_destroy_state,
};
static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
struct dpu_global_state *state;
drm_modeset_lock_init(&dpu_kms->global_state_lock);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
&state->base,
&dpu_kms_global_state_funcs);
return 0;
}
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
if (!kms)
return;
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
int i;
dev = dpu_kms->dev;
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
int i, rc = -EINVAL;
if (!kms) {
@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
priv = dev->dev_private;
rc = dpu_kms_global_obj_init(dpu_kms);
if (rc)
return rc;
atomic_set(&dpu_kms->bandwidth_ref, 0);
@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
@@ -111,6 +111,13 @@ struct dpu_kms {
struct dpu_core_perf perf;
/*
* Global private object state. Do not access directly; use
* dpu_kms_get_global_state()
*/
struct drm_modeset_lock global_state_lock;
struct drm_private_obj global_state;
struct dpu_rm rm;
bool rm_init;
@@ -139,6 +146,25 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
/* Global private object state for tracking resources that are shared across
* multiple kms objects (planes/crtcs/etc).
*/
struct dpu_global_state {
struct drm_private_state base;
uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
};
struct dpu_global_state
*dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
struct dpu_global_state
*__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
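The resource-manager code that consumes these arrays is not expanded in this view, so as a rough, hypothetical sketch of the bookkeeping they imply (helper names invented; the real logic lives in dpu_rm.c): each slot holds the DRM id of the encoder that currently owns the corresponding hardware block, with 0 meaning free.

/* Hypothetical sketch, not the actual dpu_rm.c implementation. */
static bool sketch_try_reserve_pingpong(struct dpu_global_state *global_state,
					int pp_idx, uint32_t enc_id)
{
	if (global_state->pingpong_to_enc_id[pp_idx])
		return false;	/* already owned by another encoder */

	global_state->pingpong_to_enc_id[pp_idx] = enc_id;
	return true;
}

static void sketch_release_pingpongs(struct dpu_global_state *global_state,
				     uint32_t enc_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
		if (global_state->pingpong_to_enc_id[i] == enc_id)
			global_state->pingpong_to_enc_id[i] = 0;
}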
/**
* Debugfs functions - extra helper functions for debugfs support
*
This diff is collapsed.
@@ -11,37 +11,24 @@
#include "msm_kms.h"
#include "dpu_hw_top.h"
struct dpu_global_state;
/**
* struct dpu_rm - DPU dynamic hardware resource manager
* @hw_blks: array of lists of hardware resources present in the system, one
* list per type of hardware block
* @pingpong_blks: array of pingpong hardware resources
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @intf_blks: array of intf hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
struct list_head hw_blks[DPU_HW_BLK_MAX];
uint32_t lm_max_width;
struct mutex rm_lock;
};
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
/**
* struct dpu_rm_hw_blk - resource manager internal structure
* forward declaration for single iterator definition without void pointer
*/
struct dpu_rm_hw_blk;
/**
* struct dpu_rm_hw_iter - iterator for use with dpu_rm
* @hw: dpu_hw object requested, or NULL on failure
* @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
* @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
* @type: Hardware Block Type client wishes to search for.
*/
struct dpu_rm_hw_iter {
void *hw;
struct dpu_rm_hw_blk *blk;
uint32_t enc_id;
enum dpu_hw_blk_type type;
uint32_t lm_max_width;
};
/**
@@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct msm_display_topology topology,
bool test_only);
struct msm_display_topology topology);
/**
* dpu_rm_release - Given the encoder for the display chain, release any
@@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm,
* @enc: DRM Encoder handle
* @Return: 0 on Success otherwise -ERROR
*/
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc);
/**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
* using dpu_rm_get_hw
* @iter: iter object to initialize
* @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
* @type: Hardware Block Type client wishes to search for.
*/
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
enum dpu_hw_blk_type type);
/**
* dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
* Meant to do a single pass through the hardware list to iteratively
* retrieve hardware blocks of a given type for a given encoder.
* Initialize an iterator object.
* Set hw block type of interest. Set encoder id of interest, 0 for any.
* Function returns first hw of type for that encoder.
* Subsequent calls will return the next reserved hw of that type in-order.
* Iterator HW pointer will be null on failure to find hw.
* @rm: DPU Resource Manager handle
* @iter: iterator object
* @Return: true on match found, false on no match found
* Get hw resources of the given type that are assigned to this encoder.
*/
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
#endif /* __DPU_RM_H__ */
@@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
int rc;
if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
u32 val;
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
vbif != 0, mdp != 0);
vbif != NULL, mdp != NULL);
return;
}
@@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0);
if (ret)
goto fail;
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
@@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);
if (ret)
goto fail;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
if (!dev->dma_parms)
return -ENOMEM;
if (!dev->dma_parms) {
ret = -ENOMEM;
goto err_msm_uninit;
}
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
@@ -157,7 +157,17 @@ struct msm_gem_submit {
uint32_t handle;
};
uint64_t iova;
} bos[0];
} bos[];
};
/* helper to determine if a buffer in submit should be dumped, used for both
* devcoredump and debugfs cmdstream dumping:
*/
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
extern bool rd_full;
return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}
#endif /* __MSM_GEM_H__ */
@@ -355,18 +355,36 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->cmd = kstrdup(cmd, GFP_KERNEL);
if (submit) {
int i;
state->bos = kcalloc(submit->nr_cmds,
int i, nr = 0;
/* count # of buffers to dump: */
for (i = 0; i < submit->nr_bos; i++)
if (should_dump(submit, i))
nr++;
/* always dump cmd bo's, but don't double count them: */
for (i = 0; i < submit->nr_cmds; i++)
if (!should_dump(submit, submit->cmd[i].idx))
nr++;
state->bos = kcalloc(nr,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
for (i = 0; i < submit->nr_bos; i++) {
if (should_dump(submit, i)) {
msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
submit->bos[i].iova, submit->bos[i].flags);
}
}
for (i = 0; state->bos && i < submit->nr_cmds; i++) {
int idx = submit->cmd[i].idx;
if (!should_dump(submit, submit->cmd[i].idx)) {
msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
submit->bos[idx].iova, submit->bos[idx].flags);
}
}
}
/* Set the active crash state to be dumped on failure */
gpu->crashstate = state;
@@ -43,7 +43,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
static bool rd_full = false;
bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
@@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
msm_gem_put_vaddr(&obj->base);
}
static bool
should_dump(struct msm_gem_submit *submit, int idx)
{
return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)