Commit 87331c83 authored by Dave Airlie

Merge tag 'drm-msm-next-2017-11-01' of git://people.freedesktop.org/~robclark/linux into drm-next

 + preemption support for a5xx[1][2]

 + display fixes for 8x96 (snapdragon 820), including fixes for 4k scanout
   (hwpipe assignment re-work to handle multiple hwpipes assigned to a plane
   for wide scanout)

 + async cursor plane updates and fixes

 + refactor adreno_bind/hwinit.. still defer fw loading until device open,
   but move clk/irq/etc to probe/bind time to fix issues when fw isn't
   present in the filesystem

 + clk/dt bindings cleanups w/ backward compat via msm_clk_get() (dt docs
   part ack'ed by Rob Herring; see the msm_clk_get() sketch after this list)

 + fw loading re-work with helper to handle either /lib/firmware/qcom/$fw
   or /lib/firmware/$fw.. background: we've started landing fw for some
   generations in linux-firmware, but there is a preference to put fw files
   under a 'qcom' subdirectory, which is not what was done on android or for
   people who copied fw from android.  So now we first look in the qcom
   subdir and then fall back to the original location (see the lookup-order
   sketch after this list).

 + a bunch of GPU debugging enhancements: dump the full cmdline of processes
   that trigger faults, and add a new debugfs file to capture the cmdstream
   of just the submits that triggered faults.. both quite useful for piglit ;-)
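
For reference, a minimal sketch of what the msm_clk_get() compatibility
helper mentioned above does: try the new suffix-less clock name from the
updated bindings first, then fall back to the legacy "<name>_clk" name.
The real helper lives in msm_drv.c and is not part of this diff, so the
details below are illustrative only:

    struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
    {
            struct clk *clk;
            char name2[32];

            /* Prefer the new, suffix-less name from the updated bindings */
            clk = devm_clk_get(&pdev->dev, name);
            if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                    return clk;

            /* Fall back to the legacy "<name>_clk" binding */
            snprintf(name2, sizeof(name2), "%s_clk", name);
            clk = devm_clk_get(&pdev->dev, name2);
            if (!IS_ERR(clk))
                    dev_warn(&pdev->dev,
                             "Using legacy clk name binding, use \"%s\" instead of \"%s\"\n",
                             name, name2);

            return clk;
    }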
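
Similarly, a rough sketch of the firmware lookup order described above. The
actual implementation is adreno_request_fw() further down in this diff, which
also remembers which location worked (adreno_gpu->fwloc) so later requests
stay consistent; the helper name below is made up for illustration:

    static const struct firmware *request_gpu_fw(struct device *dev,
                    const char *fwname)
    {
            const struct firmware *fw;
            char newname[strlen("qcom/") + strlen(fwname) + 1];

            sprintf(newname, "qcom/%s", fwname);

            /* 1) new location, direct load (avoids usermode-helper timeouts) */
            if (!request_firmware_direct(&fw, newname, dev))
                    return fw;

            /* 2) legacy location, direct load */
            if (!request_firmware_direct(&fw, fwname, dev))
                    return fw;

            /* 3) new location again, this time allowing the usermode helper */
            if (!request_firmware(&fw, newname, dev))
                    return fw;

            return ERR_PTR(-ENOENT);
    }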

* tag 'drm-msm-next-2017-11-01' of git://people.freedesktop.org/~robclark/linux: (38 commits)
  drm/msm: use %z format modifier for printing size_t
  drm/msm/mdp5: Don't use async plane update path if plane visibility changes
  drm/msm/mdp5: mdp5_crtc: Restore cursor state only if LM cursors are enabled
  drm/msm/mdp5: Update mdp5_pipe_assign to spit out both planes
  drm/msm/mdp5: Prepare mdp5_pipe_assign for some rework
  drm/msm: remove mdp5_cursor_plane_funcs
  drm/msm: update cursors asynchronously through atomic
  drm/msm/atomic: switch to drm_atomic_helper_check
  drm/msm/mdp5: restore cursor state when enabling crtc
  drm/msm/mdp5: don't use autosuspend
  drm/msm/mdp5: ignore planes that are not visible
  drm/msm: dump submits which triggered gpu hang
  drm/msm: preserve IOVAs in submit's bo table
  drm/msm/rd: allow adding addition msg to top of dump
  drm/msm: split rd debugfs file
  drm/msm: add special _get_vaddr_active() for cmdstream dumps
  drm/msm: show task cmdline in gpu recovery messages
  drm/msm: dump a rd GPUADDR header for all buffers in the command
  drm/msm: Removed unused struct_mutex_task
  drm/msm: Implement preemption for A5XX targets
  ...
parents 43106e25 39ae0d3e
......@@ -13,16 +13,16 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks.
- clock-names: the following clocks are required:
* "mdp_core_clk"
* "iface_clk"
* "bus_clk"
* "core_mmss_clk"
* "byte_clk"
* "pixel_clk"
* "core_clk"
* "mdp_core"
* "iface"
* "bus"
* "core_mmss"
* "byte"
* "pixel"
* "core"
For DSIv2, we need an additional clock:
* "src_clk"
- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
* "src"
- assigned-clocks: Parents of "byte" and "pixel" for the given platform.
- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node
......@@ -101,7 +101,7 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface_clk"
* "iface"
- vddio-supply: phandle to vdd-io regulator device node
Optional properties:
......@@ -123,13 +123,13 @@ Example:
reg = <0xfd922800 0x200>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
"bus_clk",
"byte_clk",
"core_clk",
"core_mmss_clk",
"iface_clk",
"mdp_core_clk",
"pixel_clk";
"bus",
"byte",
"core",
"core_mmss",
"iface",
"mdp_core",
"pixel";
clocks =
<&mmcc MDSS_AXI_CLK>,
<&mmcc MDSS_BYTE0_CLK>,
......@@ -207,7 +207,7 @@ Example:
reg = <0xfd922a00 0xd4>,
<0xfd922b00 0x2b0>,
<0xfd922d80 0x7b>;
clock-names = "iface_clk";
clock-names = "iface";
clocks = <&mmcc MDSS_AHB_CLK>;
#clock-cells = <1>;
vddio-supply = <&pma8084_l12>;
......
......@@ -12,11 +12,11 @@ Required properties:
- clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- clock-names: the following clocks are required:
* "core_clk"
* "iface_clk"
* "mdp_core_clk"
* "pixel_clk"
* "link_clk"
* "core"
* "iface"
* "mdp_core"
* "pixel"
* "link"
- #clock-cells: The value should be 1.
- vdda-supply: phandle to vdda regulator device node
- lvl-vdd-supply: phandle to regulator device node which is used to supply power
......@@ -41,11 +41,11 @@ Example:
interrupts = <12 0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
"core_clk",
"pixel_clk",
"iface_clk",
"link_clk",
"mdp_core_clk";
"core",
"pixel",
"iface",
"link",
"mdp_core";
clocks =
<&mmcc MDSS_EDPAUX_CLK>,
<&mmcc MDSS_EDPPIXEL_CLK>,
......
......@@ -64,9 +64,9 @@ Example:
interrupts = <GIC_SPI 79 0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
"core_clk",
"master_iface_clk",
"slave_iface_clk";
"core",
"master_iface",
"slave_iface";
clocks =
<&mmcc HDMI_APP_CLK>,
<&mmcc HDMI_M_AHB_CLK>,
......@@ -92,7 +92,7 @@ Example:
<0x4a00500 0x100>;
#phy-cells = <0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names = "slave_iface_clk";
clock-names = "slave_iface";
clocks = <&mmcc HDMI_S_AHB_CLK>;
core-vdda-supply = <&pm8921_hdmi_mvs>;
};
......
......@@ -22,16 +22,16 @@ Required properties:
Documentation/devicetree/bindings/power/power_domain.txt
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
* "iface_clk"
* "bus_clk"
* "vsync_clk"
* "iface"
* "bus"
* "vsync"
- #address-cells: number of address cells for the MDSS children. Should be 1.
- #size-cells: Should be 1.
- ranges: parent bus address space is the same as the child bus address space.
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
* "lut"
MDP5:
Required properties:
......@@ -45,10 +45,10 @@ Required properties:
through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
* "bus_clk"
* "iface_clk"
* "core_clk"
* "vsync_clk"
* "bus"
* "iface"
* "core"
* "vsync"
- ports: contains the list of output ports from MDP. These connect to interfaces
that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
special case since it is a part of the MDP block itself).
......@@ -77,7 +77,7 @@ Required properties:
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
* "lut"
Example:
......@@ -95,9 +95,9 @@ Example:
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk",
"bus_clk",
"vsync_clk"
clock-names = "iface",
"bus",
"vsync"
interrupts = <0 72 0>;
......@@ -120,10 +120,10 @@ Example:
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk",
"bus_clk",
"core_clk",
"vsync_clk";
clock-names = "iface",
"bus",
"core",
"vsync";
ports {
#address-cells = <1>;
......
......@@ -8,6 +8,7 @@ msm-y := \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
......@@ -57,7 +58,8 @@ msm-y := \
msm_iommu.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o
msm_ringbuffer.o \
msm_submitqueue.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
......
......@@ -44,7 +44,7 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
......@@ -65,7 +65,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
gpu->funcs->flush(gpu, ring);
return a3xx_idle(gpu);
}
......@@ -339,7 +339,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
if (!adreno_idle(gpu))
if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
......@@ -444,9 +444,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
......@@ -492,7 +492,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
......
......@@ -116,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
static bool a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
......@@ -137,7 +137,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
gpu->funcs->flush(gpu, ring);
return a4xx_idle(gpu);
}
......@@ -337,7 +337,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
if (!adreno_idle(gpu))
if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
......@@ -532,9 +532,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
......@@ -574,7 +574,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
......
......@@ -26,8 +26,9 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
{
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
struct device_node *np;
struct resource r;
......@@ -55,10 +56,10 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
mem_size = resource_size(&r);
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
return ret;
return PTR_ERR(fw);
}
/* Figure out how much memory we need */
......@@ -75,9 +76,26 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
goto out;
}
/* Load the rest of the MDT */
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
mem_size);
/*
* Load the rest of the MDT
*
* Note that we could be dealing with two different paths, since
* with upstream linux-firmware it would be in a qcom/ subdir..
* adreno_request_fw() handles this, but qcom_mdt_load() does
* not. But since we've already gotten thru adreno_request_fw()
* we know which of the two cases it is:
*/
if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
mem_region, mem_phys, mem_size);
} else {
char newname[strlen("qcom/") + strlen(fwname) + 1];
sprintf(newname, "qcom/%s", fwname);
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
mem_region, mem_phys, mem_size);
}
if (ret)
goto out;
......@@ -95,14 +113,65 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
return ret;
}
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
/* Copy the shadow to the actual register */
ring->cur = ring->next;
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
/* Make sure everything is posted before making a decision */
mb();
/* Update HW if this is the current ring and we are not in preempt */
if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
/* Set the save preemption record for the ring/command */
OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
/* Turn back on protected mode */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
/* Enable local preemption for finegrain preemption */
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x02);
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
......@@ -120,16 +189,54 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
/*
* Write the render mode to NULL (0) to indicate to the CP that the IBs
* are done rendering - otherwise a lucky preemption would start
* replaying from the last checkpoint
*/
OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
/* Turn off IB level preemptions */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x01);
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->fence->seqno);
OUT_RING(ring, submit->seqno);
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
* timestamp is written to the memory and then triggers the interrupt
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
OUT_RING(ring, submit->fence->seqno);
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
gpu->funcs->flush(gpu);
/* Yield the floor on command completion */
OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
/*
* If dword[2:1] are non zero, they specify an address for the CP to
* write the value of dword[3] to on preemption complete. Write 0 to
* skip the write
*/
OUT_RING(ring, 0x00);
OUT_RING(ring, 0x00);
/* Data value - not used if the address above is 0 */
OUT_RING(ring, 0x01);
/* Set bit 0 to trigger an interrupt on preempt complete */
OUT_RING(ring, 0x01);
a5xx_flush(gpu, ring);
/* Check to see if we need to start preemption */
a5xx_preempt_trigger(gpu);
}
static const struct {
......@@ -245,7 +352,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
static int a5xx_me_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_ringbuffer *ring = gpu->rb;
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT7(ring, CP_ME_INIT, 8);
......@@ -276,11 +383,54 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
gpu->funcs->flush(gpu, ring);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
static int a5xx_preempt_start(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
if (gpu->nr_rings == 1)
return 0;
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
/* Set the save preemption record for the ring/command */
OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
/* Turn back on protected mode */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x00);
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x01);
return a5xx_idle(gpu) ? 0 : -EINVAL;
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x01);
/* Yield the floor on command completion */
OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
OUT_RING(ring, 0x00);
OUT_RING(ring, 0x00);
OUT_RING(ring, 0x01);
OUT_RING(ring, 0x01);
gpu->funcs->flush(gpu, ring);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
......@@ -381,7 +531,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
loaded = !ret;
......@@ -396,6 +546,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
A5XX_RBBM_INT_0_MASK_CP_SW | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
......@@ -536,13 +687,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Load the GPMU firmware before starting the HW init */
a5xx_gpmu_ucode_init(gpu);
ret = adreno_hw_init(gpu);
if (ret)
return ret;
a5xx_preempt_hw_init(gpu);
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
if (ret)
return ret;
......@@ -565,11 +717,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* ticking correctly
*/
if (adreno_is_a530(adreno_gpu)) {
OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
OUT_RING(gpu->rb, 0x0F);
OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
OUT_RING(gpu->rb[0], 0x0F);
gpu->funcs->flush(gpu);
if (!a5xx_idle(gpu))
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
}
......@@ -582,11 +734,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
*/
ret = a5xx_zap_shader_init(gpu);
if (!ret) {
OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
OUT_RING(gpu->rb, 0x00000000);
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
OUT_RING(gpu->rb[0], 0x00000000);
gpu->funcs->flush(gpu);
if (!a5xx_idle(gpu))
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else {
/* Print a warning so if we die, we know why */
......@@ -595,6 +747,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
}
/* Last step - yield the ringbuffer */
a5xx_preempt_start(gpu);
return 0;
}
......@@ -625,6 +780,8 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
......@@ -660,18 +817,27 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}
bool a5xx_idle(struct msm_gpu *gpu)
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (ring != a5xx_gpu->cur_ring) {
WARN(1, "Tried to idle a non-current ringbuffer\n");
return false;
}
/* wait for CP to drain ringbuffer: */
if (!adreno_idle(gpu))
if (!adreno_idle(gpu, ring))
return false;
if (spin_until(_a5xx_check_idle(gpu))) {
DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
gpu->name, __builtin_return_address(0),
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
return false;
}
......@@ -802,9 +968,10 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
gpu->funcs->last_fence(gpu),
dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
......@@ -854,8 +1021,13 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
a5xx_gpmu_err_irq(gpu);
if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
a5xx_preempt_trigger(gpu);
msm_gpu_retire(gpu);
}
if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
a5xx_preempt_irq(gpu);
return IRQ_HANDLED;
}
......@@ -985,6 +1157,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
#endif
static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
return a5xx_gpu->cur_ring;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
......@@ -992,9 +1172,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a5xx_pm_suspend,
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
.last_fence = adreno_last_fence,
.submit = a5xx_submit,
.flush = adreno_flush,
.flush = a5xx_flush,
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
......@@ -1030,7 +1210,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
......@@ -1039,5 +1219,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
return gpu;
}
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
......@@ -35,10 +35,100 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
struct msm_ringbuffer *cur_ring;
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
struct timer_list preempt_timer;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
/*
* In order to do lockless preemption we use a simple state machine to progress
* through the process.
*
* PREEMPT_NONE - no preemption in progress. Next state START.
* PREEMPT_START - The trigger is evaluating if preemption is possible. Next
* states: TRIGGERED, NONE
* PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
* state: NONE.
* PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
* states: FAULTED, PENDING
* PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
* recovery. Next state: N/A
* PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
* checking the success of the operation. Next state: FAULTED, NONE.
*/
enum preempt_state {
PREEMPT_NONE = 0,
PREEMPT_START,
PREEMPT_ABORT,
PREEMPT_TRIGGERED,
PREEMPT_FAULTED,
PREEMPT_PENDING,
};
/*
* struct a5xx_preempt_record is a shared buffer between the microcode and the
* CPU to store the state for preemption. The record itself is much larger
* (64k) but most of that is used by the CP for storage.
*
* There is a preemption record assigned per ringbuffer. When the CPU triggers a
* preemption, it fills out the record with the useful information (wptr, ring
* base, etc) and the microcode uses that information to set up the CP following
* the preemption. When a ring is switched out, the CP will save the ringbuffer
* state back to the record. In this way, once the records are properly set up
* the CPU can quickly switch back and forth between ringbuffers by only
* updating a few registers (often only the wptr).
*
* These are the CPU aware registers in the record:
* @magic: Must always be 0x27C4BAFC
* @info: Type of the record - written 0 by the CPU, updated by the CP
* @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
* the CP
* @cntl: Value of RB_CNTL written by CPU, save/restored by CP
* @rptr: Value of RB_RPTR written by CPU, save/restored by CP
* @wptr: Value of RB_WPTR written by CPU, save/restored by CP
* @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
* @rbase: Value of RB_BASE written by CPU, save/restored by CP
* @counter: GPU address of the storage area for the performance counters
*/
struct a5xx_preempt_record {
uint32_t magic;
uint32_t info;
uint32_t data;
uint32_t cntl;
uint32_t rptr;
uint32_t wptr;
uint64_t rptr_addr;
uint64_t rbase;
uint64_t counter;
};
/* Magic identifier for the preemption record */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
/*
* Even though the structure above is only a few bytes, we need a full 64k to
* store the entire preemption record from the CP
*/
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
/*
* The preemption counter block is a storage area for the value of the
* preemption counters that are saved immediately before context switch. We
* append it on to the end of the allocation for the preemption record.
*/
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
......@@ -55,7 +145,22 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
return -ETIMEDOUT;
}
bool a5xx_idle(struct msm_gpu *gpu);
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);
/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
return !(preempt_state == PREEMPT_NONE ||
preempt_state == PREEMPT_ABORT);
}
#endif /* __A5XX_GPU_H__ */
......@@ -173,7 +173,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb;
struct msm_ringbuffer *ring = gpu->rb[0];
if (!a5xx_gpu->gpmu_dwords)
return 0;
......@@ -192,9 +192,9 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
gpu->funcs->flush(gpu);
gpu->funcs->flush(gpu, ring);
if (!a5xx_idle(gpu)) {
if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
gpu->name);
return -EINVAL;
......@@ -264,7 +264,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
return;
/* Get the firmware */
if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
if (IS_ERR(fw)) {
DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
gpu->name);
return;
......
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_gem.h"
#include "a5xx_gpu.h"
/*
* Try to transition the preemption state from old to new. Return
* true on success or false if the original state wasn't 'old'
*/
static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
enum preempt_state old, enum preempt_state new)
{
enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
old, new);
return (cur == old);
}
/*
* Force the preemption state to the specified state. This is used in cases
* where the current state is known and won't change
*/
static inline void set_preempt_state(struct a5xx_gpu *gpu,
enum preempt_state new)
{
/*
* preempt_state may be read by other cores trying to trigger a
* preemption or in the interrupt handler so barriers are needed
* before...
*/
smp_mb__before_atomic();
atomic_set(&gpu->preempt_state, new);
/* ... and after */
smp_mb__after_atomic();
}
/* Write the most recent wptr for the given ring into the hardware */
static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
unsigned long flags;
uint32_t wptr;
if (!ring)
return;
spin_lock_irqsave(&ring->lock, flags);
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
unsigned long flags;
int i;
for (i = 0; i < gpu->nr_rings; i++) {
bool empty;
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->lock, flags);
empty = (get_wptr(ring) == ring->memptrs->rptr);
spin_unlock_irqrestore(&ring->lock, flags);
if (!empty)
return ring;
}
return NULL;
}
static void a5xx_preempt_timer(unsigned long data)
{
struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
/* Try to trigger a preemption switch */
void a5xx_preempt_trigger(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
struct msm_ringbuffer *ring;
if (gpu->nr_rings == 1)
return;
/*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
return;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
/*
* If no ring is populated or the highest priority ring is the current
* one do nothing except to update the wptr to the latest and greatest
*/
if (!ring || (a5xx_gpu->cur_ring == ring)) {
/*
* It's possible that while a preemption request is in progress
* from an irq context, a user context trying to submit might
* fail to update the write pointer, because it determines
* that the preempt state is not PREEMPT_NONE.
*
* Close the race by introducing an intermediate
* state PREEMPT_ABORT to let the submit path
* know that the ringbuffer is not going to change
* and can safely update the write pointer.
*/
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
return;
}
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
/* Set the address of the incoming preemption record */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
a5xx_gpu->preempt_iova[ring->id]);
a5xx_gpu->next_ring = ring;
/* Start a timer to catch a stuck preemption */
mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
/* Set the preemption state to triggered */
set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
/* Make sure everything is written before hitting the button */
wmb();
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
{
uint32_t status;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
return;
/* Delete the preemption watchdog timer */
del_timer(&a5xx_gpu->preempt_timer);
/*
* The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
* firing the interrupt, but there is a non zero chance of a hardware
* condition or a software race that could set it again before we have a
* chance to finish. If that happens, log and go for recovery
*/
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
dev_err(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
}
a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
a5xx_gpu->next_ring = NULL;
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
}
/* Write a 0 to signal that we aren't switching pagetables */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
/* Always come up on rb 0 */
a5xx_gpu->cur_ring = gpu->rb[0];
}
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
struct drm_gem_object *bo = NULL;
u64 iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
/* Set up the defaults on the preemption record */
ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
ptr->info = 0;
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
return 0;
}
void a5xx_preempt_fini(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
if (!a5xx_gpu->preempt_bo[i])
continue;
msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
if (a5xx_gpu->preempt_iova[i])
msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
a5xx_gpu->preempt_bo[i] = NULL;
}
}
void a5xx_preempt_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
/* No preemption if we only have one ring */
if (gpu->nr_rings <= 1)
return;
for (i = 0; i < gpu->nr_rings; i++) {
if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
/*
* On any failure our adventure is over. Clean up and
* set nr_rings to 1 to force preemption off
*/
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
return;
}
}
setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
(unsigned long) a5xx_gpu);
}
......@@ -125,51 +125,24 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config;
struct adreno_rev rev;
const struct adreno_info *info;
struct msm_gpu *gpu = NULL;
struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
int ret;
if (!pdev) {
if (!gpu) {
dev_err(dev->dev, "no adreno device\n");
return NULL;
}
config = pdev->dev.platform_data;
rev = config->rev;
info = adreno_info(config->rev);
if (!info) {
dev_warn(dev->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
rev.core, rev.major, rev.minor, rev.patchid);
pm_runtime_get_sync(&pdev->dev);
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
DBG("Found GPU: %u.%u.%u.%u", rev.core, rev.major,
rev.minor, rev.patchid);
gpu = info->init(dev);
if (IS_ERR(gpu)) {
dev_warn(dev->dev, "failed to load adreno gpu\n");
gpu = NULL;
/* not fatal */
}
if (gpu) {
int ret;
pm_runtime_get_sync(&pdev->dev);
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
}
}
return gpu;
}
......@@ -282,6 +255,9 @@ static int adreno_get_pwrlevels(struct device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
struct msm_gpu *gpu;
u32 val;
int ret;
......@@ -302,13 +278,39 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return ret;
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
set_gpu_pdev(drm, to_platform_device(dev));
info = adreno_info(config.rev);
if (!info) {
dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
return -ENXIO;
}
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
return PTR_ERR(gpu);
}
dev_set_drvdata(dev, gpu);
return 0;
}
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_gpu *gpu = dev_get_drvdata(dev);
gpu->funcs->pm_suspend(gpu);
gpu->funcs->destroy(gpu);
set_gpu_pdev(dev_get_drvdata(master), NULL);
}
......
......@@ -21,8 +21,6 @@
#include "msm_gem.h"
#include "msm_mmu.h"
#define RB_SIZE SZ_32K
#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
......@@ -58,72 +56,181 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
return ret;
}
return -EINVAL;
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
char newname[strlen("qcom/") + strlen(fwname) + 1];
int ret;
sprintf(newname, "qcom/%s", fwname);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
* a potential timeout waiting for usermode helper)
*/
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_NEW)) {
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
dev_info(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
return fw;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
return ERR_PTR(ret);
}
}
/*
* Then try the legacy location without qcom/ prefix
*/
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
dev_info(drm->dev, "loaded %s from legacy location\n",
fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
return fw;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
fwname, ret);
return ERR_PTR(ret);
}
}
/*
* Finally fall back to request_firmware() for cases where the
* usermode helper is needed (I think mainly android)
*/
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
dev_info(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
return fw;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
return ERR_PTR(ret);
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
return ERR_PTR(-ENOENT);
}
static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
const struct firmware *fw;
if (adreno_gpu->pm4)
return 0;
fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
if (IS_ERR(fw))
return PTR_ERR(fw);
adreno_gpu->pm4 = fw;
fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
if (IS_ERR(fw)) {
release_firmware(adreno_gpu->pm4);
adreno_gpu->pm4 = NULL;
return PTR_ERR(fw);
}
adreno_gpu->pfp = fw;
return 0;
}
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
int ret, i;
DBG("%s", gpu->name);
ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
ret = adreno_load_fw(adreno_gpu);
if (ret)
return ret;
}
/* reset ringbuffer: */
gpu->rb->cur = gpu->rb->start;
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
if (!ring)
continue;
ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
if (ret) {
ring->iova = 0;
dev_err(gpu->dev->dev,
"could not map ringbuffer %d: %d\n", i, ret);
return ret;
}
ring->cur = ring->start;
ring->next = ring->start;
/* reset completed fence seqno: */
adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
adreno_gpu->memptrs->rptr = 0;
/* reset completed fence seqno: */
ring->memptrs->fence = ring->seqno;
ring->memptrs->rptr = 0;
}
/* Setup REG_CP_RB_CNTL: */
/*
* Setup REG_CP_RB_CNTL. The same value is used across targets (with
* the exception of A430 that disables the RPTR shadow) - the calculation
* for the ringbuffer size and block size is moved to msm_gpu.h for the
* pre-processor to deal with and the A430 variant is ORed in here
*/
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
MSM_GPU_RB_CNTL_DEFAULT |
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address: */
/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);
REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
if (!adreno_is_a430(adreno_gpu)) {
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
rbmemptr(adreno_gpu, rptr));
rbmemptr(gpu->rb[0], rptr));
}
return 0;
}
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return ring->cur - ring->start;
}
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
if (adreno_is_a430(adreno_gpu))
return adreno_gpu->memptrs->rptr = adreno_gpu_read(
return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
else
return adreno_gpu->memptrs->rptr;
return ring->memptrs->rptr;
}
uint32_t adreno_last_fence(struct msm_gpu *gpu)
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
return adreno_gpu->memptrs->fence;
return gpu->rb[0];
}
void adreno_recover(struct msm_gpu *gpu)
......@@ -149,7 +256,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb;
struct msm_ringbuffer *ring = submit->ring;
unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
......@@ -164,7 +271,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
......@@ -172,7 +279,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence->seqno);
OUT_RING(ring, submit->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
......@@ -188,8 +295,8 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(adreno_gpu, fence));
OUT_RING(ring, submit->fence->seqno);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
OUT_PKT3(ring, CP_INTERRUPT, 1);
......@@ -215,20 +322,23 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
gpu->funcs->flush(gpu);
gpu->funcs->flush(gpu, ring);
}
void adreno_flush(struct msm_gpu *gpu)
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr;
/* Copy the shadow to the actual register */
ring->cur = ring->next;
/*
* Mask wptr value that we calculate to fit in the HW range. This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb->next hasn't wrapped to zero yet
*/
wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
......@@ -236,17 +346,19 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
bool adreno_idle(struct msm_gpu *gpu)
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
if (!spin_until(get_rptr(adreno_gpu) == wptr))
if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
return false;
}
......@@ -261,10 +373,16 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
gpu->fctx->last_fence);
seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
seq_printf(m, "rb %d: fence: %d/%d\n", i,
ring->memptrs->fence, ring->seqno);
seq_printf(m, " rptr: %d\n",
get_rptr(adreno_gpu, ring));
seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
}
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
......@@ -290,16 +408,23 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
printk("revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
gpu->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("rb wptr: %d\n", get_wptr(gpu->rb));
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
printk("rb %d: fence: %d/%d\n", i,
ring->memptrs->fence,
ring->seqno);
printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
printk("rb wptr: %d\n", get_wptr(ring));
}
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
......@@ -322,28 +447,31 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
static uint32_t ring_freewords(struct msm_gpu *gpu)
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t size = gpu->rb->size / 4;
uint32_t wptr = get_wptr(gpu->rb);
uint32_t rptr = get_rptr(adreno_gpu);
struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
/* Use ring->next to calculate free size */
uint32_t wptr = ring->next - ring->start;
uint32_t rptr = get_rptr(adreno_gpu, ring);
return (rptr + (size - 1) - wptr) % size;
}
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
if (spin_until(ring_freewords(gpu) >= ndwords))
DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
if (spin_until(ring_freewords(ring) >= ndwords))
DRM_DEV_ERROR(ring->gpu->dev->dev,
"timeout waiting for space in ringbuffer %d\n",
ring->id);
}
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
adreno_gpu->funcs = funcs;
adreno_gpu->info = adreno_info(config->rev);
......@@ -366,59 +494,20 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
adreno_gpu_config.ringsz = RB_SIZE;
adreno_gpu_config.nr_rings = nr_rings;
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
adreno_gpu->info->pm4fw, ret);
return ret;
}
ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
adreno_gpu->info->pfpfw, ret);
return ret;
}
adreno_gpu->memptrs = msm_gem_kernel_new(drm,
sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
if (IS_ERR(adreno_gpu->memptrs)) {
ret = PTR_ERR(adreno_gpu->memptrs);
adreno_gpu->memptrs = NULL;
dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
}
return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
if (adreno_gpu->memptrs_bo) {
if (adreno_gpu->memptrs)
msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
if (adreno_gpu->memptrs_iova)
msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
}
release_firmware(adreno_gpu->pm4);
release_firmware(adreno_gpu->pfp);
msm_gpu_cleanup(gpu);
msm_gpu_cleanup(&adreno_gpu->base);
}
......@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
......@@ -82,14 +82,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
#define rbmemptr(adreno_gpu, member) \
((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
struct adreno_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
};
struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
......@@ -101,16 +93,30 @@ struct adreno_gpu {
/* interesting register offsets to dump: */
const unsigned int *registers;
/*
* Are we loading fw from legacy path? Prior to addition
* of gpu firmware to linux-firmware, the fw files were
* placed in the toplevel firmware directory, following qcom's
* android kernel. But linux-firmware preferred they be
* placed in a 'qcom' subdirectory.
*
* For backwards compatibility, we try first to load from
* the new path, using request_firmware_direct() to avoid
* any potential timeout waiting for usermode helper, then
* fall back to the old path (with direct load). And
* finally fall back to request_firmware() with the new
* path to allow the usermode helper.
*/
enum {
FW_LOCATION_UNKNOWN = 0,
FW_LOCATION_NEW, /* /lib/firmware/qcom/$fwfile */
FW_LOCATION_LEGACY, /* /lib/firmware/$fwfile */
FW_LOCATION_HELPER,
} fwloc;
/* firmware: */
const struct firmware *pm4, *pfp;
/* ringbuffer rptr/wptr: */
// TODO should this be in msm_ringbuffer? I think it would be
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
......@@ -196,22 +202,25 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu);
bool adreno_idle(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
......@@ -220,7 +229,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu);
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
adreno_wait_ring(ring->gpu, cnt+1);
adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
......@@ -228,14 +237,14 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
adreno_wait_ring(ring->gpu, 1);
adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
adreno_wait_ring(ring->gpu, cnt+1);
adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
......@@ -257,14 +266,14 @@ static inline u32 PM4_PARITY(u32 val)
static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
adreno_wait_ring(ring->gpu, cnt + 1);
adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, PKT4(regindx, cnt));
}
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
adreno_wait_ring(ring->gpu, cnt + 1);
adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
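For illustration, a typical type-7 packet emitted through the helper above looks like the following sketch (the wrapper function is hypothetical; CP_EVENT_WRITE and CACHE_FLUSH_TS are the generated opcode/event names used by the a5xx code):

/* sketch: have the CP write a fence value to memory once rendering completes */
static void example_emit_fence(struct msm_ringbuffer *ring,
			       uint64_t fence_iova, uint32_t seqno)
{
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);           /* opcode + 4 payload dwords */
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));  /* event id, bit 31 = raise IRQ */
	OUT_RING(ring, lower_32_bits(fence_iova));   /* 64-bit destination address */
	OUT_RING(ring, upper_32_bits(fence_iova));
	OUT_RING(ring, seqno);                       /* value the CP writes back */
}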
......@@ -323,6 +332,11 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
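A brief clarifying comment on the dword arithmetic in get_wptr() below (field types per msm_ringbuffer.h):

/*
 * Note: ring->cur and ring->start are uint32_t pointers, so the
 * subtraction already yields a count in dwords; MSM_GPU_RINGBUFFER_SZ
 * is in bytes, hence the >> 2 to express the ring size in dwords for
 * the wrap-around.
 */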
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}
/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
......
......@@ -14,7 +14,7 @@
#include "dsi_cfg.h"
static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss_clk", "iface_clk", "bus_clk",
"core_mmss", "iface", "bus",
};
static const struct msm_dsi_config apq8064_dsi_cfg = {
......@@ -34,7 +34,7 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
};
static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
"mdp_core", "iface", "bus", "core_mmss",
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
......@@ -55,7 +55,7 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
};
static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk",
"mdp_core", "iface", "bus",
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
......@@ -99,7 +99,7 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
* without it too. Figure out why it doesn't enable and uncomment below
*/
static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
"mdp_core", "iface", "bus", /* "core_mmss", */
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
......
......@@ -334,46 +334,46 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
const struct msm_dsi_config *cfg = cfg_hnd->cfg;
int i, ret = 0;
/* get bus clocks */
for (i = 0; i < cfg->num_bus_clks; i++) {
msm_host->bus_clks[i] = devm_clk_get(dev,
msm_host->bus_clks[i] = msm_clk_get(pdev,
cfg->bus_clk_names[i]);
if (IS_ERR(msm_host->bus_clks[i])) {
ret = PTR_ERR(msm_host->bus_clks[i]);
pr_err("%s: Unable to get %s, ret = %d\n",
pr_err("%s: Unable to get %s clock, ret = %d\n",
__func__, cfg->bus_clk_names[i], ret);
goto exit;
}
}
/* get link and source clocks */
msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
msm_host->byte_clk = msm_clk_get(pdev, "byte");
if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk);
pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
pr_err("%s: can't find dsi_byte clock. ret=%d\n",
__func__, ret);
msm_host->byte_clk = NULL;
goto exit;
}
msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(msm_host->pixel_clk)) {
ret = PTR_ERR(msm_host->pixel_clk);
pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
__func__, ret);
msm_host->pixel_clk = NULL;
goto exit;
}
msm_host->esc_clk = devm_clk_get(dev, "core_clk");
msm_host->esc_clk = msm_clk_get(pdev, "core");
if (IS_ERR(msm_host->esc_clk)) {
ret = PTR_ERR(msm_host->esc_clk);
pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
pr_err("%s: can't find dsi_esc clock. ret=%d\n",
__func__, ret);
msm_host->esc_clk = NULL;
goto exit;
......@@ -382,22 +382,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
if (!msm_host->pixel_clk_src) {
ret = -ENODEV;
pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
msm_host->src_clk = devm_clk_get(dev, "src_clk");
msm_host->src_clk = msm_clk_get(pdev, "src");
if (IS_ERR(msm_host->src_clk)) {
ret = PTR_ERR(msm_host->src_clk);
pr_err("%s: can't find dsi_src_clk. ret=%d\n",
pr_err("%s: can't find src clock. ret=%d\n",
__func__, ret);
msm_host->src_clk = NULL;
goto exit;
......@@ -406,7 +406,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
if (!msm_host->esc_clk_src) {
ret = -ENODEV;
pr_err("%s: can't get esc_clk_src. ret=%d\n",
pr_err("%s: can't get esc clock parent. ret=%d\n",
__func__, ret);
goto exit;
}
......@@ -414,7 +414,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
if (!msm_host->dsi_clk_src) {
ret = -ENODEV;
pr_err("%s: can't get dsi_clk_src. ret=%d\n",
pr_err("%s: can't get src clock parent. ret=%d\n",
__func__, ret);
}
}
......
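The msm_clk_get() helper that these hunks switch to is not part of this excerpt; per the merge description it keeps backward compatibility with the old "<name>_clk" DT names. A minimal sketch of that behaviour (an approximation, not the exact implementation):

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk = devm_clk_get(&pdev->dev, name);
	char name2[32];

	/* new-style name found (or a real error such as probe deferral) */
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	/* fall back to the legacy "<name>_clk" binding */
	snprintf(name2, sizeof(name2), "%s_clk", name);
	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use \"%s\" instead of \"%s\"\n",
			 name, name2);

	return clk;
}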
......@@ -482,7 +482,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
phy->ahb_clk = devm_clk_get(dev, "iface_clk");
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
......
......@@ -150,46 +150,46 @@ static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
static int edp_clk_init(struct edp_ctrl *ctrl)
{
struct device *dev = &ctrl->pdev->dev;
struct platform_device *pdev = ctrl->pdev;
int ret;
ctrl->aux_clk = devm_clk_get(dev, "core_clk");
ctrl->aux_clk = msm_clk_get(pdev, "core");
if (IS_ERR(ctrl->aux_clk)) {
ret = PTR_ERR(ctrl->aux_clk);
pr_err("%s: Can't find aux_clk, %d\n", __func__, ret);
pr_err("%s: Can't find core clock, %d\n", __func__, ret);
ctrl->aux_clk = NULL;
return ret;
}
ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk");
ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(ctrl->pixel_clk)) {
ret = PTR_ERR(ctrl->pixel_clk);
pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret);
pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
ctrl->pixel_clk = NULL;
return ret;
}
ctrl->ahb_clk = devm_clk_get(dev, "iface_clk");
ctrl->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(ctrl->ahb_clk)) {
ret = PTR_ERR(ctrl->ahb_clk);
pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret);
pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
ctrl->ahb_clk = NULL;
return ret;
}
ctrl->link_clk = devm_clk_get(dev, "link_clk");
ctrl->link_clk = msm_clk_get(pdev, "link");
if (IS_ERR(ctrl->link_clk)) {
ret = PTR_ERR(ctrl->link_clk);
pr_err("%s: Can't find link_clk, %d\n", __func__, ret);
pr_err("%s: Can't find link clock, %d\n", __func__, ret);
ctrl->link_clk = NULL;
return ret;
}
/* need mdp core clock to receive irq */
ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
if (IS_ERR(ctrl->mdp_core_clk)) {
ret = PTR_ERR(ctrl->mdp_core_clk);
pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret);
pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
ctrl->mdp_core_clk = NULL;
return ret;
}
......
......@@ -208,7 +208,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->hpd_clk_cnt; i++) {
struct clk *clk;
clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
......@@ -228,7 +228,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->pwr_clk_cnt; i++) {
struct clk *clk;
clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
......@@ -361,7 +361,7 @@ static const char *hpd_reg_names_none[] = {};
static struct hdmi_platform_config hdmi_tx_8660_config;
static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static struct hdmi_platform_config hdmi_tx_8960_config = {
HDMI_CFG(hpd_reg, 8960),
......@@ -370,8 +370,8 @@ static struct hdmi_platform_config hdmi_tx_8960_config = {
static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static struct hdmi_platform_config hdmi_tx_8974_config = {
......
......@@ -48,7 +48,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_clks; i++) {
struct clk *clk;
clk = devm_clk_get(dev, cfg->clk_names[i]);
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "failed to get phy clock: %s (%d)\n",
......
......@@ -48,7 +48,7 @@ static const char * const hdmi_phy_8960_reg_names[] = {
};
static const char * const hdmi_phy_8960_clk_names[] = {
"slave_iface_clk",
"slave_iface",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
......
......@@ -758,9 +758,7 @@ static const char * const hdmi_phy_8996_reg_names[] = {
};
static const char * const hdmi_phy_8996_clk_names[] = {
"mmagic_iface_clk",
"iface_clk",
"ref_clk",
"iface", "ref",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
......
......@@ -41,8 +41,7 @@ static const char * const hdmi_phy_8x74_reg_names[] = {
};
static const char * const hdmi_phy_8x74_clk_names[] = {
"iface_clk",
"alt_iface_clk"
"iface", "alt_iface"
};
const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
......
......@@ -290,6 +290,9 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp4_crtc->enabled))
return;
/* Disable/save vblank irq handling before power is disabled */
drm_crtc_vblank_off(crtc);
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
......@@ -308,6 +311,10 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
return;
mdp4_enable(mdp4_kms);
/* Restore vblank irq handling after power is enabled */
drm_crtc_vblank_on(crtc);
mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
crtc_flush(crtc);
......
......@@ -224,7 +224,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
return 0;
}
......@@ -55,18 +55,23 @@ struct mdp5_crtc {
struct completion pp_completion;
bool lm_cursor_enabled;
struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock;
/* current cursor being scanned out: */
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
uint32_t x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
......@@ -114,6 +119,8 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
if (!plane->state->visible)
continue;
flush_mask |= mdp5_plane_get_flush(plane);
}
......@@ -242,6 +249,9 @@ static void blend_setup(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp5_pipe right_pipe;
if (!plane->state->visible)
continue;
pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate;
stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
......@@ -422,11 +432,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp5_crtc->enabled))
return;
/* Disable/save vblank irq handling before power is disabled */
drm_crtc_vblank_off(crtc);
if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
mdp5_crtc->enabled = false;
}
......@@ -446,6 +459,29 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(dev);
if (mdp5_crtc->lm_cursor_enabled) {
/*
* Restore LM cursor state, as it might have been lost
* with suspend:
*/
if (mdp5_crtc->cursor.iova) {
unsigned long flags;
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
mdp5_ctl_set_cursor(mdp5_cstate->ctl,
&mdp5_cstate->pipeline, 0, true);
} else {
mdp5_ctl_set_cursor(mdp5_cstate->ctl,
&mdp5_cstate->pipeline, 0, false);
}
}
/* Restore vblank irq handling after power is enabled */
drm_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
......@@ -580,6 +616,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", crtc->name);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (!pstate->visible)
continue;
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
......@@ -723,6 +762,50 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
uint32_t x, y, width, height;
uint32_t roi_w, roi_h;
int lm;
assert_spin_locked(&mdp5_crtc->cursor.lock);
lm = mdp5_cstate->pipeline.mixer->lm;
x = mdp5_crtc->cursor.x;
y = mdp5_crtc->cursor.y;
width = mdp5_crtc->cursor.width;
height = mdp5_crtc->cursor.height;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
get_roi(crtc, &roi_w, &roi_h);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height)
......@@ -735,16 +818,18 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, stride;
uint64_t cursor_addr;
struct mdp5_ctl *ctl;
int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
int ret;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
bool cursor_enable = true;
unsigned long flags;
if (!mdp5_crtc->lm_cursor_enabled) {
dev_warn(dev->dev,
"cursor_set is deprecated with cursor planes\n");
return -EINVAL;
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
......@@ -761,6 +846,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
mdp5_crtc->cursor.iova = 0;
pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
......@@ -769,13 +855,11 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
ret = msm_gem_get_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
lm = mdp5_cstate->pipeline.mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
pm_runtime_get_sync(&pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
......@@ -785,22 +869,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
mdp5_crtc->cursor.width = width;
mdp5_crtc->cursor.height = height;
get_roi(crtc, &roi_w, &roi_h);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
......@@ -817,7 +886,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
crtc_flush(crtc, flush_mask);
end:
pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
......@@ -831,12 +900,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
struct drm_device *dev = crtc->dev;
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
if (!mdp5_crtc->lm_cursor_enabled) {
dev_warn(dev->dev,
"cursor_move is deprecated with cursor planes\n");
return -EINVAL;
}
/* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
......@@ -853,17 +928,12 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
pm_runtime_get_sync(&mdp5_kms->pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
crtc_flush(crtc, flush_mask);
pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
pm_runtime_put_sync(&mdp5_kms->pdev->dev);
return 0;
}
......@@ -943,16 +1013,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
.atomic_destroy_state = mdp5_crtc_destroy_state,
.atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.atomic_check = mdp5_crtc_atomic_check,
......@@ -1121,12 +1181,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
if (cursor_plane)
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
&mdp5_crtc_no_lm_cursor_funcs, NULL);
else
drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&mdp5_crtc_funcs, NULL);
mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
&mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
......
......@@ -384,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
return 0;
}
......
......@@ -54,7 +54,7 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
......@@ -72,7 +72,7 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
return 0;
}
......@@ -84,7 +84,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
......@@ -119,7 +119,7 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
return 0;
}
......@@ -132,5 +132,5 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
}
......@@ -125,7 +125,7 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
......@@ -496,12 +496,12 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
pm_runtime_put_autosuspend(dev);
pm_runtime_put_sync(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
dev_info(dev, "MDP5 version v%d.%d\n", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
......@@ -683,7 +683,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
ret = modeset_init(mdp5_kms);
if (ret) {
......
......@@ -17,19 +17,20 @@
#include "mdp5_kms.h"
struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_state *state;
struct mdp5_hw_pipe_state *old_state, *new_state;
struct mdp5_hw_pipe *hwpipe = NULL;
int i;
int i, j;
state = mdp5_get_state(s);
if (IS_ERR(state))
return ERR_CAST(state);
return PTR_ERR(state);
/* grab old_state after mdp5_get_state(), since now we hold lock: */
old_state = &mdp5_kms->state->hwpipe;
......@@ -64,31 +65,67 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
/* possible candidate, take the one with the
* fewest unneeded caps bits set:
*/
if (!hwpipe || (hweight_long(cur->caps & ~caps) <
hweight_long(hwpipe->caps & ~caps)))
hwpipe = cur;
if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
hweight_long((*hwpipe)->caps & ~caps))) {
bool r_found = false;
if (r_hwpipe) {
for (j = i + 1; j < mdp5_kms->num_hwpipes;
j++) {
struct mdp5_hw_pipe *r_cur =
mdp5_kms->hwpipes[j];
/* reject different types of hwpipes */
if (r_cur->caps != cur->caps)
continue;
/* respect priority, e.g. VIG0 > VIG1 */
if (cur->pipe > r_cur->pipe)
continue;
*r_hwpipe = r_cur;
r_found = true;
break;
}
}
if (!r_hwpipe || r_found)
*hwpipe = cur;
}
}
if (!hwpipe)
return ERR_PTR(-ENOMEM);
if (!(*hwpipe))
return -ENOMEM;
if (r_hwpipe && !(*r_hwpipe))
return -ENOMEM;
if (mdp5_kms->smp) {
int ret;
DBG("%s: alloc SMP blocks", hwpipe->name);
/* We don't support SMP and 2 hwpipes/plane together */
WARN_ON(r_hwpipe);
DBG("%s: alloc SMP blocks", (*hwpipe)->name);
ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
hwpipe->pipe, blkcfg);
(*hwpipe)->pipe, blkcfg);
if (ret)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
hwpipe->blkcfg = blkcfg;
(*hwpipe)->blkcfg = blkcfg;
}
DBG("%s: assign to plane %s for caps %x",
hwpipe->name, plane->name, caps);
new_state->hwpipe_to_plane[hwpipe->idx] = plane;
(*hwpipe)->name, plane->name, caps);
new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
return hwpipe;
if (r_hwpipe) {
DBG("%s: assign to right of plane %s for caps %x",
(*r_hwpipe)->name, plane->name, caps);
new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
}
return 0;
}
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
......
......@@ -44,9 +44,10 @@ struct mdp5_hw_pipe_state {
struct drm_plane *hwpipe_to_plane[SSPP_MAX];
};
struct mdp5_hw_pipe *__must_check
mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg);
int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe);
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
......
......@@ -31,15 +31,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest);
static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx);
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
......@@ -254,18 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state,
};
static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
.update_plane = mdp5_update_cursor_plane_legacy,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.atomic_set_property = mdp5_plane_atomic_set_property,
.atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
.atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
......@@ -414,31 +393,30 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
struct mdp5_hw_pipe *old_right_hwpipe =
mdp5_state->r_hwpipe;
mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
plane, caps, blkcfg);
if (IS_ERR(mdp5_state->hwpipe)) {
DBG("%s: failed to assign hwpipe!", plane->name);
return PTR_ERR(mdp5_state->hwpipe);
struct mdp5_hw_pipe *new_hwpipe = NULL;
struct mdp5_hw_pipe *new_right_hwpipe = NULL;
ret = mdp5_pipe_assign(state->state, plane, caps,
blkcfg, &new_hwpipe,
need_right_hwpipe ?
&new_right_hwpipe : NULL);
if (ret) {
DBG("%s: failed to assign hwpipe(s)!",
plane->name);
return ret;
}
if (need_right_hwpipe) {
mdp5_state->r_hwpipe =
mdp5_pipe_assign(state->state, plane,
caps, blkcfg);
if (IS_ERR(mdp5_state->r_hwpipe)) {
DBG("%s: failed to assign right hwpipe",
plane->name);
return PTR_ERR(mdp5_state->r_hwpipe);
}
} else {
mdp5_state->hwpipe = new_hwpipe;
if (need_right_hwpipe)
mdp5_state->r_hwpipe = new_right_hwpipe;
else
/*
* set it to NULL so that the driver knows we
* don't have a right hwpipe when committing a
* new state
*/
mdp5_state->r_hwpipe = NULL;
}
mdp5_pipe_release(state->state, old_hwpipe);
mdp5_pipe_release(state->state, old_right_hwpipe);
......@@ -487,11 +465,98 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
}
}
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_crtc_state *crtc_state;
struct drm_rect clip;
int min_scale, max_scale;
int ret;
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (!crtc_state->active)
return -EINVAL;
mdp5_state = to_mdp5_plane_state(state);
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_state->hwpipe)
return -EINVAL;
/* only allow changing of position (crtc x/y or src x/y) in fast path */
if (plane->state->crtc != state->crtc ||
plane->state->src_w != state->src_w ||
plane->state->src_h != state->src_h ||
plane->state->crtc_w != state->crtc_w ||
plane->state->crtc_h != state->crtc_h ||
!plane->state->fb ||
plane->state->fb != state->fb)
return -EINVAL;
clip.x1 = 0;
clip.y1 = 0;
clip.x2 = crtc_state->adjusted_mode.hdisplay;
clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
ret = drm_plane_helper_check_state(state, &clip, min_scale,
max_scale, true, true);
if (ret)
return ret;
/*
* if the visibility of the plane changes (i.e., if the cursor is
* clipped out completely), we can't take the async path because
* we need to stage/unstage the plane from the Layer Mixer(s). We
* also assign/unassign the hwpipe(s) tied to the plane. We avoid
* taking the fast path for both of these reasons.
*/
if (state->visible != plane->state->visible)
return -EINVAL;
return 0;
}
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
mdp5_crtc_get_pipeline(plane->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
&new_state->src, &new_state->dst);
WARN_ON(ret < 0);
ctl = mdp5_crtc_get_ctl(new_state->crtc);
mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane->state) =
*to_mdp5_plane_state(new_state);
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
.atomic_async_check = mdp5_plane_atomic_async_check,
.atomic_async_update = mdp5_plane_atomic_async_update,
};
static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
......@@ -996,84 +1061,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
return ret;
}
static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state, *new_plane_state;
struct mdp5_plane_state *mdp5_pstate;
struct drm_crtc_state *crtc_state = crtc->state;
int ret;
if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
goto slow;
plane_state = plane->state;
mdp5_pstate = to_mdp5_plane_state(plane_state);
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_pstate->hwpipe)
goto slow;
/* only allow changing of position(crtc x/y or src x/y) in fast path */
if (plane_state->crtc != crtc ||
plane_state->src_w != src_w ||
plane_state->src_h != src_h ||
plane_state->crtc_w != crtc_w ||
plane_state->crtc_h != crtc_h ||
!plane_state->fb ||
plane_state->fb != fb)
goto slow;
new_plane_state = mdp5_plane_duplicate_state(plane);
if (!new_plane_state)
return -ENOMEM;
new_plane_state->src_x = src_x;
new_plane_state->src_y = src_y;
new_plane_state->src_w = src_w;
new_plane_state->src_h = src_h;
new_plane_state->crtc_x = crtc_x;
new_plane_state->crtc_y = crtc_y;
new_plane_state->crtc_w = crtc_w;
new_plane_state->crtc_h = crtc_h;
ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
if (ret)
goto slow_free;
if (new_plane_state->visible) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
ret = mdp5_plane_mode_set(plane, crtc, fb,
&new_plane_state->src,
&new_plane_state->dst);
WARN_ON(ret < 0);
ctl = mdp5_crtc_get_ctl(crtc);
mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane_state) =
*to_mdp5_plane_state(new_plane_state);
mdp5_plane_destroy_state(plane, new_plane_state);
return 0;
slow_free:
mdp5_plane_destroy_state(plane, new_plane_state);
slow:
return drm_atomic_helper_update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
/*
* Use this func and the one below only after the atomic state has been
* successfully swapped
......@@ -1133,16 +1120,9 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
if (type == DRM_PLANE_TYPE_CURSOR)
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_cursor_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
NULL, type, NULL);
else
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
NULL, type, NULL);
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
NULL, type, NULL);
if (ret)
goto fail;
......
......@@ -146,35 +146,6 @@ static void commit_worker(struct work_struct *work)
complete_commit(container_of(work, struct msm_commit, work), true);
}
/*
* this func is identical to the drm_atomic_helper_check, but we keep this
* because we might eventually need to have a more finegrained check
* sequence without using the atomic helpers.
*
* In the past, we first called drm_atomic_helper_check_planes, and then
* drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's
* ->atomic_check could update ->mode_changed for pixel format changes.
* This, however, isn't needed now because if there is a pixel format change,
* we just assign a new hwpipe for it with a new SMP allocation. We might
* eventually hit a condition where we would need to do a full modeset if
* we run out of planes. There, we'd probably need to set mode_changed.
*/
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
return ret;
}
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
......@@ -202,6 +173,18 @@ int msm_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
/*
* Note that plane->atomic_async_check() should fail if we need
* to re-assign hwpipe or anything that touches global atomic
* state, so we'll never go down the async update path in those
* cases.
*/
if (state->async_update) {
drm_atomic_helper_async_commit(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
return 0;
}
c = commit_init(state);
if (!c) {
ret = -ENOMEM;
......
......@@ -29,9 +29,12 @@
* - 1.0.0 - initial interface
* - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
* - 1.2.0 - adds explicit fence support for submit ioctl
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
*/
#define MSM_VERSION_MAJOR 1
#define MSM_VERSION_MINOR 2
#define MSM_VERSION_MINOR 3
#define MSM_VERSION_PATCHLEVEL 0
static void msm_fb_output_poll_changed(struct drm_device *dev)
......@@ -44,7 +47,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
.atomic_check = msm_atomic_check,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
.atomic_state_alloc = msm_atomic_state_alloc,
.atomic_state_clear = msm_atomic_state_clear,
......@@ -211,7 +214,6 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
......@@ -253,15 +255,6 @@ static int msm_drm_uninit(struct device *dev)
if (kms && kms->funcs)
kms->funcs->destroy(kms);
if (gpu) {
mutex_lock(&ddev->struct_mutex);
// XXX what do we do here?
//pm_runtime_enable(&pdev->dev);
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
}
if (priv->vram.paddr) {
unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm);
......@@ -514,24 +507,37 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
static int context_init(struct drm_device *dev, struct drm_file *file)
{
struct msm_file_private *ctx;
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
msm_submitqueue_init(dev, ctx);
file->driver_priv = ctx;
return 0;
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
return context_init(dev, file);
}
static void context_close(struct msm_file_private *ctx)
{
msm_submitqueue_close(ctx);
kfree(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
......@@ -542,7 +548,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
priv->lastctx = NULL;
mutex_unlock(&dev->struct_mutex);
kfree(ctx);
context_close(ctx);
}
static void msm_lastclose(struct drm_device *dev)
......@@ -737,16 +743,27 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
struct msm_gpu_submitqueue *queue;
struct msm_gpu *gpu = priv->gpu;
int ret;
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
if (!priv->gpu)
if (!gpu)
return 0;
return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
true);
msm_submitqueue_put(queue);
return ret;
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
......@@ -787,6 +804,28 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
return ret;
}
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_submitqueue *args = data;
if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
return -EINVAL;
return msm_submitqueue_create(dev, file->driver_priv, args->prio,
args->flags, &args->id);
}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
{
u32 id = *(u32 *) data;
return msm_submitqueue_remove(file->driver_priv, id);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
......@@ -796,6 +835,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
......
......@@ -56,11 +56,9 @@ struct msm_gem_address_space;
struct msm_gem_vma;
struct msm_file_private {
/* currently we don't do anything useful with this.. but when
* per-context address spaces are supported we'd keep track of
* the context's page-tables here.
*/
int dummy;
rwlock_t queuelock;
struct list_head submitqueues;
int queueid;
};
enum msm_mdp_plane_property {
......@@ -76,6 +74,8 @@ struct msm_vblank_ctrl {
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
struct msm_drm_private {
struct drm_device *dev;
......@@ -108,7 +108,8 @@ struct msm_drm_private {
struct drm_fb_helper *fbdev;
struct msm_rd_state *rd;
struct msm_rd_state *rd; /* debugfs to dump all submits */
struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/* list of GEM objects: */
......@@ -154,20 +155,12 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
/* task holding struct_mutex.. currently only used in submit path
* to detect and reject faults from copy_from_user() for submit
* ioctl.
*/
struct task_struct *struct_mutex_task;
};
struct msm_format {
uint32_t pixel_format;
};
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
......@@ -219,6 +212,7 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
......@@ -303,7 +297,8 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
void msm_rd_dump_submit(struct msm_gem_submit *submit);
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
......@@ -319,6 +314,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
struct msm_gpu_submitqueue;
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
void msm_submitqueue_destroy(struct kref *kref);
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
......
......@@ -31,7 +31,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
fctx->name = name;
strncpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
......
......@@ -22,7 +22,7 @@
struct msm_fence_context {
struct drm_device *dev;
const char *name;
char name[32];
unsigned context;
/* last_fence == completed_fence --> no pending work */
uint32_t last_fence; /* last assigned fence */
......
......@@ -470,14 +470,16 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return ret;
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
mutex_lock(&msm_obj->lock);
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
if (WARN_ON(msm_obj->madv > madv)) {
dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
}
......@@ -513,6 +515,22 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
return ERR_PTR(ret);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
return get_vaddr(obj, MSM_MADV_WILLNEED);
}
/*
* Don't use this! It is for the very special case of dumping
* submits from GPU hangs or faults, where the bo may already
* be MSM_MADV_DONTNEED, but we know the buffer is still on the
* active list.
*/
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
......@@ -610,17 +628,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence;
int i, ret;
if (!exclusive) {
/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
* which makes this a slightly strange place to call it. OTOH this
* is a convenient can-fail point to hook it in. (And similar to
* how etnaviv and nouveau handle this.)
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv);
......
......@@ -138,12 +138,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
struct msm_gem_submit {
struct drm_device *dev;
struct msm_gpu *gpu;
struct list_head node; /* node in gpu submit_list */
struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
struct dma_fence *fence;
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
......
......@@ -31,7 +31,8 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
......@@ -49,6 +50,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
......@@ -66,6 +69,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
kfree(submit);
}
......@@ -156,7 +161,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
return ret;
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
int i, bool backoff)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
......@@ -166,7 +172,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
if (!(submit->bos[i].flags & BO_VALID))
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
......@@ -201,10 +207,10 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
fail:
for (; i >= 0; i--)
submit_unlock_unpin_bo(submit, i);
submit_unlock_unpin_bo(submit, i, true);
if (slow_locked > 0)
submit_unlock_unpin_bo(submit, slow_locked);
submit_unlock_unpin_bo(submit, slow_locked, true);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
......@@ -221,7 +227,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
int i, ret = 0;
......@@ -229,7 +235,22 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
if (!write) {
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
if (no_implicit)
continue;
ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
write);
if (ret)
break;
}
......@@ -373,7 +394,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
......@@ -391,6 +412,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_gpu *gpu = priv->gpu;
struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
unsigned i;
int ret;
......@@ -407,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
queue = msm_submitqueue_get(ctx, args->queueid);
if (!queue)
return -ENOENT;
ring = gpu->rb[queue->prio];
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);
......@@ -417,7 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
......@@ -435,9 +464,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
}
priv->struct_mutex_task = current;
submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out_unlock;
......@@ -451,11 +479,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
goto out;
}
ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
if (ret)
goto out;
ret = submit_pin_objects(submit);
if (ret)
......@@ -522,7 +548,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
submit->fence = msm_fence_alloc(gpu->fctx);
submit->fence = msm_fence_alloc(ring->fctx);
if (IS_ERR(submit->fence)) {
ret = PTR_ERR(submit->fence);
submit->fence = NULL;
......@@ -555,7 +581,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
......@@ -20,6 +20,8 @@
#include "msm_mmu.h"
#include "msm_fence.h"
#include <linux/string_helpers.h>
/*
* Power Management:
......@@ -221,33 +223,102 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
* Hangcheck detection for locked gpu:
*/
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
uint32_t fence)
{
struct msm_gem_submit *submit;
list_for_each_entry(submit, &ring->submits, node) {
if (submit->seqno > fence)
break;
msm_update_fence(submit->ring->fctx,
submit->fence->seqno);
}
}
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
struct msm_gem_submit *submit;
WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
list_for_each_entry(submit, &ring->submits, node)
if (submit->seqno == fence)
return submit;
return NULL;
}
static void retire_submits(struct msm_gpu *gpu);
static void recover_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
uint32_t fence = gpu->funcs->last_fence(gpu);
msm_update_fence(gpu->fctx, fence + 1);
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
int i;
mutex_lock(&dev->struct_mutex);
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
list_for_each_entry(submit, &gpu->submit_list, node) {
if (submit->fence->seqno == (fence + 1)) {
struct task_struct *task;
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
dev_err(dev->dev, "%s: offending task: %s\n",
gpu->name, task->comm);
}
rcu_read_unlock();
break;
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
struct task_struct *task;
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
char *cmd;
/*
* Slightly annoying: in other paths, like
* mmap'ing gem buffers, mmap_sem is acquired
* before struct_mutex, which means we can't
* hold struct_mutex across the call to
* get_cmdline(). But submits are retired
* from the same in-order workqueue, so we can
* safely drop the lock here without worrying
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
mutex_lock(&dev->struct_mutex);
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, task->comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
"offending task: %s (%s)", task->comm, cmd);
} else {
msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
rcu_read_unlock();
}
/*
* Update all the rings with the latest and greatest fence.. this
* needs to happen after msm_rd_dump_submit() to ensure that the
* bo's referenced by the offending submit are still around.
*/
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
uint32_t fence = ring->memptrs->fence;
/*
* For the current (faulting?) ring/submit advance the fence by
* one more to clear the faulting submit
*/
if (ring == cur_ring)
fence++;
update_fences(gpu, ring, fence);
}
if (msm_gpu_active(gpu)) {
......@@ -258,9 +329,15 @@ static void recover_worker(struct work_struct *work)
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
/* replay the remaining submits after the one that hung: */
list_for_each_entry(submit, &gpu->submit_list, node) {
gpu->funcs->submit(gpu, submit, NULL);
/*
* Replay all remaining submits starting with highest priority
* ring
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
list_for_each_entry(submit, &ring->submits, node)
gpu->funcs->submit(gpu, submit, NULL);
}
}
......@@ -281,25 +358,27 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
uint32_t fence = gpu->funcs->last_fence(gpu);
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
uint32_t fence = ring->memptrs->fence;
if (fence != gpu->hangcheck_fence) {
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
gpu->hangcheck_fence = fence;
} else if (fence < gpu->fctx->last_fence) {
ring->hangcheck_fence = fence;
} else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
gpu->hangcheck_fence = fence;
dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
gpu->name);
ring->hangcheck_fence = fence;
dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
gpu->name, gpu->fctx->last_fence);
gpu->name, ring->seqno);
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
if (gpu->fctx->last_fence > gpu->hangcheck_fence)
if (ring->seqno > ring->hangcheck_fence)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
......@@ -428,19 +507,18 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
static void retire_submits(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
struct msm_gem_submit *submit, *tmp;
int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
while (!list_empty(&gpu->submit_list)) {
struct msm_gem_submit *submit;
/* Retire the submits, starting with the highest priority ring */
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
if (dma_fence_is_signaled(submit->fence)) {
retire_submit(gpu, submit);
} else {
break;
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
if (dma_fence_is_signaled(submit->fence))
retire_submit(gpu, submit);
}
}
}
......@@ -449,9 +527,10 @@ static void retire_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
struct drm_device *dev = gpu->dev;
uint32_t fence = gpu->funcs->last_fence(gpu);
int i;
msm_update_fence(gpu->fctx, fence);
for (i = 0; i < gpu->nr_rings; i++)
update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
......@@ -472,6 +551,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
......@@ -480,9 +560,11 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gpu_hw_init(gpu);
list_add_tail(&submit->node, &gpu->submit_list);
submit->seqno = ++ring->seqno;
msm_rd_dump_submit(submit);
list_add_tail(&submit->node, &ring->submits);
msm_rd_dump_submit(priv->rd, submit, NULL);
update_sw_cntrs(gpu);
......@@ -605,7 +687,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
int ret;
int i, ret, nr_rings = config->nr_rings;
void *memptrs;
uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
......@@ -613,18 +697,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
gpu->fctx = msm_fence_context_alloc(drm, name);
if (IS_ERR(gpu->fctx)) {
ret = PTR_ERR(gpu->fctx);
gpu->fctx = NULL;
goto fail;
}
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
INIT_LIST_HEAD(&gpu->submit_list);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
......@@ -689,36 +766,79 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
/* Create ringbuffer: */
gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
if (IS_ERR(gpu->rb)) {
ret = PTR_ERR(gpu->rb);
gpu->rb = NULL;
dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
nr_rings = ARRAY_SIZE(gpu->rb);
}
/* Create ringbuffer(s): */
for (i = 0; i < nr_rings; i++) {
gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
dev_err(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
memptrs += sizeof(struct msm_rbmemptrs);
memptrs_iova += sizeof(struct msm_rbmemptrs);
}
gpu->nr_rings = nr_rings;
return 0;
fail:
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
}
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
platform_set_drvdata(pdev, NULL);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
int i;
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
if (gpu->rb) {
if (gpu->rb_iova)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
}
if (gpu->aspace) {
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0);
msm_gem_address_space_put(gpu->aspace);
......
......@@ -33,7 +33,7 @@ struct msm_gpu_config {
const char *irqname;
uint64_t va_start;
uint64_t va_end;
unsigned int ringsz;
unsigned int nr_rings;
};
/* So far, with hardware that I've seen to date, we can have:
......@@ -57,9 +57,9 @@ struct msm_gpu_funcs {
int (*pm_resume)(struct msm_gpu *gpu);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu);
void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
uint32_t (*last_fence)(struct msm_gpu *gpu);
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
......@@ -86,16 +86,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
/* ringbuffer: */
struct msm_ringbuffer *rb;
uint64_t rb_iova;
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
/* fencing: */
struct msm_fence_context *fctx;
/* does gpu need hw_init? */
bool needs_hw_init;
......@@ -126,15 +122,31 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
uint32_t hangcheck_fence;
struct work_struct recover_work;
struct list_head submit_list;
struct drm_gem_object *memptrs_bo;
};
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
#define MSM_GPU_RB_CNTL_DEFAULT \
(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
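A worked expansion of the default value, as an illustrative sketch only (assuming SZ_32K == 32768 and the kernel's ilog2()):
/*
 * Illustrative arithmetic, not new code:
 *   ilog2(MSM_GPU_RINGBUFFER_SZ / 8)      = ilog2(32768 / 8) = ilog2(4096) = 12
 *   ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8) = ilog2(32 / 8)    = ilog2(4)    = 2
 * so MSM_GPU_RB_CNTL_DEFAULT packs BUFSZ = 12 and BLKSZ = 2 (both the log2 of
 * a size expressed in the 8-byte units the macro divides by).
 */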
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
if (ring->seqno > ring->memptrs->fence)
return true;
}
return false;
}
/* Perf-Counters:
......@@ -150,6 +162,15 @@ struct msm_gpu_perfcntr {
const char *name;
};
struct msm_gpu_submitqueue {
int id;
u32 flags;
u32 prio;
int faults;
struct list_head node;
struct kref ref;
};
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
......@@ -223,4 +244,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);
static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
if (queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
#endif /* __MSM_GPU_H__ */
......@@ -19,11 +19,17 @@
*
* tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
*
* To log the cmdstream in a format that is understood by freedreno/cffdump
* to log the cmdstream in a format that is understood by freedreno/cffdump
 * utility. By comparing the last successfully completed fence # to the
* cmdstream for the next fence, you can narrow down which process and submit
* caused the gpu crash/lockup.
*
* Additionally:
*
* tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
*
* will capture just the cmdstream from submits which triggered a GPU hang.
*
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
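As a hedged illustration of how the new hangrd node might be consumed from userspace (this is not part of the series; the DRI minor is assumed to be 0 and error handling is minimal):
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
/* Minimal sketch: copy hangrd output into a cffdump-compatible .rd file.
 * Assumes debugfs is mounted at /sys/kernel/debug and the GPU is minor 0. */
int main(void)
{
	int in = open("/sys/kernel/debug/dri/0/hangrd", O_RDONLY);
	int out = open("hang.rd", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	char buf[4096];
	ssize_t n;
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* read() only returns data once a submit has triggered a GPU hang */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		write(out, buf, n);
	close(in);
	close(out);
	return 0;
}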
......@@ -212,53 +218,89 @@ static const struct file_operations rd_debugfs_fops = {
.release = rd_release,
};
int msm_rd_debugfs_init(struct drm_minor *minor)
static void rd_cleanup(struct msm_rd_state *rd)
{
if (!rd)
return;
mutex_destroy(&rd->read_lock);
kfree(rd);
}
static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
struct dentry *ent;
/* only create on first minor: */
if (priv->rd)
return 0;
int ret = 0;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
rd->dev = minor->dev;
rd->fifo.buf = rd->buf;
mutex_init(&rd->read_lock);
priv->rd = rd;
init_waitqueue_head(&rd->fifo_event);
ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
minor->debugfs_root, rd, &rd_debugfs_fops);
if (!ent) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n",
minor->debugfs_root);
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
minor->debugfs_root, name);
ret = -ENOMEM;
goto fail;
}
return rd;
fail:
rd_cleanup(rd);
return ERR_PTR(ret);
}
int msm_rd_debugfs_init(struct drm_minor *minor)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
int ret;
/* only create on first minor: */
if (priv->rd)
return 0;
rd = rd_init(minor, "rd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->rd = rd;
rd = rd_init(minor, "hangrd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->hangrd = rd;
return 0;
fail:
msm_rd_debugfs_cleanup(priv);
return -1;
return ret;
}
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
struct msm_rd_state *rd = priv->rd;
if (!rd)
return;
rd_cleanup(priv->rd);
priv->rd = NULL;
mutex_destroy(&rd->read_lock);
kfree(rd);
rd_cleanup(priv->hangrd);
priv->hangrd = NULL;
}
static void snapshot_buf(struct msm_rd_state *rd,
......@@ -268,10 +310,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
buf = msm_gem_get_vaddr(&obj->base);
if (IS_ERR(buf))
return;
if (iova) {
buf += iova - submit->bos[idx].iova;
} else {
......@@ -279,20 +317,33 @@ static void snapshot_buf(struct msm_rd_state *rd,
size = obj->base.size;
}
/*
 * Always write the GPUADDR header so we can get a complete list of all the
* buffers in the cmd
*/
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
buf = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(buf))
return;
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
}
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
struct drm_device *dev = submit->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_rd_state *rd = priv->rd;
char msg[128];
struct task_struct *task;
char msg[256];
int i, n;
if (!rd->open)
......@@ -303,23 +354,32 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
*/
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, current->comm, task_pid_nr(current),
submit->fence->seqno);
if (fmt) {
va_list args;
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
va_start(args, fmt);
n = vsnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
if (rd_full) {
for (i = 0; i < submit->nr_bos; i++) {
/* buffers that are written to probably don't start out
* with anything interesting:
*/
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
continue;
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
}
snapshot_buf(rd, submit, i, 0, 0);
}
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; rd_full && i < submit->nr_bos; i++)
snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
......
......@@ -18,13 +18,15 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
char name[32];
int ret;
if (WARN_ON(!is_power_of_2(size)))
return ERR_PTR(-EINVAL);
/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
......@@ -33,32 +35,46 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
ring->id = id;
/* Pass NULL for the iova pointer - we will map it later */
ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
gpu->aspace, &ring->bo, NULL);
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
ring->end = ring->start + (size / 4);
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
ring->size = size;
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
INIT_LIST_HEAD(&ring->submits);
spin_lock_init(&ring->lock);
snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
ring->fctx = msm_fence_context_alloc(gpu->dev, name);
return ring;
fail:
if (ring)
msm_ringbuffer_destroy(ring);
msm_ringbuffer_destroy(ring);
return ERR_PTR(ret);
}
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
if (IS_ERR_OR_NULL(ring))
return;
msm_fence_context_free(ring->fctx);
if (ring->bo) {
msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
}
......
......@@ -20,14 +20,31 @@
#include "msm_drv.h"
#define rbmemptr(ring, member) \
((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
};
struct msm_ringbuffer {
struct msm_gpu *gpu;
int size;
int id;
struct drm_gem_object *bo;
uint32_t *start, *end, *cur;
uint32_t *start, *end, *cur, *next;
struct list_head submits;
uint64_t iova;
uint32_t seqno;
uint32_t hangcheck_fence;
struct msm_rbmemptrs *memptrs;
uint64_t memptrs_iova;
struct msm_fence_context *fctx;
spinlock_t lock;
};
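A small illustrative sketch of what the rbmemptr() helper resolves to (nothing new here, just spelling out the macro; example_fence_iova() is a hypothetical name used only for illustration):
/*
 * rbmemptr(ring, fence) expands to
 *   ring->memptrs_iova + offsetof(struct msm_rbmemptrs, fence)
 * i.e. the GPU-visible address of the per-ring fence slot, whose value the
 * CPU side reads back through ring->memptrs->fence.
 */
static inline uint64_t example_fence_iova(struct msm_ringbuffer *ring)
{
	return rbmemptr(ring, fence);
}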
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
......@@ -35,9 +52,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
if (ring->cur == ring->end)
ring->cur = ring->start;
*(ring->cur++) = data;
/*
* ring->next points to the current command being written - it won't be
* committed as ring->cur until the flush
*/
if (ring->next == ring->end)
ring->next = ring->start;
*(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kref.h>
#include "msm_gpu.h"
void msm_submitqueue_destroy(struct kref *kref)
{
struct msm_gpu_submitqueue *queue = container_of(kref,
struct msm_gpu_submitqueue, ref);
kfree(queue);
}
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id)
{
struct msm_gpu_submitqueue *entry;
if (!ctx)
return NULL;
read_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
if (entry->id == id) {
kref_get(&entry->ref);
read_unlock(&ctx->queuelock);
return entry;
}
}
read_unlock(&ctx->queuelock);
return NULL;
}
void msm_submitqueue_close(struct msm_file_private *ctx)
{
struct msm_gpu_submitqueue *entry, *tmp;
if (!ctx)
return;
/*
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
msm_submitqueue_put(entry);
}
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
if (!ctx)
return -ENODEV;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
return -ENOMEM;
kref_init(&queue->ref);
queue->flags = flags;
if (priv->gpu) {
if (prio >= priv->gpu->nr_rings)
return -EINVAL;
queue->prio = prio;
}
write_lock(&ctx->queuelock);
queue->id = ctx->queueid++;
if (id)
*id = queue->id;
list_add_tail(&queue->node, &ctx->submitqueues);
write_unlock(&ctx->queuelock);
return 0;
}
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
int default_prio;
if (!ctx)
return 0;
/*
 * Select priority 2 as the "default priority"; if there are fewer than
 * three rings, clamp it to the lowest priority (nr_rings - 1) instead
*/
default_prio = priv->gpu ?
clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
INIT_LIST_HEAD(&ctx->submitqueues);
rwlock_init(&ctx->queuelock);
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
if (!ctx)
return 0;
/*
* id 0 is the "default" queue and can't be destroyed
* by the user
*/
if (!id)
return -ENOENT;
write_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
if (entry->id == id) {
list_del(&entry->node);
write_unlock(&ctx->queuelock);
msm_submitqueue_put(entry);
return 0;
}
}
write_unlock(&ctx->queuelock);
return -ENOENT;
}
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MSM_BUS_IDS_H
#define __MSM_BUS_IDS_H
/* Aggregation types */
#define AGG_SCHEME_NONE 0
#define AGG_SCHEME_LEG 1
#define AGG_SCHEME_1 2
/* Topology related enums */
#define MSM_BUS_FAB_DEFAULT 0
#define MSM_BUS_FAB_APPSS 0
#define MSM_BUS_FAB_SYSTEM 1024
#define MSM_BUS_FAB_MMSS 2048
#define MSM_BUS_FAB_SYSTEM_FPB 3072
#define MSM_BUS_FAB_CPSS_FPB 4096
#define MSM_BUS_FAB_BIMC 0
#define MSM_BUS_FAB_SYS_NOC 1024
#define MSM_BUS_FAB_MMSS_NOC 2048
#define MSM_BUS_FAB_OCMEM_NOC 3072
#define MSM_BUS_FAB_PERIPH_NOC 4096
#define MSM_BUS_FAB_CONFIG_NOC 5120
#define MSM_BUS_FAB_OCMEM_VNOC 6144
#define MSM_BUS_FAB_MMSS_AHB 2049
#define MSM_BUS_FAB_A0_NOC 6145
#define MSM_BUS_FAB_A1_NOC 6146
#define MSM_BUS_FAB_A2_NOC 6147
#define MSM_BUS_FAB_GNOC 6148
#define MSM_BUS_FAB_CR_VIRT 6149
#define MSM_BUS_MASTER_FIRST 1
#define MSM_BUS_MASTER_AMPSS_M0 1
#define MSM_BUS_MASTER_AMPSS_M1 2
#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
#define MSM_BUS_MASTER_SPS 6
#define MSM_BUS_MASTER_ADM_PORT0 7
#define MSM_BUS_MASTER_ADM_PORT1 8
#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
#define MSM_BUS_MASTER_ADM1_PORT1 10
#define MSM_BUS_MASTER_LPASS_PROC 11
#define MSM_BUS_MASTER_MSS_PROCI 12
#define MSM_BUS_MASTER_MSS_PROCD 13
#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
#define MSM_BUS_MASTER_LPASS 15
#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
#define MSM_BUS_MASTER_ADM1_CI 19
#define MSM_BUS_MASTER_ADM0_CI 20
#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
#define MSM_BUS_MASTER_MDP_PORT0 22
#define MSM_BUS_MASTER_MDP_PORT1 23
#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
#define MSM_BUS_MASTER_ROTATOR 25
#define MSM_BUS_MASTER_GRAPHICS_3D 26
#define MSM_BUS_MASTER_JPEG_DEC 27
#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
#define MSM_BUS_MASTER_VFE 29
#define MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
#define MSM_BUS_MASTER_VPE 30
#define MSM_BUS_MASTER_JPEG_ENC 31
#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
#define MSM_BUS_MASTER_SPDM 36
#define MSM_BUS_MASTER_RPM 37
#define MSM_BUS_MASTER_MSS 38
#define MSM_BUS_MASTER_RIVA 39
#define MSM_BUS_MASTER_SNOC_VMEM 40
#define MSM_BUS_MASTER_MSS_SW_PROC 41
#define MSM_BUS_MASTER_MSS_FW_PROC 42
#define MSM_BUS_MASTER_HMSS 43
#define MSM_BUS_MASTER_GSS_NAV 44
#define MSM_BUS_MASTER_PCIE 45
#define MSM_BUS_MASTER_SATA 46
#define MSM_BUS_MASTER_CRYPTO 47
#define MSM_BUS_MASTER_VIDEO_CAP 48
#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
#define MSM_BUS_MASTER_VIDEO_ENC 50
#define MSM_BUS_MASTER_VIDEO_DEC 51
#define MSM_BUS_MASTER_LPASS_AHB 52
#define MSM_BUS_MASTER_QDSS_BAM 53
#define MSM_BUS_MASTER_SNOC_CFG 54
#define MSM_BUS_MASTER_CRYPTO_CORE0 55
#define MSM_BUS_MASTER_CRYPTO_CORE1 56
#define MSM_BUS_MASTER_MSS_NAV 57
#define MSM_BUS_MASTER_OCMEM_DMA 58
#define MSM_BUS_MASTER_WCSS 59
#define MSM_BUS_MASTER_QDSS_ETR 60
#define MSM_BUS_MASTER_USB3 61
#define MSM_BUS_MASTER_JPEG 62
#define MSM_BUS_MASTER_VIDEO_P0 63
#define MSM_BUS_MASTER_VIDEO_P1 64
#define MSM_BUS_MASTER_MSS_PROC 65
#define MSM_BUS_MASTER_JPEG_OCMEM 66
#define MSM_BUS_MASTER_MDP_OCMEM 67
#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
#define MSM_BUS_MASTER_VFE_OCMEM 70
#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
#define MSM_BUS_MASTER_RPM_INST 72
#define MSM_BUS_MASTER_RPM_DATA 73
#define MSM_BUS_MASTER_RPM_SYS 74
#define MSM_BUS_MASTER_DEHR 75
#define MSM_BUS_MASTER_QDSS_DAP 76
#define MSM_BUS_MASTER_TIC 77
#define MSM_BUS_MASTER_SDCC_1 78
#define MSM_BUS_MASTER_SDCC_3 79
#define MSM_BUS_MASTER_SDCC_4 80
#define MSM_BUS_MASTER_SDCC_2 81
#define MSM_BUS_MASTER_TSIF 82
#define MSM_BUS_MASTER_BAM_DMA 83
#define MSM_BUS_MASTER_BLSP_2 84
#define MSM_BUS_MASTER_USB_HSIC 85
#define MSM_BUS_MASTER_BLSP_1 86
#define MSM_BUS_MASTER_USB_HS 87
#define MSM_BUS_MASTER_PNOC_CFG 88
#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
#define MSM_BUS_MASTER_IPA 90
#define MSM_BUS_MASTER_QPIC 91
#define MSM_BUS_MASTER_MDPE 92
#define MSM_BUS_MASTER_USB_HS2 93
#define MSM_BUS_MASTER_VPU 94
#define MSM_BUS_MASTER_UFS 95
#define MSM_BUS_MASTER_BCAST 96
#define MSM_BUS_MASTER_CRYPTO_CORE2 97
#define MSM_BUS_MASTER_EMAC 98
#define MSM_BUS_MASTER_VPU_1 99
#define MSM_BUS_MASTER_PCIE_1 100
#define MSM_BUS_MASTER_USB3_1 101
#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
#define MSM_BUS_MASTER_TCU_0 104
#define MSM_BUS_MASTER_TCU_1 105
#define MSM_BUS_MASTER_CPP 106
#define MSM_BUS_MASTER_AUDIO 107
#define MSM_BUS_MASTER_PCIE_2 108
#define MSM_BUS_MASTER_VFE1 109
#define MSM_BUS_MASTER_XM_USB_HS1 110
#define MSM_BUS_MASTER_PCNOC_BIMC_1 111
#define MSM_BUS_MASTER_BIMC_PCNOC 112
#define MSM_BUS_MASTER_XI_USB_HSIC 113
#define MSM_BUS_MASTER_SGMII 114
#define MSM_BUS_SPMI_FETCHER 115
#define MSM_BUS_MASTER_GNOC_BIMC 116
#define MSM_BUS_MASTER_CRVIRT_A2NOC 117
#define MSM_BUS_MASTER_CNOC_A2NOC 118
#define MSM_BUS_MASTER_WLAN 119
#define MSM_BUS_MASTER_MSS_CE 120
#define MSM_BUS_MASTER_CDSP_PROC 121
#define MSM_BUS_MASTER_GNOC_SNOC 122
#define MSM_BUS_MASTER_PIMEM 123
#define MSM_BUS_MASTER_MASTER_LAST 124
#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
#define MSM_BUS_SNOC_MM_INT_0 10000
#define MSM_BUS_SNOC_MM_INT_1 10001
#define MSM_BUS_SNOC_MM_INT_2 10002
#define MSM_BUS_SNOC_MM_INT_BIMC 10003
#define MSM_BUS_SNOC_INT_0 10004
#define MSM_BUS_SNOC_INT_1 10005
#define MSM_BUS_SNOC_INT_BIMC 10006
#define MSM_BUS_SNOC_BIMC_0_MAS 10007
#define MSM_BUS_SNOC_BIMC_1_MAS 10008
#define MSM_BUS_SNOC_QDSS_INT 10009
#define MSM_BUS_PNOC_SNOC_MAS 10010
#define MSM_BUS_PNOC_SNOC_SLV 10011
#define MSM_BUS_PNOC_INT_0 10012
#define MSM_BUS_PNOC_INT_1 10013
#define MSM_BUS_PNOC_M_0 10014
#define MSM_BUS_PNOC_M_1 10015
#define MSM_BUS_BIMC_SNOC_MAS 10016
#define MSM_BUS_BIMC_SNOC_SLV 10017
#define MSM_BUS_PNOC_SLV_0 10018
#define MSM_BUS_PNOC_SLV_1 10019
#define MSM_BUS_PNOC_SLV_2 10020
#define MSM_BUS_PNOC_SLV_3 10021
#define MSM_BUS_PNOC_SLV_4 10022
#define MSM_BUS_PNOC_SLV_8 10023
#define MSM_BUS_PNOC_SLV_9 10024
#define MSM_BUS_SNOC_BIMC_0_SLV 10025
#define MSM_BUS_SNOC_BIMC_1_SLV 10026
#define MSM_BUS_MNOC_BIMC_MAS 10027
#define MSM_BUS_MNOC_BIMC_SLV 10028
#define MSM_BUS_BIMC_MNOC_MAS 10029
#define MSM_BUS_BIMC_MNOC_SLV 10030
#define MSM_BUS_SNOC_BIMC_MAS 10031
#define MSM_BUS_SNOC_BIMC_SLV 10032
#define MSM_BUS_CNOC_SNOC_MAS 10033
#define MSM_BUS_CNOC_SNOC_SLV 10034
#define MSM_BUS_SNOC_CNOC_MAS 10035
#define MSM_BUS_SNOC_CNOC_SLV 10036
#define MSM_BUS_OVNOC_SNOC_MAS 10037
#define MSM_BUS_OVNOC_SNOC_SLV 10038
#define MSM_BUS_SNOC_OVNOC_MAS 10039
#define MSM_BUS_SNOC_OVNOC_SLV 10040
#define MSM_BUS_SNOC_PNOC_MAS 10041
#define MSM_BUS_SNOC_PNOC_SLV 10042
#define MSM_BUS_BIMC_INT_APPS_EBI 10043
#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
#define MSM_BUS_SNOC_BIMC_2_MAS 10045
#define MSM_BUS_SNOC_BIMC_2_SLV 10046
#define MSM_BUS_PNOC_SLV_5 10047
#define MSM_BUS_PNOC_SLV_7 10048
#define MSM_BUS_PNOC_INT_2 10049
#define MSM_BUS_PNOC_INT_3 10050
#define MSM_BUS_PNOC_INT_4 10051
#define MSM_BUS_PNOC_INT_5 10052
#define MSM_BUS_PNOC_INT_6 10053
#define MSM_BUS_PNOC_INT_7 10054
#define MSM_BUS_BIMC_SNOC_1_MAS 10055
#define MSM_BUS_BIMC_SNOC_1_SLV 10056
#define MSM_BUS_PNOC_A1NOC_MAS 10057
#define MSM_BUS_PNOC_A1NOC_SLV 10058
#define MSM_BUS_CNOC_A1NOC_MAS 10059
#define MSM_BUS_A0NOC_SNOC_MAS 10060
#define MSM_BUS_A0NOC_SNOC_SLV 10061
#define MSM_BUS_A1NOC_SNOC_SLV 10062
#define MSM_BUS_A1NOC_SNOC_MAS 10063
#define MSM_BUS_A2NOC_SNOC_MAS 10064
#define MSM_BUS_A2NOC_SNOC_SLV 10065
#define MSM_BUS_SNOC_INT_2 10066
#define MSM_BUS_A0NOC_QDSS_INT 10067
#define MSM_BUS_INT_LAST 10068
#define MSM_BUS_INT_TEST_ID 20000
#define MSM_BUS_INT_TEST_LAST 20050
#define MSM_BUS_SLAVE_FIRST 512
#define MSM_BUS_SLAVE_EBI_CH0 512
#define MSM_BUS_SLAVE_EBI_CH1 513
#define MSM_BUS_SLAVE_AMPSS_L2 514
#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
#define MSM_BUS_SLAVE_SPS 518
#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
#define MSM_BUS_SLAVE_AMPSS 520
#define MSM_BUS_SLAVE_MSS 521
#define MSM_BUS_SLAVE_LPASS 522
#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
#define MSM_BUS_SLAVE_CORESIGHT 526
#define MSM_BUS_SLAVE_RIVA 527
#define MSM_BUS_SLAVE_SMI 528
#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
#define MSM_BUS_SLAVE_MM_IMEM 531
#define MSM_BUS_SLAVE_CRYPTO 532
#define MSM_BUS_SLAVE_SPDM 533
#define MSM_BUS_SLAVE_RPM 534
#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
#define MSM_BUS_SLAVE_MPM 536
#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
#define MSM_BUS_SLAVE_GSBI1_UART 542
#define MSM_BUS_SLAVE_GSBI2_UART 543
#define MSM_BUS_SLAVE_GSBI3_UART 544
#define MSM_BUS_SLAVE_GSBI4_UART 545
#define MSM_BUS_SLAVE_GSBI5_UART 546
#define MSM_BUS_SLAVE_GSBI6_UART 547
#define MSM_BUS_SLAVE_GSBI7_UART 548
#define MSM_BUS_SLAVE_GSBI8_UART 549
#define MSM_BUS_SLAVE_GSBI9_UART 550
#define MSM_BUS_SLAVE_GSBI10_UART 551
#define MSM_BUS_SLAVE_GSBI11_UART 552
#define MSM_BUS_SLAVE_GSBI12_UART 553
#define MSM_BUS_SLAVE_GSBI1_QUP 554
#define MSM_BUS_SLAVE_GSBI2_QUP 555
#define MSM_BUS_SLAVE_GSBI3_QUP 556
#define MSM_BUS_SLAVE_GSBI4_QUP 557
#define MSM_BUS_SLAVE_GSBI5_QUP 558
#define MSM_BUS_SLAVE_GSBI6_QUP 559
#define MSM_BUS_SLAVE_GSBI7_QUP 560
#define MSM_BUS_SLAVE_GSBI8_QUP 561
#define MSM_BUS_SLAVE_GSBI9_QUP 562
#define MSM_BUS_SLAVE_GSBI10_QUP 563
#define MSM_BUS_SLAVE_GSBI11_QUP 564
#define MSM_BUS_SLAVE_GSBI12_QUP 565
#define MSM_BUS_SLAVE_EBI2_NAND 566
#define MSM_BUS_SLAVE_EBI2_CS0 567
#define MSM_BUS_SLAVE_EBI2_CS1 568
#define MSM_BUS_SLAVE_EBI2_CS2 569
#define MSM_BUS_SLAVE_EBI2_CS3 570
#define MSM_BUS_SLAVE_EBI2_CS4 571
#define MSM_BUS_SLAVE_EBI2_CS5 572
#define MSM_BUS_SLAVE_USB_FS1 573
#define MSM_BUS_SLAVE_USB_FS2 574
#define MSM_BUS_SLAVE_TSIF 575
#define MSM_BUS_SLAVE_MSM_TSSC 576
#define MSM_BUS_SLAVE_MSM_PDM 577
#define MSM_BUS_SLAVE_MSM_DIMEM 578
#define MSM_BUS_SLAVE_MSM_TCSR 579
#define MSM_BUS_SLAVE_MSM_PRNG 580
#define MSM_BUS_SLAVE_GSS 581
#define MSM_BUS_SLAVE_SATA 582
#define MSM_BUS_SLAVE_USB3 583
#define MSM_BUS_SLAVE_WCSS 584
#define MSM_BUS_SLAVE_OCIMEM 585
#define MSM_BUS_SLAVE_SNOC_OCMEM 586
#define MSM_BUS_SLAVE_SERVICE_SNOC 587
#define MSM_BUS_SLAVE_QDSS_STM 588
#define MSM_BUS_SLAVE_CAMERA_CFG 589
#define MSM_BUS_SLAVE_DISPLAY_CFG 590
#define MSM_BUS_SLAVE_OCMEM_CFG 591
#define MSM_BUS_SLAVE_CPR_CFG 592
#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
#define MSM_BUS_SLAVE_MISC_CFG 594
#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
#define MSM_BUS_SLAVE_VENUS_CFG 596
#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
#define MSM_BUS_SLAVE_SERVICE_MNOC 603
#define MSM_BUS_SLAVE_OCMEM 604
#define MSM_BUS_SLAVE_SERVICE_ONOC 605
#define MSM_BUS_SLAVE_SDCC_1 606
#define MSM_BUS_SLAVE_SDCC_3 607
#define MSM_BUS_SLAVE_SDCC_2 608
#define MSM_BUS_SLAVE_SDCC_4 609
#define MSM_BUS_SLAVE_BAM_DMA 610
#define MSM_BUS_SLAVE_BLSP_2 611
#define MSM_BUS_SLAVE_USB_HSIC 612
#define MSM_BUS_SLAVE_BLSP_1 613
#define MSM_BUS_SLAVE_USB_HS 614
#define MSM_BUS_SLAVE_PDM 615
#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
#define MSM_BUS_SLAVE_PRNG 618
#define MSM_BUS_SLAVE_SERVICE_PNOC 619
#define MSM_BUS_SLAVE_CLK_CTL 620
#define MSM_BUS_SLAVE_CNOC_MSS 621
#define MSM_BUS_SLAVE_SECURITY 622
#define MSM_BUS_SLAVE_TCSR 623
#define MSM_BUS_SLAVE_TLMM 624
#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
#define MSM_BUS_SLAVE_IMEM_CFG 627
#define MSM_BUS_SLAVE_MESSAGE_RAM 628
#define MSM_BUS_SLAVE_BIMC_CFG 629
#define MSM_BUS_SLAVE_BOOT_ROM 630
#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
#define MSM_BUS_SLAVE_PMIC_ARB 632
#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
#define MSM_BUS_SLAVE_DEHR_CFG 634
#define MSM_BUS_SLAVE_QDSS_CFG 635
#define MSM_BUS_SLAVE_RBCPR_CFG 636
#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
#define MSM_BUS_SLAVE_PNOC_CFG 641
#define MSM_BUS_SLAVE_SNOC_CFG 642
#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
#define MSM_BUS_SLAVE_PHY_APU_CFG 644
#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
#define MSM_BUS_SLAVE_SERVICE_CNOC 646
#define MSM_BUS_SLAVE_IPS_CFG 647
#define MSM_BUS_SLAVE_QPIC 648
#define MSM_BUS_SLAVE_DSI_CFG 649
#define MSM_BUS_SLAVE_UFS_CFG 650
#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
#define MSM_BUS_SLAVE_PCIE_CFG 653
#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
#define MSM_BUS_SLAVE_AVSYNC_CFG 656
#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
#define MSM_BUS_SLAVE_VPU_CFG 658
#define MSM_BUS_SLAVE_BCAST_CFG 659
#define MSM_BUS_SLAVE_KLM_CFG 660
#define MSM_BUS_SLAVE_GENI_IR_CFG 661
#define MSM_BUS_SLAVE_OCMEM_GFX 662
#define MSM_BUS_SLAVE_CATS_128 663
#define MSM_BUS_SLAVE_OCMEM_64 664
#define MSM_BUS_SLAVE_PCIE_0 665
#define MSM_BUS_SLAVE_PCIE_1 666
#define MSM_BUS_SLAVE_PCIE_0_CFG 667
#define MSM_BUS_SLAVE_PCIE_1_CFG 668
#define MSM_BUS_SLAVE_SRVC_MNOC 669
#define MSM_BUS_SLAVE_USB_HS2 670
#define MSM_BUS_SLAVE_AUDIO 671
#define MSM_BUS_SLAVE_TCU 672
#define MSM_BUS_SLAVE_APPSS 673
#define MSM_BUS_SLAVE_PCIE_PARF 674
#define MSM_BUS_SLAVE_USB3_PHY_CFG 675
#define MSM_BUS_SLAVE_IPA_CFG 676
#define MSM_BUS_SLAVE_A0NOC_SNOC 677
#define MSM_BUS_SLAVE_A1NOC_SNOC 678
#define MSM_BUS_SLAVE_A2NOC_SNOC 679
#define MSM_BUS_SLAVE_HMSS_L3 680
#define MSM_BUS_SLAVE_PIMEM_CFG 681
#define MSM_BUS_SLAVE_DCC_CFG 682
#define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
#define MSM_BUS_SLAVE_PCIE_2_CFG 684
#define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
#define MSM_BUS_SLAVE_A0NOC_CFG 686
#define MSM_BUS_SLAVE_A1NOC_CFG 687
#define MSM_BUS_SLAVE_A2NOC_CFG 688
#define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
#define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
#define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
#define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
#define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
#define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
#define MSM_BUS_SLAVE_MMAGIC_CFG 695
#define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
#define MSM_BUS_SLAVE_SSC_CFG 697
#define MSM_BUS_SLAVE_DSA_CFG 698
#define MSM_BUS_SLAVE_DSA_MPU_CFG 699
#define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
#define MSM_BUS_SLAVE_SMMU_CPP_CFG 701
#define MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
#define MSM_BUS_SLAVE_SMMU_MDP_CFG 703
#define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
#define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
#define MSM_BUS_SLAVE_SMMU_VFE_CFG 706
#define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
#define MSM_BUS_SLAVE_VMEM_CFG 708
#define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
#define MSM_BUS_SLAVE_VMEM 710
#define MSM_BUS_SLAVE_AHB2PHY 711
#define MSM_BUS_SLAVE_PIMEM 712
#define MSM_BUS_SLAVE_SNOC_VMEM 713
#define MSM_BUS_SLAVE_PCIE_2 714
#define MSM_BUS_SLAVE_RBCPR_MX 715
#define MSM_BUS_SLAVE_RBCPR_CX 716
#define MSM_BUS_SLAVE_BIMC_PCNOC 717
#define MSM_BUS_SLAVE_PCNOC_BIMC_1 718
#define MSM_BUS_SLAVE_SGMII 719
#define MSM_BUS_SLAVE_SPMI_FETCHER 720
#define MSM_BUS_PNOC_SLV_6 721
#define MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
#define MSM_BUS_SLAVE_WLAN 723
#define MSM_BUS_SLAVE_CRVIRT_A2NOC 724
#define MSM_BUS_SLAVE_CNOC_A2NOC 725
#define MSM_BUS_SLAVE_GLM 726
#define MSM_BUS_SLAVE_GNOC_BIMC 727
#define MSM_BUS_SLAVE_GNOC_SNOC 728
#define MSM_BUS_SLAVE_QM_CFG 729
#define MSM_BUS_SLAVE_TLMM_EAST 730
#define MSM_BUS_SLAVE_TLMM_NORTH 731
#define MSM_BUS_SLAVE_TLMM_WEST 732
#define MSM_BUS_SLAVE_SKL 733
#define MSM_BUS_SLAVE_LPASS_TCM 734
#define MSM_BUS_SLAVE_TLMM_SOUTH 735
#define MSM_BUS_SLAVE_TLMM_CENTER 736
#define MSM_BUS_MSS_NAV_CE_MPU_CFG 737
#define MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
#define MSM_BUS_SLAVE_CDSP 739
#define MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
#define MSM_BUS_SLAVE_LPASS_MPU_CFG 741
#define MSM_BUS_SLAVE_CSI_PHY_CFG 742
#define MSM_BUS_SLAVE_LAST 743
#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
/*
* ID's used in RPM messages
*/
#define ICBID_MASTER_APPSS_PROC 0
#define ICBID_MASTER_MSS_PROC 1
#define ICBID_MASTER_MNOC_BIMC 2
#define ICBID_MASTER_SNOC_BIMC 3
#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
#define ICBID_MASTER_CNOC_MNOC_CFG 5
#define ICBID_MASTER_GFX3D 6
#define ICBID_MASTER_JPEG 7
#define ICBID_MASTER_MDP 8
#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
#define ICBID_MASTER_VIDEO 9
#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
#define ICBID_MASTER_VIDEO_P1 10
#define ICBID_MASTER_VFE 11
#define ICBID_MASTER_VFE0 ICBID_MASTER_VFE
#define ICBID_MASTER_CNOC_ONOC_CFG 12
#define ICBID_MASTER_JPEG_OCMEM 13
#define ICBID_MASTER_MDP_OCMEM 14
#define ICBID_MASTER_VIDEO_P0_OCMEM 15
#define ICBID_MASTER_VIDEO_P1_OCMEM 16
#define ICBID_MASTER_VFE_OCMEM 17
#define ICBID_MASTER_LPASS_AHB 18
#define ICBID_MASTER_QDSS_BAM 19
#define ICBID_MASTER_SNOC_CFG 20
#define ICBID_MASTER_BIMC_SNOC 21
#define ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
#define ICBID_MASTER_CNOC_SNOC 22
#define ICBID_MASTER_CRYPTO 23
#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
#define ICBID_MASTER_CRYPTO_CORE1 24
#define ICBID_MASTER_LPASS_PROC 25
#define ICBID_MASTER_MSS 26
#define ICBID_MASTER_MSS_NAV 27
#define ICBID_MASTER_OCMEM_DMA 28
#define ICBID_MASTER_PNOC_SNOC 29
#define ICBID_MASTER_WCSS 30
#define ICBID_MASTER_QDSS_ETR 31
#define ICBID_MASTER_USB3 32
#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
#define ICBID_MASTER_SDCC_1 33
#define ICBID_MASTER_SDCC_3 34
#define ICBID_MASTER_SDCC_2 35
#define ICBID_MASTER_SDCC_4 36
#define ICBID_MASTER_TSIF 37
#define ICBID_MASTER_BAM_DMA 38
#define ICBID_MASTER_BLSP_2 39
#define ICBID_MASTER_USB_HSIC 40
#define ICBID_MASTER_BLSP_1 41
#define ICBID_MASTER_USB_HS 42
#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
#define ICBID_MASTER_PNOC_CFG 43
#define ICBID_MASTER_SNOC_PNOC 44
#define ICBID_MASTER_RPM_INST 45
#define ICBID_MASTER_RPM_DATA 46
#define ICBID_MASTER_RPM_SYS 47
#define ICBID_MASTER_DEHR 48
#define ICBID_MASTER_QDSS_DAP 49
#define ICBID_MASTER_SPDM 50
#define ICBID_MASTER_TIC 51
#define ICBID_MASTER_SNOC_CNOC 52
#define ICBID_MASTER_GFX3D_OCMEM 53
#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
#define ICBID_MASTER_OVIRT_SNOC 54
#define ICBID_MASTER_SNOC_OVIRT 55
#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
#define ICBID_MASTER_ONOC_OVIRT 56
#define ICBID_MASTER_USB_HS2 57
#define ICBID_MASTER_QPIC 58
#define ICBID_MASTER_IPA 59
#define ICBID_MASTER_DSI 60
#define ICBID_MASTER_MDP1 61
#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
#define ICBID_MASTER_VPU_PROC 62
#define ICBID_MASTER_VPU 63
#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
#define ICBID_MASTER_CRYPTO_CORE2 64
#define ICBID_MASTER_PCIE_0 65
#define ICBID_MASTER_PCIE_1 66
#define ICBID_MASTER_SATA 67
#define ICBID_MASTER_UFS 68
#define ICBID_MASTER_USB3_1 69
#define ICBID_MASTER_VIDEO_OCMEM 70
#define ICBID_MASTER_VPU1 71
#define ICBID_MASTER_VCAP 72
#define ICBID_MASTER_EMAC 73
#define ICBID_MASTER_BCAST 74
#define ICBID_MASTER_MMSS_PROC 75
#define ICBID_MASTER_SNOC_BIMC_1 76
#define ICBID_MASTER_SNOC_PCNOC 77
#define ICBID_MASTER_AUDIO 78
#define ICBID_MASTER_MM_INT_0 79
#define ICBID_MASTER_MM_INT_1 80
#define ICBID_MASTER_MM_INT_2 81
#define ICBID_MASTER_MM_INT_BIMC 82
#define ICBID_MASTER_MSS_INT 83
#define ICBID_MASTER_PCNOC_CFG 84
#define ICBID_MASTER_PCNOC_INT_0 85
#define ICBID_MASTER_PCNOC_INT_1 86
#define ICBID_MASTER_PCNOC_M_0 87
#define ICBID_MASTER_PCNOC_M_1 88
#define ICBID_MASTER_PCNOC_S_0 89
#define ICBID_MASTER_PCNOC_S_1 90
#define ICBID_MASTER_PCNOC_S_2 91
#define ICBID_MASTER_PCNOC_S_3 92
#define ICBID_MASTER_PCNOC_S_4 93
#define ICBID_MASTER_PCNOC_S_6 94
#define ICBID_MASTER_PCNOC_S_7 95
#define ICBID_MASTER_PCNOC_S_8 96
#define ICBID_MASTER_PCNOC_S_9 97
#define ICBID_MASTER_QDSS_INT 98
#define ICBID_MASTER_SNOC_INT_0 99
#define ICBID_MASTER_SNOC_INT_1 100
#define ICBID_MASTER_SNOC_INT_BIMC 101
#define ICBID_MASTER_TCU_0 102
#define ICBID_MASTER_TCU_1 103
#define ICBID_MASTER_BIMC_INT_0 104
#define ICBID_MASTER_BIMC_INT_1 105
#define ICBID_MASTER_CAMERA 106
#define ICBID_MASTER_RICA 107
#define ICBID_MASTER_SNOC_BIMC_2 108
#define ICBID_MASTER_BIMC_SNOC_1 109
#define ICBID_MASTER_A0NOC_SNOC 110
#define ICBID_MASTER_A1NOC_SNOC 111
#define ICBID_MASTER_A2NOC_SNOC 112
#define ICBID_MASTER_PIMEM 113
#define ICBID_MASTER_SNOC_VMEM 114
#define ICBID_MASTER_CPP 115
#define ICBID_MASTER_CNOC_A1NOC 116
#define ICBID_MASTER_PNOC_A1NOC 117
#define ICBID_MASTER_HMSS 118
#define ICBID_MASTER_PCIE_2 119
#define ICBID_MASTER_ROTATOR 120
#define ICBID_MASTER_VENUS_VMEM 121
#define ICBID_MASTER_DCC 122
#define ICBID_MASTER_MCDMA 123
#define ICBID_MASTER_PCNOC_INT_2 124
#define ICBID_MASTER_PCNOC_INT_3 125
#define ICBID_MASTER_PCNOC_INT_4 126
#define ICBID_MASTER_PCNOC_INT_5 127
#define ICBID_MASTER_PCNOC_INT_6 128
#define ICBID_MASTER_PCNOC_S_5 129
#define ICBID_MASTER_SENSORS_AHB 130
#define ICBID_MASTER_SENSORS_PROC 131
#define ICBID_MASTER_QSPI 132
#define ICBID_MASTER_VFE1 133
#define ICBID_MASTER_SNOC_INT_2 134
#define ICBID_MASTER_SMMNOC_BIMC 135
#define ICBID_MASTER_CRVIRT_A1NOC 136
#define ICBID_MASTER_XM_USB_HS1 137
#define ICBID_MASTER_XI_USB_HS1 138
#define ICBID_MASTER_PCNOC_BIMC_1 139
#define ICBID_MASTER_BIMC_PCNOC 140
#define ICBID_MASTER_XI_HSIC 141
#define ICBID_MASTER_SGMII 142
#define ICBID_MASTER_SPMI_FETCHER 143
#define ICBID_MASTER_GNOC_BIMC 144
#define ICBID_MASTER_CRVIRT_A2NOC 145
#define ICBID_MASTER_CNOC_A2NOC 146
#define ICBID_MASTER_WLAN 147
#define ICBID_MASTER_MSS_CE 148
#define ICBID_MASTER_CDSP_PROC 149
#define ICBID_MASTER_GNOC_SNOC 150
#define ICBID_SLAVE_EBI1 0
#define ICBID_SLAVE_APPSS_L2 1
#define ICBID_SLAVE_BIMC_SNOC 2
#define ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
#define ICBID_SLAVE_CAMERA_CFG 3
#define ICBID_SLAVE_DISPLAY_CFG 4
#define ICBID_SLAVE_OCMEM_CFG 5
#define ICBID_SLAVE_CPR_CFG 6
#define ICBID_SLAVE_CPR_XPU_CFG 7
#define ICBID_SLAVE_MISC_CFG 8
#define ICBID_SLAVE_MISC_XPU_CFG 9
#define ICBID_SLAVE_VENUS_CFG 10
#define ICBID_SLAVE_GFX3D_CFG 11
#define ICBID_SLAVE_MMSS_CLK_CFG 12
#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
#define ICBID_SLAVE_MNOC_MPU_CFG 14
#define ICBID_SLAVE_ONOC_MPU_CFG 15
#define ICBID_SLAVE_MNOC_BIMC 16
#define ICBID_SLAVE_SERVICE_MNOC 17
#define ICBID_SLAVE_OCMEM 18
#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
#define ICBID_SLAVE_SERVICE_ONOC 19
#define ICBID_SLAVE_APPSS 20
#define ICBID_SLAVE_LPASS 21
#define ICBID_SLAVE_USB3 22
#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
#define ICBID_SLAVE_WCSS 23
#define ICBID_SLAVE_SNOC_BIMC 24
#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
#define ICBID_SLAVE_SNOC_CNOC 25
#define ICBID_SLAVE_IMEM 26
#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
#define ICBID_SLAVE_SNOC_OVIRT 27
#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
#define ICBID_SLAVE_SNOC_PNOC 28
#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
#define ICBID_SLAVE_SERVICE_SNOC 29
#define ICBID_SLAVE_QDSS_STM 30
#define ICBID_SLAVE_SDCC_1 31
#define ICBID_SLAVE_SDCC_3 32
#define ICBID_SLAVE_SDCC_2 33
#define ICBID_SLAVE_SDCC_4 34
#define ICBID_SLAVE_TSIF 35
#define ICBID_SLAVE_BAM_DMA 36
#define ICBID_SLAVE_BLSP_2 37
#define ICBID_SLAVE_USB_HSIC 38
#define ICBID_SLAVE_BLSP_1 39
#define ICBID_SLAVE_USB_HS 40
#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
#define ICBID_SLAVE_PDM 41
#define ICBID_SLAVE_PERIPH_APU_CFG 42
#define ICBID_SLAVE_PNOC_MPU_CFG 43
#define ICBID_SLAVE_PRNG 44
#define ICBID_SLAVE_PNOC_SNOC 45
#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
#define ICBID_SLAVE_SERVICE_PNOC 46
#define ICBID_SLAVE_CLK_CTL 47
#define ICBID_SLAVE_CNOC_MSS 48
#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
#define ICBID_SLAVE_SECURITY 49
#define ICBID_SLAVE_TCSR 50
#define ICBID_SLAVE_TLMM 51
#define ICBID_SLAVE_CRYPTO_0_CFG 52
#define ICBID_SLAVE_CRYPTO_1_CFG 53
#define ICBID_SLAVE_IMEM_CFG 54
#define ICBID_SLAVE_MESSAGE_RAM 55
#define ICBID_SLAVE_BIMC_CFG 56
#define ICBID_SLAVE_BOOT_ROM 57
#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
#define ICBID_SLAVE_PMIC_ARB 59
#define ICBID_SLAVE_SPDM_WRAPPER 60
#define ICBID_SLAVE_DEHR_CFG 61
#define ICBID_SLAVE_MPM 62
#define ICBID_SLAVE_QDSS_CFG 63
#define ICBID_SLAVE_RBCPR_CFG 64
#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
#define ICBID_SLAVE_CNOC_MNOC_CFG 66
#define ICBID_SLAVE_SNOC_MPU_CFG 67
#define ICBID_SLAVE_CNOC_ONOC_CFG 68
#define ICBID_SLAVE_PNOC_CFG 69
#define ICBID_SLAVE_SNOC_CFG 70
#define ICBID_SLAVE_EBI1_DLL_CFG 71
#define ICBID_SLAVE_PHY_APU_CFG 72
#define ICBID_SLAVE_EBI1_PHY_CFG 73
#define ICBID_SLAVE_RPM 74
#define ICBID_SLAVE_CNOC_SNOC 75
#define ICBID_SLAVE_SERVICE_CNOC 76
#define ICBID_SLAVE_OVIRT_SNOC 77
#define ICBID_SLAVE_OVIRT_OCMEM 78
#define ICBID_SLAVE_USB_HS2 79
#define ICBID_SLAVE_QPIC 80
#define ICBID_SLAVE_IPS_CFG 81
#define ICBID_SLAVE_DSI_CFG 82
#define ICBID_SLAVE_USB3_1 83
#define ICBID_SLAVE_PCIE_0 84
#define ICBID_SLAVE_PCIE_1 85
#define ICBID_SLAVE_PSS_SMMU_CFG 86
#define ICBID_SLAVE_CRYPTO_2_CFG 87
#define ICBID_SLAVE_PCIE_0_CFG 88
#define ICBID_SLAVE_PCIE_1_CFG 89
#define ICBID_SLAVE_SATA_CFG 90
#define ICBID_SLAVE_SPSS_GENI_IR 91
#define ICBID_SLAVE_UFS_CFG 92
#define ICBID_SLAVE_AVSYNC_CFG 93
#define ICBID_SLAVE_VPU_CFG 94
#define ICBID_SLAVE_USB_PHY_CFG 95
#define ICBID_SLAVE_RBCPR_MX_CFG 96
#define ICBID_SLAVE_PCIE_PARF 97
#define ICBID_SLAVE_VCAP_CFG 98
#define ICBID_SLAVE_EMAC_CFG 99
#define ICBID_SLAVE_BCAST_CFG 100
#define ICBID_SLAVE_KLM_CFG 101
#define ICBID_SLAVE_DISPLAY_PWM 102
#define ICBID_SLAVE_GENI 103
#define ICBID_SLAVE_SNOC_BIMC_1 104
#define ICBID_SLAVE_AUDIO 105
#define ICBID_SLAVE_CATS_0 106
#define ICBID_SLAVE_CATS_1 107
#define ICBID_SLAVE_MM_INT_0 108
#define ICBID_SLAVE_MM_INT_1 109
#define ICBID_SLAVE_MM_INT_2 110
#define ICBID_SLAVE_MM_INT_BIMC 111
#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
#define ICBID_SLAVE_MSS_INT 113
#define ICBID_SLAVE_PCNOC_INT_0 114
#define ICBID_SLAVE_PCNOC_INT_1 115
#define ICBID_SLAVE_PCNOC_M_0 116
#define ICBID_SLAVE_PCNOC_M_1 117
#define ICBID_SLAVE_PCNOC_S_0 118
#define ICBID_SLAVE_PCNOC_S_1 119
#define ICBID_SLAVE_PCNOC_S_2 120
#define ICBID_SLAVE_PCNOC_S_3 121
#define ICBID_SLAVE_PCNOC_S_4 122
#define ICBID_SLAVE_PCNOC_S_6 123
#define ICBID_SLAVE_PCNOC_S_7 124
#define ICBID_SLAVE_PCNOC_S_8 125
#define ICBID_SLAVE_PCNOC_S_9 126
#define ICBID_SLAVE_PRNG_XPU_CFG 127
#define ICBID_SLAVE_QDSS_INT 128
#define ICBID_SLAVE_RPM_XPU_CFG 129
#define ICBID_SLAVE_SNOC_INT_0 130
#define ICBID_SLAVE_SNOC_INT_1 131
#define ICBID_SLAVE_SNOC_INT_BIMC 132
#define ICBID_SLAVE_TCU 133
#define ICBID_SLAVE_BIMC_INT_0 134
#define ICBID_SLAVE_BIMC_INT_1 135
#define ICBID_SLAVE_RICA_CFG 136
#define ICBID_SLAVE_SNOC_BIMC_2 137
#define ICBID_SLAVE_BIMC_SNOC_1 138
#define ICBID_SLAVE_PNOC_A1NOC 139
#define ICBID_SLAVE_SNOC_VMEM 140
#define ICBID_SLAVE_A0NOC_SNOC 141
#define ICBID_SLAVE_A1NOC_SNOC 142
#define ICBID_SLAVE_A2NOC_SNOC 143
#define ICBID_SLAVE_A0NOC_CFG 144
#define ICBID_SLAVE_A0NOC_MPU_CFG 145
#define ICBID_SLAVE_A0NOC_SMMU_CFG 146
#define ICBID_SLAVE_A1NOC_CFG 147
#define ICBID_SLAVE_A1NOC_MPU_CFG 148
#define ICBID_SLAVE_A1NOC_SMMU_CFG 149
#define ICBID_SLAVE_A2NOC_CFG 150
#define ICBID_SLAVE_A2NOC_MPU_CFG 151
#define ICBID_SLAVE_A2NOC_SMMU_CFG 152
#define ICBID_SLAVE_AHB2PHY 153
#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
#define ICBID_SLAVE_DCC_CFG 155
#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
#define ICBID_SLAVE_DSA_CFG 157
#define ICBID_SLAVE_DSA_MPU_CFG 158
#define ICBID_SLAVE_SSC_MPU_CFG 159
#define ICBID_SLAVE_HMSS_L3 160
#define ICBID_SLAVE_LPASS_SMMU_CFG 161
#define ICBID_SLAVE_MMAGIC_CFG 162
#define ICBID_SLAVE_PCIE20_AHB2PHY 163
#define ICBID_SLAVE_PCIE_2 164
#define ICBID_SLAVE_PCIE_2_CFG 165
#define ICBID_SLAVE_PIMEM 166
#define ICBID_SLAVE_PIMEM_CFG 167
#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
#define ICBID_SLAVE_RBCPR_CX 169
#define ICBID_SLAVE_RBCPR_MX 170
#define ICBID_SLAVE_SMMU_CPP_CFG 171
#define ICBID_SLAVE_SMMU_JPEG_CFG 172
#define ICBID_SLAVE_SMMU_MDP_CFG 173
#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174
#define ICBID_SLAVE_SMMU_VENUS_CFG 175
#define ICBID_SLAVE_SMMU_VFE_CFG 176
#define ICBID_SLAVE_SSC_CFG 177
#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178
#define ICBID_SLAVE_VMEM 179
#define ICBID_SLAVE_VMEM_CFG 180
#define ICBID_SLAVE_QDSS_MPU_CFG 181
#define ICBID_SLAVE_USB3_PHY_CFG 182
#define ICBID_SLAVE_IPA_CFG 183
#define ICBID_SLAVE_PCNOC_INT_2 184
#define ICBID_SLAVE_PCNOC_INT_3 185
#define ICBID_SLAVE_PCNOC_INT_4 186
#define ICBID_SLAVE_PCNOC_INT_5 187
#define ICBID_SLAVE_PCNOC_INT_6 188
#define ICBID_SLAVE_PCNOC_S_5 189
#define ICBID_SLAVE_QSPI 190
#define ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
#define ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
#define ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
#define ICBID_SLAVE_MSS_MPU_CFG 194
#define ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
#define ICBID_SLAVE_SKL 196
#define ICBID_SLAVE_SNOC_INT_2 197
#define ICBID_SLAVE_SMMNOC_BIMC 198
#define ICBID_SLAVE_CRVIRT_A1NOC 199
#define ICBID_SLAVE_SGMII 200
#define ICBID_SLAVE_QHS4_APPS 201
#define ICBID_SLAVE_BIMC_PCNOC 202
#define ICBID_SLAVE_PCNOC_BIMC_1 203
#define ICBID_SLAVE_SPMI_FETCHER 204
#define ICBID_SLAVE_MMSS_SMMU_CFG 205
#define ICBID_SLAVE_WLAN 206
#define ICBID_SLAVE_CRVIRT_A2NOC 207
#define ICBID_SLAVE_CNOC_A2NOC 208
#define ICBID_SLAVE_GLM 209
#define ICBID_SLAVE_GNOC_BIMC 210
#define ICBID_SLAVE_GNOC_SNOC 211
#define ICBID_SLAVE_QM_CFG 212
#define ICBID_SLAVE_TLMM_EAST 213
#define ICBID_SLAVE_TLMM_NORTH 214
#define ICBID_SLAVE_TLMM_WEST 215
#define ICBID_SLAVE_LPASS_TCM 216
#define ICBID_SLAVE_TLMM_SOUTH 217
#define ICBID_SLAVE_TLMM_CENTER 218
#define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219
#define ICBID_SLAVE_A2NOC_THROTTLE_CFG 220
#define ICBID_SLAVE_CDSP 221
#define ICBID_SLAVE_CDSP_SMMU_CFG 222
#define ICBID_SLAVE_LPASS_MPU_CFG 223
#define ICBID_SLAVE_CSI_PHY_CFG 224
#endif
......@@ -73,6 +73,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06
#define MSM_PARAM_NR_RINGS 0x07
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
......@@ -218,6 +219,7 @@ struct drm_msm_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
......@@ -231,6 +233,7 @@ struct drm_msm_wait_fence {
__u32 fence; /* in */
__u32 pad;
struct drm_msm_timespec timeout; /* in */
__u32 queueid; /* in, submitqueue id */
};
/* madvise provides a way to tell the kernel in case a buffers contents
......@@ -254,6 +257,20 @@ struct drm_msm_gem_madvise {
__u32 retained; /* out, whether backing store still exists */
};
/*
 * Draw queues allow the user to set specific submission parameters. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
* backwards compatibility as a "default" submitqueue
*/
#define MSM_SUBMITQUEUE_FLAGS (0)
struct drm_msm_submitqueue {
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
__u32 prio; /* in, Priority level */
__u32 id; /* out, identifier */
};
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
......@@ -265,6 +282,11 @@ struct drm_msm_gem_madvise {
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
#define DRM_MSM_GEM_MADVISE 0x08
/* placeholder:
#define DRM_MSM_GEM_SVM_NEW 0x09
*/
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
......@@ -274,6 +296,8 @@ struct drm_msm_gem_madvise {
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
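As a hedged userspace illustration of the new submitqueue uapi (not part of the kernel patch itself): the sketch below queries MSM_PARAM_NR_RINGS and then creates a queue at a clamped priority. It assumes fd is an already-open msm DRM render node, that struct drm_msm_param and MSM_PIPE_3D0 are as defined in this header, and keeps error handling minimal; example_create_queue() is a hypothetical helper name.
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>
/* Hypothetical helper, illustration only. */
static int example_create_queue(int fd, uint32_t prio, uint32_t *queue_id)
{
	struct drm_msm_param param = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_NR_RINGS,
	};
	struct drm_msm_submitqueue req = { 0 };
	/* Older kernels won't know MSM_PARAM_NR_RINGS; treat that as one ring */
	if (ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &param) || param.value == 0)
		param.value = 1;
	/* The kernel rejects prio >= nr_rings, so clamp before asking */
	if (prio >= param.value)
		prio = param.value - 1;
	req.prio  = prio;
	req.flags = 0;	/* MSM_SUBMITQUEUE_FLAGS is currently empty */
	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -1;
	*queue_id = req.id;	/* used as 'queueid' in gem_submit / wait_fence */
	return 0;
}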
#if defined(__cplusplus)
}
......