Commit 87331c83 authored by Dave Airlie

Merge tag 'drm-msm-next-2017-11-01' of git://people.freedesktop.org/~robclark/linux into drm-next

 + preemption support for a5xx[1][2]

 + display fixes for 8x96 (snapdragon 820) including fixes for 4k scanout
   (hwpipe assignment re-work to handle multiple hwpipes assigned to a plane
   for wide scanout)

 + async cursor plane updates and fixes

 + refactor adreno_bind/hwinit.. still defer fw loading until device open,
   but move clk/irq/etc to probe/bind time to fix issues when fw isn't
   present in filesys

 + clk/dt bindings cleanups w/ backward compat via msm_clk_get() (dt docs
   part ack'ed by Rob Herring)
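
   A rough sketch of the backward-compat lookup (illustrative only; the
   buffer size and warning text are assumptions, and the in-tree
   msm_clk_get() may differ in details):

     #include <linux/clk.h>
     #include <linux/device.h>
     #include <linux/err.h>
     #include <linux/kernel.h>
     #include <linux/platform_device.h>

     /* Illustrative sketch only -- the in-tree msm_clk_get() may differ */
     struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
     {
             struct clk *clk;
             char name2[32];

             /* try the new, suffix-less clock name first */
             clk = devm_clk_get(&pdev->dev, name);
             if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                     return clk;

             /* fall back to the legacy "<name>_clk" binding for old DTs */
             snprintf(name2, sizeof(name2), "%s_clk", name);
             clk = devm_clk_get(&pdev->dev, name2);
             if (!IS_ERR(clk))
                     dev_warn(&pdev->dev,
                              "Using legacy clk name binding, use \"%s\" instead of \"%s\"\n",
                              name, name2);

             return clk;
     }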

 + fw loading re-work with helper to handle either /lib/firmware/qcom/$fw
   or /lib/firmware/$fw.. background, we've started landing fw for some
   generations in linux-firmware, but there is a preference to put fw files
   under a 'qcom' subdirectory, which is not what was done on android or for
   people who copied fw from android.  So now we first look in the qcom subdir
   and then fall back to the original location.
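
   A sketch of that lookup order (illustrative only; the in-tree helper is
   adreno_request_fw(), which additionally remembers which location worked
   so later files are loaded from the same place -- the function name and
   buffer size below are made up for the example):

     #include <linux/device.h>
     #include <linux/err.h>
     #include <linux/firmware.h>
     #include <linux/kernel.h>

     /* Illustrative sketch of the qcom/ -> legacy -> usermode-helper fallback */
     static const struct firmware *request_adreno_fw(struct device *dev,
                                                     const char *fwname)
     {
             const struct firmware *fw;
             char newname[64];

             snprintf(newname, sizeof(newname), "qcom/%s", fwname);

             /* 1) new location, skipping the usermode helper */
             if (!request_firmware_direct(&fw, newname, dev))
                     return fw;

             /* 2) legacy toplevel location used on android and by people
              *    who copied fw from android
              */
             if (!request_firmware_direct(&fw, fwname, dev))
                     return fw;

             /* 3) new location again, this time allowing the usermode helper */
             if (!request_firmware(&fw, newname, dev))
                     return fw;

             return ERR_PTR(-ENOENT);
     }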

 + bunch of GPU debugging enhancements, to dump full cmdline of processes
   that trigger faults, and to add a new debugfs to capture cmdstream of
   just submits that triggered faults.. both quite useful for piglit ;-)

* tag 'drm-msm-next-2017-11-01' of git://people.freedesktop.org/~robclark/linux: (38 commits)
  drm/msm: use %z format modifier for printing size_t
  drm/msm/mdp5: Don't use async plane update path if plane visibility changes
  drm/msm/mdp5: mdp5_crtc: Restore cursor state only if LM cursors are enabled
  drm/msm/mdp5: Update mdp5_pipe_assign to spit out both planes
  drm/msm/mdp5: Prepare mdp5_pipe_assign for some rework
  drm/msm: remove mdp5_cursor_plane_funcs
  drm/msm: update cursors asynchronously through atomic
  drm/msm/atomic: switch to drm_atomic_helper_check
  drm/msm/mdp5: restore cursor state when enabling crtc
  drm/msm/mdp5: don't use autosuspend
  drm/msm/mdp5: ignore planes that are not visible
  drm/msm: dump submits which triggered gpu hang
  drm/msm: preserve IOVAs in submit's bo table
  drm/msm/rd: allow adding addition msg to top of dump
  drm/msm: split rd debugfs file
  drm/msm: add special _get_vaddr_active() for cmdstream dumps
  drm/msm: show task cmdline in gpu recovery messages
  drm/msm: dump a rd GPUADDR header for all buffers in the command
  drm/msm: Removed unused struct_mutex_task
  drm/msm: Implement preemption for A5XX targets
  ...
parents 43106e25 39ae0d3e
...@@ -13,16 +13,16 @@ Required properties: ...@@ -13,16 +13,16 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>. - power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks. - clocks: Phandles to device clocks.
- clock-names: the following clocks are required: - clock-names: the following clocks are required:
* "mdp_core_clk" * "mdp_core"
* "iface_clk" * "iface"
* "bus_clk" * "bus"
* "core_mmss_clk" * "core_mmss"
* "byte_clk" * "byte"
* "pixel_clk" * "pixel"
* "core_clk" * "core"
For DSIv2, we need an additional clock: For DSIv2, we need an additional clock:
* "src_clk" * "src"
- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform. - assigned-clocks: Parents of "byte" and "pixel" for the given platform.
- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided - assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
by a DSI PHY block. See [1] for details on clock bindings. by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node - vdd-supply: phandle to vdd regulator device node
...@@ -101,7 +101,7 @@ Required properties: ...@@ -101,7 +101,7 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>. - power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks. See [1] for details on clock bindings. - clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required: - clock-names: the following clocks are required:
* "iface_clk" * "iface"
- vddio-supply: phandle to vdd-io regulator device node - vddio-supply: phandle to vdd-io regulator device node
Optional properties: Optional properties:
...@@ -123,13 +123,13 @@ Example: ...@@ -123,13 +123,13 @@ Example:
reg = <0xfd922800 0x200>; reg = <0xfd922800 0x200>;
power-domains = <&mmcc MDSS_GDSC>; power-domains = <&mmcc MDSS_GDSC>;
clock-names = clock-names =
"bus_clk", "bus",
"byte_clk", "byte",
"core_clk", "core",
"core_mmss_clk", "core_mmss",
"iface_clk", "iface",
"mdp_core_clk", "mdp_core",
"pixel_clk"; "pixel";
clocks = clocks =
<&mmcc MDSS_AXI_CLK>, <&mmcc MDSS_AXI_CLK>,
<&mmcc MDSS_BYTE0_CLK>, <&mmcc MDSS_BYTE0_CLK>,
...@@ -207,7 +207,7 @@ Example: ...@@ -207,7 +207,7 @@ Example:
reg = <0xfd922a00 0xd4>, reg = <0xfd922a00 0xd4>,
<0xfd922b00 0x2b0>, <0xfd922b00 0x2b0>,
<0xfd922d80 0x7b>; <0xfd922d80 0x7b>;
clock-names = "iface_clk"; clock-names = "iface";
clocks = <&mmcc MDSS_AHB_CLK>; clocks = <&mmcc MDSS_AHB_CLK>;
#clock-cells = <1>; #clock-cells = <1>;
vddio-supply = <&pma8084_l12>; vddio-supply = <&pma8084_l12>;
......
...@@ -12,11 +12,11 @@ Required properties: ...@@ -12,11 +12,11 @@ Required properties:
- clocks: device clocks - clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details. See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- clock-names: the following clocks are required: - clock-names: the following clocks are required:
* "core_clk" * "core"
* "iface_clk" * "iface"
* "mdp_core_clk" * "mdp_core"
* "pixel_clk" * "pixel"
* "link_clk" * "link"
- #clock-cells: The value should be 1. - #clock-cells: The value should be 1.
- vdda-supply: phandle to vdda regulator device node - vdda-supply: phandle to vdda regulator device node
- lvl-vdd-supply: phandle to regulator device node which is used to supply power - lvl-vdd-supply: phandle to regulator device node which is used to supply power
...@@ -41,11 +41,11 @@ Example: ...@@ -41,11 +41,11 @@ Example:
interrupts = <12 0>; interrupts = <12 0>;
power-domains = <&mmcc MDSS_GDSC>; power-domains = <&mmcc MDSS_GDSC>;
clock-names = clock-names =
"core_clk", "core",
"pixel_clk", "pixel",
"iface_clk", "iface",
"link_clk", "link",
"mdp_core_clk"; "mdp_core";
clocks = clocks =
<&mmcc MDSS_EDPAUX_CLK>, <&mmcc MDSS_EDPAUX_CLK>,
<&mmcc MDSS_EDPPIXEL_CLK>, <&mmcc MDSS_EDPPIXEL_CLK>,
......
...@@ -64,9 +64,9 @@ Example: ...@@ -64,9 +64,9 @@ Example:
interrupts = <GIC_SPI 79 0>; interrupts = <GIC_SPI 79 0>;
power-domains = <&mmcc MDSS_GDSC>; power-domains = <&mmcc MDSS_GDSC>;
clock-names = clock-names =
"core_clk", "core",
"master_iface_clk", "master_iface",
"slave_iface_clk"; "slave_iface";
clocks = clocks =
<&mmcc HDMI_APP_CLK>, <&mmcc HDMI_APP_CLK>,
<&mmcc HDMI_M_AHB_CLK>, <&mmcc HDMI_M_AHB_CLK>,
...@@ -92,7 +92,7 @@ Example: ...@@ -92,7 +92,7 @@ Example:
<0x4a00500 0x100>; <0x4a00500 0x100>;
#phy-cells = <0>; #phy-cells = <0>;
power-domains = <&mmcc MDSS_GDSC>; power-domains = <&mmcc MDSS_GDSC>;
clock-names = "slave_iface_clk"; clock-names = "slave_iface";
clocks = <&mmcc HDMI_S_AHB_CLK>; clocks = <&mmcc HDMI_S_AHB_CLK>;
core-vdda-supply = <&pm8921_hdmi_mvs>; core-vdda-supply = <&pm8921_hdmi_mvs>;
}; };
......
...@@ -22,16 +22,16 @@ Required properties: ...@@ -22,16 +22,16 @@ Required properties:
Documentation/devicetree/bindings/power/power_domain.txt Documentation/devicetree/bindings/power/power_domain.txt
- clocks: device clocks. See ../clocks/clock-bindings.txt for details. - clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required. - clock-names: the following clocks are required.
* "iface_clk" * "iface"
* "bus_clk" * "bus"
* "vsync_clk" * "vsync"
- #address-cells: number of address cells for the MDSS children. Should be 1. - #address-cells: number of address cells for the MDSS children. Should be 1.
- #size-cells: Should be 1. - #size-cells: Should be 1.
- ranges: parent bus address space is the same as the child bus address space. - ranges: parent bus address space is the same as the child bus address space.
Optional properties: Optional properties:
- clock-names: the following clocks are optional: - clock-names: the following clocks are optional:
* "lut_clk" * "lut"
MDP5: MDP5:
Required properties: Required properties:
...@@ -45,10 +45,10 @@ Required properties: ...@@ -45,10 +45,10 @@ Required properties:
through MDP block through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details. - clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required. - clock-names: the following clocks are required.
- * "bus_clk" - * "bus"
- * "iface_clk" - * "iface"
- * "core_clk" - * "core"
- * "vsync_clk" - * "vsync"
- ports: contains the list of output ports from MDP. These connect to interfaces - ports: contains the list of output ports from MDP. These connect to interfaces
that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
special case since it is a part of the MDP block itself). special case since it is a part of the MDP block itself).
...@@ -77,7 +77,7 @@ Required properties: ...@@ -77,7 +77,7 @@ Required properties:
Optional properties: Optional properties:
- clock-names: the following clocks are optional: - clock-names: the following clocks are optional:
* "lut_clk" * "lut"
Example: Example:
...@@ -95,9 +95,9 @@ Example: ...@@ -95,9 +95,9 @@ Example:
clocks = <&gcc GCC_MDSS_AHB_CLK>, clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>; <&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk", clock-names = "iface",
"bus_clk", "bus",
"vsync_clk" "vsync"
interrupts = <0 72 0>; interrupts = <0 72 0>;
...@@ -120,10 +120,10 @@ Example: ...@@ -120,10 +120,10 @@ Example:
<&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_MDP_CLK>, <&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>; <&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk", clock-names = "iface",
"bus_clk", "bus",
"core_clk", "core",
"vsync_clk"; "vsync";
ports { ports {
#address-cells = <1>; #address-cells = <1>;
......
...@@ -8,6 +8,7 @@ msm-y := \ ...@@ -8,6 +8,7 @@ msm-y := \
adreno/a4xx_gpu.o \ adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \ adreno/a5xx_gpu.o \
adreno/a5xx_power.o \ adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
hdmi/hdmi.o \ hdmi/hdmi.o \
hdmi/hdmi_audio.o \ hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \ hdmi/hdmi_bridge.o \
...@@ -57,7 +58,8 @@ msm-y := \ ...@@ -57,7 +58,8 @@ msm-y := \
msm_iommu.o \ msm_iommu.o \
msm_perf.o \ msm_perf.o \
msm_rd.o \ msm_rd.o \
msm_ringbuffer.o msm_ringbuffer.o \
msm_submitqueue.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
......
...@@ -44,7 +44,7 @@ static bool a3xx_idle(struct msm_gpu *gpu); ...@@ -44,7 +44,7 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static bool a3xx_me_init(struct msm_gpu *gpu) static bool a3xx_me_init(struct msm_gpu *gpu)
{ {
struct msm_ringbuffer *ring = gpu->rb; struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17); OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7); OUT_RING(ring, 0x000003f7);
...@@ -65,7 +65,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu) ...@@ -65,7 +65,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu); gpu->funcs->flush(gpu, ring);
return a3xx_idle(gpu); return a3xx_idle(gpu);
} }
...@@ -339,7 +339,7 @@ static void a3xx_destroy(struct msm_gpu *gpu) ...@@ -339,7 +339,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
static bool a3xx_idle(struct msm_gpu *gpu) static bool a3xx_idle(struct msm_gpu *gpu)
{ {
/* wait for ringbuffer to drain: */ /* wait for ringbuffer to drain: */
if (!adreno_idle(gpu)) if (!adreno_idle(gpu, gpu->rb[0]))
return false; return false;
/* then wait for GPU to finish: */ /* then wait for GPU to finish: */
...@@ -444,9 +444,9 @@ static const struct adreno_gpu_funcs funcs = { ...@@ -444,9 +444,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend, .pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume, .pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover, .recover = a3xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit, .submit = adreno_submit,
.flush = adreno_flush, .flush = adreno_flush,
.active_ring = adreno_active_ring,
.irq = a3xx_irq, .irq = a3xx_irq,
.destroy = a3xx_destroy, .destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
...@@ -492,7 +492,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) ...@@ -492,7 +492,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers; adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets; adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) if (ret)
goto fail; goto fail;
......
...@@ -116,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu) ...@@ -116,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
static bool a4xx_me_init(struct msm_gpu *gpu) static bool a4xx_me_init(struct msm_gpu *gpu)
{ {
struct msm_ringbuffer *ring = gpu->rb; struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17); OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7); OUT_RING(ring, 0x000003f7);
...@@ -137,7 +137,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu) ...@@ -137,7 +137,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu); gpu->funcs->flush(gpu, ring);
return a4xx_idle(gpu); return a4xx_idle(gpu);
} }
...@@ -337,7 +337,7 @@ static void a4xx_destroy(struct msm_gpu *gpu) ...@@ -337,7 +337,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
static bool a4xx_idle(struct msm_gpu *gpu) static bool a4xx_idle(struct msm_gpu *gpu)
{ {
/* wait for ringbuffer to drain: */ /* wait for ringbuffer to drain: */
if (!adreno_idle(gpu)) if (!adreno_idle(gpu, gpu->rb[0]))
return false; return false;
/* then wait for GPU to finish: */ /* then wait for GPU to finish: */
...@@ -532,9 +532,9 @@ static const struct adreno_gpu_funcs funcs = { ...@@ -532,9 +532,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a4xx_pm_suspend, .pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume, .pm_resume = a4xx_pm_resume,
.recover = a4xx_recover, .recover = a4xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit, .submit = adreno_submit,
.flush = adreno_flush, .flush = adreno_flush,
.active_ring = adreno_active_ring,
.irq = a4xx_irq, .irq = a4xx_irq,
.destroy = a4xx_destroy, .destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
...@@ -574,7 +574,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) ...@@ -574,7 +574,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers; adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets; adreno_gpu->reg_offsets = a4xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) if (ret)
goto fail; goto fail;
......
This diff is collapsed.
/* Copyright (c) 2016 The Linux Foundation. All rights reserved. /* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and * it under the terms of the GNU General Public License version 2 and
...@@ -35,10 +35,100 @@ struct a5xx_gpu { ...@@ -35,10 +35,100 @@ struct a5xx_gpu {
uint32_t gpmu_dwords; uint32_t gpmu_dwords;
uint32_t lm_leakage; uint32_t lm_leakage;
struct msm_ringbuffer *cur_ring;
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
struct timer_list preempt_timer;
}; };
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
/*
* In order to do lockless preemption we use a simple state machine to progress
* through the process.
*
* PREEMPT_NONE - no preemption in progress. Next state: START.
* PREEMPT_START - The trigger is evaluating if preemption is possible. Next
* states: TRIGGERED, NONE
* PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
* state: NONE.
* PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
* states: FAULTED, PENDING
* PREEMPT_FAULTED - A preemption timed out (never completed). This will
* trigger recovery. Next state: N/A
* PREEMPT_PENDING - The preemption-complete interrupt has fired; the callback
* is checking the success of the operation. Next states: FAULTED, NONE.
*/
enum preempt_state {
PREEMPT_NONE = 0,
PREEMPT_START,
PREEMPT_ABORT,
PREEMPT_TRIGGERED,
PREEMPT_FAULTED,
PREEMPT_PENDING,
};
/*
* struct a5xx_preempt_record is a shared buffer between the microcode and the
* CPU to store the state for preemption. The record itself is much larger
* (64k) but most of that is used by the CP for storage.
*
* There is a preemption record assigned per ringbuffer. When the CPU triggers a
* preemption, it fills out the record with the useful information (wptr, ring
* base, etc) and the microcode uses that information to set up the CP following
* the preemption. When a ring is switched out, the CP will save the ringbuffer
* state back to the record. In this way, once the records are properly set up
* the CPU can quickly switch back and forth between ringbuffers by only
* updating a few registers (often only the wptr).
*
* These are the CPU aware registers in the record:
* @magic: Must always be 0x27C4BAFC
* @info: Type of the record - written 0 by the CPU, updated by the CP
* @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
* the CP
* @cntl: Value of RB_CNTL written by CPU, save/restored by CP
* @rptr: Value of RB_RPTR written by CPU, save/restored by CP
* @wptr: Value of RB_WPTR written by CPU, save/restored by CP
* @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
* @rbase: Value of RB_BASE written by CPU, save/restored by CP
* @counter: GPU address of the storage area for the performance counters
*/
struct a5xx_preempt_record {
uint32_t magic;
uint32_t info;
uint32_t data;
uint32_t cntl;
uint32_t rptr;
uint32_t wptr;
uint64_t rptr_addr;
uint64_t rbase;
uint64_t counter;
};
/* Magic identifier for the preemption record */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
/*
* Even though the structure above is only a few bytes, we need a full 64k to
* store the entire preemption record from the CP
*/
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
/*
* The preemption counter block is a storage area for the value of the
* preemption counters that are saved immediately before context switch. We
* append it on to the end of the allocation for the preemption record.
*/
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
int a5xx_power_init(struct msm_gpu *gpu); int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu); void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
...@@ -55,7 +145,22 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, ...@@ -55,7 +145,22 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
return -ETIMEDOUT; return -ETIMEDOUT;
} }
bool a5xx_idle(struct msm_gpu *gpu); bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);
/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
return !(preempt_state == PREEMPT_NONE ||
preempt_state == PREEMPT_ABORT);
}
#endif /* __A5XX_GPU_H__ */ #endif /* __A5XX_GPU_H__ */
...@@ -173,7 +173,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) ...@@ -173,7 +173,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
{ {
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb; struct msm_ringbuffer *ring = gpu->rb[0];
if (!a5xx_gpu->gpmu_dwords) if (!a5xx_gpu->gpmu_dwords)
return 0; return 0;
...@@ -192,9 +192,9 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) ...@@ -192,9 +192,9 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1); OUT_RING(ring, 1);
gpu->funcs->flush(gpu); gpu->funcs->flush(gpu, ring);
if (!a5xx_idle(gpu)) { if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n", DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
gpu->name); gpu->name);
return -EINVAL; return -EINVAL;
...@@ -264,7 +264,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) ...@@ -264,7 +264,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
return; return;
/* Get the firmware */ /* Get the firmware */
if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) { fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
if (IS_ERR(fw)) {
DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n", DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
gpu->name); gpu->name);
return; return;
......
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_gem.h"
#include "a5xx_gpu.h"
/*
* Try to transition the preemption state from old to new. Return
* true on success or false if the original state wasn't 'old'
*/
static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
enum preempt_state old, enum preempt_state new)
{
enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
old, new);
return (cur == old);
}
/*
* Force the preemption state to the specified state. This is used in cases
* where the current state is known and won't change
*/
static inline void set_preempt_state(struct a5xx_gpu *gpu,
enum preempt_state new)
{
/*
* preempt_state may be read by other cores trying to trigger a
* preemption or in the interrupt handler so barriers are needed
* before...
*/
smp_mb__before_atomic();
atomic_set(&gpu->preempt_state, new);
/* ... and after */
smp_mb__after_atomic();
}
/* Write the most recent wptr for the given ring into the hardware */
static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
unsigned long flags;
uint32_t wptr;
if (!ring)
return;
spin_lock_irqsave(&ring->lock, flags);
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
unsigned long flags;
int i;
for (i = 0; i < gpu->nr_rings; i++) {
bool empty;
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->lock, flags);
empty = (get_wptr(ring) == ring->memptrs->rptr);
spin_unlock_irqrestore(&ring->lock, flags);
if (!empty)
return ring;
}
return NULL;
}
static void a5xx_preempt_timer(unsigned long data)
{
struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
/* Try to trigger a preemption switch */
void a5xx_preempt_trigger(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
struct msm_ringbuffer *ring;
if (gpu->nr_rings == 1)
return;
/*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
return;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
/*
* If no ring is populated, or the highest priority ring is the current
* one, do nothing except update the wptr to the latest and greatest
*/
if (!ring || (a5xx_gpu->cur_ring == ring)) {
/*
* It's possible that while a preemption request is in progress
* from an irq context, a user context trying to submit might
* fail to update the write pointer, because it determines
* that the preempt state is not PREEMPT_NONE.
*
* Close the race by introducing an intermediate
* state PREEMPT_ABORT to let the submit path
* know that the ringbuffer is not going to change
* and can safely update the write pointer.
*/
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
return;
}
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
/* Set the address of the incoming preemption record */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
a5xx_gpu->preempt_iova[ring->id]);
a5xx_gpu->next_ring = ring;
/* Start a timer to catch a stuck preemption */
mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
/* Set the preemption state to triggered */
set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
/* Make sure everything is written before hitting the button */
wmb();
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
{
uint32_t status;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
return;
/* Delete the preemption watchdog timer */
del_timer(&a5xx_gpu->preempt_timer);
/*
* The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
* firing the interrupt, but there is a non-zero chance of a hardware
* condition or a software race that could set it again before we have a
* chance to finish. If that happens, log it and go for recovery
*/
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
dev_err(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
}
a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
a5xx_gpu->next_ring = NULL;
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
}
/* Write a 0 to signal that we aren't switching pagetables */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
/* Always come up on rb 0 */
a5xx_gpu->cur_ring = gpu->rb[0];
}
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
struct drm_gem_object *bo = NULL;
u64 iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
/* Set up the defaults on the preemption record */
ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
ptr->info = 0;
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
return 0;
}
void a5xx_preempt_fini(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
if (!a5xx_gpu->preempt_bo[i])
continue;
msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
if (a5xx_gpu->preempt_iova[i])
msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
a5xx_gpu->preempt_bo[i] = NULL;
}
}
void a5xx_preempt_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
/* No preemption if we only have one ring */
if (gpu->nr_rings <= 1)
return;
for (i = 0; i < gpu->nr_rings; i++) {
if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
/*
* On any failure our adventure is over. Clean up and
* set nr_rings to 1 to force preemption off
*/
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
return;
}
}
setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
(unsigned long) a5xx_gpu);
}
...@@ -125,51 +125,24 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) ...@@ -125,51 +125,24 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{ {
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev; struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config; struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
struct adreno_rev rev; int ret;
const struct adreno_info *info;
struct msm_gpu *gpu = NULL;
if (!pdev) { if (!gpu) {
dev_err(dev->dev, "no adreno device\n"); dev_err(dev->dev, "no adreno device\n");
return NULL; return NULL;
} }
config = pdev->dev.platform_data; pm_runtime_get_sync(&pdev->dev);
rev = config->rev; mutex_lock(&dev->struct_mutex);
info = adreno_info(config->rev); ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
if (!info) { pm_runtime_put_sync(&pdev->dev);
dev_warn(dev->dev, "Unknown GPU revision: %u.%u.%u.%u\n", if (ret) {
rev.core, rev.major, rev.minor, rev.patchid); dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL; return NULL;
} }
DBG("Found GPU: %u.%u.%u.%u", rev.core, rev.major,
rev.minor, rev.patchid);
gpu = info->init(dev);
if (IS_ERR(gpu)) {
dev_warn(dev->dev, "failed to load adreno gpu\n");
gpu = NULL;
/* not fatal */
}
if (gpu) {
int ret;
pm_runtime_get_sync(&pdev->dev);
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
}
}
return gpu; return gpu;
} }
...@@ -282,6 +255,9 @@ static int adreno_get_pwrlevels(struct device *dev, ...@@ -282,6 +255,9 @@ static int adreno_get_pwrlevels(struct device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data) static int adreno_bind(struct device *dev, struct device *master, void *data)
{ {
static struct adreno_platform_config config = {}; static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
struct msm_gpu *gpu;
u32 val; u32 val;
int ret; int ret;
...@@ -302,13 +278,39 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) ...@@ -302,13 +278,39 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return ret; return ret;
dev->platform_data = &config; dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); set_gpu_pdev(drm, to_platform_device(dev));
info = adreno_info(config.rev);
if (!info) {
dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
return -ENXIO;
}
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
return PTR_ERR(gpu);
}
dev_set_drvdata(dev, gpu);
return 0; return 0;
} }
static void adreno_unbind(struct device *dev, struct device *master, static void adreno_unbind(struct device *dev, struct device *master,
void *data) void *data)
{ {
struct msm_gpu *gpu = dev_get_drvdata(dev);
gpu->funcs->pm_suspend(gpu);
gpu->funcs->destroy(gpu);
set_gpu_pdev(dev_get_drvdata(master), NULL); set_gpu_pdev(dev_get_drvdata(master), NULL);
} }
......
This diff is collapsed.
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat * Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com> * Author: Rob Clark <robdclark@gmail.com>
* *
* Copyright (c) 2014 The Linux Foundation. All rights reserved. * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by * under the terms of the GNU General Public License version 2 as published by
...@@ -82,14 +82,6 @@ struct adreno_info { ...@@ -82,14 +82,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev); const struct adreno_info *adreno_info(struct adreno_rev rev);
#define rbmemptr(adreno_gpu, member) \
((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
struct adreno_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
};
struct adreno_gpu { struct adreno_gpu {
struct msm_gpu base; struct msm_gpu base;
struct adreno_rev rev; struct adreno_rev rev;
...@@ -101,16 +93,30 @@ struct adreno_gpu { ...@@ -101,16 +93,30 @@ struct adreno_gpu {
/* interesting register offsets to dump: */ /* interesting register offsets to dump: */
const unsigned int *registers; const unsigned int *registers;
/*
* Are we loading fw from the legacy path? Prior to the addition
* of gpu firmware to linux-firmware, the fw files were
* placed in the toplevel firmware directory, following qcom's
* android kernel. But linux-firmware preferred they be
* placed in a 'qcom' subdirectory.
*
* For backwards compatibility, we try first to load from
* the new path, using request_firmware_direct() to avoid
* any potential timeout waiting for usermode helper, then
* fall back to the old path (with direct load). And
* finally fall back to request_firmware() with the new
* path to allow the usermode helper.
*/
enum {
FW_LOCATION_UNKNOWN = 0,
FW_LOCATION_NEW, /* /lib/firmware/qcom/$fwfile */
FW_LOCATION_LEGACY, /* /lib/firmware/$fwfile */
FW_LOCATION_HELPER,
} fwloc;
/* firmware: */ /* firmware: */
const struct firmware *pm4, *pfp; const struct firmware *pm4, *pfp;
/* ringbuffer rptr/wptr: */
// TODO should this be in msm_ringbuffer? I think it would be
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
uint64_t memptrs_iova;
/* /*
* Register offsets are different between some GPUs. * Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific * GPU specific offsets will be exported by GPU specific
...@@ -196,22 +202,25 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu) ...@@ -196,22 +202,25 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
} }
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
int adreno_hw_init(struct msm_gpu *gpu); int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx); struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu); void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu); bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m); void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif #endif
void adreno_dump_info(struct msm_gpu *gpu); void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu); void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords); void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs); struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu); void adreno_gpu_cleanup(struct adreno_gpu *gpu);
...@@ -220,7 +229,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu); ...@@ -220,7 +229,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu);
static inline void static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{ {
adreno_wait_ring(ring->gpu, cnt+1); adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
} }
...@@ -228,14 +237,14 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) ...@@ -228,14 +237,14 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void static inline void
OUT_PKT2(struct msm_ringbuffer *ring) OUT_PKT2(struct msm_ringbuffer *ring)
{ {
adreno_wait_ring(ring->gpu, 1); adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT); OUT_RING(ring, CP_TYPE2_PKT);
} }
static inline void static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{ {
adreno_wait_ring(ring->gpu, cnt+1); adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
} }
...@@ -257,14 +266,14 @@ static inline u32 PM4_PARITY(u32 val) ...@@ -257,14 +266,14 @@ static inline u32 PM4_PARITY(u32 val)
static inline void static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{ {
adreno_wait_ring(ring->gpu, cnt + 1); adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, PKT4(regindx, cnt)); OUT_RING(ring, PKT4(regindx, cnt));
} }
static inline void static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{ {
adreno_wait_ring(ring->gpu, cnt + 1); adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
} }
...@@ -323,6 +332,11 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu, ...@@ -323,6 +332,11 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
adreno_gpu_write(gpu, hi, upper_32_bits(data)); adreno_gpu_write(gpu, hi, upper_32_bits(data));
} }
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}
/* /*
* Given a register and a count, return a value to program into * Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include "dsi_cfg.h" #include "dsi_cfg.h"
static const char * const dsi_v2_bus_clk_names[] = { static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss_clk", "iface_clk", "bus_clk", "core_mmss", "iface", "bus",
}; };
static const struct msm_dsi_config apq8064_dsi_cfg = { static const struct msm_dsi_config apq8064_dsi_cfg = {
...@@ -34,7 +34,7 @@ static const struct msm_dsi_config apq8064_dsi_cfg = { ...@@ -34,7 +34,7 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
}; };
static const char * const dsi_6g_bus_clk_names[] = { static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk", "mdp_core", "iface", "bus", "core_mmss",
}; };
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
...@@ -55,7 +55,7 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { ...@@ -55,7 +55,7 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
}; };
static const char * const dsi_8916_bus_clk_names[] = { static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", "mdp_core", "iface", "bus",
}; };
static const struct msm_dsi_config msm8916_dsi_cfg = { static const struct msm_dsi_config msm8916_dsi_cfg = {
...@@ -99,7 +99,7 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { ...@@ -99,7 +99,7 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
* without it too. Figure out why it doesn't enable and uncomment below * without it too. Figure out why it doesn't enable and uncomment below
*/ */
static const char * const dsi_8996_bus_clk_names[] = { static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */ "mdp_core", "iface", "bus", /* "core_mmss", */
}; };
static const struct msm_dsi_config msm8996_dsi_cfg = { static const struct msm_dsi_config msm8996_dsi_cfg = {
......
...@@ -334,46 +334,46 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host) ...@@ -334,46 +334,46 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
static int dsi_clk_init(struct msm_dsi_host *msm_host) static int dsi_clk_init(struct msm_dsi_host *msm_host)
{ {
struct device *dev = &msm_host->pdev->dev; struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
const struct msm_dsi_config *cfg = cfg_hnd->cfg; const struct msm_dsi_config *cfg = cfg_hnd->cfg;
int i, ret = 0; int i, ret = 0;
/* get bus clocks */ /* get bus clocks */
for (i = 0; i < cfg->num_bus_clks; i++) { for (i = 0; i < cfg->num_bus_clks; i++) {
msm_host->bus_clks[i] = devm_clk_get(dev, msm_host->bus_clks[i] = msm_clk_get(pdev,
cfg->bus_clk_names[i]); cfg->bus_clk_names[i]);
if (IS_ERR(msm_host->bus_clks[i])) { if (IS_ERR(msm_host->bus_clks[i])) {
ret = PTR_ERR(msm_host->bus_clks[i]); ret = PTR_ERR(msm_host->bus_clks[i]);
pr_err("%s: Unable to get %s, ret = %d\n", pr_err("%s: Unable to get %s clock, ret = %d\n",
__func__, cfg->bus_clk_names[i], ret); __func__, cfg->bus_clk_names[i], ret);
goto exit; goto exit;
} }
} }
/* get link and source clocks */ /* get link and source clocks */
msm_host->byte_clk = devm_clk_get(dev, "byte_clk"); msm_host->byte_clk = msm_clk_get(pdev, "byte");
if (IS_ERR(msm_host->byte_clk)) { if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk); ret = PTR_ERR(msm_host->byte_clk);
pr_err("%s: can't find dsi_byte_clk. ret=%d\n", pr_err("%s: can't find dsi_byte clock. ret=%d\n",
__func__, ret); __func__, ret);
msm_host->byte_clk = NULL; msm_host->byte_clk = NULL;
goto exit; goto exit;
} }
msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk"); msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(msm_host->pixel_clk)) { if (IS_ERR(msm_host->pixel_clk)) {
ret = PTR_ERR(msm_host->pixel_clk); ret = PTR_ERR(msm_host->pixel_clk);
pr_err("%s: can't find dsi_pixel_clk. ret=%d\n", pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
__func__, ret); __func__, ret);
msm_host->pixel_clk = NULL; msm_host->pixel_clk = NULL;
goto exit; goto exit;
} }
msm_host->esc_clk = devm_clk_get(dev, "core_clk"); msm_host->esc_clk = msm_clk_get(pdev, "core");
if (IS_ERR(msm_host->esc_clk)) { if (IS_ERR(msm_host->esc_clk)) {
ret = PTR_ERR(msm_host->esc_clk); ret = PTR_ERR(msm_host->esc_clk);
pr_err("%s: can't find dsi_esc_clk. ret=%d\n", pr_err("%s: can't find dsi_esc clock. ret=%d\n",
__func__, ret); __func__, ret);
msm_host->esc_clk = NULL; msm_host->esc_clk = NULL;
goto exit; goto exit;
...@@ -382,22 +382,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) ...@@ -382,22 +382,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk); msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) { if (!msm_host->byte_clk_src) {
ret = -ENODEV; ret = -ENODEV;
pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret); pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
goto exit; goto exit;
} }
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk); msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
if (!msm_host->pixel_clk_src) { if (!msm_host->pixel_clk_src) {
ret = -ENODEV; ret = -ENODEV;
pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret); pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
goto exit; goto exit;
} }
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) { if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
msm_host->src_clk = devm_clk_get(dev, "src_clk"); msm_host->src_clk = msm_clk_get(pdev, "src");
if (IS_ERR(msm_host->src_clk)) { if (IS_ERR(msm_host->src_clk)) {
ret = PTR_ERR(msm_host->src_clk); ret = PTR_ERR(msm_host->src_clk);
pr_err("%s: can't find dsi_src_clk. ret=%d\n", pr_err("%s: can't find src clock. ret=%d\n",
__func__, ret); __func__, ret);
msm_host->src_clk = NULL; msm_host->src_clk = NULL;
goto exit; goto exit;
...@@ -406,7 +406,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) ...@@ -406,7 +406,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk); msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
if (!msm_host->esc_clk_src) { if (!msm_host->esc_clk_src) {
ret = -ENODEV; ret = -ENODEV;
pr_err("%s: can't get esc_clk_src. ret=%d\n", pr_err("%s: can't get esc clock parent. ret=%d\n",
__func__, ret); __func__, ret);
goto exit; goto exit;
} }
...@@ -414,7 +414,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) ...@@ -414,7 +414,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk); msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
if (!msm_host->dsi_clk_src) { if (!msm_host->dsi_clk_src) {
ret = -ENODEV; ret = -ENODEV;
pr_err("%s: can't get dsi_clk_src. ret=%d\n", pr_err("%s: can't get src clock parent. ret=%d\n",
__func__, ret); __func__, ret);
} }
} }
......
...@@ -482,7 +482,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) ...@@ -482,7 +482,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail; goto fail;
} }
phy->ahb_clk = devm_clk_get(dev, "iface_clk"); phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) { if (IS_ERR(phy->ahb_clk)) {
dev_err(dev, "%s: Unable to get ahb clk\n", __func__); dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk); ret = PTR_ERR(phy->ahb_clk);
......
...@@ -150,46 +150,46 @@ static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = { ...@@ -150,46 +150,46 @@ static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
static int edp_clk_init(struct edp_ctrl *ctrl) static int edp_clk_init(struct edp_ctrl *ctrl)
{ {
struct device *dev = &ctrl->pdev->dev; struct platform_device *pdev = ctrl->pdev;
int ret; int ret;
ctrl->aux_clk = devm_clk_get(dev, "core_clk"); ctrl->aux_clk = msm_clk_get(pdev, "core");
if (IS_ERR(ctrl->aux_clk)) { if (IS_ERR(ctrl->aux_clk)) {
ret = PTR_ERR(ctrl->aux_clk); ret = PTR_ERR(ctrl->aux_clk);
pr_err("%s: Can't find aux_clk, %d\n", __func__, ret); pr_err("%s: Can't find core clock, %d\n", __func__, ret);
ctrl->aux_clk = NULL; ctrl->aux_clk = NULL;
return ret; return ret;
} }
ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk"); ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(ctrl->pixel_clk)) { if (IS_ERR(ctrl->pixel_clk)) {
ret = PTR_ERR(ctrl->pixel_clk); ret = PTR_ERR(ctrl->pixel_clk);
pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret); pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
ctrl->pixel_clk = NULL; ctrl->pixel_clk = NULL;
return ret; return ret;
} }
ctrl->ahb_clk = devm_clk_get(dev, "iface_clk"); ctrl->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(ctrl->ahb_clk)) { if (IS_ERR(ctrl->ahb_clk)) {
ret = PTR_ERR(ctrl->ahb_clk); ret = PTR_ERR(ctrl->ahb_clk);
pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret); pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
ctrl->ahb_clk = NULL; ctrl->ahb_clk = NULL;
return ret; return ret;
} }
ctrl->link_clk = devm_clk_get(dev, "link_clk"); ctrl->link_clk = msm_clk_get(pdev, "link");
if (IS_ERR(ctrl->link_clk)) { if (IS_ERR(ctrl->link_clk)) {
ret = PTR_ERR(ctrl->link_clk); ret = PTR_ERR(ctrl->link_clk);
pr_err("%s: Can't find link_clk, %d\n", __func__, ret); pr_err("%s: Can't find link clock, %d\n", __func__, ret);
ctrl->link_clk = NULL; ctrl->link_clk = NULL;
return ret; return ret;
} }
/* need mdp core clock to receive irq */ /* need mdp core clock to receive irq */
ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
if (IS_ERR(ctrl->mdp_core_clk)) { if (IS_ERR(ctrl->mdp_core_clk)) {
ret = PTR_ERR(ctrl->mdp_core_clk); ret = PTR_ERR(ctrl->mdp_core_clk);
pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret); pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
ctrl->mdp_core_clk = NULL; ctrl->mdp_core_clk = NULL;
return ret; return ret;
} }
......
...@@ -208,7 +208,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) ...@@ -208,7 +208,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->hpd_clk_cnt; i++) { for (i = 0; i < config->hpd_clk_cnt; i++) {
struct clk *clk; struct clk *clk;
clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]); clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) { if (IS_ERR(clk)) {
ret = PTR_ERR(clk); ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n", dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
...@@ -228,7 +228,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) ...@@ -228,7 +228,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->pwr_clk_cnt; i++) { for (i = 0; i < config->pwr_clk_cnt; i++) {
struct clk *clk; struct clk *clk;
clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]); clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) { if (IS_ERR(clk)) {
ret = PTR_ERR(clk); ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n", dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
...@@ -361,7 +361,7 @@ static const char *hpd_reg_names_none[] = {}; ...@@ -361,7 +361,7 @@ static const char *hpd_reg_names_none[] = {};
static struct hdmi_platform_config hdmi_tx_8660_config; static struct hdmi_platform_config hdmi_tx_8660_config;
static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"}; static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static struct hdmi_platform_config hdmi_tx_8960_config = { static struct hdmi_platform_config hdmi_tx_8960_config = {
HDMI_CFG(hpd_reg, 8960), HDMI_CFG(hpd_reg, 8960),
...@@ -370,8 +370,8 @@ static struct hdmi_platform_config hdmi_tx_8960_config = { ...@@ -370,8 +370,8 @@ static struct hdmi_platform_config hdmi_tx_8960_config = {
static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"}; static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"}; static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"}; static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"}; static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static struct hdmi_platform_config hdmi_tx_8974_config = { static struct hdmi_platform_config hdmi_tx_8974_config = {
......
...@@ -48,7 +48,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy) ...@@ -48,7 +48,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_clks; i++) { for (i = 0; i < cfg->num_clks; i++) {
struct clk *clk; struct clk *clk;
clk = devm_clk_get(dev, cfg->clk_names[i]); clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) { if (IS_ERR(clk)) {
ret = PTR_ERR(clk); ret = PTR_ERR(clk);
dev_err(dev, "failed to get phy clock: %s (%d)\n", dev_err(dev, "failed to get phy clock: %s (%d)\n",
......
...@@ -48,7 +48,7 @@ static const char * const hdmi_phy_8960_reg_names[] = { ...@@ -48,7 +48,7 @@ static const char * const hdmi_phy_8960_reg_names[] = {
}; };
static const char * const hdmi_phy_8960_clk_names[] = { static const char * const hdmi_phy_8960_clk_names[] = {
"slave_iface_clk", "slave_iface",
}; };
const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = { const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
......
...@@ -758,9 +758,7 @@ static const char * const hdmi_phy_8996_reg_names[] = { ...@@ -758,9 +758,7 @@ static const char * const hdmi_phy_8996_reg_names[] = {
}; };
static const char * const hdmi_phy_8996_clk_names[] = { static const char * const hdmi_phy_8996_clk_names[] = {
"mmagic_iface_clk", "iface", "ref",
"iface_clk",
"ref_clk",
}; };
const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = { const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
......
...@@ -41,8 +41,7 @@ static const char * const hdmi_phy_8x74_reg_names[] = { ...@@ -41,8 +41,7 @@ static const char * const hdmi_phy_8x74_reg_names[] = {
}; };
static const char * const hdmi_phy_8x74_clk_names[] = { static const char * const hdmi_phy_8x74_clk_names[] = {
"iface_clk", "iface", "alt_iface"
"alt_iface_clk"
}; };
const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = { const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
......
@@ -290,6 +290,9 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp4_crtc->enabled))
return;
+/* Disable/save vblank irq handling before power is disabled */
+drm_crtc_vblank_off(crtc);
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
@@ -308,6 +311,10 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
return;
mdp4_enable(mdp4_kms);
+/* Restore vblank irq handling after power is enabled */
+drm_crtc_vblank_on(crtc);
mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
crtc_flush(crtc);
......
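The two mdp4 hunks above follow the usual ordering rule for runtime-PM-aware CRTCs: quiesce vblank bookkeeping before the power domain can drop, and re-arm it only once power is back. Schematically (illustrative only, not the driver's exact code):

/* disable path */
drm_crtc_vblank_off(crtc);	/* park/reject vblank events first */
/* ... unregister error irq, drop clocks and power ... */

/* enable path */
/* ... power and clocks back on ... */
drm_crtc_vblank_on(crtc);	/* vblank state is valid again */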
@@ -224,7 +224,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
return 0;
}
...@@ -55,18 +55,23 @@ struct mdp5_crtc { ...@@ -55,18 +55,23 @@ struct mdp5_crtc {
struct completion pp_completion; struct completion pp_completion;
bool lm_cursor_enabled;
struct { struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock; spinlock_t lock;
/* current cursor being scanned out: */ /* current cursor being scanned out: */
struct drm_gem_object *scanout_bo; struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height; uint32_t width, height;
uint32_t x, y; uint32_t x, y;
} cursor; } cursor;
}; };
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
static struct mdp5_kms *get_kms(struct drm_crtc *crtc) static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{ {
struct msm_drm_private *priv = crtc->dev->dev_private; struct msm_drm_private *priv = crtc->dev->dev_private;
...@@ -114,6 +119,8 @@ static u32 crtc_flush_all(struct drm_crtc *crtc) ...@@ -114,6 +119,8 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
return 0; return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) { drm_atomic_crtc_for_each_plane(plane, crtc) {
if (!plane->state->visible)
continue;
flush_mask |= mdp5_plane_get_flush(plane); flush_mask |= mdp5_plane_get_flush(plane);
} }
...@@ -242,6 +249,9 @@ static void blend_setup(struct drm_crtc *crtc) ...@@ -242,6 +249,9 @@ static void blend_setup(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) { drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp5_pipe right_pipe; enum mdp5_pipe right_pipe;
if (!plane->state->visible)
continue;
pstate = to_mdp5_plane_state(plane->state); pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate; pstates[pstate->stage] = pstate;
stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane); stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
@@ -422,11 +432,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp5_crtc->enabled))
return;
+/* Disable/save vblank irq handling before power is disabled */
+drm_crtc_vblank_off(crtc);
if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
mdp5_crtc->enabled = false;
}
@@ -446,6 +459,29 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(dev);
if (mdp5_crtc->lm_cursor_enabled) {
/*
* Restore LM cursor state, as it might have been lost
* with suspend:
*/
if (mdp5_crtc->cursor.iova) {
unsigned long flags;
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
mdp5_ctl_set_cursor(mdp5_cstate->ctl,
&mdp5_cstate->pipeline, 0, true);
} else {
mdp5_ctl_set_cursor(mdp5_cstate->ctl,
&mdp5_cstate->pipeline, 0, false);
}
}
/* Restore vblank irq handling after power is enabled */
drm_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc); mdp5_crtc_mode_set_nofb(crtc);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
...@@ -580,6 +616,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, ...@@ -580,6 +616,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", crtc->name); DBG("%s: check", crtc->name);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (!pstate->visible)
continue;
pstates[cnt].plane = plane; pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate); pstates[cnt].state = to_mdp5_plane_state(pstate);
...@@ -723,6 +762,50 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) ...@@ -723,6 +762,50 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
mdp5_crtc->cursor.y); mdp5_crtc->cursor.y);
} }
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
uint32_t x, y, width, height;
uint32_t roi_w, roi_h;
int lm;
assert_spin_locked(&mdp5_crtc->cursor.lock);
lm = mdp5_cstate->pipeline.mixer->lm;
x = mdp5_crtc->cursor.x;
y = mdp5_crtc->cursor.y;
width = mdp5_crtc->cursor.width;
height = mdp5_crtc->cursor.height;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
get_roi(crtc, &roi_w, &roi_h);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle, struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height) uint32_t width, uint32_t height)
@@ -735,16 +818,18 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
-uint32_t blendcfg, stride;
-uint64_t cursor_addr;
struct mdp5_ctl *ctl;
-int ret, lm;
+int ret;
-enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
-uint32_t roi_w, roi_h;
bool cursor_enable = true;
unsigned long flags;
+if (!mdp5_crtc->lm_cursor_enabled) {
+dev_warn(dev->dev,
+"cursor_set is deprecated with cursor planes\n");
+return -EINVAL;
+}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
@@ -761,6 +846,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+mdp5_crtc->cursor.iova = 0;
pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
@@ -769,13 +855,11 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
-ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
+ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
-lm = mdp5_cstate->pipeline.mixer->lm;
-stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
pm_runtime_get_sync(&pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -785,22 +869,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
mdp5_crtc->cursor.width = width;
mdp5_crtc->cursor.height = height;
-get_roi(crtc, &roi_w, &roi_h);
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
-MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
-MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
-MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
-MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
-MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
-blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
-blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -817,7 +886,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
crtc_flush(crtc, flush_mask);
end:
-pm_runtime_put_autosuspend(&pdev->dev);
+pm_runtime_put_sync(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -831,12 +900,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
-uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+struct drm_device *dev = crtc->dev;
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
+if (!mdp5_crtc->lm_cursor_enabled) {
+dev_warn(dev->dev,
+"cursor_move is deprecated with cursor planes\n");
+return -EINVAL;
+}
/* don't support LM cursors when we we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
@@ -853,17 +928,12 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
pm_runtime_get_sync(&mdp5_kms->pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
-MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
-MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
-mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
-MDP5_LM_CURSOR_START_XY_Y_START(y) |
-MDP5_LM_CURSOR_START_XY_X_START(x));
+mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
crtc_flush(crtc, flush_mask);
-pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
+pm_runtime_put_sync(&mdp5_kms->pdev->dev);
return 0;
}
@@ -943,16 +1013,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.atomic_print_state = mdp5_crtc_atomic_print_state,
};
-static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
-.set_config = drm_atomic_helper_set_config,
-.destroy = mdp5_crtc_destroy,
-.page_flip = drm_atomic_helper_page_flip,
-.reset = mdp5_crtc_reset,
-.atomic_duplicate_state = mdp5_crtc_duplicate_state,
-.atomic_destroy_state = mdp5_crtc_destroy_state,
-.atomic_print_state = mdp5_crtc_atomic_print_state,
-};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.atomic_check = mdp5_crtc_atomic_check,
@@ -1121,12 +1181,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
-if (cursor_plane)
-drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
-&mdp5_crtc_no_lm_cursor_funcs, NULL);
-else
-drm_crtc_init_with_planes(dev, crtc, plane, NULL,
-&mdp5_crtc_funcs, NULL);
+mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
+drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+&mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
......
@@ -384,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
return 0;
}
......
@@ -54,7 +54,7 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -72,7 +72,7 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
return 0;
}
@@ -84,7 +84,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -119,7 +119,7 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
return 0;
}
@@ -132,5 +132,5 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
}
@@ -125,7 +125,7 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
@@ -496,12 +496,12 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
-pm_runtime_put_autosuspend(dev);
+pm_runtime_put_sync(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
-DBG("MDP5 version v%d.%d", *major, *minor);
+dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp, static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -683,7 +683,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;;
}
-pm_runtime_put_autosuspend(&pdev->dev);
+pm_runtime_put_sync(&pdev->dev);
ret = modeset_init(mdp5_kms); ret = modeset_init(mdp5_kms);
if (ret) { if (ret) {
......
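All of the pm_runtime_put_autosuspend() to pm_runtime_put_sync() conversions in the mdp5 files above keep the same get/put bracket around register access; only the put side changes, so the device may suspend immediately instead of arming an autosuspend timer. The pattern, roughly (taken from the irq preinstall hunk above, illustrative only):

pm_runtime_get_sync(dev);	/* power up before touching registers */
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
pm_runtime_put_sync(dev);	/* synchronous put, no autosuspend delay */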
@@ -17,19 +17,20 @@
#include "mdp5_kms.h"
-struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
-struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+uint32_t caps, uint32_t blkcfg,
+struct mdp5_hw_pipe **hwpipe,
+struct mdp5_hw_pipe **r_hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_state *state;
struct mdp5_hw_pipe_state *old_state, *new_state;
-struct mdp5_hw_pipe *hwpipe = NULL;
-int i;
+int i, j;
state = mdp5_get_state(s);
if (IS_ERR(state))
-return ERR_CAST(state);
+return PTR_ERR(state);
/* grab old_state after mdp5_get_state(), since now we hold lock: */
old_state = &mdp5_kms->state->hwpipe;
@@ -64,31 +65,67 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
/* possible candidate, take the one with the
* fewest unneeded caps bits set:
*/
-if (!hwpipe || (hweight_long(cur->caps & ~caps) <
-hweight_long(hwpipe->caps & ~caps)))
-hwpipe = cur;
+if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
+hweight_long((*hwpipe)->caps & ~caps))) {
+bool r_found = false;
if (r_hwpipe) {
for (j = i + 1; j < mdp5_kms->num_hwpipes;
j++) {
struct mdp5_hw_pipe *r_cur =
mdp5_kms->hwpipes[j];
/* reject different types of hwpipes */
if (r_cur->caps != cur->caps)
continue;
/* respect priority, eg. VIG0 > VIG1 */
if (cur->pipe > r_cur->pipe)
continue;
*r_hwpipe = r_cur;
r_found = true;
break;
}
}
if (!r_hwpipe || r_found)
*hwpipe = cur;
}
} }
-if (!hwpipe)
-return ERR_PTR(-ENOMEM);
+if (!(*hwpipe))
+return -ENOMEM;
+if (r_hwpipe && !(*r_hwpipe))
+return -ENOMEM;
if (mdp5_kms->smp) {
int ret;
-DBG("%s: alloc SMP blocks", hwpipe->name);
+/* We don't support SMP and 2 hwpipes/plane together */
+WARN_ON(r_hwpipe);
+DBG("%s: alloc SMP blocks", (*hwpipe)->name);
ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
-hwpipe->pipe, blkcfg);
+(*hwpipe)->pipe, blkcfg);
if (ret)
-return ERR_PTR(-ENOMEM);
+return -ENOMEM;
-hwpipe->blkcfg = blkcfg;
+(*hwpipe)->blkcfg = blkcfg;
}
DBG("%s: assign to plane %s for caps %x",
-hwpipe->name, plane->name, caps);
+(*hwpipe)->name, plane->name, caps);
-new_state->hwpipe_to_plane[hwpipe->idx] = plane;
+new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
-return hwpipe;
+if (r_hwpipe) {
DBG("%s: assign to right of plane %s for caps %x",
(*r_hwpipe)->name, plane->name, caps);
new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
}
return 0;
} }
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
......
@@ -44,9 +44,10 @@ struct mdp5_hw_pipe_state {
struct drm_plane *hwpipe_to_plane[SSPP_MAX];
};
-struct mdp5_hw_pipe *__must_check
-mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
-uint32_t caps, uint32_t blkcfg);
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+uint32_t caps, uint32_t blkcfg,
+struct mdp5_hw_pipe **hwpipe,
+struct mdp5_hw_pipe **r_hwpipe);
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
......
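With the new prototype, callers pass the output pointers explicitly and hand in NULL for r_hwpipe when no right-hand pipe is wanted; the real call site is in mdp5_plane_atomic_check_with_state(), further below. A minimal hypothetical call, with need_right_hwpipe standing in for whatever condition the caller uses:

struct mdp5_hw_pipe *new_hwpipe = NULL, *new_right_hwpipe = NULL;
int ret;

ret = mdp5_pipe_assign(state->state, plane, caps, blkcfg, &new_hwpipe,
		need_right_hwpipe ? &new_right_hwpipe : NULL);
if (ret)
	return ret;	/* no suitable hwpipe(s) were free */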
...@@ -31,15 +31,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, ...@@ -31,15 +31,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest); struct drm_rect *src, struct drm_rect *dest);
static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx);
static struct mdp5_kms *get_kms(struct drm_plane *plane) static struct mdp5_kms *get_kms(struct drm_plane *plane)
{ {
struct msm_drm_private *priv = plane->dev->dev_private; struct msm_drm_private *priv = plane->dev->dev_private;
...@@ -254,18 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { ...@@ -254,18 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state, .atomic_print_state = mdp5_plane_atomic_print_state,
}; };
static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
.update_plane = mdp5_update_cursor_plane_legacy,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.atomic_set_property = mdp5_plane_atomic_set_property,
.atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
.atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane, static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state) struct drm_plane_state *new_state)
{ {
@@ -414,31 +393,30 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
struct mdp5_hw_pipe *old_right_hwpipe =
mdp5_state->r_hwpipe;
+struct mdp5_hw_pipe *new_hwpipe = NULL;
+struct mdp5_hw_pipe *new_right_hwpipe = NULL;
-mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
-plane, caps, blkcfg);
-if (IS_ERR(mdp5_state->hwpipe)) {
-DBG("%s: failed to assign hwpipe!", plane->name);
-return PTR_ERR(mdp5_state->hwpipe);
+ret = mdp5_pipe_assign(state->state, plane, caps,
+blkcfg, &new_hwpipe,
+need_right_hwpipe ?
+&new_right_hwpipe : NULL);
+if (ret) {
+DBG("%s: failed to assign hwpipe(s)!",
+plane->name);
+return ret;
}
-if (need_right_hwpipe) {
-mdp5_state->r_hwpipe =
-mdp5_pipe_assign(state->state, plane,
-caps, blkcfg);
-if (IS_ERR(mdp5_state->r_hwpipe)) {
-DBG("%s: failed to assign right hwpipe",
-plane->name);
-return PTR_ERR(mdp5_state->r_hwpipe);
-}
-} else {
+mdp5_state->hwpipe = new_hwpipe;
+if (need_right_hwpipe)
+mdp5_state->r_hwpipe = new_right_hwpipe;
+else
/*
* set it to NULL so that the driver knows we
* don't have a right hwpipe when committing a
* new state
*/
mdp5_state->r_hwpipe = NULL;
-}
mdp5_pipe_release(state->state, old_hwpipe); mdp5_pipe_release(state->state, old_hwpipe);
mdp5_pipe_release(state->state, old_right_hwpipe); mdp5_pipe_release(state->state, old_right_hwpipe);
...@@ -487,11 +465,98 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, ...@@ -487,11 +465,98 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
} }
} }
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_crtc_state *crtc_state;
struct drm_rect clip;
int min_scale, max_scale;
int ret;
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (!crtc_state->active)
return -EINVAL;
mdp5_state = to_mdp5_plane_state(state);
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_state->hwpipe)
return -EINVAL;
/* only allow changing of position(crtc x/y or src x/y) in fast path */
if (plane->state->crtc != state->crtc ||
plane->state->src_w != state->src_w ||
plane->state->src_h != state->src_h ||
plane->state->crtc_w != state->crtc_w ||
plane->state->crtc_h != state->crtc_h ||
!plane->state->fb ||
plane->state->fb != state->fb)
return -EINVAL;
clip.x1 = 0;
clip.y1 = 0;
clip.x2 = crtc_state->adjusted_mode.hdisplay;
clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
ret = drm_plane_helper_check_state(state, &clip, min_scale,
max_scale, true, true);
if (ret)
return ret;
/*
* if the visibility of the plane changes (i.e, if the cursor is
* clipped out completely, we can't take the async path because
* we need to stage/unstage the plane from the Layer Mixer(s). We
* also assign/unassign the hwpipe(s) tied to the plane. We avoid
* taking the fast path for both these reasons.
*/
if (state->visible != plane->state->visible)
return -EINVAL;
return 0;
}
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
mdp5_crtc_get_pipeline(plane->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
&new_state->src, &new_state->dst);
WARN_ON(ret < 0);
ctl = mdp5_crtc_get_ctl(new_state->crtc);
mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane->state) =
*to_mdp5_plane_state(new_state);
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb, .prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb, .cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check, .atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update, .atomic_update = mdp5_plane_atomic_update,
.atomic_async_check = mdp5_plane_atomic_async_check,
.atomic_async_update = mdp5_plane_atomic_async_update,
}; };
static void set_scanout_locked(struct mdp5_kms *mdp5_kms, static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
...@@ -996,84 +1061,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, ...@@ -996,84 +1061,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
return ret; return ret;
} }
static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state, *new_plane_state;
struct mdp5_plane_state *mdp5_pstate;
struct drm_crtc_state *crtc_state = crtc->state;
int ret;
if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
goto slow;
plane_state = plane->state;
mdp5_pstate = to_mdp5_plane_state(plane_state);
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_pstate->hwpipe)
goto slow;
/* only allow changing of position(crtc x/y or src x/y) in fast path */
if (plane_state->crtc != crtc ||
plane_state->src_w != src_w ||
plane_state->src_h != src_h ||
plane_state->crtc_w != crtc_w ||
plane_state->crtc_h != crtc_h ||
!plane_state->fb ||
plane_state->fb != fb)
goto slow;
new_plane_state = mdp5_plane_duplicate_state(plane);
if (!new_plane_state)
return -ENOMEM;
new_plane_state->src_x = src_x;
new_plane_state->src_y = src_y;
new_plane_state->src_w = src_w;
new_plane_state->src_h = src_h;
new_plane_state->crtc_x = crtc_x;
new_plane_state->crtc_y = crtc_y;
new_plane_state->crtc_w = crtc_w;
new_plane_state->crtc_h = crtc_h;
ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
if (ret)
goto slow_free;
if (new_plane_state->visible) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
ret = mdp5_plane_mode_set(plane, crtc, fb,
&new_plane_state->src,
&new_plane_state->dst);
WARN_ON(ret < 0);
ctl = mdp5_crtc_get_ctl(crtc);
mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane_state) =
*to_mdp5_plane_state(new_plane_state);
mdp5_plane_destroy_state(plane, new_plane_state);
return 0;
slow_free:
mdp5_plane_destroy_state(plane, new_plane_state);
slow:
return drm_atomic_helper_update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
/* /*
* Use this func and the one below only after the atomic state has been * Use this func and the one below only after the atomic state has been
* successfully swapped * successfully swapped
@@ -1133,16 +1120,9 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
-if (type == DRM_PLANE_TYPE_CURSOR)
-ret = drm_universal_plane_init(dev, plane, 0xff,
-&mdp5_cursor_plane_funcs,
-mdp5_plane->formats, mdp5_plane->nformats,
-NULL, type, NULL);
-else
-ret = drm_universal_plane_init(dev, plane, 0xff,
-&mdp5_plane_funcs,
-mdp5_plane->formats, mdp5_plane->nformats,
-NULL, type, NULL);
+ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+mdp5_plane->formats, mdp5_plane->nformats,
+NULL, type, NULL);
if (ret)
goto fail;
......
...@@ -146,35 +146,6 @@ static void commit_worker(struct work_struct *work) ...@@ -146,35 +146,6 @@ static void commit_worker(struct work_struct *work)
complete_commit(container_of(work, struct msm_commit, work), true); complete_commit(container_of(work, struct msm_commit, work), true);
} }
/*
* this func is identical to the drm_atomic_helper_check, but we keep this
* because we might eventually need to have a more finegrained check
* sequence without using the atomic helpers.
*
* In the past, we first called drm_atomic_helper_check_planes, and then
* drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's
* ->atomic_check could update ->mode_changed for pixel format changes.
* This, however isn't needed now because if there is a pixel format change,
* we just assign a new hwpipe for it with a new SMP allocation. We might
* eventually hit a condition where we would need to do a full modeset if
* we run out of planes. There, we'd probably need to set mode_changed.
*/
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
return ret;
}
/** /**
* drm_atomic_helper_commit - commit validated state object * drm_atomic_helper_commit - commit validated state object
* @dev: DRM device * @dev: DRM device
...@@ -202,6 +173,18 @@ int msm_atomic_commit(struct drm_device *dev, ...@@ -202,6 +173,18 @@ int msm_atomic_commit(struct drm_device *dev,
if (ret) if (ret)
return ret; return ret;
/*
* Note that plane->atomic_async_check() should fail if we need
* to re-assign hwpipe or anything that touches global atomic
* state, so we'll never go down the async update path in those
* cases.
*/
if (state->async_update) {
drm_atomic_helper_async_commit(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
return 0;
}
c = commit_init(state); c = commit_init(state);
if (!c) { if (!c) {
ret = -ENOMEM; ret = -ENOMEM;
......
...@@ -29,9 +29,12 @@ ...@@ -29,9 +29,12 @@
* - 1.0.0 - initial interface * - 1.0.0 - initial interface
* - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
* - 1.2.0 - adds explicit fence support for submit ioctl * - 1.2.0 - adds explicit fence support for submit ioctl
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
*/ */
#define MSM_VERSION_MAJOR 1 #define MSM_VERSION_MAJOR 1
#define MSM_VERSION_MINOR 2 #define MSM_VERSION_MINOR 3
#define MSM_VERSION_PATCHLEVEL 0 #define MSM_VERSION_PATCHLEVEL 0
static void msm_fb_output_poll_changed(struct drm_device *dev) static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -44,7 +47,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
-.atomic_check = msm_atomic_check,
+.atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
.atomic_state_alloc = msm_atomic_state_alloc,
.atomic_state_clear = msm_atomic_state_clear,
...@@ -211,7 +214,6 @@ static int msm_drm_uninit(struct device *dev) ...@@ -211,7 +214,6 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev); struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private; struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms; struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp; struct vblank_event *vbl_ev, *tmp;
...@@ -253,15 +255,6 @@ static int msm_drm_uninit(struct device *dev) ...@@ -253,15 +255,6 @@ static int msm_drm_uninit(struct device *dev)
if (kms && kms->funcs) if (kms && kms->funcs)
kms->funcs->destroy(kms); kms->funcs->destroy(kms);
if (gpu) {
mutex_lock(&ddev->struct_mutex);
// XXX what do we do here?
//pm_runtime_enable(&pdev->dev);
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
}
if (priv->vram.paddr) { if (priv->vram.paddr) {
unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm); drm_mm_takedown(&priv->vram.mm);
...@@ -514,24 +507,37 @@ static void load_gpu(struct drm_device *dev) ...@@ -514,24 +507,37 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock); mutex_unlock(&init_lock);
} }
static int msm_open(struct drm_device *dev, struct drm_file *file) static int context_init(struct drm_device *dev, struct drm_file *file)
{ {
struct msm_file_private *ctx; struct msm_file_private *ctx;
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) if (!ctx)
return -ENOMEM; return -ENOMEM;
msm_submitqueue_init(dev, ctx);
file->driver_priv = ctx; file->driver_priv = ctx;
return 0; return 0;
} }
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
return context_init(dev, file);
}
static void context_close(struct msm_file_private *ctx)
{
msm_submitqueue_close(ctx);
kfree(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file) static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{ {
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
@@ -542,7 +548,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
priv->lastctx = NULL;
mutex_unlock(&dev->struct_mutex);
-kfree(ctx);
+context_close(ctx);
}
static void msm_lastclose(struct drm_device *dev) static void msm_lastclose(struct drm_device *dev)
...@@ -737,16 +743,27 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ...@@ -737,16 +743,27 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data; struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout); ktime_t timeout = to_ktime(args->timeout);
struct msm_gpu_submitqueue *queue;
struct msm_gpu *gpu = priv->gpu;
int ret;
if (args->pad) { if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad); DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL; return -EINVAL;
} }
if (!priv->gpu) if (!gpu)
return 0; return 0;
return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
true);
msm_submitqueue_put(queue);
return ret;
} }
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
...@@ -787,6 +804,28 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, ...@@ -787,6 +804,28 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
return ret; return ret;
} }
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_submitqueue *args = data;
if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
return -EINVAL;
return msm_submitqueue_create(dev, file->driver_priv, args->prio,
args->flags, &args->id);
}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
{
u32 id = *(u32 *) data;
return msm_submitqueue_remove(file->driver_priv, id);
}
static const struct drm_ioctl_desc msm_ioctls[] = { static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
...@@ -796,6 +835,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = { ...@@ -796,6 +835,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
}; };
static const struct vm_operations_struct vm_ops = { static const struct vm_operations_struct vm_ops = {
......
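For context on the two ioctls registered above: a userspace client creates a queue, references its id from gem_submit and wait_fence, and closes it when done. A hypothetical sketch using the usual libdrm wrapper (uapi field names taken from the kernel side shown in this series; include paths and error handling are assumptions):

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"	/* uapi header carrying DRM_MSM_SUBMITQUEUE_* */

static uint32_t example_create_queue(int fd, uint32_t prio)
{
	struct drm_msm_submitqueue req = {
		.flags = 0,
		.prio  = prio,	/* which ring/priority level to use */
	};

	drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));

	/* req.id is then passed as the queueid of gem_submit and wait_fence,
	 * and released via DRM_MSM_SUBMITQUEUE_CLOSE. */
	return req.id;
}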
@@ -56,11 +56,9 @@ struct msm_gem_address_space;
struct msm_gem_vma;
struct msm_file_private {
-/* currently we don't do anything useful with this.. but when
-* per-context address spaces are supported we'd keep track of
-* the context's page-tables here.
-*/
-int dummy;
+rwlock_t queuelock;
+struct list_head submitqueues;
+int queueid;
};
enum msm_mdp_plane_property { enum msm_mdp_plane_property {
...@@ -76,6 +74,8 @@ struct msm_vblank_ctrl { ...@@ -76,6 +74,8 @@ struct msm_vblank_ctrl {
spinlock_t lock; spinlock_t lock;
}; };
#define MSM_GPU_MAX_RINGS 4
struct msm_drm_private { struct msm_drm_private {
struct drm_device *dev; struct drm_device *dev;
@@ -108,7 +108,8 @@ struct msm_drm_private {
struct drm_fb_helper *fbdev;
-struct msm_rd_state *rd;
+struct msm_rd_state *rd; /* debugfs to dump all submits */
+struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/* list of GEM objects: */
...@@ -154,20 +155,12 @@ struct msm_drm_private { ...@@ -154,20 +155,12 @@ struct msm_drm_private {
struct shrinker shrinker; struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl; struct msm_vblank_ctrl vblank_ctrl;
/* task holding struct_mutex.. currently only used in submit path
* to detect and reject faults from copy_from_user() for submit
* ioctl.
*/
struct task_struct *struct_mutex_task;
}; };
struct msm_format { struct msm_format {
uint32_t pixel_format; uint32_t pixel_format;
}; };
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev, int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock); struct drm_atomic_state *state, bool nonblock);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
...@@ -219,6 +212,7 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -219,6 +212,7 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj); int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj); void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj); void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj, int msm_gem_sync_object(struct drm_gem_object *obj,
@@ -303,7 +297,8 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
-void msm_rd_dump_submit(struct msm_gem_submit *submit);
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor); int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else #else
...@@ -319,6 +314,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, ...@@ -319,6 +314,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr); void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr); u32 msm_readl(const void __iomem *addr);
struct msm_gpu_submitqueue;
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
void msm_submitqueue_destroy(struct kref *kref);
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
......
@@ -31,7 +31,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
-fctx->name = name;
+strncpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
......
@@ -22,7 +22,7 @@
struct msm_fence_context {
struct drm_device *dev;
-const char *name;
+char name[32];
unsigned context;
/* last_fence == completed_fence --> no pending work */
uint32_t last_fence; /* last assigned fence */
......
@@ -470,14 +470,16 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return ret;
}
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
mutex_lock(&msm_obj->lock);
-if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+if (WARN_ON(msm_obj->madv > madv)) {
+dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
}
...@@ -513,6 +515,22 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj) ...@@ -513,6 +515,22 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
return ERR_PTR(ret); return ERR_PTR(ret);
} }
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
return get_vaddr(obj, MSM_MADV_WILLNEED);
}
/*
* Don't use this! It is for the very special case of dumping
* submits from GPU hangs or faults, were the bo may already
* be MSM_MADV_DONTNEED, but we know the buffer is still on the
* active list.
*/
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj) void msm_gem_put_vaddr(struct drm_gem_object *obj)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
...@@ -610,17 +628,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, ...@@ -610,17 +628,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence; struct dma_fence *fence;
int i, ret; int i, ret;
if (!exclusive) {
/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
* which makes this a slightly strange place to call it. OTOH this
* is a convenient can-fail point to hook it in. (And similar to
* how etnaviv and nouveau handle this.)
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
fobj = reservation_object_get_list(msm_obj->resv); fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) { if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv); fence = reservation_object_get_excl(msm_obj->resv);
......
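The _active() variant added above exists purely for the hang/fault dump path: after a hang the buffer may already have been marked DONTNEED by userspace, but it is still on the active list, so the stricter WILLNEED check in msm_gem_get_vaddr() would refuse it. A hypothetical dump-side use (the surrounding names are illustrative, not the rd code verbatim):

/* obj is a struct msm_gem_object known to be in the hanging submit's bo table */
void *buf = msm_gem_get_vaddr_active(&obj->base);

if (!IS_ERR(buf)) {
	/* ... snapshot the cmdstream contents into the hangrd debugfs file ... */
	msm_gem_put_vaddr(&obj->base);
}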
...@@ -138,12 +138,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass); ...@@ -138,12 +138,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
struct msm_gem_submit { struct msm_gem_submit {
struct drm_device *dev; struct drm_device *dev;
struct msm_gpu *gpu; struct msm_gpu *gpu;
struct list_head node; /* node in gpu submit_list */ struct list_head node; /* node in ring submit list */
struct list_head bo_list; struct list_head bo_list;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
struct dma_fence *fence; struct dma_fence *fence;
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */ struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */ bool valid; /* true if no cmdstream patching needed */
struct msm_ringbuffer *ring;
unsigned int nr_cmds; unsigned int nr_cmds;
unsigned int nr_bos; unsigned int nr_bos;
struct { struct {
......
...@@ -31,7 +31,8 @@ ...@@ -31,7 +31,8 @@
#define BO_PINNED 0x2000 #define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev, static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
uint32_t nr_bos, uint32_t nr_cmds)
{ {
struct msm_gem_submit *submit; struct msm_gem_submit *submit;
uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
...@@ -49,6 +50,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, ...@@ -49,6 +50,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->fence = NULL; submit->fence = NULL;
submit->pid = get_pid(task_pid(current)); submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos]; submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
/* initially, until copy_from_user() and bo lookup succeeds: */ /* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0; submit->nr_bos = 0;
...@@ -66,6 +69,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit) ...@@ -66,6 +69,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
dma_fence_put(submit->fence); dma_fence_put(submit->fence);
list_del(&submit->node); list_del(&submit->node);
put_pid(submit->pid); put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
kfree(submit); kfree(submit);
} }
...@@ -156,7 +161,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, ...@@ -156,7 +161,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
return ret; return ret;
} }
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
int i, bool backoff)
{ {
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
...@@ -166,7 +172,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) ...@@ -166,7 +172,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
if (submit->bos[i].flags & BO_LOCKED) if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock); ww_mutex_unlock(&msm_obj->resv->lock);
if (!(submit->bos[i].flags & BO_VALID)) if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0; submit->bos[i].iova = 0;
submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED); submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
...@@ -201,10 +207,10 @@ static int submit_lock_objects(struct msm_gem_submit *submit) ...@@ -201,10 +207,10 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
fail: fail:
for (; i >= 0; i--) for (; i >= 0; i--)
submit_unlock_unpin_bo(submit, i); submit_unlock_unpin_bo(submit, i, true);
if (slow_locked > 0) if (slow_locked > 0)
submit_unlock_unpin_bo(submit, slow_locked); submit_unlock_unpin_bo(submit, slow_locked, true);
if (ret == -EDEADLK) { if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj; struct msm_gem_object *msm_obj = submit->bos[contended].obj;
...@@ -221,7 +227,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit) ...@@ -221,7 +227,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
return ret; return ret;
} }
static int submit_fence_sync(struct msm_gem_submit *submit) static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{ {
int i, ret = 0; int i, ret = 0;
...@@ -229,7 +235,22 @@ static int submit_fence_sync(struct msm_gem_submit *submit) ...@@ -229,7 +235,22 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); if (!write) {
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
if (no_implicit)
continue;
ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
write);
if (ret) if (ret)
break; break;
} }
...@@ -373,7 +394,7 @@ static void submit_cleanup(struct msm_gem_submit *submit) ...@@ -373,7 +394,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) { for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i); submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry); list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base); drm_gem_object_unreference(&msm_obj->base);
} }
...@@ -391,6 +412,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -391,6 +412,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_gpu *gpu = priv->gpu; struct msm_gpu *gpu = priv->gpu;
struct dma_fence *in_fence = NULL; struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL; struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
int out_fence_fd = -1; int out_fence_fd = -1;
unsigned i; unsigned i;
int ret; int ret;
...@@ -407,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -407,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL; return -EINVAL;
queue = msm_submitqueue_get(ctx, args->queueid);
if (!queue)
return -ENOENT;
ring = gpu->rb[queue->prio];
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd); in_fence = sync_file_get_fence(args->fence_fd);
...@@ -417,7 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -417,7 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence * Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context. * array contains any fence from a foreign context.
*/ */
if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
ret = dma_fence_wait(in_fence, true); ret = dma_fence_wait(in_fence, true);
if (ret) if (ret)
return ret; return ret;
...@@ -435,9 +464,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -435,9 +464,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
} }
priv->struct_mutex_task = current;
submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds); submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
if (!submit) { if (!submit) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_unlock; goto out_unlock;
...@@ -451,11 +479,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -451,11 +479,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret) if (ret)
goto out; goto out;
if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
ret = submit_fence_sync(submit); if (ret)
if (ret) goto out;
goto out;
}
ret = submit_pin_objects(submit); ret = submit_pin_objects(submit);
if (ret) if (ret)
...@@ -522,7 +548,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -522,7 +548,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i; submit->nr_cmds = i;
submit->fence = msm_fence_alloc(gpu->fctx); submit->fence = msm_fence_alloc(ring->fctx);
if (IS_ERR(submit->fence)) { if (IS_ERR(submit->fence)) {
ret = PTR_ERR(submit->fence); ret = PTR_ERR(submit->fence);
submit->fence = NULL; submit->fence = NULL;
...@@ -555,7 +581,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -555,7 +581,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out_unlock: out_unlock:
if (ret && (out_fence_fd >= 0)) if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd); put_unused_fd(out_fence_fd);
priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
} }
This diff is collapsed.
...@@ -33,7 +33,7 @@ struct msm_gpu_config { ...@@ -33,7 +33,7 @@ struct msm_gpu_config {
const char *irqname; const char *irqname;
uint64_t va_start; uint64_t va_start;
uint64_t va_end; uint64_t va_end;
unsigned int ringsz; unsigned int nr_rings;
}; };
/* So far, with hardware that I've seen to date, we can have: /* So far, with hardware that I've seen to date, we can have:
...@@ -57,9 +57,9 @@ struct msm_gpu_funcs { ...@@ -57,9 +57,9 @@ struct msm_gpu_funcs {
int (*pm_resume)(struct msm_gpu *gpu); int (*pm_resume)(struct msm_gpu *gpu);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx); struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu); void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq); irqreturn_t (*irq)(struct msm_gpu *irq);
uint32_t (*last_fence)(struct msm_gpu *gpu); struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu); void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu); void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
...@@ -86,16 +86,12 @@ struct msm_gpu { ...@@ -86,16 +86,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs; const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs; uint32_t num_perfcntrs;
/* ringbuffer: */ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
struct msm_ringbuffer *rb; int nr_rings;
uint64_t rb_iova;
/* list of GEM active objects: */ /* list of GEM active objects: */
struct list_head active_list; struct list_head active_list;
/* fencing: */
struct msm_fence_context *fctx;
/* does gpu need hw_init? */ /* does gpu need hw_init? */
bool needs_hw_init; bool needs_hw_init;
...@@ -126,15 +122,31 @@ struct msm_gpu { ...@@ -126,15 +122,31 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer; struct timer_list hangcheck_timer;
uint32_t hangcheck_fence;
struct work_struct recover_work; struct work_struct recover_work;
struct list_head submit_list; struct drm_gem_object *memptrs_bo;
}; };
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
#define MSM_GPU_RB_CNTL_DEFAULT \
(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
static inline bool msm_gpu_active(struct msm_gpu *gpu) static inline bool msm_gpu_active(struct msm_gpu *gpu)
{ {
return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu); int i;
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
if (ring->seqno > ring->memptrs->fence)
return true;
}
return false;
} }
/* Perf-Counters: /* Perf-Counters:
...@@ -150,6 +162,15 @@ struct msm_gpu_perfcntr { ...@@ -150,6 +162,15 @@ struct msm_gpu_perfcntr {
const char *name; const char *name;
}; };
struct msm_gpu_submitqueue {
int id;
u32 flags;
u32 prio;
int faults;
struct list_head node;
struct kref ref;
};
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{ {
msm_writel(data, gpu->mmio + (reg << 2)); msm_writel(data, gpu->mmio + (reg << 2));
...@@ -223,4 +244,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev); ...@@ -223,4 +244,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void); void __init adreno_register(void);
void __exit adreno_unregister(void); void __exit adreno_unregister(void);
static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
if (queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
#endif /* __MSM_GPU_H__ */ #endif /* __MSM_GPU_H__ */
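With fencing now tracked per ring, hang detection has to look at each ring's completed fence (ring->memptrs->fence) instead of a single gpu-wide counter. The msm_gpu.c side of that change is collapsed above, so the following is only a hedged sketch of what a per-ring hangcheck could look like, built from the active_ring() hook and the hangcheck_fence/seqno fields introduced in this series; the function name is made up for illustration.

/*
 * Sketch only: the real handler lives in the collapsed msm_gpu.c diff.
 */
static void hangcheck_handler_sketch(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* progress since the last check: remember it and move on */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress, but submitted work is still outstanding: hung */
		ring->hangcheck_fence = fence;
		schedule_work(&gpu->recover_work);
	}

	/* re-arm while any ring still has unretired submits */
	if (msm_gpu_active(gpu))
		mod_timer(&gpu->hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}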
...@@ -19,11 +19,17 @@ ...@@ -19,11 +19,17 @@
* *
* tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd * tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
* *
* To log the cmdstream in a format that is understood by freedreno/cffdump * to log the cmdstream in a format that is understood by freedreno/cffdump
* utility. By comparing the last successfully completed fence #, to the * utility. By comparing the last successfully completed fence #, to the
* cmdstream for the next fence, you can narrow down which process and submit * cmdstream for the next fence, you can narrow down which process and submit
* caused the gpu crash/lockup. * caused the gpu crash/lockup.
* *
* Additionally:
*
* tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
*
* will capture just the cmdstream from submits which triggered a GPU hang.
*
* This bypasses drm_debugfs_create_files() mainly because we need to use * This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to * our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open. * do anything if userspace doesn't have the debugfs file open.
...@@ -212,53 +218,89 @@ static const struct file_operations rd_debugfs_fops = { ...@@ -212,53 +218,89 @@ static const struct file_operations rd_debugfs_fops = {
.release = rd_release, .release = rd_release,
}; };
int msm_rd_debugfs_init(struct drm_minor *minor)
static void rd_cleanup(struct msm_rd_state *rd)
{
if (!rd)
return;
mutex_destroy(&rd->read_lock);
kfree(rd);
}
static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{ {
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd; struct msm_rd_state *rd;
struct dentry *ent; struct dentry *ent;
int ret = 0;
/* only create on first minor: */
if (priv->rd)
return 0;
rd = kzalloc(sizeof(*rd), GFP_KERNEL); rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd) if (!rd)
return -ENOMEM; return ERR_PTR(-ENOMEM);
rd->dev = minor->dev; rd->dev = minor->dev;
rd->fifo.buf = rd->buf; rd->fifo.buf = rd->buf;
mutex_init(&rd->read_lock); mutex_init(&rd->read_lock);
priv->rd = rd;
init_waitqueue_head(&rd->fifo_event); init_waitqueue_head(&rd->fifo_event);
ent = debugfs_create_file("rd", S_IFREG | S_IRUGO, ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
minor->debugfs_root, rd, &rd_debugfs_fops); minor->debugfs_root, rd, &rd_debugfs_fops);
if (!ent) { if (!ent) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n", DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
minor->debugfs_root); minor->debugfs_root, name);
ret = -ENOMEM;
goto fail; goto fail;
} }
return rd;
fail:
rd_cleanup(rd);
return ERR_PTR(ret);
}
int msm_rd_debugfs_init(struct drm_minor *minor)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
int ret;
/* only create on first minor: */
if (priv->rd)
return 0;
rd = rd_init(minor, "rd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->rd = rd;
rd = rd_init(minor, "hangrd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->hangrd = rd;
return 0; return 0;
fail: fail:
msm_rd_debugfs_cleanup(priv); msm_rd_debugfs_cleanup(priv);
return -1; return ret;
} }
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{ {
struct msm_rd_state *rd = priv->rd; rd_cleanup(priv->rd);
if (!rd)
return;
priv->rd = NULL; priv->rd = NULL;
mutex_destroy(&rd->read_lock);
kfree(rd); rd_cleanup(priv->hangrd);
priv->hangrd = NULL;
} }
static void snapshot_buf(struct msm_rd_state *rd, static void snapshot_buf(struct msm_rd_state *rd,
...@@ -268,10 +310,6 @@ static void snapshot_buf(struct msm_rd_state *rd, ...@@ -268,10 +310,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_object *obj = submit->bos[idx].obj; struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf; const char *buf;
buf = msm_gem_get_vaddr(&obj->base);
if (IS_ERR(buf))
return;
if (iova) { if (iova) {
buf += iova - submit->bos[idx].iova; buf += iova - submit->bos[idx].iova;
} else { } else {
...@@ -279,20 +317,33 @@ static void snapshot_buf(struct msm_rd_state *rd, ...@@ -279,20 +317,33 @@ static void snapshot_buf(struct msm_rd_state *rd,
size = obj->base.size; size = obj->base.size;
} }
/*
* Always write the GPUADDR header so we can get a complete list of all the
* buffers in the cmd
*/
rd_write_section(rd, RD_GPUADDR, rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12); (uint32_t[3]){ iova, size, iova >> 32 }, 12);
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
buf = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(buf))
return;
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base); msm_gem_put_vaddr(&obj->base);
} }
/* called under struct_mutex */ /* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit) void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{ {
struct drm_device *dev = submit->dev; struct drm_device *dev = submit->dev;
struct msm_drm_private *priv = dev->dev_private; struct task_struct *task;
struct msm_rd_state *rd = priv->rd; char msg[256];
char msg[128];
int i, n; int i, n;
if (!rd->open) if (!rd->open)
...@@ -303,23 +354,32 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) ...@@ -303,23 +354,32 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
*/ */
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(!mutex_is_locked(&dev->struct_mutex));
n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u", if (fmt) {
TASK_COMM_LEN, current->comm, task_pid_nr(current), va_list args;
submit->fence->seqno);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); va_start(args, fmt);
n = vsnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
if (rd_full) { rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; i < submit->nr_bos; i++) { }
/* buffers that are written to probably don't start out
* with anything interesting:
*/
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
continue;
snapshot_buf(rd, submit, i, 0, 0); rcu_read_lock();
} task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
} }
rcu_read_unlock();
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; rd_full && i < submit->nr_bos; i++)
snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) { for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova; uint64_t iova = submit->cmd[i].iova;
......
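The reworked msm_rd_dump_submit() now takes which rd instance to write to plus an optional printf-style header, so the same routine can feed both the always-on "rd" node and the new "hangrd" node. The call sites live in files whose diffs are collapsed or truncated here; the fragments below are only a hedged sketch of how they are expected to look, and surrounding locals such as comm are assumptions.

/* Hedged sketch of the two expected call sites (not part of the visible diff). */

/* in the submit ioctl, once the cmdstream has been parsed: */
if (IS_ENABLED(CONFIG_DEBUG_FS))
	msm_rd_dump_submit(priv->rd, submit, NULL);

/* in GPU recovery, for the submit that triggered the hang: */
if (IS_ENABLED(CONFIG_DEBUG_FS))
	msm_rd_dump_submit(priv->hangrd, submit,
			   "offending task: %s", comm);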
...@@ -18,13 +18,15 @@ ...@@ -18,13 +18,15 @@
#include "msm_ringbuffer.h" #include "msm_ringbuffer.h"
#include "msm_gpu.h" #include "msm_gpu.h"
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{ {
struct msm_ringbuffer *ring; struct msm_ringbuffer *ring;
char name[32];
int ret; int ret;
if (WARN_ON(!is_power_of_2(size))) /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
return ERR_PTR(-EINVAL); BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) { if (!ring) {
...@@ -33,32 +35,46 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) ...@@ -33,32 +35,46 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
} }
ring->gpu = gpu; ring->gpu = gpu;
ring->id = id;
/* Pass NULL for the iova pointer - we will map it later */ /* Pass NULL for the iova pointer - we will map it later */
ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC, ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
gpu->aspace, &ring->bo, NULL); MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
if (IS_ERR(ring->start)) { if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start); ret = PTR_ERR(ring->start);
ring->start = 0; ring->start = 0;
goto fail; goto fail;
} }
ring->end = ring->start + (size / 4); ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start; ring->cur = ring->start;
ring->size = size; ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
INIT_LIST_HEAD(&ring->submits);
spin_lock_init(&ring->lock);
snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
ring->fctx = msm_fence_context_alloc(gpu->dev, name);
return ring; return ring;
fail: fail:
if (ring) msm_ringbuffer_destroy(ring);
msm_ringbuffer_destroy(ring);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{ {
if (IS_ERR_OR_NULL(ring))
return;
msm_fence_context_free(ring->fctx);
if (ring->bo) { if (ring->bo) {
msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo); msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo); drm_gem_object_unreference_unlocked(ring->bo);
} }
......
...@@ -20,14 +20,31 @@ ...@@ -20,14 +20,31 @@
#include "msm_drv.h" #include "msm_drv.h"
#define rbmemptr(ring, member) \
((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
};
struct msm_ringbuffer { struct msm_ringbuffer {
struct msm_gpu *gpu; struct msm_gpu *gpu;
int size; int id;
struct drm_gem_object *bo; struct drm_gem_object *bo;
uint32_t *start, *end, *cur; uint32_t *start, *end, *cur, *next;
struct list_head submits;
uint64_t iova;
uint32_t seqno;
uint32_t hangcheck_fence;
struct msm_rbmemptrs *memptrs;
uint64_t memptrs_iova;
struct msm_fence_context *fctx;
spinlock_t lock;
}; };
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size); struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring); void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */ /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
...@@ -35,9 +52,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring); ...@@ -35,9 +52,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data) OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{ {
if (ring->cur == ring->end) /*
ring->cur = ring->start; * ring->next points to the current command being written - it won't be
*(ring->cur++) = data; * committed as ring->cur until the flush
*/
if (ring->next == ring->end)
ring->next = ring->start;
*(ring->next++) = data;
} }
#endif /* __MSM_RINGBUFFER_H__ */ #endif /* __MSM_RINGBUFFER_H__ */
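Splitting ring->next (the shadow write pointer used by OUT_RING()) from ring->cur (what the GPU has actually been told about) means nothing becomes visible to hardware until a flush commits the shadow and updates the write pointer register. The adreno-side flush is collapsed in this diff, so this is only a hedged sketch of that step; the register name and the wptr math are assumptions.

static void flush_ring_sketch(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	uint32_t wptr;

	/* commit everything written with OUT_RING() since the last flush */
	ring->cur = ring->next;

	/* dword offset of cur within the (power-of-2 sized) ring */
	wptr = (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);

	/* make sure the ring contents hit memory before the GPU sees wptr */
	mb();

	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}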
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kref.h>
#include "msm_gpu.h"
void msm_submitqueue_destroy(struct kref *kref)
{
struct msm_gpu_submitqueue *queue = container_of(kref,
struct msm_gpu_submitqueue, ref);
kfree(queue);
}
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id)
{
struct msm_gpu_submitqueue *entry;
if (!ctx)
return NULL;
read_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
if (entry->id == id) {
kref_get(&entry->ref);
read_unlock(&ctx->queuelock);
return entry;
}
}
read_unlock(&ctx->queuelock);
return NULL;
}
void msm_submitqueue_close(struct msm_file_private *ctx)
{
struct msm_gpu_submitqueue *entry, *tmp;
if (!ctx)
return;
/*
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
msm_submitqueue_put(entry);
}
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
if (!ctx)
return -ENODEV;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
return -ENOMEM;
kref_init(&queue->ref);
queue->flags = flags;
if (priv->gpu) {
if (prio >= priv->gpu->nr_rings)
return -EINVAL;
queue->prio = prio;
}
write_lock(&ctx->queuelock);
queue->id = ctx->queueid++;
if (id)
*id = queue->id;
list_add_tail(&queue->node, &ctx->submitqueues);
write_unlock(&ctx->queuelock);
return 0;
}
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
int default_prio;
if (!ctx)
return 0;
/*
* Select priority 2 as the "default priority" unless nr_rings is less
* than 2 and then pick the lowest priority
*/
default_prio = priv->gpu ?
clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
INIT_LIST_HEAD(&ctx->submitqueues);
rwlock_init(&ctx->queuelock);
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
if (!ctx)
return 0;
/*
* id 0 is the "default" queue and can't be destroyed
* by the user
*/
if (!id)
return -ENOENT;
write_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
if (entry->id == id) {
list_del(&entry->node);
write_unlock(&ctx->queuelock);
msm_submitqueue_put(entry);
return 0;
}
}
write_unlock(&ctx->queuelock);
return -ENOENT;
}
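msm_submitqueue_init() is what guarantees that every file description still has a usable queue id 0, which keeps old userspace (which never calls SUBMITQUEUE_NEW) working. A hedged sketch of how the open path would wire this up follows; msm_drv.c is not part of the visible diff, and the function name and the rest of the per-file setup are assumptions.

static int msm_open_sketch(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* sets up the per-file list plus the default queue (id 0) */
	ret = msm_submitqueue_init(dev, ctx);
	if (ret) {
		kfree(ctx);
		return ret;
	}

	file->driver_priv = ctx;
	return 0;
}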
This diff is collapsed.
...@@ -73,6 +73,7 @@ struct drm_msm_timespec { ...@@ -73,6 +73,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_MAX_FREQ 0x04 #define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05 #define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06 #define MSM_PARAM_GMEM_BASE 0x06
#define MSM_PARAM_NR_RINGS 0x07
struct drm_msm_param { struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */ __u32 pipe; /* in, MSM_PIPE_x */
...@@ -218,6 +219,7 @@ struct drm_msm_gem_submit { ...@@ -218,6 +219,7 @@ struct drm_msm_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */ __u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */ __u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
}; };
/* The normal way to synchronize with the GPU is just to CPU_PREP on /* The normal way to synchronize with the GPU is just to CPU_PREP on
...@@ -231,6 +233,7 @@ struct drm_msm_wait_fence { ...@@ -231,6 +233,7 @@ struct drm_msm_wait_fence {
__u32 fence; /* in */ __u32 fence; /* in */
__u32 pad; __u32 pad;
struct drm_msm_timespec timeout; /* in */ struct drm_msm_timespec timeout; /* in */
__u32 queueid; /* in, submitqueue id */
}; };
/* madvise provides a way to tell the kernel in case a buffers contents /* madvise provides a way to tell the kernel in case a buffers contents
...@@ -254,6 +257,20 @@ struct drm_msm_gem_madvise { ...@@ -254,6 +257,20 @@ struct drm_msm_gem_madvise {
__u32 retained; /* out, whether backing store still exists */ __u32 retained; /* out, whether backing store still exists */
}; };
/*
* Draw queues allow the user to set specific submission parameters. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
* backwards compatibility as a "default" submitqueue
*/
#define MSM_SUBMITQUEUE_FLAGS (0)
struct drm_msm_submitqueue {
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
__u32 prio; /* in, Priority level */
__u32 id; /* out, identifier */
};
#define DRM_MSM_GET_PARAM 0x00 #define DRM_MSM_GET_PARAM 0x00
/* placeholder: /* placeholder:
#define DRM_MSM_SET_PARAM 0x01 #define DRM_MSM_SET_PARAM 0x01
...@@ -265,6 +282,11 @@ struct drm_msm_gem_madvise { ...@@ -265,6 +282,11 @@ struct drm_msm_gem_madvise {
#define DRM_MSM_GEM_SUBMIT 0x06 #define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07 #define DRM_MSM_WAIT_FENCE 0x07
#define DRM_MSM_GEM_MADVISE 0x08 #define DRM_MSM_GEM_MADVISE 0x08
/* placeholder:
#define DRM_MSM_GEM_SVM_NEW 0x09
*/
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
...@@ -274,6 +296,8 @@ struct drm_msm_gem_madvise { ...@@ -274,6 +296,8 @@ struct drm_msm_gem_madvise {
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) #define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) #define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise) #define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
#if defined(__cplusplus) #if defined(__cplusplus)
} }
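From userspace the new interface is: create a submitqueue at the priority you want, then tag each submit (and, optionally, each fence wait) with the returned id; id 0 keeps behaving like the pre-submitqueue default. A hedged, minimal sketch follows; it assumes fd is an already-open MSM render node, that the header is reachable as <drm/msm_drm.h>, and it fills in only the fields visible in this diff.

#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int submit_on_own_queue(int fd, struct drm_msm_gem_submit *req)
{
	struct drm_msm_submitqueue q = {
		.flags = 0,	/* no MSM_SUBMITQUEUE_* flags defined yet */
		.prio  = 1,	/* must be below the value of MSM_PARAM_NR_RINGS */
	};

	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &q))
		return -1;

	req->queueid = q.id;	/* 0 would select the legacy default queue */
	if (ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, req))
		return -1;

	/* in-flight submits hold their own reference, so closing is safe here */
	return ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &q.id);
}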
......