Commit 9ecccaf9 authored by Rob Clark

Merge tag 'drm-msm-fixes-2021-04-02' into msm-next

Pull in fixes from previous cycle
parents 1e28eed1 12aca1ce
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
-		REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);

 	return 0;
 }
......
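Note: this hunk and the matching a6xx timestamp hunk further down switch the timestamp source from RBBM_PERFCTR_CP_0 to the always-on counter. The CP perf counter is a general-purpose, reprogrammable counter (profiling tools can reassign it), so it is not a stable timestamp source; the always-on counter is dedicated. The 64-bit value is composed from a LO/HI register pair. A minimal standalone model of that composition (the register values are made up for the example):

```c
#include <stdint.h>
#include <stdio.h>

/* Model of a split 64-bit counter exposed as two 32-bit registers. */
static uint32_t reg_lo = 0xdeadbeef, reg_hi = 0x12;

/* Compose the halves the way a gpu_read64()-style helper would:
 * read LO, read HI, shift HI into the upper word. */
static uint64_t read_counter64(void)
{
	uint64_t lo = reg_lo;
	uint64_t hi = reg_hi;

	return (hi << 32) | lo;
}

int main(void)
{
	printf("counter = 0x%llx\n", (unsigned long long)read_counter64());
	return 0;
}
```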
@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
 	/* Set up the limits management */
 	if (adreno_is_a530(adreno_gpu))
 		a530_lm_setup(gpu);
-	else
+	else if (adreno_is_a540(adreno_gpu))
 		a540_lm_setup(gpu);

 	/* Set up SP/TP power collpase */
......
@@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 	else
 		bit = a6xx_gmu_oob_bits[state].ack_new;

-	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
+	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
 }

 /* Enable CPU control of SPTP power power collapse */
......
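Note: a6xx_gmu_oob_bits[] stores bit *positions*, and the old code wrote the position itself into HOST2GMU_INTR_SET instead of a mask with that bit set, so clearing one OOB vote could poke unrelated request bits. A tiny demo of the difference (the position 24 is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg;
	unsigned int bit = 24;	/* an ack/clear bit position */

	reg = bit;		/* bug: writes 0x18, i.e. bits 3 and 4 */
	printf("index written: 0x%08x\n", reg);

	reg = 1u << bit;	/* fix: writes bit 24 only */
	printf("mask written:  0x%08x\n", reg);
	return 0;
}
```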
@@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
 	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }

-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 		struct drm_gem_object *obj)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	u32 *buf = msm_gem_get_vaddr(obj);
+	bool ret = false;

 	if (IS_ERR(buf))
-		return;
+		return false;

 	/*
-	 * If the lowest nibble is 0xa that is an indication that this microcode
-	 * has been patched. The actual version is in dword [3] but we only care
-	 * about the patchlevel which is the lowest nibble of dword [3]
-	 *
-	 * Otherwise check that the firmware is greater than or equal to 1.90
-	 * which was the first version that had this fix built in
+	 * Targets up to a640 (a618, a630 and a640) need to check for a
+	 * microcode version that is patched to support the whereami opcode or
+	 * one that is new enough to include it by default.
 	 */
-	if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
-		a6xx_gpu->has_whereami = true;
-	else if ((buf[0] & 0xfff) > 0x190)
-		a6xx_gpu->has_whereami = true;
+	if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
+		adreno_is_a640(adreno_gpu)) {
+		/*
+		 * If the lowest nibble is 0xa that is an indication that this
+		 * microcode has been patched. The actual version is in dword
+		 * [3] but we only care about the patchlevel which is the lowest
+		 * nibble of dword [3]
+		 *
+		 * Otherwise check that the firmware is greater than or equal
+		 * to 1.90 which was the first version that had this fix built
+		 * in
+		 */
+		if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+			(buf[0] & 0xfff) >= 0x190) {
+			a6xx_gpu->has_whereami = true;
+			ret = true;
+			goto out;
+		}
+
+		DRM_DEV_ERROR(&gpu->pdev->dev,
+			"a630 SQE ucode is too old. Have version %x need at least %x\n",
+			buf[0] & 0xfff, 0x190);
+	} else {
+		/*
+		 * a650 tier targets don't need whereami but still need to be
+		 * equal to or newer than 0.95 for other security fixes
+		 */
+		if (adreno_is_a650(adreno_gpu)) {
+			if ((buf[0] & 0xfff) >= 0x095) {
+				ret = true;
+				goto out;
+			}
+
+			DRM_DEV_ERROR(&gpu->pdev->dev,
+				"a650 SQE ucode is too old. Have version %x need at least %x\n",
+				buf[0] & 0xfff, 0x095);
+		}
+
+		/*
+		 * When a660 is added those targets should return true here
+		 * since those have all the critical security fixes built in
+		 * from the start
+		 */
+	}
+out:
 	msm_gem_put_vaddr(obj);
+	return ret;
 }
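Note: for reference, a worked decode of the version checks above. The SQE version sits in the low 12 bits of the first dword as hex-coded major.minor (so 1.90 is 0x190), and a low nibble of 0xa marks a patched build whose patchlevel is the low nibble of dword [3] (buf[2] as indexed here). A self-contained model with illustrative header values (not a real firmware header):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the a618/a630/a640 test applied to the leading SQE dwords. */
static bool sqe_ok_for_a630(const uint32_t *buf)
{
	/* Low nibble 0xa marks a patched build; its patchlevel is the
	 * low nibble of dword [3] (buf[2] here). */
	if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
		return true;

	/* Otherwise require at least version 1.90, encoded as 0x190. */
	return (buf[0] & 0xfff) >= 0x190;
}

int main(void)
{
	uint32_t patched[4] = { 0x0000016a, 0, 0x00000001, 0 };
	uint32_t too_old[4] = { 0x00000187, 0, 0x00000000, 0 };

	printf("patched 1.6a, patchlevel 1: %s\n",
	       sqe_ok_for_a630(patched) ? "ok" : "too old");
	printf("plain 1.87:                 %s\n",
	       sqe_ok_for_a630(too_old) ? "ok" : "too old");
	return 0;
}
```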
@@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
 		}

 		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
-		a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
+		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+			drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+			a6xx_gpu->sqe_bo = NULL;
+			return -EPERM;
+		}
 	}

 	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -1177,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 	/* Force the GPU power on so we can read this register */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);

-	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
-		REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+		REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);

 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);

 	mutex_unlock(&perfcounter_oob);
@@ -1350,35 +1401,26 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
 		u32 revn)
 {
 	struct opp_table *opp_table;
-	struct nvmem_cell *cell;
 	u32 supp_hw = UINT_MAX;
-	void *buf;
+	u16 speedbin;
+	int ret;

-	cell = nvmem_cell_get(dev, "speed_bin");
+	ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
 	/*
 	 * -ENOENT means that the platform doesn't support speedbin which is
 	 * fine
 	 */
-	if (PTR_ERR(cell) == -ENOENT)
+	if (ret == -ENOENT) {
 		return 0;
-	else if (IS_ERR(cell)) {
-		DRM_DEV_ERROR(dev,
-				"failed to read speed-bin. Some OPPs may not be supported by hardware");
-		goto done;
-	}
-
-	buf = nvmem_cell_read(cell, NULL);
-	if (IS_ERR(buf)) {
-		nvmem_cell_put(cell);
+	} else if (ret) {
 		DRM_DEV_ERROR(dev,
-				"failed to read speed-bin. Some OPPs may not be supported by hardware");
+			      "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+			      ret);
 		goto done;
 	}
+	speedbin = le16_to_cpu(speedbin);

-	supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
-
-	kfree(buf);
-	nvmem_cell_put(cell);
+	supp_hw = fuse_to_supp_hw(dev, revn, speedbin);

 done:
 	opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
......
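Note: the old path read four bytes (`*(u32 *)buf`) out of a two-byte speed-bin fuse cell, i.e. past the end of valid memory, and had to manage the cell and buffer lifetimes by hand. nvmem_cell_read_u16() folds the get/read/length-check/put sequence into one call and fails unless the cell is exactly two bytes. A hedged consumer-side sketch of the pattern, assuming the same "speed_bin" cell name:

```c
#include <linux/nvmem-consumer.h>

/* Sketch only: nvmem_cell_read_u16() wraps nvmem_cell_get(),
 * nvmem_cell_read() and nvmem_cell_put(), and rejects cells whose
 * size is not exactly sizeof(u16). */
static int read_speed_bin(struct device *dev, u16 *speedbin)
{
	int ret = nvmem_cell_read_u16(dev, "speed_bin", speedbin);

	if (ret == -ENOENT)	/* platform simply has no speed_bin fuse */
		return 0;
	return ret;
}
```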
@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
 	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
 	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
-	DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+	if (cfg->merge_3d)
+		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+			      BIT(cfg->merge_3d - MERGE_3D_0));
 }

 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
......
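Note: cfg->merge_3d is 0 when no 3D-merge block is attached, and the block ids start above 0, so the unguarded BIT(cfg->merge_3d - MERGE_3D_0) shifted by a negative count (undefined behavior) and wrote garbage to CTL_MERGE_3D_ACTIVE. A sketch of the guarded pattern; the enum values here are assumptions for illustration, with 0 reserved for "none":

```c
#include <stdio.h>

#define BIT(n) (1u << (n))

enum merge_3d_id { MERGE_3D_NONE = 0, MERGE_3D_0 = 1, MERGE_3D_1, MERGE_3D_2 };

/* Only derive a bit position from the block id when a block was
 * actually assigned; id 0 would make the shift count negative. */
static void set_merge_3d_active(enum merge_3d_id id)
{
	if (id)
		printf("CTL_MERGE_3D_ACTIVE <- 0x%x\n", BIT(id - MERGE_3D_0));
	else
		printf("no merge_3d block, register untouched\n");
}

int main(void)
{
	set_merge_3d_active(MERGE_3D_NONE);
	set_merge_3d_active(MERGE_3D_1);
	return 0;
}
```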
@@ -43,6 +43,8 @@
 #define DPU_DEBUGFS_DIR "msm_dpu"
 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

+#define MIN_IB_BW	400000000ULL /* Min ib vote 400MB */
+
 static int dpu_kms_hw_init(struct msm_kms *kms);
 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
......@@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 		DPU_DEBUG("REG_DMA is not defined");
 	}

+	if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+		dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);

 	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
......@@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 	dpu_vbif_init_memtypes(dpu_kms);

-	if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
-		dpu_kms_parse_data_bus_icc_path(dpu_kms);
-
 	pm_runtime_put_sync(&dpu_kms->pdev->dev);

 	return 0;
......@@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 	ddev = dpu_kms->dev;

 	WARN_ON(!(dpu_kms->num_paths));
 	/* Min vote of BW is required before turning on AXI clk */
 	for (i = 0; i < dpu_kms->num_paths; i++)
-		icc_set_bw(dpu_kms->path[i], 0,
-			dpu_kms->catalog->perf.min_dram_ib);
+		icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));

 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
 	if (rc) {
......
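Note: two related changes here. The sc7180 interconnect path is now parsed *before* the first pm_runtime_get_sync(), so the minimum bandwidth vote can actually be cast before the AXI clock is turned on, and that minimum becomes a fixed 400 MB/s floor. icc_set_bw() takes bandwidth in kBps, hence the Bps_to_icc() scaling, as defined in include/linux/interconnect.h. A quick check of the arithmetic:

```c
#include <stdio.h>

/* From include/linux/interconnect.h: interconnect bandwidth is
 * expressed in kilobytes per second. */
#define Bps_to_icc(x)	((x) / 1000)

int main(void)
{
	unsigned long long min_ib_bw = 400000000ULL; /* 400 MB/s floor */

	printf("icc ib vote: %llu kBps\n", Bps_to_icc(min_ib_bw));
	return 0;
}
```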
@@ -32,6 +32,8 @@ struct dp_aux_private {
 	struct drm_dp_aux dp_aux;
 };

+#define MAX_AUX_RETRIES	5
+
 static const char *dp_aux_get_error(u32 aux_error)
 {
 	switch (aux_error) {
......@@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,

 	ret = dp_aux_cmd_fifo_tx(aux, msg);
 	if (ret < 0) {
+		if (aux->native) {
+			aux->retry_cnt++;
+			if (!(aux->retry_cnt % MAX_AUX_RETRIES))
+				dp_catalog_aux_update_cfg(aux->catalog);
+		}
 		usleep_range(400, 500); /* at least 400us to next try */
 		goto unlock_exit;
 	}
......
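Note: native AUX transfers can keep failing when the current PHY tuning doesn't suit the connected sink, so rather than re-tuning on every failure, the new counter calls dp_catalog_aux_update_cfg() only on every MAX_AUX_RETRIES-th failed native transfer. The cadence, modeled standalone:

```c
#include <stdio.h>

#define MAX_AUX_RETRIES 5

int main(void)
{
	unsigned int retry_cnt = 0;
	int i;

	/* Model ten failed native AUX transfers: the re-tune hook
	 * fires on failures 5 and 10, not on each one. */
	for (i = 0; i < 10; i++) {
		retry_cnt++;
		if (!(retry_cnt % MAX_AUX_RETRIES))
			printf("failure %u: update AUX PHY cfg\n", retry_cnt);
	}
	return 0;
}
```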
@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 		break;
 	case MSM_DSI_PHY_7NM:
 	case MSM_DSI_PHY_7NM_V4_1:
-		pll = msm_dsi_pll_7nm_init(pdev, id);
+		pll = msm_dsi_pll_7nm_init(pdev, type, id);
 		break;
 	default:
 		pll = ERR_PTR(-ENXIO);
......
@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
 }
 #endif

 #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+					 enum msm_dsi_phy_type type, int id);
 #else
 static inline struct msm_dsi_pll *
-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+msm_dsi_pll_7nm_init(struct platform_device *pdev,
+		     enum msm_dsi_phy_type type, int id)
 {
 	return ERR_PTR(-ENODEV);
 }
......
@@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
 	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
 	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
 	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
 	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
 	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
 	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
......@@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
 	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	struct dsi_pll_config *config = &pll_7nm->pll_configuration;
 	void __iomem *base = pll_7nm->mmio;
 	u64 ref_clk = pll_7nm->vco_ref_clk_rate;
 	u64 vco_rate = 0x0;
......@@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
 	/*
 	 * TODO:
 	 * 1. Assumes prescaler is disabled
-	 * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
 	 */
-	multiplier = 1 << 18;
+	multiplier = 1 << config->frac_bits;
 	pll_freq = dec * (ref_clk * 2);

 	tmp64 = (ref_clk * 2 * frac);
 	pll_freq += div_u64(tmp64, multiplier);
......@@ -852,7 +852,8 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
 	return ret;
 }

-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+					 enum msm_dsi_phy_type type, int id)
 {
 	struct dsi_pll_7nm *pll_7nm;
 	struct msm_dsi_pll *pll;
......@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
 	pll = &pll_7nm->base;
 	pll->min_rate = 1000000000UL;
 	pll->max_rate = 3500000000UL;
-	if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+	if (type == MSM_DSI_PHY_7NM_V4_1) {
 		pll->min_rate = 600000000UL;
 		pll->max_rate = (unsigned long)5000000000ULL;
 		/* workaround for max rate overflowing on 32-bit builds: */
......
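Note: these PLL hunks stop hardcoding configuration. The lockdet rate now comes from the computed register set instead of a literal 0x40, the fractional multiplier uses config->frac_bits instead of a literal 2^18, and msm_dsi_pll_7nm_init() now receives the PHY type as an argument, presumably because pll->type is only filled in by the common msm_dsi_pll_init() code after this constructor returns, so the old pll->type check could never match MSM_DSI_PHY_7NM_V4_1 while the rate limits were being set. The VCO rate recomputed in dsi_pll_7nm_vco_recalc_rate() reduces to vco = (dec + frac / 2^frac_bits) * 2 * ref; a standalone rendering with illustrative register values (the driver reads dec/frac back from the DECIMAL/FRAC_DIV_START registers):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ref_clk = 19200000;	/* 19.2 MHz reference */
	uint64_t dec = 78, frac = 27306;	/* made-up divider readback */
	unsigned int frac_bits = 18;	/* from the PLL configuration */
	uint64_t multiplier = 1ULL << frac_bits;
	uint64_t pll_freq;

	/* Integer part, then the fractional contribution scaled down
	 * by 2^frac_bits, matching the recalc path above. */
	pll_freq = dec * (ref_clk * 2);
	pll_freq += (ref_clk * 2 * frac) / multiplier;

	printf("vco = %llu Hz\n", (unsigned long long)pll_freq);	/* ~3 GHz */
	return 0;
}
```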
@@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)

 static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
 {
+	int crtc_index;
 	struct drm_crtc *crtc;

-	for_each_crtc_mask(kms->dev, crtc, crtc_mask)
-		mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
+	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+		crtc_index = drm_crtc_index(crtc);
+		mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
+	}
 }

 static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
......
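Note: lock_crtcs() takes every commit_lock in the mask in ascending CRTC order. With all of the mutexes sharing one lock class (see the msm_kms.h hunk at the end, which drops the per-lock lockdep keys in favor of plain mutex_init()), lockdep would flag nested acquisition of same-class locks as a potential deadlock; mutex_lock_nested() with the CRTC index as the subclass encodes the fixed ordering instead, and the small fixed CRTC count keeps the index within lockdep's subclass limit. A hedged kernel-style sketch of the pattern, with illustrative names:

```c
#include <linux/mutex.h>

static struct mutex locks[4];	/* one lock class shared by all entries */

static void init_locks(void)
{
	int i;

	for (i = 0; i < 4; i++)
		mutex_init(&locks[i]);
}

/* Locks are always taken in ascending index order, so the index can
 * double as the lockdep subclass: nested same-class acquisition is
 * then understood as ordered rather than as a self-deadlock. */
static void lock_mask(unsigned int mask)
{
	int i;

	for (i = 0; i < 4; i++)
		if (mask & (1u << i))
			mutex_lock_nested(&locks[i], i);
}

static void unlock_mask(unsigned int mask)
{
	int i;

	for (i = 0; i < 4; i++)
		if (mask & (1u << i))
			mutex_unlock(&locks[i]);
}
```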
@@ -570,6 +570,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	kfree(priv);
 err_put_drm_dev:
 	drm_dev_put(ddev);
+	platform_set_drvdata(pdev, NULL);
 	return ret;
 }
......@@ -1072,6 +1073,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
 static int __maybe_unused msm_pm_prepare(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+	if (!priv || !priv->kms)
+		return 0;

 	return drm_mode_config_helper_suspend(ddev);
 }
......@@ -1079,6 +1084,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
 static void __maybe_unused msm_pm_complete(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+	if (!priv || !priv->kms)
+		return;

 	drm_mode_config_helper_resume(ddev);
 }
......@@ -1311,6 +1320,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
 static void msm_pdev_shutdown(struct platform_device *pdev)
 {
 	struct drm_device *drm = platform_get_drvdata(pdev);
+	struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+	if (!priv || !priv->kms)
+		return;

 	drm_atomic_helper_shutdown(drm);
 }
......
@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
 	int ret;

 	if (fence > fctx->last_fence) {
-		DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+		DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
 				fctx->name, fence, fctx->last_fence);
 		return -EINVAL;
 	}
......
@@ -157,7 +157,6 @@ struct msm_kms {
 	 * from the crtc's pending_timer close to end of the frame:
 	 */
 	struct mutex commit_lock[MAX_CRTCS];
-	struct lock_class_key commit_lock_keys[MAX_CRTCS];

 	unsigned pending_crtc_mask;
 	struct msm_pending_timer pending_timers[MAX_CRTCS];
 };
......@@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms,
 {
 	unsigned i, ret;

-	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
-		lockdep_register_key(&kms->commit_lock_keys[i]);
-		__mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
-			     &kms->commit_lock_keys[i]);
-	}
+	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+		mutex_init(&kms->commit_lock[i]);

 	kms->funcs = funcs;
......