Commit 955289c7 authored by Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Now that we have the bits needed for mdp5 atomic, here is the followup
pull request I mentioned.  Main highlights are:

1) mdp5 multiple crtc and public plane support (no more hard-coded mixer setup!)
2) mdp5 atomic conversion
3) couple atomic helper fixes for issues found during mdp5 atomic
debug (reviewed by danvet.. but he didn't plan to send an
atomic-fixes pull request so I agreed to tack them on to mine)

* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/atomic: shutdown *current* encoder
  drm/atomic: check mode_changed *after* atomic_check
  drm/msm/mdp4: fix mixer setup for multi-crtc + planes
  drm/msm/mdp5: dpms(OFF) cleanups
  drm/msm/mdp5: atomic
  drm/msm: atomic fixes
  drm/msm/mdp5: remove global mdp5_ctl_mgr
  drm/msm/mdp5: don't use void * for opaque types
  drm/msm: add multiple CRTC and overlay support
  drm/msm/mdp5: set rate before enabling clk
  drm/msm/mdp5: introduce mdp5_cfg module
  drm/msm/mdp5: make SMP module dynamically configurable
  drm/msm/hdmi: remove useless kref
  drm/msm/mdp5: get the core clock rate from MDP5 config
  drm/msm/mdp5: use irqdomains
parents ed1e8777 46df9adb
......@@ -331,7 +331,7 @@ mode_fixup(struct drm_atomic_state *state)
}
static int
drm_atomic_helper_check_prepare(struct drm_device *dev,
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ncrtcs = dev->mode_config.num_crtc;
......@@ -428,10 +428,6 @@ int drm_atomic_helper_check(struct drm_device *dev,
int ncrtcs = dev->mode_config.num_crtc;
int i, ret = 0;
ret = drm_atomic_helper_check_prepare(dev, state);
if (ret)
return ret;
for (i = 0; i < nplanes; i++) {
struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
......@@ -475,6 +471,10 @@ int drm_atomic_helper_check(struct drm_device *dev,
}
}
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
......@@ -499,9 +499,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state || !old_conn_state->crtc)
continue;
encoder = connector->state->best_encoder;
encoder = old_conn_state->best_encoder;
if (!encoder)
/* We shouldn't get this far if we didn't previously have
* an encoder.. but WARN_ON() rather than explode.
*/
if (WARN_ON(!encoder))
continue;
funcs = encoder->helper_private;
......
......@@ -25,6 +25,8 @@ msm-y := \
mdp/mdp4/mdp4_irq.o \
mdp/mdp4/mdp4_kms.o \
mdp/mdp4/mdp4_plane.o \
mdp/mdp5/mdp5_cfg.o \
mdp/mdp5/mdp5_ctl.o \
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
......
......@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/of_irq.h>
#include "hdmi.h"
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
......@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl);
}
irqreturn_t hdmi_irq(int irq, void *dev_id)
static irqreturn_t hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
......@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
void hdmi_destroy(struct kref *kref)
static void hdmi_destroy(struct hdmi *hdmi)
{
struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
struct hdmi_phy *phy = hdmi->phy;
if (phy)
......@@ -84,8 +84,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
kref_init(&hdmi->refcount);
hdmi->pdev = pdev;
hdmi->config = config;
......@@ -182,7 +180,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
fail:
if (hdmi)
hdmi_destroy(&hdmi->refcount);
hdmi_destroy(hdmi);
return ERR_PTR(ret);
}
......@@ -200,7 +198,6 @@ int hdmi_modeset_init(struct hdmi *hdmi,
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = hdmi->pdev;
struct hdmi_platform_config *config = pdev->dev.platform_data;
int ret;
hdmi->dev = dev;
......@@ -224,23 +221,21 @@ int hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
if (!config->shared_irq) {
hdmi->irq = platform_get_irq(pdev, 0);
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
dev_err(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
ret = devm_request_irq(&pdev->dev, hdmi->irq,
hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi);
if (ret < 0) {
dev_err(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret);
goto fail;
}
}
encoder->bridge = hdmi->bridge;
......@@ -271,12 +266,6 @@ int hdmi_modeset_init(struct hdmi *hdmi,
#include <linux/of_gpio.h>
static void set_hdmi(struct drm_device *dev, struct hdmi *hdmi)
{
struct msm_drm_private *priv = dev->dev_private;
priv->hdmi = hdmi;
}
#ifdef CONFIG_OF
static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
{
......@@ -297,6 +286,8 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config config = {};
struct hdmi *hdmi;
#ifdef CONFIG_OF
......@@ -318,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
config.shared_irq = true;
} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
......@@ -392,14 +382,19 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi = hdmi_init(to_platform_device(dev));
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
set_hdmi(dev_get_drvdata(master), hdmi);
priv->hdmi = hdmi;
return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
set_hdmi(dev_get_drvdata(master), NULL);
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) {
hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
}
static const struct component_ops hdmi_ops = {
......
......@@ -38,8 +38,6 @@ struct hdmi_audio {
};
struct hdmi {
struct kref refcount;
struct drm_device *dev;
struct platform_device *pdev;
......@@ -97,13 +95,9 @@ struct hdmi_platform_config {
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
int mux_lpm_gpio;
/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
bool shared_irq;
};
void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
void hdmi_destroy(struct kref *kref);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
......@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
return msm_readl(hdmi->mmio + reg);
}
static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
{
kref_get(&hdmi->refcount);
return hdmi;
}
static inline void hdmi_unreference(struct hdmi *hdmi)
{
kref_put(&hdmi->refcount, hdmi_destroy);
}
/*
* The phy appears to be different, for example between 8960 and 8x60,
* so split the phy related functions out and load the correct one at
......
......@@ -26,7 +26,6 @@ struct hdmi_bridge {
static void hdmi_bridge_destroy(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
hdmi_unreference(hdmi_bridge->hdmi);
drm_bridge_cleanup(bridge);
kfree(hdmi_bridge);
}
......@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
goto fail;
}
hdmi_bridge->hdmi = hdmi_reference(hdmi);
hdmi_bridge->hdmi = hdmi;
bridge = &hdmi_bridge->base;
......
......@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
hdmi_unreference(hdmi_connector->hdmi);
kfree(hdmi_connector);
}
......@@ -425,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
goto fail;
}
hdmi_connector->hdmi = hdmi_reference(hdmi);
hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
connector = &hdmi_connector->base;
......
......@@ -167,18 +167,8 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_plane *plane;
int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0;
static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
[VG2] = 2,
[RGB1] = 0,
......@@ -187,14 +177,44 @@ static void blend_setup(struct drm_crtc *crtc)
[VG3] = 3,
[VG4] = 4,
};
bool alpha[4]= { false, false, false, false };
};
/* Don't rely on value read back from hw, but instead use our
* own shadowed value. Possibly disable/reenable looses the
* previous value and goes back to power-on default?
/* setup mixer config, for which we need to consider all crtc's and
* the planes attached to them
*
* TODO may possibly need some extra locking here
*/
mixer_cfg = mdp4_kms->mixer_cfg;
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
struct drm_crtc *crtc;
uint32_t mixer_cfg = 0;
static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
list_for_each_entry(crtc, &config->crtc_list, head) {
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane;
for_each_plane_on_crtc(crtc, plane) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
pipe_id, stages[idx]);
}
}
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_plane *plane;
int i, ovlp = mdp4_crtc->ovlp;
bool alpha[4]= { false, false, false, false };
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
......@@ -209,13 +229,8 @@ static void blend_setup(struct drm_crtc *crtc)
to_mdp_format(msm_framebuffer_format(plane->fb));
alpha[idx-1] = format->alpha_enable;
}
mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
pipe_id, stages[idx]);
}
/* this shouldn't happen.. and seems to cause underflow: */
WARN_ON(!mixer_cfg);
for (i = 0; i < 4; i++) {
uint32_t op;
......@@ -238,8 +253,7 @@ static void blend_setup(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
}
mdp4_kms->mixer_cfg = mixer_cfg;
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
setup_mixer(mdp4_kms);
}
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
......
......@@ -32,13 +32,6 @@ struct mdp4_kms {
int rev;
/* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
* crtcs/encoders is in one shared register, we need to update it
* via read/modify/write. But to avoid getting confused by power-
* on-default values after resume, use this shadow value instead:
*/
uint32_t mixer_cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
......
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mdp5_kms.h"
#include "mdp5_cfg.h"
struct mdp5_cfg_handler {
int revision;
struct mdp5_cfg config;
};
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
const struct mdp5_cfg_hw msm8x74_config = {
.name = "msm8x74",
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 3,
.base = { 0x01200, 0x01600, 0x01a00 },
},
.pipe_rgb = {
.count = 3,
.base = { 0x01e00, 0x02200, 0x02600 },
},
.pipe_dma = {
.count = 2,
.base = { 0x02a00, 0x02e00 },
},
.lm = {
.count = 5,
.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
.nb_stages = 5,
},
.dspp = {
.count = 3,
.base = { 0x04600, 0x04a00, 0x04e00 },
},
.ad = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
.max_clk = 200000000,
};
const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
.reserved[CID_RGB0] = 2,
.reserved[CID_RGB1] = 2,
.reserved[CID_RGB2] = 2,
.reserved[CID_RGB3] = 2,
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 4,
.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
},
.pipe_rgb = {
.count = 4,
.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
},
.pipe_dma = {
.count = 2,
.base = { 0x03200, 0x03600 },
},
.lm = {
.count = 6,
.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
.nb_stages = 5,
},
.dspp = {
.count = 4,
.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
},
.ad = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74_config } },
{ .revision = 2, .config = { .hw = &msm8x74_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->config.hw;
}
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
{
return &cfg_handler->config;
}
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->revision;
}
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
{
kfree(cfg_handler);
}
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
struct platform_device *pdev = dev->platformdev;
struct mdp5_cfg_handler *cfg_handler;
struct mdp5_cfg_platform *pconfig;
int i, ret = 0;
cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
ret = -ENOMEM;
goto fail;
}
if (major != 1) {
dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
if (cfg_handlers[i].revision != minor)
continue;
mdp5_cfg = cfg_handlers[i].config.hw;
break;
}
if (unlikely(!mdp5_cfg)) {
dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
cfg_handler->revision = minor;
cfg_handler->config.hw = mdp5_cfg;
pconfig = mdp5_get_config(pdev);
memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
return NULL;
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_cfg_platform config = {};
#ifdef CONFIG_OF
/* TODO */
#endif
config.iommu = iommu_domain_alloc(&platform_bus_type);
return &config;
}
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MDP5_CFG_H__
#define __MDP5_CFG_H__
#include "msm_drv.h"
/*
* mdp5_cfg
*
* This module configures the dynamic offsets used by mdp5.xml.h
* (initialized in mdp5_cfg.c)
*/
extern const struct mdp5_cfg_hw *mdp5_cfg;
#define MAX_CTL 8
#define MAX_BASES 8
#define MAX_SMP_BLOCKS 44
#define MAX_CLIENTS 32
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
#define MDP5_SUB_BLOCK_DEFINITION \
int count; \
uint32_t base[MAX_BASES]
struct mdp5_sub_block {
MDP5_SUB_BLOCK_DEFINITION;
};
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
uint32_t nb_stages; /* number of stages per blender */
};
struct mdp5_smp_block {
int mmb_count; /* number of SMP MMBs */
int mmb_size; /* MMB: size in bytes */
mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
};
struct mdp5_cfg_hw {
char *name;
struct mdp5_smp_block smp;
struct mdp5_sub_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
struct mdp5_sub_block intf;
uint32_t max_clk;
};
/* platform config data (ie. from DT, or pdata) */
struct mdp5_cfg_platform {
struct iommu_domain *iommu;
};
struct mdp5_cfg {
const struct mdp5_cfg_hw *hw;
struct mdp5_cfg_platform platform;
};
struct mdp5_kms;
struct mdp5_cfg_handler;
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
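Usage of this header's API, condensed from the mdp5_kms_init() hunk further down (a sketch only; error handling is trimmed and the surrounding declarations are assumed from that hunk):

	/* condensed from mdp5_kms_init() below: */
	read_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg))
		return PTR_ERR(mdp5_kms->cfg);

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);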
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
/*
* CTL - MDP Control Pool Manager
*
* Controls are shared between all CRTCs.
*
* They are intended to be used for data path configuration.
* The top level register programming describes the complete data path for
* a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
*
* Hardware capabilities determine the number of concurrent data paths
*
* In certain use cases (high-resolution dual pipe), one single CTL can be
* shared across multiple CRTCs.
*
* Because the number of CTLs can be less than the number of CRTCs,
* CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
* requested by the client (in mdp5_crtc_mode_set()).
*/
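To make the allocation flow concrete, here is a minimal usage sketch from a CRTC's point of view; the interface and LM numbers are assumptions from the surrounding crtc/encoder setup, and error handling is trimmed:

	/* Hedged sketch only: how a crtc might drive a CTL through its
	 * lifecycle. 'intf' and 'lm' are assumed inputs.
	 */
	static int example_ctl_usage(struct mdp5_ctl_manager *ctlm,
			struct drm_crtc *crtc, enum mdp5_intf intf, u32 lm)
	{
		struct mdp5_ctl *ctl;

		ctl = mdp5_ctlm_request(ctlm, crtc);	/* grab a free CTL, marks it busy */
		if (!ctl)
			return -EBUSY;

		mdp5_ctl_set_intf(ctl, intf);		/* route the data path to the interface */
		mdp5_ctl_blend(ctl, lm, mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE));

		/* latch the double-buffered registers: */
		mdp5_ctl_commit(ctl, mdp5_ctl_get_flush(ctl) | mdp_ctl_flush_mask_lm(lm));

		/* ... and when the crtc is torn down: */
		mdp5_ctl_release(ctl);
		return 0;
	}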
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
/* whether this CTL has been allocated or not: */
bool busy;
/* memory output connection (@see mdp5_ctl_mode): */
u32 mode;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
u32 reg_offset;
/* flush mask used to commit CTL registers */
u32 flush_mask;
bool cursor_on;
struct drm_crtc *crtc;
};
struct mdp5_ctl_manager {
struct drm_device *dev;
/* number of CTL / Layer Mixers in this hw config: */
u32 nlm;
u32 nctl;
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
mdp5_write(mdp5_kms, reg, data);
}
static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
return mdp5_read(mdp5_kms, reg);
}
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
unsigned long flags;
static const enum mdp5_intfnum intfnum[] = {
INTF0, INTF1, INTF2, INTF3,
};
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
MDP5_CTL_OP_MODE(ctl->mode) |
MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
spin_unlock_irqrestore(&ctl->hw_lock, flags);
return 0;
}
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
int lm;
lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
return -EINVAL;
}
spin_lock_irqsave(&ctl->hw_lock, flags);
blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
if (enable)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
ctl->cursor_on = enable;
return 0;
}
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
unsigned long flags;
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
return 0;
}
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
int lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
return -EINVAL;
}
/* for current targets, cursor bit is the same as LM bit */
flush_mask |= mdp_ctl_flush_mask_lm(lm);
}
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
return 0;
}
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
return ctl->flush_mask;
}
void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
ctl->id, ctl->busy);
return;
}
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
ctl->busy = false;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("CTL %d released", ctl->id);
}
/*
* mdp5_ctlm_request() - CTL dynamic allocation
*
* Note: the current implementation assumes only one CRTC per CTL
*
* @return first free CTL
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
struct drm_crtc *crtc)
{
struct mdp5_ctl *ctl = NULL;
unsigned long flags;
int c;
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
for (c = 0; c < ctl_mgr->nctl; c++)
if (!ctl_mgr->ctls[c].busy)
break;
if (unlikely(c >= ctl_mgr->nctl)) {
dev_err(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
}
ctl = &ctl_mgr->ctls[c];
ctl->crtc = crtc;
ctl->busy = true;
DBG("CTL %d allocated", ctl->id);
unlock:
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
return ctl;
}
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
unsigned long flags;
int c;
for (c = 0; c < ctl_mgr->nctl; c++) {
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
}
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr;
const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
dev_err(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
dev_err(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
}
/* initialize the CTL manager: */
ctl_mgr->dev = dev;
ctl_mgr->nlm = hw_cfg->lm.count;
ctl_mgr->nctl = ctl_cfg->count;
spin_lock_init(&ctl_mgr->pool_lock);
/* initialize each CTL of the pool: */
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
for (c = 0; c < ctl_mgr->nctl; c++) {
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
dev_err(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
goto fail;
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
ctl->mode = MODE_NONE;
ctl->reg_offset = ctl_cfg->base[c];
ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
ctl->busy = false;
spin_lock_init(&ctl->hw_lock);
}
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
return ctl_mgr;
fail:
if (ctl_mgr)
mdp5_ctlm_destroy(ctl_mgr);
return ERR_PTR(ret);
}
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MDP5_CTL_H__
#define __MDP5_CTL_H__
#include "msm_drv.h"
/*
* CTL Manager prototypes:
* mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
* which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
*/
struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
* mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
/* @blend_cfg: see LM blender config definition below */
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
/* @flush_mask: see CTL flush masks definitions below */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
void mdp5_ctl_release(struct mdp5_ctl *ctl);
/*
* blend_cfg (LM blender config):
*
* The function below allows the caller of mdp5_ctl_blend() to specify how pipes
* are being blended according to their stage (z-order), through @blend_cfg arg.
*/
static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
enum mdp_mixer_stage_id stage)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
default: return 0;
}
}
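For illustration, a @blend_cfg stacking two pipes on one LM could be composed like this (the pipe/stage pairing here is made up):

	/* illustrative stage assignment only: */
	u32 blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
			mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);

	mdp5_ctl_blend(ctl, lm, blend_cfg);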
/*
* flush_mask (CTL flush masks):
*
* The following functions allow each DRM entity to get and store
* its own flush mask.
* Once stored, these masks are accessed through each DRM entity's
* interface and used by the caller of mdp5_ctl_commit() to specify
* which block(s) need to be flushed through the @flush_mask parameter.
*/
#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
/* TODO: use id once multiple cursor support is present */
(void)cursor_id;
return MDP5_CTL_FLUSH_CURSOR_DUMMY;
}
static inline u32 mdp_ctl_flush_mask_lm(int lm)
{
switch (lm) {
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
case 5: return MDP5_CTL_FLUSH_LM5;
default: return 0;
}
}
static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
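Correspondingly, a caller updating a plane on pipe VIG0 feeding LM 0 might build its @flush_mask like so (the pipe and LM values are assumed):

	/* sketch: flush the pipe and the mixer it feeds, in one commit */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
			     mdp_ctl_flush_mask_lm(0));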
#endif /* __MDP5_CTL_H__ */
......@@ -24,6 +24,7 @@ struct mdp5_encoder {
struct drm_encoder base;
int intf;
enum mdp5_intf intf_id;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
};
......@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf;
bool enabled = (mode == DRM_MODE_DPMS_ON);
unsigned long flags;
DBG("mode=%d", mode);
......@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
if (enabled) {
bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
} else {
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
* otherwise we end up in a funny state if we re-enable
* before the disable latches, with the result that some of
* the setting changes for the new modeset (like the new
* scanout buffer) don't latch properly..
*/
mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
bs_set(mdp5_encoder, 0);
}
......@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
uint32_t format;
unsigned long flags;
mode = adjusted_mode;
......@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
......@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
}
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
......@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
mdp5_encoder->intf_id = intf_id;
encoder = &mdp5_encoder->base;
spin_lock_init(&mdp5_encoder->intf_lock);
drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
......
......@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
......@@ -82,18 +84,23 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
uint32_t intr;
intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
VERB("intr=%08x", intr);
if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
}
if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
hdmi_irq(0, priv->hdmi);
while (intr) {
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
mdp5_kms->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
return IRQ_HANDLED;
}
......@@ -110,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
}
/*
* interrupt-controller implementation, so that sub-blocks (hdmi/eDP/dsi/etc)
* can register to get their irqs delivered
*/
#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
MDP5_HW_INTR_STATUS_INTR_DSI1 | \
MDP5_HW_INTR_STATUS_INTR_HDMI | \
MDP5_HW_INTR_STATUS_INTR_EDP)
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdp5_hw_unmask_irq(struct irq_data *irqd)
{
struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static struct irq_chip mdp5_hw_irq_chip = {
.name = "mdp5",
.irq_mask = mdp5_hw_mask_irq,
.irq_unmask = mdp5_hw_unmask_irq,
};
static int mdp5_hw_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
struct mdp5_kms *mdp5_kms = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
irq_set_chip_data(irq, mdp5_kms);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
.map = mdp5_hw_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
{
struct device *dev = mdp5_kms->dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32,
&mdp5_hw_irqdomain_ops, mdp5_kms);
if (!d) {
dev_err(dev, "mdp5 irq domain add failed\n");
return -ENXIO;
}
mdp5_kms->irqcontroller.enabled_mask = 0;
mdp5_kms->irqcontroller.domain = d;
return 0;
}
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
{
if (mdp5_kms->irqcontroller.domain) {
irq_domain_remove(mdp5_kms->irqcontroller.domain);
mdp5_kms->irqcontroller.domain = NULL;
}
}
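On the consumer side, a sub-block resolves its MDP-routed interrupt through this domain via DT; condensed from the hdmi_modeset_init() change earlier in this diff:

	/* condensed from the hdmi_modeset_init() hunk above: */
	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (hdmi->irq < 0) {
		ret = hdmi->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, hdmi->irq, hdmi_irq,
			IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "hdmi_isr", hdmi);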
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
......@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
"mdp_0",
};
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
const struct mdp5_config *mdp5_cfg;
static const struct mdp5_config msm8x74_config = {
.name = "msm8x74",
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 3,
.base = { 0x01200, 0x01600, 0x01a00 },
},
.pipe_rgb = {
.count = 3,
.base = { 0x01e00, 0x02200, 0x02600 },
},
.pipe_dma = {
.count = 2,
.base = { 0x02a00, 0x02e00 },
},
.lm = {
.count = 5,
.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
},
.dspp = {
.count = 3,
.base = { 0x04600, 0x04a00, 0x04e00 },
},
.ad = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
};
static const struct mdp5_config apq8084_config = {
.name = "apq8084",
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 4,
.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
},
.pipe_rgb = {
.count = 4,
.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
},
.pipe_dma = {
.count = 2,
.base = { 0x03200, 0x03600 },
},
.lm = {
.count = 6,
.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
},
.dspp = {
.count = 4,
.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
},
.ad = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
};
struct mdp5_config_entry {
int revision;
const struct mdp5_config *config;
};
static const struct mdp5_config_entry mdp5_configs[] = {
{ .revision = 0, .config = &msm8x74_config },
{ .revision = 2, .config = &msm8x74_config },
{ .revision = 3, .config = &apq8084_config },
};
static int mdp5_select_hw_cfg(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
uint32_t version, major, minor;
int i, ret = 0;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
mdp5_disable(mdp5_kms);
major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
DBG("found MDP5 version v%d.%d", major, minor);
if (major != 1) {
dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
}
mdp5_kms->rev = minor;
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
if (mdp5_configs[i].revision != minor)
continue;
mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
break;
}
if (unlikely(!mdp5_kms->hw_cfg)) {
dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
}
DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
return 0;
out:
return ret;
}
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
int i;
unsigned long flags;
pm_runtime_get_sync(dev->dev);
......@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
* care.
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
pm_runtime_put_sync(dev->dev);
......@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
mdp5_irq_domain_fini(mdp5_kms);
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
if (mdp5_kms->smp)
mdp5_smp_destroy(mdp5_kms->smp);
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
kfree(mdp5_kms);
}
......@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
static const enum mdp5_pipe pub_planes[] = {
SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
/* construct CRTCs: */
for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/* register our interrupt-controller for hdmi/eDP/dsi/etc
* to use for irqs routed through mdp:
*/
ret = mdp5_irq_domain_init(mdp5_kms);
if (ret)
goto fail;
/* construct CRTCs and their private planes: */
for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
plane = mdp5_plane_init(dev, crtcs[i], true);
plane = mdp5_plane_init(dev, crtcs[i], true,
hw_cfg->pipe_rgb.base[i]);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
......@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
/* Construct public planes: */
for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
struct drm_plane *plane;
plane = mdp5_plane_init(dev, pub_planes[i], false,
hw_cfg->pipe_vig.base[i]);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct %s plane: %d\n",
pipe2name(pub_planes[i]), ret);
goto fail;
}
}
/* Construct encoder for HDMI: */
encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
if (IS_ERR(encoder)) {
......@@ -338,6 +244,21 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
return ret;
}
static void read_hw_revision(struct mdp5_kms *mdp5_kms,
uint32_t *major, uint32_t *minor)
{
uint32_t version;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
mdp5_disable(mdp5_kms);
*major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
*minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
......@@ -354,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
struct mdp5_platform_config *config = mdp5_get_config(pdev);
struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
uint32_t major, minor;
int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
......@@ -367,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
spin_lock_init(&mdp5_kms->resource_lock);
mdp_kms_init(&mdp5_kms->base, &kms_funcs);
kms = &mdp5_kms->base.base;
mdp5_kms->dev = dev;
mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
......@@ -417,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (ret)
goto fail;
ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
clk_set_rate(mdp5_kms->src_clk, 200000000);
ret = mdp5_select_hw_cfg(kms);
if (ret)
read_hw_revision(mdp5_kms, &major, &minor);
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
ret = PTR_ERR(mdp5_kms->cfg);
mdp5_kms->cfg = NULL;
goto fail;
}
config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* TODO: compute core clock rate at runtime */
clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
goto fail;
}
mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
if (IS_ERR(mdp5_kms->ctlm)) {
ret = PTR_ERR(mdp5_kms->ctlm);
mdp5_kms->ctlm = NULL;
goto fail;
}
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
for (i = 0; i < config->hw->intf.count; i++)
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
if (config->iommu) {
mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (config->platform.iommu) {
mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
......@@ -475,18 +426,3 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_destroy(kms);
return ERR_PTR(ret);
}
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
#endif
config.iommu = iommu_domain_alloc(&platform_bus_type);
/* TODO hard-coded in downstream mdss, but should it be? */
config.max_clk = 200000000;
/* TODO get from DT: */
config.smp_blk_cnt = 22;
return &config;
}
......@@ -21,25 +21,9 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
#define MDP5_MAX_BASES 8
struct mdp5_sub_block {
int count;
uint32_t base[MDP5_MAX_BASES];
};
struct mdp5_config {
char *name;
struct mdp5_sub_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_sub_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
struct mdp5_sub_block intf;
};
extern const struct mdp5_config *mdp5_cfg;
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_kms {
......@@ -47,17 +31,14 @@ struct mdp5_kms {
struct drm_device *dev;
int rev;
const struct mdp5_config *hw_cfg;
struct mdp5_cfg_handler *cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
struct msm_mmu *mmu;
/* for tracking smp allocation amongst pipes: */
mdp5_smp_state_t smp_state;
struct mdp5_client_smp_state smp_client_state[CID_MAX];
int smp_blk_cnt;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
void __iomem *mmio, *vbif;
......@@ -71,16 +52,47 @@ struct mdp5_kms {
struct clk *lut_clk;
struct clk *vsync_clk;
/*
* lock to protect access to global resources: ie., following register:
* - REG_MDP5_DISP_INTF_SEL
*/
spinlock_t resource_lock;
struct mdp_irq error_handler;
struct {
volatile unsigned long enabled_mask;
struct irq_domain *domain;
} irqcontroller;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
/* platform config data (ie. from DT, or pdata) */
struct mdp5_platform_config {
struct iommu_domain *iommu;
uint32_t max_clk;
int smp_blk_cnt;
struct mdp5_plane_state {
struct drm_plane_state base;
/* "virtual" zpos.. we calculate actual mixer-stage at runtime
* by sorting the attached planes by zpos and then assigning
* mixer stage lowest to highest. Private planes get default
* zpos of zero, and public planes a unique value that is
* greater than zero. This way, things work out if a naive
* userspace assigns planes to a crtc without setting zpos.
*/
int zpos;
/* the actual mixer stage, calculated in crtc->atomic_check()
* NOTE: this should move to mdp5_crtc_state, when that exists
*/
enum mdp_mixer_stage_id stage;
/* some additional transactional status to help us know in the
* apply path whether we need to update SMP allocation, and
* whether current update is still pending:
*/
bool mode_changed : 1;
bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
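A hypothetical sketch of the stage assignment the zpos comment above describes; the real logic belongs in the crtc's atomic_check(), and the sorted-array input is an assumption:

	/* hypothetical: 'pstates' already sorted by ->zpos, cnt <= STAGE3 + 1 */
	static void assign_stages(struct mdp5_plane_state **pstates, int cnt)
	{
		static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
		};
		int i;

		for (i = 0; i < cnt; i++)
			pstates[i]->stage = stages[i];
	}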
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
......@@ -105,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
return names[pipe];
}
static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
static inline int pipe2nclients(enum mdp5_pipe pipe)
{
switch (pipe) {
......@@ -135,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
WARN_ON(plane >= pipe2nclients(pipe));
switch (pipe) {
case SSPP_VIG0: return CID_VIG0_Y + plane;
case SSPP_VIG1: return CID_VIG1_Y + plane;
case SSPP_VIG2: return CID_VIG2_Y + plane;
case SSPP_RGB0: return CID_RGB0;
case SSPP_RGB1: return CID_RGB1;
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
case SSPP_VIG3: return CID_VIG3_Y + plane;
case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
static inline uint32_t mixer2flush(int lm)
{
switch (lm) {
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
default: return 0;
}
}
static inline uint32_t intf2err(int intf)
{
switch (intf) {
......@@ -195,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp5_irq(struct msm_kms *kms);
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
......@@ -208,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
void mdp5_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane);
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
......
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
......@@ -29,8 +30,11 @@
* Based on the size of the attached scanout buffer, a certain # of
* blocks must be allocated to that client out of the shared pool.
*
* For each block, it can be either free, or pending/in-use by a
* client. The updates happen in three steps:
* In some hw, some blocks are statically allocated for certain pipes
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
* free, or pending/in-use by a client. The updates happen in three steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
......@@ -61,21 +65,68 @@
* inuse and pending state of all clients..
*/
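Condensed, the three steps look like this from the plane's side (a sketch using the new pipe-based API declared in mdp5_smp.h below; @smp, @pipe, format and width are assumed to come from the plane state):

	/* 1) at scanout setup, reserve blocks (pending): */
	ret = mdp5_smp_request(smp, pipe, fb_format, src_width);

	/* 2) when configuring hw, program union(pending, inuse): */
	mdp5_smp_configure(smp, pipe);

	/* 3) after vblank, retire: pending becomes inuse: */
	mdp5_smp_commit(smp, pipe);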
static DEFINE_SPINLOCK(smp_lock);
struct mdp5_smp {
struct drm_device *dev;
int blk_cnt;
int blk_size;
spinlock_t state_lock;
mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
struct mdp5_client_smp_state client_state[CID_MAX];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
struct msm_drm_private *priv = smp->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
WARN_ON(plane >= pipe2nclients(pipe));
switch (pipe) {
case SSPP_VIG0: return CID_VIG0_Y + plane;
case SSPP_VIG1: return CID_VIG1_Y + plane;
case SSPP_VIG2: return CID_VIG2_Y + plane;
case SSPP_RGB0: return CID_RGB0;
case SSPP_RGB1: return CID_RGB1;
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
case SSPP_VIG3: return CID_VIG3_Y + plane;
case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
/* step #1: update # of blocks pending for the client: */
int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
static int smp_request_block(struct mdp5_smp *smp,
enum mdp5_client_id cid, int nblks)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
struct mdp5_kms *mdp5_kms = get_kms(smp);
const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
int reserved;
unsigned long flags;
spin_lock_irqsave(&smp_lock, flags);
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
reserved = hw_cfg->smp.reserved[cid];
spin_lock_irqsave(&smp->state_lock, flags);
avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
nblks -= reserved;
if (reserved)
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
avail = cnt - bitmap_weight(smp->state, cnt);
if (nblks > avail) {
dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
ret = -ENOSPC;
goto fail;
}
......@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
if (nblks > cur_nblks) {
/* grow the existing pending reservation: */
for (i = cur_nblks; i < nblks; i++) {
int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
int blk = find_first_zero_bit(smp->state, cnt);
set_bit(blk, ps->pending);
set_bit(blk, mdp5_kms->smp_state);
set_bit(blk, smp->state);
}
} else {
/* shrink the existing pending reservation: */
......@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
}
fail:
spin_unlock_irqrestore(&smp_lock, flags);
spin_unlock_irqrestore(&smp->state_lock, flags);
return 0;
}
static void set_fifo_thresholds(struct mdp5_smp *smp,
enum mdp5_pipe pipe, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
u32 val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * smp_entries_per_blk) / 4;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
/*
* NOTE: looks like if horizontal decimation is used (if we supported that)
* then the width used to calculate SMP block requirements is the post-
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines, nblks, ret;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
for (i = 0, nblks = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = drm_format_plane_cpp(fmt, i);
fetch_stride = width * cpp / (i ? hsub : 1);
n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
/* for hw rev v1.00 */
if (rev == 0)
n = roundup_pow_of_two(n);
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, pipe2client(pipe, i), n);
if (ret) {
dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
nblks += n;
}
set_fifo_thresholds(smp, pipe, nblks);
return 0;
}
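As a worked example (format and width assumed): a 1920-wide XRGB8888 plane on hardware with 4096-byte MMBs has nplanes = 1, cpp = 4 and nlines = 2, so fetch_stride * nlines = 1920 * 4 * 2 = 15360 bytes, and DIV_ROUND_UP(15360, 4096) = 4 MMBs; 4 is already a power of two, so the v1.00 rounding changes nothing.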
static void update_smp_state(struct mdp5_kms *mdp5_kms,
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int i, nblks;
for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}
static void update_smp_state(struct mdp5_smp *smp,
enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
int cnt = mdp5_kms->smp_blk_cnt;
uint32_t blk, val;
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
......@@ -135,22 +259,31 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
}
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int cnt = mdp5_kms->smp_blk_cnt;
int cnt = smp->blk_cnt;
mdp5_smp_state_t assigned;
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
enum mdp5_client_id cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
update_smp_state(mdp5_kms, cid, &assigned);
update_smp_state(smp, cid, &assigned);
}
}
/* step #3: after vblank, copy pending -> inuse: */
void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int cnt = mdp5_kms->smp_blk_cnt;
int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
enum mdp5_client_id cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
/*
* Figure out if there are any blocks we were previously
......@@ -160,14 +293,46 @@ void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
unsigned long flags;
spin_lock_irqsave(&smp_lock, flags);
spin_lock_irqsave(&smp->state_lock, flags);
/* clear released blocks: */
bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
released, cnt);
spin_unlock_irqrestore(&smp_lock, flags);
bitmap_andnot(smp->state, smp->state, released, cnt);
spin_unlock_irqrestore(&smp->state_lock, flags);
update_smp_state(mdp5_kms, CID_UNUSED, &released);
update_smp_state(smp, CID_UNUSED, &released);
}
bitmap_copy(ps->inuse, ps->pending, cnt);
}
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
kfree(smp);
}
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
struct mdp5_smp *smp = NULL;
int ret;
smp = kzalloc(sizeof(*smp), GFP_KERNEL);
if (unlikely(!smp)) {
ret = -ENOMEM;
goto fail;
}
smp->dev = dev;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
/* statically tied MMBs cannot be re-allocated: */
bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
spin_lock_init(&smp->state_lock);
return smp;
fail:
if (smp)
mdp5_smp_destroy(smp);
return ERR_PTR(ret);
}
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
......@@ -20,22 +21,26 @@
#include "msm_drv.h"
#define MAX_SMP_BLOCKS 22
#define SMP_BLK_SIZE 4096
#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
mdp5_smp_state_t pending;
};
struct mdp5_kms;
struct mdp5_smp;
/*
* SMP module prototypes:
* mdp5_smp_init() returns a SMP @handler,
* which is then used to call the other mdp5_smp_*(handler, ...) functions.
*/
int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */
......@@ -117,7 +117,7 @@ int msm_atomic_commit(struct drm_device *dev,
if (!plane)
continue;
if (plane->state->fb != new_state->fb)
if ((plane->state->fb != new_state->fb) && new_state->fb)
add_fb(c, new_state->fb);
}
......
......@@ -215,7 +215,6 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
struct hdmi;
int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
irqreturn_t hdmi_irq(int irq, void *dev_id);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
......
......@@ -120,6 +120,8 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
return msm_gem_iova(msm_fb->planes[plane], id);
}
......
......@@ -68,6 +68,24 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev);
/* TODO move these helper iterator macro somewhere common: */
#define for_each_plane_on_crtc(_crtc, _plane) \
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
if ((_plane)->crtc == (_crtc))
if ((_plane)->state->crtc == (_crtc))
static inline bool
__plane_will_be_attached_to_crtc(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc)
{
int idx = drm_plane_index(plane);
/* if plane is modified in incoming state, use the new state: */
if (state->plane_states[idx])
return state->plane_states[idx]->crtc == crtc;
/* otherwise, current state: */
return plane->state->crtc == crtc;
}
#define for_each_pending_plane_on_crtc(_state, _crtc, _plane) \
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
if (__plane_will_be_attached_to_crtc((_state), (_plane), (_crtc)))
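A usage sketch for the new iterator (hypothetical caller, e.g. from a crtc atomic_check()):

	/* hypothetical: count planes that will be on this crtc after commit */
	struct drm_plane *plane;
	int cnt = 0;

	for_each_pending_plane_on_crtc(state, crtc, plane)
		cnt++;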
#endif /* __MSM_KMS_H__ */