Commit 14f97d97 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Bunch of fixes all over the place, all pretty small: amdgpu, i915,
  exynos, one qxl and one vmwgfx.

  There is also a bunch of mst fixes, I left some cleanups in the series
  as I didn't think it was worth splitting up the tested series"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (37 commits)
  drm/dp/mst: add some defines for logical/physical ports
  drm/dp/mst: drop cancel work sync in the mstb destroy path (v2)
  drm/dp/mst: split connector registration into two parts (v2)
  drm/dp/mst: update the link_address_sent before sending the link address (v3)
  drm/dp/mst: fixup handling hotplug on port removal.
  drm/dp/mst: don't pass port into the path builder function
  drm/radeon: drop radeon_fb_helper_set_par
  drm: handle cursor_set2 in restore_fbdev_mode
  drm/exynos: Staticize local function in exynos_drm_gem.c
  drm/exynos: fimd: actually disable dp clock
  drm/exynos: dp: remove suspend/resume functions
  drm/qxl: recreate the primary surface when the bo is not primary
  drm/amdgpu: only print meaningful VM faults
  drm/amdgpu/cgs: remove import_gpu_mem
  drm/i915: Call non-locking version of drm_kms_helper_poll_enable(), v2
  drm: Add a non-locking version of drm_kms_helper_poll_enable(), v2
  drm/vmwgfx: Fix a command submission hang regression
  drm/exynos: remove unused mode_fixup() code
  drm/exynos: remove decon_mode_fixup()
  drm/exynos: remove fimd_mode_fixup()
  ...
parents 978ab6a0 ccf03d69
......@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
int r;
uint32_t dma_handle;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
struct drm_device *dev = adev->ddev;
struct drm_file *file_priv = NULL, *priv;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(priv, &dev->filelist, lhead) {
rcu_read_lock();
if (priv->pid == get_pid(task_pid(current)))
file_priv = priv;
rcu_read_unlock();
if (file_priv)
break;
}
mutex_unlock(&dev->struct_mutex);
r = dev->driver->prime_fd_to_handle(dev,
file_priv, dmabuf_fd,
&dma_handle);
spin_lock(&file_priv->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&file_priv->object_idr, dma_handle);
if (obj == NULL) {
spin_unlock(&file_priv->table_lock);
return -EINVAL;
}
spin_unlock(&file_priv->table_lock);
bo = gem_to_amdgpu_bo(obj);
*handle = (cgs_handle_t)bo;
return 0;
}
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
......@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
amdgpu_cgs_import_gpu_mem,
amdgpu_cgs_add_irq_source,
amdgpu_cgs_irq_get,
amdgpu_cgs_irq_put
......
......@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
uint64_t *chunk_array_user;
uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned size, i;
unsigned size;
int i;
int ret;
if (cs->in.num_chunks == 0)
......
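The amdgpu_cs hunk above splits the declaration so the loop index `i` is a signed int. A minimal illustration of why that can matter, assuming (as is common) the index is also used in a counting-down error path; the names below are hypothetical, not from amdgpu_cs.c:

/* With an unsigned index, a countdown cleanup loop never exits:
 * after i reaches 0, i-- wraps to UINT_MAX and i >= 0 stays true. */
unsigned int i;
for (i = nr - 1; i >= 0; i--)   /* BUG: condition is always true */
        put_chunk(&chunks[i]);

int j;
for (j = nr - 1; j >= 0; j--)   /* OK: terminates once j reaches -1 */
        put_chunk(&chunks[j]);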
......@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
} else {
......
......@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
......@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
......
......@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
......@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
......
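The gmc_v7_0 and gmc_v8_0 hunks make the identical change: the fault registers are re-armed immediately after being latched, and faults whose address and status are both zero return early, before anything is printed ("only print meaningful VM faults" in the commit list). A minimal sketch of the resulting handler flow, using the register names from the hunks:

addr      = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status    = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);

/* reset addr and status first, then bail out on empty faults
 * instead of decoding and logging them */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
        return 0;

dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
        entry->src_id, entry->src_data);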
......@@ -26,19 +26,6 @@
#include "cgs_common.h"
/**
* cgs_import_gpu_mem() - Import dmabuf handle
* @cgs_device: opaque device handle
* @dmabuf_fd: DMABuf file descriptor
* @handle: memory handle (output)
*
* Must be called in the process context that dmabuf_fd belongs to.
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
cgs_handle_t *handle);
/**
* cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
* @private_data: private data provided to cgs_add_irq_source
......@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
struct cgs_os_ops {
cgs_import_gpu_mem_t import_gpu_mem;
/* IRQ handling */
cgs_add_irq_source_t add_irq_source;
cgs_irq_get_t irq_get;
cgs_irq_put_t irq_put;
};
#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
private_data)
......
......@@ -53,7 +53,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
......@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
cancel_work_sync(&mstb->mgr->work);
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
......@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
port->vcpi.num_slots = 0;
kfree(port->cached_edid);
/* we can't destroy the connector here, as
we might be holding the mode_config.mutex
from an EDID retrieval */
/*
* The only time we don't have a connector
* on an output port is if the connector init
* fails.
*/
if (port->connector) {
/* we can't destroy the connector here, as
* we might be holding the mode_config.mutex
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
return;
}
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kfree(port);
(*mgr->cbs->hotplug)(mgr);
}
static void drm_dp_put_port(struct drm_dp_mst_port *port)
......@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
}
}
static void build_mst_prop_path(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *mstb,
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
int pnum,
char *proppath,
size_t proppath_size)
{
......@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
snprintf(temp, sizeof(temp), "-%d", port->port_num);
snprintf(temp, sizeof(temp), "-%d", pnum);
strlcat(proppath, temp, proppath_size);
}
......@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_port_teardown_pdt(port, old_pdt);
ret = drm_dp_port_setup_pdt(port);
if (ret == true) {
if (ret == true)
drm_dp_send_link_address(mstb->mgr, port->mstb);
port->mstb->link_address_sent = true;
}
}
if (created && !port->input) {
char proppath[255];
build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
if (port->port_num >= 8) {
build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
if (!port->connector) {
/* remove it from the port list */
mutex_lock(&mstb->mgr->lock);
list_del(&port->next);
mutex_unlock(&mstb->mgr->lock);
/* drop port list reference */
drm_dp_put_port(port);
goto out;
}
if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
}
out:
/* put reference to this port */
drm_dp_put_port(port);
}
......@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *mstb_child;
if (!mstb->link_address_sent) {
if (!mstb->link_address_sent)
drm_dp_send_link_address(mgr, mstb);
mstb->link_address_sent = true;
}
list_for_each_entry(port, &mstb->ports, next) {
if (port->input)
continue;
......@@ -1458,7 +1469,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
mutex_unlock(&mgr->qlock);
}
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
int len;
......@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
return;
txmsg->dst = mstb;
len = build_link_address(txmsg);
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
......@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
}
(*mgr->cbs->hotplug)(mgr);
}
} else
} else {
mstb->link_address_sent = false;
DRM_DEBUG_KMS("link address failed %d\n", ret);
}
kfree(txmsg);
return 0;
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
......@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
......@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
else
else {
edid = drm_get_edid(connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(connector);
}
drm_dp_put_port(port);
return edid;
}
......@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
struct drm_dp_mst_port *port;
bool send_hotplug = false;
/*
* Not a regular list traverse as we have to drop the destroy
* connector lock before destroying the connector, to avoid AB->BA
......@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
kfree(port);
send_hotplug = true;
}
if (send_hotplug)
(*mgr->cbs->hotplug)(mgr);
}
/**
......@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
......
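The net effect of the connector-registration split in this file shows up in the i915 and radeon hunks further down: .add_connector now only creates the connector, and the new .register_connector callback exposes it to userspace once drm_dp_add_port() has finished wiring it up. A hedged sketch of the driver-side shape, with hypothetical foo_* names:

static struct drm_connector *
foo_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                      struct drm_dp_mst_port *port, const char *path)
{
        struct drm_connector *connector = foo_create_connector(mgr, port);

        if (!connector)
                return NULL;    /* core unlinks the port and drops it */
        drm_mode_connector_set_path_property(connector, path);
        return connector;       /* created, but not yet registered */
}

static void foo_register_mst_connector(struct drm_connector *connector)
{
        drm_connector_register(connector);  /* now visible to userspace */
}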
......@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
struct drm_crtc *crtc = mode_set->crtc;
int ret;
if (crtc->funcs->cursor_set) {
if (crtc->funcs->cursor_set2) {
ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
if (ret)
error = true;
} else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
error = true;
......
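restore_fbdev_mode() now prefers cursor_set2 over cursor_set, so drivers that only implement the hotspot-aware variant (radeon, after dropping radeon_fb_helper_set_par later in this diff) still get their cursor hidden on fbdev restore. For reference, the two callbacks differ only in the hotspot arguments; signatures as best recalled from this kernel generation's struct drm_crtc_funcs:

int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
                  uint32_t handle, uint32_t width, uint32_t height);
int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
                   uint32_t handle, uint32_t width, uint32_t height,
                   int32_t hot_x, int32_t hot_y);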
......@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void __drm_kms_helper_poll_enable(struct drm_device *dev)
/**
* drm_kms_helper_poll_enable_locked - re-enable output polling.
* @dev: drm_device
*
* This function re-enables the output polling work without
* locking the mode_config mutex.
*
* This is like drm_kms_helper_poll_enable() however it is to be
* called from a context where the mode_config mutex is locked
* already.
*/
void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
......@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
......@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
__drm_kms_helper_poll_enable(dev);
drm_kms_helper_poll_enable_locked(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
......@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
__drm_kms_helper_poll_enable(dev);
drm_kms_helper_poll_enable_locked(dev);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
......
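A short usage sketch of the new pairing, assuming a caller like the i915 hotplug code below that already holds the mode_config mutex:

/* already holding dev->mode_config.mutex: */
drm_kms_helper_poll_enable_locked(dev);

/* not holding it: the wrapper takes and drops the mutex itself */
drm_kms_helper_poll_enable(dev);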
......@@ -37,7 +37,6 @@
* DECON stands for Display and Enhancement controller.
*/
#define DECON_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
......@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (adjusted_mode->vrefresh == 0)
adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
return true;
}
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
......@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
.mode_fixup = decon_mode_fixup,
.commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
......
......@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
exynos_dp_disable(&dp->encoder);
return 0;
}
static int exynos_dp_resume(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
exynos_dp_enable(&dp->encoder);
return 0;
}
#endif
static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
};
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
......@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
.pm = &exynos_dp_pm_ops,
.of_match_table = exynos_dp_match,
},
};
......
......@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
{
......@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
int exynos_drm_device_subdrv_probe(struct drm_device *dev)
{
......@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
int exynos_drm_device_subdrv_remove(struct drm_device *dev)
{
......@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
{
......@@ -111,7 +107,6 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
}
return ret;
}
EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
{
......@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
subdrv->close(dev, subdrv->dev, file);
}
}
EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
......@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
exynos_crtc->ops->disable(exynos_crtc);
}
static bool
exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->ops->mode_fixup)
return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
adjusted_mode);
return true;
}
static void
exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
......@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.enable = exynos_drm_crtc_enable,
.disable = exynos_drm_crtc_disable,
.mode_fixup = exynos_drm_crtc_mode_fixup,
.mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
......
......@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
......@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
return 0;
}
#endif
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
......
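The hunk above only adds #ifdef CONFIG_PM_SLEEP guards around exynos_drm_suspend()/exynos_drm_resume(). The general pattern, sketched with hypothetical foo_* names: the guard keeps the compiler from warning about defined-but-unused functions, because SET_SYSTEM_SLEEP_PM_OPS() (and, here, the sleep-only callers) compile to nothing without CONFIG_PM_SLEEP:

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
        /* ... */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* ... */
        return 0;
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
        /* expands to empty when CONFIG_PM_SLEEP is off, so the
         * functions above must be compiled out too */
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};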
......@@ -82,7 +82,6 @@ struct exynos_drm_plane {
*
* @enable: enable the device
* @disable: disable the device
* @mode_fixup: fix mode data before applying it
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
......@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
void (*enable)(struct exynos_drm_crtc *crtc);
void (*disable)(struct exynos_drm_crtc *crtc);
bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
......
......@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
.set_addr = fimc_dst_set_addr,
};
static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
{
DRM_DEBUG_KMS("enable[%d]\n", enable);
if (enable) {
clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
ctx->suspended = false;
} else {
clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
ctx->suspended = true;
}
return 0;
}
static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
struct fimc_context *ctx = dev_id;
......@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
{
DRM_DEBUG_KMS("enable[%d]\n", enable);
if (enable) {
clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
ctx->suspended = false;
} else {
clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
ctx->suspended = true;
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int fimc_suspend(struct device *dev)
{
......@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
}
#endif
#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
......
......@@ -41,7 +41,6 @@
* CPU Interface.
*/
#define FIMD_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware window 0, 2 ~ 4.*/
......@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (adjusted_mode->vrefresh == 0)
adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
return true;
}
static void fimd_commit(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
......@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
return;
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
writel(val, ctx->regs + DP_MIE_CLKCON);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable = fimd_enable,
.disable = fimd_disable,
.mode_fixup = fimd_mode_fixup,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
......
......@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
......@@ -1230,7 +1229,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
g2d_put_cmdlist(g2d, node);
return ret;
}
EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
......@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
out:
return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
......
......@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
nr_pages = obj->size >> PAGE_SHIFT;
if (!is_drm_iommu_supported(dev)) {
dma_addr_t start_addr;
unsigned int i = 0;
obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
if (!obj->pages) {
DRM_ERROR("failed to allocate pages.\n");
return -ENOMEM;
}
}
obj->cookie = dma_alloc_attrs(dev->dev,
obj->size,
&obj->dma_addr, GFP_KERNEL,
&obj->dma_attrs);
obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
GFP_KERNEL, &obj->dma_attrs);
if (!obj->cookie) {
DRM_ERROR("failed to allocate buffer.\n");
if (obj->pages)
drm_free_large(obj->pages);
return -ENOMEM;
}
if (obj->pages) {
dma_addr_t start_addr;
unsigned int i = 0;
start_addr = obj->dma_addr;
while (i < nr_pages) {
obj->pages[i] = phys_to_page(start_addr);
obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
start_addr));
start_addr += PAGE_SIZE;
i++;
}
} else {
obj->pages = dma_alloc_attrs(dev->dev, obj->size,
&obj->dma_addr, GFP_KERNEL,
&obj->dma_attrs);
if (!obj->pages) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
obj->pages = obj->cookie;
}
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
......@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)obj->dma_addr, obj->size);
if (!is_drm_iommu_supported(dev)) {
dma_free_attrs(dev->dev, obj->size, obj->cookie,
(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
drm_free_large(obj->pages);
} else
dma_free_attrs(dev->dev, obj->size, obj->pages,
(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
obj->dma_addr = (dma_addr_t)NULL;
if (!is_drm_iommu_supported(dev))
drm_free_large(obj->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
......@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
* once dmabuf's refcount becomes 0.
*/
if (obj->import_attach)
goto out;
drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
else
exynos_drm_free_buf(exynos_gem_obj);
out:
drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
......@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
return exynos_gem_obj->size;
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
......@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
ret = drm_gem_create_mmap_offset(obj);
if (ret < 0) {
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
return ERR_PTR(ret);
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
return exynos_gem_obj;
......@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
......@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{ struct exynos_drm_gem_obj *exynos_gem_obj;
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
......@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
unsigned int flags;
int ret;
/*
......@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
if (is_drm_iommu_supported(dev)) {
exynos_gem_obj = exynos_drm_gem_create(dev,
EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
args->size);
} else {
exynos_gem_obj = exynos_drm_gem_create(dev,
EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
args->size);
}
if (is_drm_iommu_supported(dev))
flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
else
flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
if (IS_ERR(exynos_gem_obj)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem_obj);
......@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
*offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
......@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
err_close_vm:
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}
......@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
goto err_free_large;
exynos_gem_obj->sgt = sgt;
if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
......
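The key fix in this file is the switch from phys_to_page() to dma_to_pfn(): on ARM, the dma_addr_t returned by dma_alloc_attrs() is a device/bus address and is not guaranteed to equal the CPU physical address, so it must be translated back to a CPU pfn before being turned into a struct page. The resulting fill loop, restated from the hunk above:

for (i = 0; i < nr_pages; i++) {
        /* translate the DMA address to a CPU pfn, then to a page */
        obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, start_addr));
        start_addr += PAGE_SIZE;
}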
......@@ -39,6 +39,7 @@
* - this address could be physical address without IOMMU and
* device address with IOMMU.
* @pages: Array of backing pages.
* @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
......@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
dma_addr_t dma_addr;
struct dma_attrs dma_attrs;
struct page **pages;
struct sg_table *sgt;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
......@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size);
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
......
......@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
......@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
}
#endif
#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
......
......@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
return connector;
}
static void intel_dp_register_mst_connector(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
return connector;
}
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
......@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
......
......@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
drm_kms_helper_poll_enable_locked(dev);
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
......
......@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & 0x07;
write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
if (read_pointer > write_pointer)
write_pointer += 6;
write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8);
(read_pointer % GEN8_CSB_ENTRIES) * 8);
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8 + 4);
(read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
......@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
spin_unlock(&ring->execlist_lock);
WARN(submit_contexts > 2, "More than two context complete events?\n");
ring->next_context_status_buffer = write_pointer % 6;
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
_MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
_MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
((u32)ring->next_context_status_buffer &
GEN8_CSB_PTR_MASK) << 8));
}
static int execlists_context_queue(struct drm_i915_gem_request *request)
......@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 next_context_status_buffer_hw;
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
......@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
ring->next_context_status_buffer = 0;
/*
* Instead of resetting the Context Status Buffer (CSB) read pointer to
* zero, we need to read the write pointer from hardware and use its
* value because "this register is power context save restored".
* Effectively, these states have been observed:
*
* | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
* BDW | CSB regs not reset | CSB regs reset |
* CHT | CSB regs not reset | CSB regs not reset |
*/
next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
& GEN8_CSB_PTR_MASK);
/*
* When the CSB registers are reset (also after power-up / gpu reset),
* CSB write pointer is set to all 1's, which is not valid, use '5' in
* this special case, so the first element read is CSB[0].
*/
if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
ring->next_context_status_buffer = next_context_status_buffer_hw;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
......
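The intel_lrc hunks replace the magic 6 and 0x07 with GEN8_CSB_ENTRIES and GEN8_CSB_PTR_MASK, and seed the read pointer from hardware after resume (all-ones means the CSB registers were reset, so GEN8_CSB_ENTRIES - 1 is used to make the first consumed entry CSB[0]). A worked example of the wraparound arithmetic in intel_lrc_irq_handler(), assuming the stored pointer is 4 and the hardware write pointer reads 1:

read_pointer  = 4;                  /* ring->next_context_status_buffer */
write_pointer = 1;                  /* from RING_CONTEXT_STATUS_PTR */
if (read_pointer > write_pointer)   /* the buffer wrapped */
        write_pointer += GEN8_CSB_ENTRIES;        /* 1 + 6 = 7 */

while (read_pointer < write_pointer) {
        read_pointer++;
        /* visits 5, 6, 7 -> consumes CSB[5], CSB[0], CSB[1] */
        entry = read_pointer % GEN8_CSB_ENTRIES;
}
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;  /* 1 */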
......@@ -25,6 +25,8 @@
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
......
......@@ -246,6 +246,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
if (power_well->data == SKL_DISP_PW_1) {
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
}
......
......@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->hdisplay,
adjusted_mode->vdisplay);
if (qcrtc->index == 0)
if (bo->is_primary == false)
recreate_primary = true;
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
......
......@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
}
break;
case DRM_MODE_DPMS_STANDBY:
......@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
......
......@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
{
struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector;
struct drm_connector *connector;
......@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
return connector;
}
static void radeon_dp_register_mst_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
drm_modeset_unlock_all(dev);
drm_connector_register(connector);
return connector;
}
static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
......@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
.register_connector = radeon_dp_register_mst_connector,
.destroy_connector = radeon_dp_destroy_mst_connector,
.hotplug = radeon_dp_mst_hotplug,
};
......
......@@ -48,40 +48,10 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
/**
* radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
*
* @info: fbdev info
*
* This function hides the cursor on all CRTCs used by fbdev.
*/
static int radeon_fb_helper_set_par(struct fb_info *info)
{
int ret;
ret = drm_fb_helper_set_par(info);
/* XXX: with universal plane support fbdev will automatically disable
* all non-primary planes (including the cursor)
*/
if (ret == 0) {
struct drm_fb_helper *fb_helper = info->par;
int i;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
}
}
return ret;
}
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = radeon_fb_helper_set_par,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
......
......@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
(void) vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node_generic(&man->mm, info->node,
info->page_size, 0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
}
spin_unlock_bh(&man->lock);
info->done = !ret;
......
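The vmwgfx fix is a reclaim-and-retry: when command-buffer space cannot be allocated, completed command buffers are processed once (vmw_cmdbuf_man_process()) to free up space, and the allocation is retried before the failure is reported. The generic shape, with illustrative names rather than the vmwgfx API:

ret = pool_alloc(pool, &node, size);
if (ret) {
        /* reclaim space held by finished work, then retry once */
        pool_reap_completed(pool);
        ret = pool_alloc(pool, &node, size);
}
done = !ret;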
......@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
#endif
......@@ -568,6 +568,10 @@
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
#define DP_MST_PHYSICAL_PORT_0 0
#define DP_MST_LOGICAL_PORT_0 8
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
......
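With the new defines, the bare "port number >= 8" checks in the MST code become self-describing, as in the drm_dp_add_port() hunk earlier in this diff:

if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
        /* logical port: safe to fetch and cache the EDID now */
        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
        drm_mode_connector_set_tile_property(port->connector);
}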
......@@ -374,6 +374,7 @@ struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
void (*register_connector)(struct drm_connector *connector);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
......