Commit 47c0fd72 authored by Dave Airlie

Merge tag 'topic/drm-misc-2015-12-04' of git://anongit.freedesktop.org/drm-intel into drm-next

New -misc pull. Big thing is Thierry's atomic helpers for system suspend
resume, which I'd like to use in i915 too. Hence the pull.

* tag 'topic/drm-misc-2015-12-04' of git://anongit.freedesktop.org/drm-intel:
  drm: keep connector status change logging human readable
  drm/atomic-helper: Reject attempts at re-stealing encoders
  drm/atomic-helper: Implement subsystem-level suspend/resume
  drm: Implement drm_modeset_lock_all_ctx()
  drm/gma500: Add driver private mutex for the fault handler
  drm/gma500: Drop dev->struct_mutex from mmap offset function
  drm/gma500: Drop dev->struct_mutex from fbdev init/teardown code
  drm/gma500: Drop dev->struct_mutex from modeset code
  drm/gma500: Use correct unref in the gem bo create function
  drm/edid: Make the detailed timing CEA/HDMI mode fixup accept up to 5kHz clock difference
  drm/atomic_helper: Add drm_atomic_helper_disable_planes_on_crtc()
  drm: Serialise multiple event readers
  drm: Drop dev->event_lock spinlock around faulting copy_to_user()
parents 80d69009 4e15f2a1
......@@ -1188,12 +1188,7 @@ void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
retry:
drm_modeset_backoff(state->acquire_ctx);
ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
state->acquire_ctx);
if (ret)
goto retry;
ret = drm_modeset_lock_all_crtcs(state->dev,
state->acquire_ctx);
ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
if (ret)
goto retry;
}
......
......@@ -80,6 +80,27 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
static bool
check_pending_encoder_assignment(struct drm_atomic_state *state,
struct drm_encoder *new_encoder,
struct drm_connector *new_connector)
{
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->best_encoder != new_encoder)
continue;
/* encoder already assigned and we're trying to re-steal it! */
if (connector->state->best_encoder != conn_state->best_encoder)
return false;
}
return true;
}
static struct drm_crtc *
get_current_crtc_for_encoder(struct drm_device *dev,
struct drm_encoder *encoder)
......@@ -229,6 +250,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
return 0;
}
if (!check_pending_encoder_assignment(state, new_encoder, connector)) {
DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
connector->base.id,
connector->name);
return -EINVAL;
}
encoder_crtc = get_current_crtc_for_encoder(state->dev,
new_encoder);
......@@ -1341,6 +1369,49 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
/**
* drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
* @crtc: CRTC
* @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
*
* Disables all planes associated with the given CRTC. This can be
* used for instance in the CRTC helper disable callback to disable
* all planes before shutting down the display pipeline.
*
* If the @atomic parameter is set, the function calls the CRTC's
* atomic_begin hook before and its atomic_flush hook after disabling the
* planes.
*
* It is a bug to call this function without having implemented the
* ->atomic_disable() plane hook.
*/
void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
bool atomic)
{
const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
struct drm_plane *plane;
if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
crtc_funcs->atomic_begin(crtc, NULL);
drm_for_each_plane(plane, crtc->dev) {
const struct drm_plane_helper_funcs *plane_funcs =
plane->helper_private;
if (plane->state->crtc != crtc || !plane_funcs)
continue;
WARN_ON(!plane_funcs->atomic_disable);
if (plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, NULL);
}
if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
crtc_funcs->atomic_flush(crtc, NULL);
}
EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
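
The kerneldoc above suggests calling this from a CRTC helper's disable callback. A minimal sketch of that pattern, assuming a hypothetical foo_ driver whose plane helpers implement ->atomic_disable() as required; only the drm_atomic_helper_disable_planes_on_crtc() call is from this series:

/* Hypothetical CRTC helper: shut the planes down before the pipe itself. */
static void foo_crtc_disable(struct drm_crtc *crtc)
{
	/* false: don't wrap the plane disables in atomic_begin/atomic_flush */
	drm_atomic_helper_disable_planes_on_crtc(crtc, false);

	/* ... driver-specific code to turn off the display pipe ... */
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.disable = foo_crtc_disable,
};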
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
......@@ -1817,6 +1888,161 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
return 0;
}
/**
* drm_atomic_helper_disable_all - disable all currently active outputs
* @dev: DRM device
* @ctx: lock acquisition context
*
* Loops through all connectors, finding those that aren't turned off and then
* turns them off by setting their DPMS mode to OFF and deactivating the CRTC
* that they are connected to.
*
* This is used for example in suspend/resume to disable all currently active
* outputs when suspending.
*
* Note that if callers haven't already acquired all modeset locks this might
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector *conn;
int err;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
drm_for_each_connector(conn, dev) {
struct drm_crtc *crtc = conn->state->crtc;
struct drm_crtc_state *crtc_state;
if (!crtc || conn->dpms != DRM_MODE_DPMS_ON)
continue;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto free;
}
crtc_state->active = false;
}
err = drm_atomic_commit(state);
free:
if (err < 0)
drm_atomic_state_free(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
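
A hedged sketch of how a caller might combine this with an explicit acquire context and the -EDEADLK backoff described above, for example in a driver's shutdown path; foo_shutdown() is a hypothetical name, the DRM calls are real:

/* Illustrative only: disable all outputs with proper deadlock backoff. */
static void foo_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == 0)
		ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

This is essentially the same locking dance that drm_atomic_helper_suspend() below performs internally.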
/**
* drm_atomic_helper_suspend - subsystem-level suspend helper
* @dev: DRM device
*
* Duplicates the current atomic state, disables all active outputs and then
* returns a pointer to the original atomic state to the caller. Drivers can
* pass this pointer to the drm_atomic_helper_resume() helper upon resume to
* restore the output configuration that was active at the time the system
* entered suspend.
*
* Note that it is potentially unsafe to use this. The atomic state object
* returned by this function is assumed to be persistent. Drivers must ensure
* that this holds true. Before calling this function, drivers must make sure
* to suspend fbdev emulation so that nothing can be using the device.
*
* Returns:
* A pointer to a copy of the state before suspend on success or an ERR_PTR()-
* encoded error code on failure. Drivers should store the returned atomic
* state object and pass it to the drm_atomic_helper_resume() helper upon
* resume.
*
* See also:
* drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
* drm_atomic_helper_resume()
*/
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
int err;
drm_modeset_acquire_init(&ctx, 0);
retry:
err = drm_modeset_lock_all_ctx(dev, &ctx);
if (err < 0) {
state = ERR_PTR(err);
goto unlock;
}
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (IS_ERR(state))
goto unlock;
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
drm_atomic_state_free(state);
state = ERR_PTR(err);
goto unlock;
}
unlock:
if (PTR_ERR(state) == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
/**
* drm_atomic_helper_resume - subsystem-level resume helper
* @dev: DRM device
* @state: atomic state to resume to
*
* Calls drm_mode_config_reset() to synchronize hardware and software states,
* grabs all modeset locks and commits the atomic state object. This can be
* used in conjunction with the drm_atomic_helper_suspend() helper to
* implement suspend/resume for drivers that support atomic mode-setting.
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend()
*/
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_mode_config *config = &dev->mode_config;
int err;
drm_mode_config_reset(dev);
drm_modeset_lock_all(dev);
state->acquire_ctx = config->acquire_ctx;
err = drm_atomic_commit(state);
drm_modeset_unlock_all(dev);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
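
A hedged sketch of how a driver might hook the two helpers above into its dev_pm_ops; the foo_* names, the saved_state field and to_foo_device() are assumptions for illustration, only the drm_atomic_helper_suspend()/drm_atomic_helper_resume() calls are from this series:

static int foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct foo_device *foo = to_foo_device(drm);	/* hypothetical */
	struct drm_atomic_state *state;

	/*
	 * A real driver would suspend fbdev emulation first, as the
	 * drm_atomic_helper_suspend() kerneldoc above requires.
	 */
	state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(state))
		return PTR_ERR(state);

	foo->saved_state = state;	/* must stay valid until resume */
	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct foo_device *foo = to_foo_device(drm);	/* hypothetical */

	return drm_atomic_helper_resume(drm, foo->saved_state);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);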
/**
* drm_atomic_helper_crtc_set_property - helper for crtc properties
* @crtc: DRM crtc
......@@ -2429,7 +2655,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
* @ctx: lock acquisition context
*
* Makes a copy of the current atomic state by looping over all objects and
* duplicating their respective states.
* duplicating their respective states. This is used for example by suspend/
* resume support code to save the state prior to suspend such that it can
* be restored upon resume.
*
* Note that this treats atomic state as persistent between save and restore.
* Drivers must make sure that this is possible and won't result in confusion
......@@ -2441,6 +2669,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
* Returns:
* A pointer to the copy of the atomic state object on success or an
* ERR_PTR()-encoded error code on failure.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
......
......@@ -855,6 +855,12 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
* due to slight differences in allocating shared resources when the
* configuration is restored in a different order than when userspace set it up)
* need to use their own restore logic.
*
* This function is deprecated. New drivers should implement atomic mode-
* setting and use the atomic suspend/resume helpers.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
void drm_helper_resume_force_mode(struct drm_device *dev)
{
......
......@@ -2545,6 +2545,33 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
return clock;
}
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
u8 mode;
if (!to_match->clock)
return 0;
for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
if (drm_mode_equal_no_clocks(to_match, cea_mode))
return mode + 1;
}
return 0;
}
/**
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
......@@ -2609,6 +2636,33 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
return cea_mode_alternate_clock(hdmi_mode);
}
static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
u8 mode;
if (!to_match->clock)
return 0;
for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
unsigned int clock1, clock2;
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
return mode + 1;
}
return 0;
}
/*
* drm_match_hdmi_mode - look for a HDMI mode matching given mode
* @to_match: display mode
......@@ -3119,14 +3173,18 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
u8 mode_idx;
const char *type;
mode_idx = drm_match_cea_mode(mode) - 1;
/*
* allow 5kHz clock difference either way to account for
* the 10kHz clock resolution limit of detailed timings.
*/
mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1;
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
type = "CEA";
cea_mode = &edid_cea_modes[mode_idx];
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
mode_idx = drm_match_hdmi_mode(mode) - 1;
mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
type = "HDMI";
cea_mode = &edid_4k_modes[mode_idx];
......
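A worked illustration of the tolerance above (numbers chosen for illustration, not taken from this patch): the 59.94 Hz variant of a 148 500 kHz CEA mode has a nominal pixel clock of roughly 148 352 kHz (148 500 × 1000/1001), but an EDID detailed timing encodes the clock in 10 kHz units and so can only report 148 350 kHz. A strict clock comparison can miss that match, while a difference of a couple of kHz falls well within the new 5 kHz tolerance.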
......@@ -172,6 +172,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
mutex_init(&priv->event_read_lock);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, priv);
......@@ -483,14 +485,28 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret = 0;
ssize_t ret;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
spin_lock_irq(&dev->event_lock);
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
for (;;) {
if (list_empty(&file_priv->event_list)) {
struct drm_pending_event *e = NULL;
spin_lock_irq(&dev->event_lock);
if (!list_empty(&file_priv->event_list)) {
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
file_priv->event_space += e->event->length;
list_del(&e->link);
}
spin_unlock_irq(&dev->event_lock);
if (e == NULL) {
if (ret)
break;
......@@ -499,36 +515,36 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
break;
}
spin_unlock_irq(&dev->event_lock);
mutex_unlock(&file_priv->event_read_lock);
ret = wait_event_interruptible(file_priv->event_wait,
!list_empty(&file_priv->event_list));
spin_lock_irq(&dev->event_lock);
if (ret < 0)
break;
ret = 0;
if (ret >= 0)
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
} else {
struct drm_pending_event *e;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + ret > count)
unsigned length = e->event->length;
if (length > count - ret) {
put_back_event:
spin_lock_irq(&dev->event_lock);
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
break;
}
if (__copy_to_user_inatomic(buffer + ret,
e->event, e->event->length)) {
if (copy_to_user(buffer + ret, e->event, length)) {
if (ret == 0)
ret = -EFAULT;
break;
goto put_back_event;
}
file_priv->event_space += e->event->length;
ret += e->event->length;
list_del(&e->link);
ret += length;
e->destroy(e);
}
}
spin_unlock_irq(&dev->event_lock);
mutex_unlock(&file_priv->event_read_lock);
return ret;
}
......
......@@ -917,13 +917,30 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock)
return false;
return drm_mode_equal_no_clocks(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
/**
* drm_mode_equal_no_clocks - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks.
*
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
(mode2->flags & DRM_MODE_FLAG_3D_MASK))
return false;
return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
EXPORT_SYMBOL(drm_mode_equal_no_clocks);
/**
* drm_mode_equal_no_clocks_no_stereo - test modes for equality
......
......@@ -57,11 +57,18 @@
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
* @dev: DRM device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
* scheme isn't (yet) implemented. Locks must be dropped by calling the
* drm_modeset_unlock_all() function.
*
* This function is deprecated. It allocates a lock acquisition context and
* stores it in the DRM device's ->mode_config. This facilitates conversion of
* existing code because it removes the need to manually deal with the
* acquisition context, but it is also brittle because the context is global
* and care must be taken not to nest calls. New code should use the
* drm_modeset_lock_all_ctx() function and pass in the context explicitly.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
......@@ -78,39 +85,43 @@ void drm_modeset_lock_all(struct drm_device *dev)
drm_modeset_acquire_init(ctx, 0);
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_ctx(dev, ctx);
if (ret < 0) {
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
drm_modeset_acquire_fini(ctx);
kfree(ctx);
return;
}
WARN_ON(config->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
/*
* We hold the locks now, so it is safe to stash the acquisition
* context for drm_modeset_unlock_all().
*/
config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev);
return;
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
* @dev: DRM device
*
* This function drop all modeset locks taken by drm_modeset_lock_all.
* This function drops all modeset locks taken by a previous call to the
* drm_modeset_lock_all() function.
*
* This function is deprecated. It uses the lock acquisition context stored
* in the DRM device's ->mode_config. This facilitates conversion of existing
* code because it removes the need to manually deal with the acquisition
* context, but it is also brittle because the context is global and care must
* be taken not to nest calls. New code should pass the acquisition context
* directly to the drm_modeset_drop_locks() function.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
......@@ -431,14 +442,34 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
}
EXPORT_SYMBOL(drm_modeset_unlock);
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
* related locks. */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
/**
* drm_modeset_lock_all_ctx - take all modeset locks
* @dev: DRM device
* @ctx: lock acquisition context
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
*
* Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
* since that lock isn't required for modeset state changes. Callers which
* need to grab that lock too need to do so outside of the acquire context
* @ctx.
*
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret = 0;
int ret;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
if (ret)
return ret;
drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
......@@ -454,4 +485,4 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
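
A hedged sketch of the explicit-context usage described in the kerneldoc above, including its note that dev->mode_config.mutex, when needed, is taken outside of the acquire context; foo_with_all_locks() is a hypothetical caller, the locking calls are real DRM API:

static int foo_with_all_locks(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	mutex_lock(&dev->mode_config.mutex);	/* not tracked by @ctx */

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if (ret == 0) {
		/* ... inspect or modify modeset state under the locks ... */
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	mutex_unlock(&dev->mode_config.mutex);
	return ret;
}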
......@@ -168,10 +168,11 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
old_status, connector->status);
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
/*
* The hotplug event code might call into the fb
......
......@@ -406,8 +406,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
memset(dev_priv->vram_addr + backing->offset, 0, size);
mutex_lock(&dev->struct_mutex);
info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
......@@ -463,17 +461,15 @@ static int psbfb_create(struct psb_fbdev *fbdev,
dev_dbg(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
return 0;
out_unref:
if (backing->stolen)
psb_gtt_free_range(dev, backing);
else
drm_gem_object_unreference(&backing->gem);
drm_gem_object_unreference_unlocked(&backing->gem);
drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
out_err1:
mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
return ret;
}
......@@ -569,7 +565,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
drm_gem_object_unreference(&psbfb->gtt->gem);
drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
return 0;
}
......@@ -784,12 +780,8 @@ void psb_modeset_cleanup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
if (dev_priv->modeset) {
mutex_lock(&dev->struct_mutex);
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
mutex_unlock(&dev->struct_mutex);
}
}
......@@ -62,15 +62,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
int ret = 0;
struct drm_gem_object *obj;
mutex_lock(&dev->struct_mutex);
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(dev, file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
/* What validation is needed here ? */
if (obj == NULL)
return -ENOENT;
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
......@@ -78,9 +73,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
goto out;
*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
......@@ -130,7 +123,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
drm_gem_object_unreference(&r->gem);
drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle;
return 0;
}
......@@ -189,7 +182,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Make sure we don't parallel update on a fault, nor move or remove
something from beneath our feet */
mutex_lock(&dev->struct_mutex);
mutex_lock(&dev_priv->mmap_mutex);
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */
......@@ -215,7 +208,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
fail:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->mmap_mutex);
switch (ret) {
case 0:
case -ERESTARTSYS:
......
......@@ -349,8 +349,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* If we didn't get a handle then turn the cursor off */
if (!handle) {
temp = CURSOR_MODE_DISABLE;
mutex_lock(&dev->struct_mutex);
if (gma_power_begin(dev, false)) {
REG_WRITE(control, temp);
REG_WRITE(base, 0);
......@@ -362,11 +360,9 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gt = container_of(gma_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(gma_crtc->cursor_obj);
drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
......@@ -376,7 +372,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
ret = -ENOENT;
......@@ -441,17 +436,15 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
if (gma_crtc->cursor_obj) {
gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(gma_crtc->cursor_obj);
drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
}
gma_crtc->cursor_obj = obj;
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
unref_cursor:
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
......
......@@ -425,6 +425,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (!resume) {
mutex_init(&dev_priv->gtt_mutex);
mutex_init(&dev_priv->mmap_mutex);
psb_gtt_alloc(dev);
}
......
......@@ -465,6 +465,8 @@ struct drm_psb_private {
struct mutex gtt_mutex;
struct resource *gtt_mem; /* Our PCI resource */
struct mutex mmap_mutex;
struct psb_mmu_driver *mmu;
struct psb_mmu_pd *pf_pd;
......
......@@ -344,6 +344,8 @@ struct drm_file {
struct list_head event_list;
int event_space;
struct mutex event_read_lock;
struct drm_prime_file_private prime;
};
......
......@@ -62,6 +62,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
bool atomic);
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state);
......@@ -81,6 +83,12 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set);
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state);
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val);
......
......@@ -222,6 +222,8 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
bool drm_mode_equal(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
......
......@@ -138,7 +138,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
#endif /* DRM_MODESET_LOCK_H_ */