Commit 2b8f01f1 authored by Dave Airlie

Merge tag 'topic/drm-misc-2016-04-29' of git://anongit.freedesktop.org/drm-intel into drm-next

- prep work for struct_mutex-less gem_free_object
- more invasive/tricky mst fixes from Lyude for broken hw. I discussed
  this with Ville/Jani and we all agreed more soaking in -next would be
  real good this late in the -rc cycle. They're cc: stable too to make
  sure they're not getting lost. Feel free to cherry-pick those four if
  you disagree.
- few small things all over

* tag 'topic/drm-misc-2016-04-29' of git://anongit.freedesktop.org/drm-intel:
  drm/atomic: Add missing drm_crtc_internal.h include
  drm/dp: Allow signals to interrupt drm_aux-dev reads/writes
  drm: Quiet down drm_mode_getresources
  drm: Quiet down drm_mode_getconnector
  drm: Protect dev->filelist with its own mutex
  drm: Make drm_vm_open/close_locked private to drm_vm.c
  drm: Hide master MAP cleanup in drm_bufs.c
  drm: Forbid legacy MAP functions for DRIVER_MODESET
  drm: Push struct_mutex into ->master_destroy
  drm: Move drm_getmap into drm_bufs.c and give it a legacy prefix
  drm: Put legacy lastclose work into drm_legacy_dev_reinit
  drm: Give drm_agp_clear drm_legacy_ prefix
  drm/sysfs: Annote lockless show functions with READ_ONCE
  MAINTAINERS: Update the files list for the GMA500 DRM driver
  drm: rcar-du: Fix compilation warning
  drm/i915: Get rid of intel_dp_dpcd_read_wake()
  drm/dp_helper: Perform throw-away read before actual read in drm_dp_dpcd_read()
  drm/dp_helper: Retry aux transactions on all errors
  drm/dp_helper: Always wait before retrying native aux transactions
parents fffb6751 be35f94f
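
The common thread in the driver-side hunks below is that every walker of dev->filelist now takes the new dev->filelist_mutex instead of dev->struct_mutex. A minimal sketch of that pattern, assuming a hypothetical driver helper (not code from this series; the function name and the counted statistic are made up):

/*
 * Sketch only, not part of this series: how a driver-side walker of
 * dev->filelist is expected to look after these patches. The point is
 * that the list walk is protected by the new dev->filelist_mutex
 * rather than by dev->struct_mutex.
 */
#include <drm/drmP.h>

static int count_open_clients(struct drm_device *dev)
{
	struct drm_file *file;
	int clients = 0;

	mutex_lock(&dev->filelist_mutex);	/* was: dev->struct_mutex */
	list_for_each_entry(file, &dev->filelist, lhead)
		clients++;
	mutex_unlock(&dev->filelist_mutex);

	return clients;
}

The amdgpu, drm_info.c and i915 debugfs hunks below all follow this shape; per-file state such as file->object_idr stays under its existing file->table_lock.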
@@ -3851,8 +3851,7 @@ M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 L: dri-devel@lists.freedesktop.org
 T: git git://github.com/patjak/drm-gma500
 S: Maintained
-F: drivers/gpu/drm/gma500
-F: include/drm/gma500*
+F: drivers/gpu/drm/gma500/

 DRM DRIVERS FOR NVIDIA TEGRA
 M: Thierry Reding <thierry.reding@gmail.com>
......
@@ -93,7 +93,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 	struct drm_device *ddev = adev->ddev;
 	struct drm_file *file;

-	mutex_lock(&ddev->struct_mutex);
+	mutex_lock(&ddev->filelist_mutex);

 	list_for_each_entry(file, &ddev->filelist, lhead) {
 		struct drm_gem_object *gobj;
@@ -103,13 +103,13 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 		spin_lock(&file->table_lock);
 		idr_for_each_entry(&file->object_idr, gobj, handle) {
 			WARN_ONCE(1, "And also active allocations!\n");
-			drm_gem_object_unreference(gobj);
+			drm_gem_object_unreference_unlocked(gobj);
 		}
 		idr_destroy(&file->object_idr);
 		spin_unlock(&file->table_lock);
 	}

-	mutex_unlock(&ddev->struct_mutex);
+	mutex_unlock(&ddev->filelist_mutex);
 }

 /*
@@ -769,7 +769,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 	struct drm_file *file;
 	int r;

-	r = mutex_lock_interruptible(&dev->struct_mutex);
+	r = mutex_lock_interruptible(&dev->filelist_mutex);
 	if (r)
 		return r;
@@ -793,7 +793,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 		spin_unlock(&file->table_lock);
 	}

-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 	return 0;
 }
......
@@ -423,7 +423,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 }

 /**
- * drm_agp_clear - Clear AGP resource list
+ * drm_legacy_agp_clear - Clear AGP resource list
  * @dev: DRM device
  *
  * Iterate over all AGP resources and remove them. But keep the AGP head
@@ -434,7 +434,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
  * resources from getting destroyed. Drivers are responsible of cleaning them up
  * during device shutdown.
  */
-void drm_agp_clear(struct drm_device *dev)
+void drm_legacy_agp_clear(struct drm_device *dev)
 {
 	struct drm_agp_mem *entry, *tempe;
......
@@ -31,6 +31,8 @@
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>

+#include "drm_crtc_internal.h"
+
 /**
  * drm_atomic_state_default_release -
  * release memory initialized by drm_atomic_state_init
......
@@ -396,6 +396,10 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EPERM;

+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
 			      map->flags, &maplist);
@@ -416,6 +420,62 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

+/*
+ * Get a mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *r_list = NULL;
+	struct list_head *list;
+	int idx;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	idx = map->offset;
+	if (idx < 0)
+		return -EINVAL;
+
+	i = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each(list, &dev->maplist) {
+		if (i == idx) {
+			r_list = list_entry(list, struct drm_map_list, head);
+			break;
+		}
+		i++;
+	}
+	if (!r_list || !r_list->map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	map->offset = r_list->map->offset;
+	map->size = r_list->map->size;
+	map->type = r_list->map->type;
+	map->flags = r_list->map->flags;
+	map->handle = (void *)(unsigned long) r_list->user_token;
+	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 /**
  * Remove a map private from list and deallocate resources if the mapping
  * isn't in use.
@@ -482,18 +542,35 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 }
 EXPORT_SYMBOL(drm_legacy_rmmap_locked);

-int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
-	int ret;
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;

 	mutex_lock(&dev->struct_mutex);
-	ret = drm_legacy_rmmap_locked(dev, map);
+	drm_legacy_rmmap_locked(dev, map);
 	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
 }
 EXPORT_SYMBOL(drm_legacy_rmmap);

+void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_map_list *r_list, *list_temp;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+		if (r_list->master == master) {
+			drm_legacy_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
  * exit uncleanly. Therefore, having userland manually remove mappings seems
@@ -517,6 +594,10 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_map_list *r_list;
 	int ret;

+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map &&
......
@@ -1936,8 +1936,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	copied = 0;
 	crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
 	drm_for_each_crtc(crtc, dev) {
-		DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
-			      crtc->base.id, crtc->name);
 		if (put_user(crtc->base.id, crtc_id + copied)) {
 			ret = -EFAULT;
 			goto out;
@@ -1952,8 +1950,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	copied = 0;
 	encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
 	drm_for_each_encoder(encoder, dev) {
-		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-			      encoder->name);
 		if (put_user(encoder->base.id, encoder_id +
 			     copied)) {
 			ret = -EFAULT;
@@ -1969,9 +1965,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	copied = 0;
 	connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
 	drm_for_each_connector(connector, dev) {
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-			      connector->base.id,
-			      connector->name);
 		if (put_user(connector->base.id,
 			     connector_id + copied)) {
 			ret = -EFAULT;
@@ -1982,9 +1975,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	}
 	card_res->count_connectors = connector_count;

-	DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
-		      card_res->count_connectors, card_res->count_encoders);
-
 out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
@@ -2143,8 +2133,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,

 	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));

-	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
-
 	mutex_lock(&dev->mode_config.mutex);

 	connector = drm_connector_find(dev, out_resp->connector_id);
......
@@ -159,6 +159,12 @@ static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
 		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
 		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));

+		if (signal_pending(current)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -ERESTARTSYS;
+			goto out;
+		}
+
 		res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
 		if (res <= 0) {
 			res = num_bytes_processed ? num_bytes_processed : res;
@@ -202,6 +208,12 @@ static ssize_t auxdev_write(struct file *file, const char __user *buf,
 		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
 		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));

+		if (signal_pending(current)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -ERESTARTSYS;
+			goto out;
+		}
+
 		if (__copy_from_user(localbuf,
 				     buf + num_bytes_processed, todo)) {
 			res = num_bytes_processed ?
......
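
With the checks above, a blocked read or write on the aux character device can bail out when a signal arrives: the caller gets whatever bytes were already transferred, or -ERESTARTSYS if nothing was. A userspace-side sketch of what that means for consumers of the node (the path /dev/drm_dp_aux0 and the 16-byte read size are assumptions for illustration only):

/*
 * Sketch: a large DPCD read may come back short or fail with EINTR once
 * signals can interrupt the transfer loop, so robust callers retry and
 * accumulate instead of expecting the full length in a single call.
 * For this device node the file offset maps to the DPCD address.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t dpcd[16];
	size_t done = 0;
	int fd = open("/dev/drm_dp_aux0", O_RDONLY);

	if (fd < 0)
		return 1;

	while (done < sizeof(dpcd)) {
		ssize_t n = pread(fd, dpcd + done, sizeof(dpcd) - done, done);

		if (n < 0 && errno == EINTR)
			continue;	/* interrupted by a signal, retry */
		if (n <= 0)
			break;		/* real error or nothing more to read */
		done += n;
	}

	if (done > 0)
		printf("read %zu DPCD bytes, DPCD_REV=0x%02x\n", done, dpcd[0]);

	close(fd);
	return done > 0 ? 0 : 1;
}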
@@ -178,8 +178,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 			      unsigned int offset, void *buffer, size_t size)
 {
 	struct drm_dp_aux_msg msg;
-	unsigned int retry;
-	int err = 0;
+	unsigned int retry, native_reply;
+	int err = 0, ret = 0;

 	memset(&msg, 0, sizeof(msg));
 	msg.address = offset;
@@ -196,38 +196,39 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 	 * sufficient, bump to 32 which makes Dell 4k monitors happier.
 	 */
 	for (retry = 0; retry < 32; retry++) {
+		if (ret != 0 && ret != -ETIMEDOUT) {
+			usleep_range(AUX_RETRY_INTERVAL,
+				     AUX_RETRY_INTERVAL + 100);
+		}

-		err = aux->transfer(aux, &msg);
-		if (err < 0) {
-			if (err == -EBUSY)
-				continue;
-
-			goto unlock;
-		}
-
-		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
-		case DP_AUX_NATIVE_REPLY_ACK:
-			if (err < size)
-				err = -EPROTO;
-			goto unlock;
-
-		case DP_AUX_NATIVE_REPLY_NACK:
-			err = -EIO;
-			goto unlock;
-
-		case DP_AUX_NATIVE_REPLY_DEFER:
-			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
-			break;
+		ret = aux->transfer(aux, &msg);
+
+		if (ret > 0) {
+			native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
+			if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
+				if (ret == size)
+					goto unlock;
+
+				ret = -EPROTO;
+			} else
+				ret = -EIO;
 		}
+
+		/*
+		 * We want the error we return to be the error we received on
+		 * the first transaction, since we may get a different error the
+		 * next time we retry
+		 */
+		if (!err)
+			err = ret;
 	}

 	DRM_DEBUG_KMS("too many retries, giving up\n");
-	err = -EIO;
+	ret = err;

 unlock:
 	mutex_unlock(&aux->hw_mutex);
-	return err;
+	return ret;
 }

 /**
@@ -247,6 +248,25 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 			 void *buffer, size_t size)
 {
+	int ret;
+
+	/*
+	 * HP ZR24w corrupts the first DPCD access after entering power save
+	 * mode. Eg. on a read, the entire buffer will be filled with the same
+	 * byte. Do a throw away read to avoid corrupting anything we care
+	 * about. Afterwards things will work correctly until the monitor
+	 * gets woken up and subsequently re-enters power save mode.
+	 *
+	 * The user pressing any button on the monitor is enough to wake it
+	 * up, so there is no particularly good place to do the workaround.
+	 * We just have to do it before any DPCD access and hope that the
+	 * monitor doesn't power down exactly after the throw away read.
+	 */
+	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
+				 1);
+	if (ret != 1)
+		return ret;
+
 	return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
 				  size);
 }
......
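
After the two hunks above, drm_dp_dpcd_read() itself performs the wake-up throw-away read and retries failed transactions, so kernel callers only need to compare the return value against the requested length; the intel_dp hunk further down deletes its private wrapper for exactly this reason. A minimal caller-side sketch, assuming a hypothetical helper (not code from this series):

/*
 * Caller-side sketch: sink_supports_mst() is a made-up helper. With the
 * retry and throw-away-read logic now inside drm_dp_dpcd_read(), the
 * caller just checks that the returned length matches what it asked for.
 */
#include <drm/drm_dp_helper.h>

static bool sink_supports_mst(struct drm_dp_aux *aux)
{
	u8 cap = 0;

	if (drm_dp_dpcd_read(aux, DP_MSTM_CAP, &cap, 1) != 1)
		return false;	/* aux transfer failed even after retries */

	return cap & DP_MST_CAP;
}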
@@ -121,19 +121,11 @@ static void drm_master_destroy(struct kref *kref)
 {
 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
 	struct drm_device *dev = master->minor->dev;
-	struct drm_map_list *r_list, *list_temp;

-	mutex_lock(&dev->struct_mutex);
 	if (dev->driver->master_destroy)
 		dev->driver->master_destroy(dev, master);

-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
-		if (r_list->master == master) {
-			drm_legacy_rmmap_locked(dev, r_list->map);
-			r_list = NULL;
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
+	drm_legacy_master_rmmaps(dev, master);

 	idr_destroy(&master->magic_map);
 	kfree(master->unique);
@@ -598,6 +590,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 	spin_lock_init(&dev->buf_lock);
 	spin_lock_init(&dev->event_lock);
 	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->filelist_mutex);
 	mutex_init(&dev->ctxlist_mutex);
 	mutex_init(&dev->master_mutex);
......
@@ -297,9 +297,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	}
 	mutex_unlock(&dev->master_mutex);

-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_add(&priv->lhead, &dev->filelist);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);

 #ifdef __alpha__
 	/*
@@ -381,14 +381,26 @@ static void drm_events_release(struct drm_file *file_priv)
  */
 static void drm_legacy_dev_reinit(struct drm_device *dev)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_legacy_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+	drm_legacy_vma_flush(dev);
+	drm_legacy_dma_takedown(dev);
+
+	mutex_unlock(&dev->struct_mutex);

 	dev->sigdata.lock = NULL;

 	dev->context_flag = 0;
 	dev->last_context = 0;
 	dev->if_version = 0;
+
+	DRM_DEBUG("lastclose completed\n");
 }

 /*
@@ -400,7 +412,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
  *
  * \sa drm_device
  */
-int drm_lastclose(struct drm_device * dev)
+void drm_lastclose(struct drm_device * dev)
 {
 	DRM_DEBUG("\n");
@@ -408,23 +420,8 @@ int drm_lastclose(struct drm_device * dev)
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");

-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-	drm_legacy_vma_flush(dev);
-	drm_legacy_dma_takedown(dev);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-
-	return 0;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_legacy_dev_reinit(dev);
 }

 /**
@@ -445,14 +442,16 @@ int drm_release(struct inode *inode, struct file *filp)
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_minor *minor = file_priv->minor;
 	struct drm_device *dev = minor->dev;
-	int retcode = 0;

 	mutex_lock(&drm_global_mutex);

 	DRM_DEBUG("open_count = %d\n", dev->open_count);

-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_del(&file_priv->lhead);
+	mutex_unlock(&dev->filelist_mutex);
+
+	mutex_lock(&dev->struct_mutex);
 	if (file_priv->magic)
 		idr_remove(&file_priv->master->magic_map, file_priv->magic);
 	mutex_unlock(&dev->struct_mutex);
@@ -538,7 +537,7 @@ int drm_release(struct inode *inode, struct file *filp)
 	 */
 	if (!--dev->open_count) {
-		retcode = drm_lastclose(dev);
+		drm_lastclose(dev);
 		if (drm_device_is_unplugged(dev))
 			drm_put_dev(dev);
 	}
@@ -546,7 +545,7 @@ int drm_release(struct inode *inode, struct file *filp)

 	drm_minor_release(minor);

-	return retcode;
+	return 0;
 }
 EXPORT_SYMBOL(drm_release);
......
@@ -174,7 +174,7 @@ int drm_clients_info(struct seq_file *m, void *data)
 	/* dev->filelist is sorted youngest first, but we want to present
 	 * oldest first (i.e. kernel, servers, clients), so walk backwardss.
 	 */
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
 		struct task_struct *task;
@@ -190,7 +190,7 @@ int drm_clients_info(struct seq_file *m, void *data)
 			   priv->magic);
 		rcu_read_unlock();
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);

 	return 0;
 }
......
@@ -26,7 +26,7 @@ extern unsigned int drm_timestamp_monotonic;

 /* drm_fops.c */
 extern struct mutex drm_global_mutex;
-int drm_lastclose(struct drm_device *dev);
+void drm_lastclose(struct drm_device *dev);

 /* drm_pci.c */
 int drm_pci_set_unique(struct drm_device *dev,
@@ -37,8 +37,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,

 /* drm_vm.c */
 int drm_vma_info(struct seq_file *m, void *data);
-void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
-void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);

 /* drm_prime.c */
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
......
@@ -149,58 +149,6 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 	return 0;
 }

-/*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-static int drm_getmap(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_map *map = data;
-	struct drm_map_list *r_list = NULL;
-	struct list_head *list;
-	int idx;
-	int i;
-
-	idx = map->offset;
-	if (idx < 0)
-		return -EINVAL;
-
-	i = 0;
-	mutex_lock(&dev->struct_mutex);
-	list_for_each(list, &dev->maplist) {
-		if (i == idx) {
-			r_list = list_entry(list, struct drm_map_list, head);
-			break;
-		}
-		i++;
-	}
-	if (!r_list || !r_list->map) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	map->offset = r_list->map->offset;
-	map->size = r_list->map->size;
-	map->type = r_list->map->type;
-	map->flags = r_list->map->flags;
-	map->handle = (void *)(unsigned long) r_list->user_token;
-	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 /*
  * Get client information.
  *
@@ -558,7 +506,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
......
@@ -63,6 +63,8 @@ int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);

 #define DRM_MAP_HASH_OFFSET 0x10000000

+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
......
@@ -250,7 +250,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
 {
 	if (dev->agp) {
 		arch_phys_wc_del(dev->agp->agp_mtrr);
-		drm_agp_clear(dev);
+		drm_legacy_agp_clear(dev);
 		kfree(dev->agp);
 		dev->agp = NULL;
 	}
......
@@ -208,9 +208,12 @@ static ssize_t status_show(struct device *device,
 			   char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
+	enum drm_connector_status status;
+
+	status = READ_ONCE(connector->status);

 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			drm_get_connector_status_name(connector->status));
+			drm_get_connector_status_name(status));
 }

 static ssize_t dpms_show(struct device *device,
@@ -231,9 +234,11 @@ static ssize_t enabled_show(struct device *device,
 			    char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
+	bool enabled;
+
+	enabled = READ_ONCE(connector->encoder);

-	return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
-			"disabled");
+	return snprintf(buf, PAGE_SIZE, enabled ? "enabled\n" : "disabled\n");
 }

 static ssize_t edid_show(struct file *filp, struct kobject *kobj,
......
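
The READ_ONCE() annotations above document that these sysfs show functions deliberately read connector state without taking any lock; the macro forces a single load so the compiler cannot re-read the field and observe two different values within one function. A stand-alone sketch of the pattern, with made-up struct and field names:

/*
 * Sketch only: the names are hypothetical, the READ_ONCE() usage is the
 * point. Snapshotting the racy field once keeps the compiler from
 * reloading it behind the caller's back.
 */
#include <linux/compiler.h>

struct hotplug_state {
	int status;	/* written concurrently from another context */
};

static int snapshot_status(struct hotplug_state *s)
{
	int status = READ_ONCE(s->status);	/* single, untorn load */

	return status;
}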
@@ -395,16 +395,8 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
 	.close = drm_vm_close,
 };

-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct drm_device *dev,
-			struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct drm_device *dev,
+			       struct vm_area_struct *vma)
 {
 	struct drm_vma_entry *vma_entry;
@@ -429,8 +421,8 @@ static void drm_vm_open(struct vm_area_struct *vma)
 	mutex_unlock(&dev->struct_mutex);
 }

-void drm_vm_close_locked(struct drm_device *dev,
-			 struct vm_area_struct *vma)
+static void drm_vm_close_locked(struct drm_device *dev,
+				struct vm_area_struct *vma)
 {
 	struct drm_vma_entry *pt, *temp;
......
@@ -528,6 +528,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
 		struct task_struct *task;
@@ -548,8 +552,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
 	}
-
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);

 	return 0;
 }
@@ -2354,6 +2357,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	else if (INTEL_INFO(dev)->gen >= 6)
 		gen6_ppgtt_info(m, dev);

+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
@@ -2368,6 +2372,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 		idr_for_each(&file_priv->context_idr, per_file_ctx,
 			     (void *)(unsigned long)m);
 	}
+	mutex_unlock(&dev->filelist_mutex);

 out_put:
 	intel_runtime_pm_put(dev_priv);
@@ -2403,6 +2408,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+	mutex_lock(&dev->filelist_mutex);
 	spin_lock(&dev_priv->rps.client_lock);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2425,6 +2432,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
 	spin_unlock(&dev_priv->rps.client_lock);
+	mutex_unlock(&dev->filelist_mutex);

 	return 0;
 }
......
@@ -3073,37 +3073,6 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
 	chv_phy_powergate_lanes(encoder, false, 0x0);
 }

-/*
- * Native read with retry for link status and receiver capability reads for
- * cases where the sink may still be asleep.
- *
- * Sinks are *supposed* to come up within 1ms from an off state, but we're also
- * supposed to retry 3 times per the spec.
- */
-static ssize_t
-intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
-			void *buffer, size_t size)
-{
-	ssize_t ret;
-	int i;
-
-	/*
-	 * Sometime we just get the same incorrect byte repeated
-	 * over the entire buffer. Doing just one throw away read
-	 * initially seems to "solve" it.
-	 */
-	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
-
-	for (i = 0; i < 3; i++) {
-		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
-		if (ret == size)
-			return ret;
-		msleep(1);
-	}
-
-	return ret;
-}
-
 /*
  * Fetch AUX CH registers 0x202 - 0x207 which contain
  * link status information
@@ -3111,10 +3080,8 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-	return intel_dp_dpcd_read_wake(&intel_dp->aux,
-				       DP_LANE0_1_STATUS,
-				       link_status,
-				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
+				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }

 /* These are source-specific values. */
@@ -3749,8 +3716,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint8_t rev;

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
+	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
 		return false; /* aux transfer failed */

 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
@@ -3758,8 +3725,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
 		return false; /* DPCD not present */

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
			     &intel_dp->sink_count, 1) < 0)
 		return false;

 	/*
@@ -3782,9 +3749,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	/* Check if the panel supports PSR */
 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
 	if (is_edp(intel_dp)) {
-		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
+		drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
				 intel_dp->psr_dpcd,
				 sizeof(intel_dp->psr_dpcd));
 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
 			dev_priv->psr.sink_support = true;
 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
@@ -3795,9 +3762,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 			uint8_t frame_sync_cap;

 			dev_priv->psr.sink_support = true;
-			intel_dp_dpcd_read_wake(&intel_dp->aux,
+			drm_dp_dpcd_read(&intel_dp->aux,
					 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					 &frame_sync_cap, 1);
 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
 			/* PSR2 needs frame sync as well */
 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
@@ -3813,15 +3780,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	/* Intermediate frequency support */
 	if (is_edp(intel_dp) &&
 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+	    (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 	    (rev >= 0x03)) { /* eDp v1.4 or higher */
 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
 		int i;

-		intel_dp_dpcd_read_wake(&intel_dp->aux,
-				DP_SUPPORTED_LINK_RATES,
-				sink_rates,
-				sizeof(sink_rates));
+		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+				 sink_rates, sizeof(sink_rates));

 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
 			int val = le16_to_cpu(sink_rates[i]);
@@ -3844,9 +3809,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
 		return true; /* no per-port downstream info */

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
 		return false; /* downstream port status fetch failed */

 	return true;
@@ -3860,11 +3825,11 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
 }
@@ -3883,7 +3848,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
 		return false;

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
 		if (buf[0] & DP_MST_CAP) {
 			DRM_DEBUG_KMS("Sink is MST capable\n");
 			intel_dp->is_mst = true;
@@ -4020,7 +3985,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
-	return intel_dp_dpcd_read_wake(&intel_dp->aux,
+	return drm_dp_dpcd_read(&intel_dp->aux,
				       DP_DEVICE_SERVICE_IRQ_VECTOR,
				       sink_irq_vector, 1) == 1;
 }
@@ -4030,7 +3995,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
 	int ret;

-	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+	ret = drm_dp_dpcd_read(&intel_dp->aux,
				      DP_SINK_COUNT_ESI,
				      sink_irq_vector, 14);
 	if (ret != 14)
......
@@ -769,6 +769,7 @@ struct drm_device {
 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
 	/*@} */

+	struct mutex filelist_mutex;
 	struct list_head filelist;

 	/** \name Memory management */
......
@@ -37,7 +37,7 @@ struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
 				      uint32_t type);

 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_clear(struct drm_device *dev);
+void drm_legacy_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
@@ -93,7 +93,7 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 	return NULL;
 }

-static inline void drm_agp_clear(struct drm_device *dev)
+static inline void drm_legacy_agp_clear(struct drm_device *dev)
 {
 }
......
@@ -154,8 +154,10 @@ struct drm_map_list {
 int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_p);
-int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
 int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+			      struct drm_master *master);
 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);

 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
......