Commit f98c2135 authored by Linus Torvalds

Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Just a couple of dma-buf related fixes and some amdgpu fixes, along
  with a regression fix for radeon off but default feature, but makes my
  30" monitor happy again"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon/mst: cleanup code indentation
  drm/radeon/mst: fix regression in lane/link handling.
  drm/amdgpu: add invalidate_page callback for userptrs
  drm/amdgpu: Revert "remove the userptr rmn->lock"
  drm/amdgpu: clean up path handling for powerplay
  drm/amd/powerplay: fix memory leak of tdp_table
  dma-buf/fence: fix fence_is_later v2
  dma-buf: Update docs for SYNC ioctl
  drm: remove excess description
  dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
  drm/atmel-hlcdc: use helper to get crtc state
  drm/atomic: use helper to get crtc state
parents 11caf57f 4604202c
@@ -352,7 +352,8 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
 No special interfaces, userspace simply calls mmap on the dma-buf fd, making
 sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always*
-used when the access happens. This is discussed next paragraphs.
+used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with
+-EAGAIN or -EINTR, in which case it must be restarted.
 
 Some systems might need some sort of cache coherency management e.g. when
 CPU and GPU domains are being accessed through dma-buf at the same time. To
@@ -366,10 +367,10 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
    want (with the new data being consumed by the GPU or say scanout device)
  - munmap once you don't need the buffer any more
 
-Therefore, for correctness and optimal performance, systems with the memory
-cache shared by the GPU and CPU i.e. the "coherent" and also the
-"incoherent" are always required to use SYNC_START and SYNC_END before and
-after, respectively, when accessing the mapped address.
+For correctness and optimal performance, it is always required to use
+SYNC_START and SYNC_END before and after, respectively, when accessing the
+mapped address. Userspace cannot rely on coherent access, even when there
+are systems where it just works without calling these ioctls.
 
 2. Supporting existing mmap interfaces in importers
......
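To make the documented access pattern concrete, here is a minimal userspace sketch (not part of the patch set) that brackets CPU access to an mmap'ed dma-buf with SYNC_START/SYNC_END and restarts the ioctl on -EAGAIN/-EINTR. The fd and len values are assumed to come from whatever subsystem exported the buffer:

/* Minimal sketch of the pattern described above: bracket every CPU access
 * to an mmap'ed dma-buf with SYNC_START/SYNC_END and restart the ioctl on
 * -EAGAIN/-EINTR.  `fd` is a dma-buf file descriptor and `len` its size,
 * both obtained elsewhere (e.g. from a GEM or ION export). */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int dma_buf_sync(int fd, uint64_t flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int ret;

	/* Restart the ioctl as the documentation above requires. */
	do {
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}

int fill_dma_buf(int fd, size_t len)
{
	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (map == MAP_FAILED)
		return -1;

	if (dma_buf_sync(fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE) == 0) {
		memset(map, 0, len);		/* CPU access happens here */
		dma_buf_sync(fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);
	}

	munmap(map, len);
	return 0;
}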
@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
 	struct dma_buf *dmabuf;
 	struct dma_buf_sync sync;
 	enum dma_data_direction direction;
+	int ret;
 
 	dmabuf = file->private_data;
 
@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
 		}
 
 		if (sync.flags & DMA_BUF_SYNC_END)
-			dma_buf_end_cpu_access(dmabuf, direction);
+			ret = dma_buf_end_cpu_access(dmabuf, direction);
 		else
-			dma_buf_begin_cpu_access(dmabuf, direction);
+			ret = dma_buf_begin_cpu_access(dmabuf, direction);
 
-		return 0;
+		return ret;
 	default:
 		return -ENOTTY;
 	}
@@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  * @dmabuf:	[in]	buffer to complete cpu access for.
  * @direction:	[in]	length of range for cpu access.
  *
- * This call must always succeed.
+ * Can return negative error values, returns 0 on success.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 			   enum dma_data_direction direction)
 {
+	int ret = 0;
+
 	WARN_ON(!dmabuf);
 
 	if (dmabuf->ops->end_cpu_access)
-		dmabuf->ops->end_cpu_access(dmabuf, direction);
+		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
......
@@ -48,7 +48,8 @@ struct amdgpu_mn {
 	/* protected by adev->mn_lock */
 	struct hlist_node	node;
 
-	/* objects protected by mm->mmap_sem */
+	/* objects protected by lock */
+	struct mutex		lock;
 	struct rb_root		objects;
 };
 
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
@@ -104,6 +105,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 	schedule_work(&rmn->work);
 }
+/**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
+ *
+ * We block for all BOs and unmap them by move them
+ * into system domain again.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+				      unsigned long start,
+				      unsigned long end)
+{
+	struct amdgpu_bo *bo;
+	long r;
+
+	list_for_each_entry(bo, &node->bos, mn_list) {
+
+		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+			continue;
+
+		r = amdgpu_bo_reserve(bo, true);
+		if (r) {
+			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+			continue;
+		}
+
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+			true, false, MAX_SCHEDULE_TIMEOUT);
+		if (r <= 0)
+			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (r)
+			DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+		amdgpu_bo_unreserve(bo);
+	}
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mn: the mm this callback is about
+ * @address: address of invalidate page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmap them by move them into system domain again.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+				      struct mm_struct *mm,
+				      unsigned long address)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	mutex_lock(&rmn->lock);
+
+	it = interval_tree_iter_first(&rmn->objects, address, address);
+	if (it) {
+		struct amdgpu_mn_node *node;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		amdgpu_mn_invalidate_node(node, address, address);
+	}
+
+	mutex_unlock(&rmn->lock);
+}
+
 /**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
@@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
+	mutex_lock(&rmn->lock);
+
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
-		struct amdgpu_bo *bo;
-		long r;
 
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
-		list_for_each_entry(bo, &node->bos, mn_list) {
-
-			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
-							  end))
-				continue;
-
-			r = amdgpu_bo_reserve(bo, true);
-			if (r) {
-				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-				continue;
-			}
-
-			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-				true, false, MAX_SCHEDULE_TIMEOUT);
-			if (r <= 0)
-				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-			if (r)
-				DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-			amdgpu_bo_unreserve(bo);
-		}
+		amdgpu_mn_invalidate_node(node, start, end);
 	}
+
+	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
+	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
+	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			up_write(&rmn->mm->mmap_sem);
+			mutex_unlock(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 
 	return 0;
 }
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
 subdir-ccflags-y += -Iinclude/drm \
-		-Idrivers/gpu/drm/amd/powerplay/inc/ \
-		-Idrivers/gpu/drm/amd/include/asic_reg \
-		-Idrivers/gpu/drm/amd/include \
-		-Idrivers/gpu/drm/amd/powerplay/smumgr\
-		-Idrivers/gpu/drm/amd/powerplay/hwmgr \
-		-Idrivers/gpu/drm/amd/powerplay/eventmgr
+		-I$(FULL_AMD_PATH)/powerplay/inc/ \
+		-I$(FULL_AMD_PATH)/include/asic_reg \
+		-I$(FULL_AMD_PATH)/include \
+		-I$(FULL_AMD_PATH)/powerplay/smumgr\
+		-I$(FULL_AMD_PATH)/powerplay/hwmgr \
+		-I$(FULL_AMD_PATH)/powerplay/eventmgr
 
 AMD_PP_PATH = ../powerplay
 
 PP_LIBS = smumgr hwmgr eventmgr
 
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
 
 include $(AMD_POWERPLAY)
......
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(
 	hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
 
-	if (NULL == hwmgr->dyn_state.cac_dtp_table)
+	if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+		kfree(tdp_table);
 		return -ENOMEM;
+	}
 
 	memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
......
@@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 	if (!state->base.crtc || !fb)
 		return 0;
 
-	crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
+	crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
 	mode = &crtc_state->adjusted_mode;
 
 	state->src_x = s->src_x;
......
@@ -380,7 +380,6 @@ EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
  * drm_atomic_replace_property_blob - replace a blob property
  * @blob: a pointer to the member blob to be replaced
  * @new_blob: the new blob to replace with
- * @expected_size: the expected size of the new blob
  * @replaced: whether the blob has been replaced
  *
  * RETURNS:
......
@@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
 	struct drm_crtc_state *crtc_state;
 
 	if (plane->state->crtc) {
-		crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
+		crtc_state = drm_atomic_get_existing_crtc_state(state,
+								plane->state->crtc);
 
 		if (WARN_ON(!crtc_state))
 			return;
@@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
 	}
 
 	if (plane_state->crtc) {
-		crtc_state =
-			state->crtc_states[drm_crtc_index(plane_state->crtc)];
+		crtc_state = drm_atomic_get_existing_crtc_state(state,
+								plane_state->crtc);
 
 		if (WARN_ON(!crtc_state))
 			return;
@@ -374,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
 		if (!conn_state->crtc || !conn_state->best_encoder)
 			continue;
 
-		crtc_state =
-			state->crtc_states[drm_crtc_index(conn_state->crtc)];
+		crtc_state = drm_atomic_get_existing_crtc_state(state,
+								conn_state->crtc);
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
@@ -679,7 +680,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 		if (!old_conn_state->crtc)
 			continue;
 
-		old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
+		old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
+								    old_conn_state->crtc);
 
 		if (!old_crtc_state->active ||
 		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
......
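For readers unfamiliar with the helper adopted above: drm_atomic_get_existing_crtc_state() simply looks up the CRTC state already tracked in the atomic update (or NULL if that CRTC is not part of it), which is exactly what the removed open-coded lookups did. A sketch of what such an accessor looks like for this kernel generation; it relies on kernel-internal types and the exact in-tree definition in drm_atomic.h may differ:

/* Sketch only: returns the CRTC state already part of this atomic update,
 * or NULL if the CRTC was not added to it.  Mirrors the open-coded
 * state->crtc_states[drm_crtc_index(crtc)] lookups replaced above. */
static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	return state->crtc_states[drm_crtc_index(crtc)];
}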
@@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 	return ret;
 }
 
-static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
+static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	bool was_interruptible;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
-
-	dev_priv->mm.interruptible = was_interruptible;
 	mutex_unlock(&dev->struct_mutex);
 
-	if (unlikely(ret))
-		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
+	return ret;
 }
 
 static const struct dma_buf_ops i915_dmabuf_ops = {
......
@@ -97,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
 	return omap_gem_get_pages(obj, &pages, true);
 }
 
-static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
 		enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = buffer->priv;
 	omap_gem_put_pages(obj);
+	return 0;
 }
......
@@ -510,6 +510,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 {
 	struct radeon_encoder_mst *mst_enc;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector_atom_dig *dig_connector;
 	int bpp = 24;
 
 	mst_enc = radeon_encoder->enc_priv;
@@ -523,22 +524,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
-	{
-		struct radeon_connector_atom_dig *dig_connector;
-		int ret;
-
-		dig_connector = mst_enc->connector->con_priv;
-		ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
-						   dig_connector->dpcd, adjusted_mode->clock,
-						   &dig_connector->dp_lane_count,
-						   &dig_connector->dp_clock);
-		if (ret) {
-			dig_connector->dp_lane_count = 0;
-			dig_connector->dp_clock = 0;
-		}
-		DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
-			      dig_connector->dp_lane_count, dig_connector->dp_clock);
-	}
+	dig_connector = mst_enc->connector->con_priv;
+	dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+	dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+	DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+		      dig_connector->dp_lane_count, dig_connector->dp_clock);
 	return true;
 }
......
@@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 	}
 
 	if (ufb->obj->base.import_attach) {
-		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-				       DMA_FROM_DEVICE);
+		ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+					     DMA_FROM_DEVICE);
 	}
 
  unlock:
......
@@ -1141,14 +1141,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 				      enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
 
 	mutex_lock(&buffer->lock);
 	ion_buffer_kmap_put(buffer);
 	mutex_unlock(&buffer->lock);
+
+	return 0;
 }
 
 static struct dma_buf_ops dma_buf_ops = {
......
@@ -94,7 +94,7 @@ struct dma_buf_ops {
 	void (*release)(struct dma_buf *);
 
 	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
+	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,8 +224,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-			    enum dma_data_direction dir);
+int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
+			   enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
......
@@ -294,7 +294,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
 	if (WARN_ON(f1->context != f2->context))
 		return false;
 
-	return f1->seqno - f2->seqno < INT_MAX;
+	return (int)(f1->seqno - f2->seqno) > 0;
 }
 
 /**
......
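The fence_is_later() change above replaces an unsigned comparison, which among other things reported a fence as later than itself (0 < INT_MAX), with a signed-difference test: f1 is later only when its seqno is strictly ahead of f2's, and the comparison still tolerates sequence-number wraparound. A small standalone illustration of that comparison, assuming 32-bit sequence numbers and the two's-complement conversion Linux targets provide:

#include <assert.h>
#include <stdint.h>

/* Wrap-safe "is a later than b" check in the style of fence_is_later():
 * a is later iff the signed distance from b to a is positive.  The
 * uint32_t-to-int32_t conversion of out-of-range values is
 * implementation-defined in ISO C but behaves as two's complement on the
 * targets the kernel cares about. */
static int seqno_is_later(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	assert(seqno_is_later(2, 1));				/* strictly ahead */
	assert(!seqno_is_later(1, 1));				/* equal seqno: not later */
	assert(seqno_is_later(0x00000002u, 0xfffffffeu));	/* ahead across wraparound */
	assert(!seqno_is_later(0xfffffffeu, 0x00000002u));
	return 0;
}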