Commit b5a66609 authored by Linus Torvalds

Merge tag 'drm-fixes-2024-05-03' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly fixes, mostly made up from amdgpu and some panel changes.

  Otherwise xe, nouveau, vmwgfx and a couple of others, all seems pretty
  on track.

  amdgpu:
   - Fix VRAM memory accounting
   - DCN 3.1 fixes
   - DCN 2.0 fix
   - DCN 3.1.5 fix
   - DCN 3.5 fix
   - DCN 3.2.1 fix
   - DP fixes
   - Seamless boot fix
   - Fix call order in amdgpu_ttm_move()
   - Fix doorbell regression
   - Disable panel replay temporarily

  amdkfd:
   - Flush wq before creating kfd process

  xe:
   - Fix UAF on rebind worker
   - Fix ADL-N display integration

  imagination:
   - fix page-count macro

  nouveau:
   - avoid page-table allocation failures
   - fix firmware memory allocation

  panel:
   - ili9341: avoid OF for device properties; respect deferred probe;
     fix usage of errno codes

  ttm:
   - fix status output

  vmwgfx:
   - fix legacy display unit
   - fix read length in fence signalling"

* tag 'drm-fixes-2024-05-03' of https://gitlab.freedesktop.org/drm/kernel: (25 commits)
  drm/xe/display: Fix ADL-N detection
  drm/panel: ili9341: Use predefined error codes
  drm/panel: ili9341: Respect deferred probe
  drm/panel: ili9341: Correct use of device property APIs
  drm/xe/vm: prevent UAF in rebind_work_func()
  drm/amd/display: Disable panel replay by default for now
  drm/amdgpu: fix doorbell regression
  drm/amdkfd: Flush the process wq before creating a kfd_process
  drm/amd/display: Disable seamless boot on 128b/132b encoding
  drm/amd/display: Fix DC mode screen flickering on DCN321
  drm/amd/display: Add VCO speed parameter for DCN31 FPU
  drm/amdgpu: once more fix the call oder in amdgpu_ttm_move() v2
  drm/amd/display: Allocate zero bw after bw alloc enable
  drm/amd/display: Fix incorrect DSC instance for MST
  drm/amd/display: Atom Integrated System Info v2_2 for DCN35
  drm/amd/display: Add dtbclk access to dcn315
  drm/amd/display: Ensure that dmcub support flag is set for DCN20
  drm/amd/display: Handle Y carry-over in VCP X.Y calculation
  drm/amdgpu: Fix VRAM memory accounting
  drm/vmwgfx: Fix invalid reads in fence signaled events
  ...
parents 9fbc8bdf 09e10499
@@ -220,7 +220,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
     (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
      kfd_mem_limit.max_ttm_mem_limit) ||
     (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
-     vram_size - reserved_for_pt)) {
+     vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
        ret = -ENOMEM;
        goto release;
    }
...
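The accounting rule behind this hunk: pinned VRAM (tracked in adev->vram_pin_size) can never be evicted, so it has to come out of the budget that KFD allocations are checked against, together with the page-table reservation. A minimal, self-contained sketch of the same rule (names and sizes here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative budget check: an allocation only fits in what is left
 * after the permanent page-table reservation and pinned memory. */
static int fits(uint64_t used, uint64_t needed, uint64_t vram_size,
                uint64_t reserved_for_pt, uint64_t pinned)
{
    return used + needed <= vram_size - reserved_for_pt - pinned;
}

int main(void)
{
    uint64_t gib = 1ULL << 30;

    /* 8 GiB VRAM, 256 MiB page tables, 1 GiB pinned: ~6.75 GiB usable */
    printf("7 GiB alloc fits: %d\n", fits(0, 7 * gib, 8 * gib, gib / 4, gib));
    printf("6 GiB alloc fits: %d\n", fits(0, 6 * gib, 8 * gib, gib / 4, gib));
    return 0;
}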
@@ -1243,14 +1243,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
  * amdgpu_bo_move_notify - notification about a memory move
  * @bo: pointer to a buffer object
  * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new resource for backing the BO
  *
  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
  * bookkeeping.
  * TTM driver callback which is called when ttm moves a buffer.
  */
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+                           bool evict,
+                           struct ttm_resource *new_mem)
 {
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+   struct ttm_resource *old_mem = bo->resource;
    struct amdgpu_bo *abo;

    if (!amdgpu_bo_is_amdgpu_bo(bo))
@@ -1262,12 +1266,12 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
    amdgpu_bo_kunmap(abo);

    if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-       bo->resource->mem_type != TTM_PL_SYSTEM)
+       old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
        dma_buf_move_notify(abo->tbo.base.dma_buf);

-   /* remember the eviction */
-   if (evict)
-       atomic64_inc(&adev->num_evictions);
+   /* move_notify is called before move happens */
+   trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
+                        old_mem ? old_mem->mem_type : -1);
 }

 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
...
@@ -328,7 +328,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+                           bool evict,
+                           struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
...
@@ -419,7 +419,7 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
        return false;

    if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
-       res->mem_type == AMDGPU_PL_PREEMPT)
+       res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
        return true;

    if (res->mem_type != TTM_PL_VRAM)
@@ -481,14 +481,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
    if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
                     bo->ttm == NULL)) {
+       amdgpu_bo_move_notify(bo, evict, new_mem);
        ttm_bo_move_null(bo, new_mem);
-       goto out;
+       return 0;
    }
    if (old_mem->mem_type == TTM_PL_SYSTEM &&
        (new_mem->mem_type == TTM_PL_TT ||
         new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
+       amdgpu_bo_move_notify(bo, evict, new_mem);
        ttm_bo_move_null(bo, new_mem);
-       goto out;
+       return 0;
    }
    if ((old_mem->mem_type == TTM_PL_TT ||
         old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
@@ -498,9 +500,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
            return r;

        amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+       amdgpu_bo_move_notify(bo, evict, new_mem);
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, new_mem);
-       goto out;
+       return 0;
    }

    if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -512,8 +515,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
        new_mem->mem_type == AMDGPU_PL_OA ||
        new_mem->mem_type == AMDGPU_PL_DOORBELL) {
        /* Nothing to save here */
+       amdgpu_bo_move_notify(bo, evict, new_mem);
        ttm_bo_move_null(bo, new_mem);
-       goto out;
+       return 0;
    }

    if (bo->type == ttm_bo_type_device &&
@@ -525,22 +529,23 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
        abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
    }

-   if (adev->mman.buffer_funcs_enabled) {
-       if (((old_mem->mem_type == TTM_PL_SYSTEM &&
-             new_mem->mem_type == TTM_PL_VRAM) ||
-            (old_mem->mem_type == TTM_PL_VRAM &&
-             new_mem->mem_type == TTM_PL_SYSTEM))) {
-           hop->fpfn = 0;
-           hop->lpfn = 0;
-           hop->mem_type = TTM_PL_TT;
-           hop->flags = TTM_PL_FLAG_TEMPORARY;
-           return -EMULTIHOP;
-       }
+   if (adev->mman.buffer_funcs_enabled &&
+       ((old_mem->mem_type == TTM_PL_SYSTEM &&
+         new_mem->mem_type == TTM_PL_VRAM) ||
+        (old_mem->mem_type == TTM_PL_VRAM &&
+         new_mem->mem_type == TTM_PL_SYSTEM))) {
+       hop->fpfn = 0;
+       hop->lpfn = 0;
+       hop->mem_type = TTM_PL_TT;
+       hop->flags = TTM_PL_FLAG_TEMPORARY;
+       return -EMULTIHOP;
+   }

+   amdgpu_bo_move_notify(bo, evict, new_mem);
+   if (adev->mman.buffer_funcs_enabled)
        r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
-   } else {
+   else
        r = -ENODEV;
-   }

    if (r) {
        /* Check that all memory is CPU accessible */
@@ -555,11 +560,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
            return r;
    }

-   trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
-out:
-   /* update statistics */
+   /* update statistics after the move */
+   if (evict)
+       atomic64_inc(&adev->num_evictions);
    atomic64_add(bo->base.size, &adev->num_bytes_moved);
-   amdgpu_bo_move_notify(bo, evict);
    return 0;
 }
@@ -1559,7 +1563,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 static void
 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 {
-   amdgpu_bo_move_notify(bo, false);
+   amdgpu_bo_move_notify(bo, false, NULL);
 }

 static struct ttm_device_funcs amdgpu_bo_driver = {
...
@@ -829,6 +829,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
    if (process) {
        pr_debug("Process already found\n");
    } else {
+       /* If the process just called exec(3), it is possible that the
+        * cleanup of the kfd_process (following the release of the mm
+        * of the old process image) is still in the cleanup work queue.
+        * Make sure to drain any job before trying to recreate any
+        * resource for this process.
+        */
+       flush_workqueue(kfd_process_wq);
+
        process = create_process(thread);
        if (IS_ERR(process))
            goto out;
...
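The fix relies on a standard workqueue guarantee: flush_workqueue() returns only after every work item queued before the call has finished, so teardown of the old process image is complete before the new kfd_process is created. A hedged kernel-style sketch of the pattern (the structure and names are illustrative; only the workqueue calls are real API):

#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *teardown_wq;    /* illustrative */

struct session {
    struct work_struct release_work;    /* queued when the old image dies */
    /* ... per-process resources ... */
};

static struct session *session_recreate(void)
{
    /*
     * Drain any still-pending release_work first; otherwise its cleanup
     * could run after we set up the replacement and tear down state the
     * new session depends on.
     */
    flush_workqueue(teardown_wq);

    return kzalloc(sizeof(struct session), GFP_KERNEL);
}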
@@ -4537,15 +4537,18 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
    /* Determine whether to enable Replay support by default. */
    if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
        switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
-       case IP_VERSION(3, 1, 4):
-       case IP_VERSION(3, 1, 5):
-       case IP_VERSION(3, 1, 6):
-       case IP_VERSION(3, 2, 0):
-       case IP_VERSION(3, 2, 1):
-       case IP_VERSION(3, 5, 0):
-       case IP_VERSION(3, 5, 1):
-           replay_feature_enabled = true;
-           break;
+/*
+ * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
+ *     case IP_VERSION(3, 1, 4):
+ *     case IP_VERSION(3, 1, 5):
+ *     case IP_VERSION(3, 1, 6):
+ *     case IP_VERSION(3, 2, 0):
+ *     case IP_VERSION(3, 2, 1):
+ *     case IP_VERSION(3, 5, 0):
+ *     case IP_VERSION(3, 5, 1):
+ *         replay_feature_enabled = true;
+ *         break;
+ */
        default:
            replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
            break;
...
@@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
@@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx->stream &&
-           pipe_ctx->stream->link == aconnector->dc_link)
+           pipe_ctx->stream->link == aconnector->dc_link &&
+           pipe_ctx->stream->sink &&
+           pipe_ctx->stream->sink == aconnector->dc_sink)
            break;
    }
...
@@ -2948,6 +2948,7 @@ static enum bp_result construct_integrated_info(
        result = get_integrated_info_v2_1(bp, info);
        break;
    case 2:
+   case 3:
        result = get_integrated_info_v2_2(bp, info);
        break;
    default:
...
@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
     */
    clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
    if (safe_to_lower) {
+       if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+           dcn315_smu_set_dtbclk(clk_mgr, false);
+           clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+       }
        /* check that we're not already in lower */
        if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
            display_count = dcn315_get_active_display_cnt_wa(dc, context);
@@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
            }
        }
    } else {
+       if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+           dcn315_smu_set_dtbclk(clk_mgr, true);
+           clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+       }
        /* check that we're not already in D0 */
        if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
            union display_idle_optimization_u idle_info = { 0 };
...
@@ -712,8 +712,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
         * since we calculate mode support based on softmax being the max UCLK
         * frequency.
         */
-       dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
-               dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+       if (dc->debug.disable_dc_mode_overwrite) {
+           dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+           dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+       } else
+           dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+                   dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
    } else {
        dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
    }
@@ -746,8 +750,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
    /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
    if (clk_mgr_base->clks.p_state_change_support &&
        (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
-       !dc->work_arounds.clock_update_disable_mask.uclk)
+       !dc->work_arounds.clock_update_disable_mask.uclk) {
+       if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
+           dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+                   max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
        dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+   }

    if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
        clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
...
@@ -1801,6 +1801,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
        return false;
    }

+   if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+       return false;
+
    if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
        DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
        return false;
...
@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
            x),
            25));

+   // If y rounds up to integer, carry it over to x.
+   if (y >> 25) {
+       x += 1;
+       y = 0;
+   }
+
    switch (stream_encoder_inst) {
    case 0:
        REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
...
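The hunk above handles fixed-point normalization: the throttled VCP size is programmed as X.Y with a 25-bit fractional part, and when rounding pushes Y up to exactly 1.0 the overflow bit must be carried into X. A self-contained demonstration of the carry (the 25-bit width is taken from the hunk; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 25

int main(void)
{
    uint32_t x = 3;
    /* Pretend rounding produced a fraction of exactly 1.0 (bit 25 set). */
    uint32_t y = 1u << FRAC_BITS;

    if (y >> FRAC_BITS) {   /* fraction overflowed into the integer part */
        x += 1;
        y = 0;
    }
    printf("normalized VCP size: %u + %u/2^25\n", x, y);
    return 0;
}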
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
    .do_urgent_latency_adjustment = false,
    .urgent_latency_adjustment_fabric_clock_component_us = 0,
    .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+   .dispclk_dppclk_vco_speed_mhz = 2400.0,
    .num_chans = 4,
    .dummy_pstate_latency_us = 10.0
 };
@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
    .do_urgent_latency_adjustment = false,
    .urgent_latency_adjustment_fabric_clock_component_us = 0,
    .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+   .dispclk_dppclk_vco_speed_mhz = 2500.0,
 };

 void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
...
@@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)

    /* Error check whether requested and allocated are equal */
    req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-   if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+   if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
        DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
                     __func__, link->link_index);
    }
@@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
            ret = true;
            init_usb4_bw_struct(link);
            link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+           /*
+            * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
+            * DPIA. CM release preallocation only when allocation is complete. Do zero alloc
+            * to make the CM to release preallocation and update estimated BW correctly for
+            * all DPIAs per host router
+            */
+           link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
        }
    }
...
@@ -2449,6 +2449,7 @@ static bool dcn20_resource_construct(
    dc->caps.post_blend_color_processing = true;
    dc->caps.force_dp_tps4_for_cp2520 = true;
    dc->caps.extended_aux_timeout_support = true;
+   dc->caps.dmcub_support = true;

    /* Color pipeline capabilities */
    dc->caps.color.dpp.dcn_arch = 1;
...
@@ -7,13 +7,14 @@
 #include "pvr_rogue_mips.h"

 #include <asm/page.h>
+#include <linux/math.h>
 #include <linux/types.h>

 /* Forward declaration from pvr_gem.h. */
 struct pvr_gem_object;

-#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
-                               >> PAGE_SHIFT)
+#define PVR_MIPS_PT_PAGE_COUNT DIV_ROUND_UP(ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K, PAGE_SIZE)
+
 /**
  * struct pvr_fw_mips_data - MIPS-specific data
  */
...
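The macro fix is a rounding bug: (count * 4K) >> PAGE_SHIFT rounds down, so on kernels whose pages are larger than the firmware's 4K page tables (e.g. 64K pages) it evaluates to 0 and nothing would be allocated. DIV_ROUND_UP rounds up to at least one page. A self-contained illustration, using the kernel's definition of DIV_ROUND_UP:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long tables = 4 * 4096;    /* 16K of MIPS firmware page tables */
    unsigned long page_size = 65536;    /* 64K CPU pages */

    printf("shift/divide down: %lu pages\n", tables / page_size);             /* 0 */
    printf("DIV_ROUND_UP:      %lu pages\n", DIV_ROUND_UP(tables, page_size)); /* 1 */
    return 0;
}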
@@ -15,7 +15,9 @@ struct nvkm_gsp_mem {
 };

 struct nvkm_gsp_radix3 {
-   struct nvkm_gsp_mem mem[3];
+   struct nvkm_gsp_mem lvl0;
+   struct nvkm_gsp_mem lvl1;
+   struct sg_table lvl2;
 };

 int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
...
@@ -205,7 +205,9 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
        break;
    case NVKM_FIRMWARE_IMG_DMA:
        nvkm_memory_unref(&memory);
-       dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
+       dma_unmap_single(fw->device->dev, fw->phys, sg_dma_len(&fw->mem.sgl),
+                        DMA_TO_DEVICE);
+       kfree(fw->img);
        break;
    case NVKM_FIRMWARE_IMG_SGT:
        nvkm_memory_unref(&memory);
@@ -235,14 +237,17 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
        fw->img = kmemdup(src, fw->len, GFP_KERNEL);
        break;
    case NVKM_FIRMWARE_IMG_DMA: {
-       dma_addr_t addr;
-
        len = ALIGN(fw->len, PAGE_SIZE);

-       fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL);
-       if (fw->img) {
-           memcpy(fw->img, src, fw->len);
-           fw->phys = addr;
+       fw->img = kmalloc(len, GFP_KERNEL);
+       if (!fw->img)
+           return -ENOMEM;
+
+       memcpy(fw->img, src, fw->len);
+       fw->phys = dma_map_single(fw->device->dev, fw->img, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(fw->device->dev, fw->phys)) {
+           kfree(fw->img);
+           return -EFAULT;
        }

        sg_init_one(&fw->mem.sgl, fw->img, len);
...
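The change swaps a coherent allocation for the streaming-DMA pattern: allocate with kmalloc(), fill the buffer, map it with dma_map_single() for device reads, and check dma_mapping_error() before use; teardown is dma_unmap_single() plus kfree(). One likely motivation is that a kmalloc buffer, unlike a coherent one, is always page-backed and therefore safe to hand to sg_init_one(). A hedged sketch of the pattern (real DMA API calls, illustrative helper):

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative helper, not the nouveau code. */
static void *fw_map(struct device *dev, const void *src, size_t len,
                    dma_addr_t *phys)
{
    void *img = kmalloc(len, GFP_KERNEL);

    if (!img)
        return NULL;
    memcpy(img, src, len);

    /* Map for device reads; the CPU must not touch img while mapped. */
    *phys = dma_map_single(dev, img, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, *phys)) {
        kfree(img);
        return NULL;
    }
    return img;
}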
@@ -1624,7 +1624,7 @@ r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
    meta->magic = GSP_FW_WPR_META_MAGIC;
    meta->revision = GSP_FW_WPR_META_REVISION;

-   meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+   meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
    meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;

    meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
@@ -1919,8 +1919,9 @@ nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
 static void
 nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
 {
-   for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
-       nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+   nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
+   nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+   nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
 }

 /**
@@ -1960,36 +1961,60 @@ static int
 nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
                    struct nvkm_gsp_radix3 *rx3)
 {
-   u64 addr;
+   struct sg_dma_page_iter sg_dma_iter;
+   struct scatterlist *sg;
+   size_t bufsize;
+   u64 *pte;
+   int ret, i, page_idx = 0;

-   for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
-       u64 *ptes;
-       size_t bufsize;
-       int ret, idx;
+   ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
+   if (ret)
+       return ret;

-       bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
-       ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
-       if (ret)
-           return ret;
+   ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
+   if (ret)
+       goto lvl1_fail;

-       ptes = rx3->mem[i].data;
-       if (i == 2) {
-           struct scatterlist *sgl;
+   // Allocate level 2
+   bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+   ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
+   if (ret)
+       goto lvl2_fail;

-           for_each_sgtable_dma_sg(sgt, sgl, idx) {
-               for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
-                   *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
-           }
-       } else {
-           for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
-               *ptes++ = addr + GSP_PAGE_SIZE * j;
-       }
+   // Write the bus address of level 1 to level 0
+   pte = rx3->lvl0.data;
+   *pte = rx3->lvl1.addr;

-       size = rx3->mem[i].size;
-       addr = rx3->mem[i].addr;
+   // Write the bus address of each page in level 2 to level 1
+   pte = rx3->lvl1.data;
+   for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
+       *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+
+   // Finally, write the bus address of each page in sgt to level 2
+   for_each_sgtable_sg(&rx3->lvl2, sg, i) {
+       void *sgl_end;
+
+       pte = sg_virt(sg);
+       sgl_end = (void *)pte + sg->length;
+
+       for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
+           *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+           page_idx++;
+
+           // Go to the next scatterlist for level 2 if we've reached the end
+           if ((void *)pte >= sgl_end)
+               break;
+       }
    }

-   return 0;
+   if (ret) {
+lvl2_fail:
+       nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+lvl1_fail:
+       nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+   }
+
+   return ret;
 }
@@ -2021,7 +2046,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
        sr = gsp->sr.meta.data;
        sr->magic = GSP_FW_SR_META_MAGIC;
        sr->revision = GSP_FW_SR_META_REVISION;
-       sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+       sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
        sr->sizeOfSuspendResumeData = len;

        mbox0 = lower_32_bits(gsp->sr.meta.addr);
...
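The restructured radix3 layout reads most easily as a walk: level 0 is a single page whose one PTE points at the level-1 page; level 1 holds one PTE per page of level 2; and level 2 holds one PTE per page of the target buffer. Level 2 is now built from a scatterlist rather than one large contiguous allocation, which is what addresses the page-table allocation failures mentioned in the summary. A self-contained model of the walk, with flat arrays standing in for DMA pages (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define PTES_PER_PAGE 512                  /* GSP_PAGE_SIZE / sizeof(u64) */

int main(void)
{
    static uint64_t lvl1[PTES_PER_PAGE];   /* one page of PTEs */
    static uint64_t lvl2[PTES_PER_PAGE];   /* first page of level 2 */
    uint64_t data[4] = { 0x1000, 0x2000, 0x3000, 0x4000 }; /* bus addresses */
    uint64_t lvl0[1];                      /* level 0: a single PTE */

    lvl0[0] = (uintptr_t)lvl1;             /* level 0 -> level 1 */
    lvl1[0] = (uintptr_t)lvl2;             /* level 1 -> level 2 page */
    for (int i = 0; i < 4; i++)
        lvl2[i] = data[i];                 /* level 2 -> buffer pages */

    /* Resolve page 2 of the buffer by walking all three levels. */
    uint64_t *l1 = (uint64_t *)(uintptr_t)lvl0[0];
    uint64_t *l2 = (uint64_t *)(uintptr_t)l1[0];
    printf("page 2 -> 0x%llx\n", (unsigned long long)l2[2]);
    return 0;
}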
@@ -177,7 +177,7 @@ config DRM_PANEL_ILITEK_IL9322
 config DRM_PANEL_ILITEK_ILI9341
    tristate "Ilitek ILI9341 240x320 QVGA panels"
-   depends on OF && SPI
+   depends on SPI
    select DRM_KMS_HELPER
    select DRM_GEM_DMA_HELPER
    depends on BACKLIGHT_CLASS_DEVICE
...
@@ -22,8 +22,9 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
@@ -421,7 +422,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)

    ili9341_dpi_init(ili);

-   return ret;
+   return 0;
 }

 static int ili9341_dpi_enable(struct drm_panel *panel)
@@ -691,7 +692,7 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
     * Every new incarnation of this display must have a unique
     * data entry for the system in this driver.
     */
-   ili->conf = of_device_get_match_data(dev);
+   ili->conf = device_get_match_data(dev);
    if (!ili->conf) {
        dev_err(dev, "missing device configuration\n");
        return -ENODEV;
@@ -714,18 +715,18 @@ static int ili9341_probe(struct spi_device *spi)

    reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(reset))
-       dev_err(dev, "Failed to get gpio 'reset'\n");
+       return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");

    dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
    if (IS_ERR(dc))
-       dev_err(dev, "Failed to get gpio 'dc'\n");
+       return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");

    if (!strcmp(id->name, "sf-tc240t-9370-t"))
        return ili9341_dpi_probe(spi, dc, reset);
    else if (!strcmp(id->name, "yx240qv29"))
        return ili9341_dbi_probe(spi, dc, reset);

-   return -1;
+   return -ENODEV;
 }

 static void ili9341_remove(struct spi_device *spi)
...
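The probe-path changes follow two standard conventions: an error from devm_gpiod_get_optional() must abort probe rather than just log it (dev_err_probe() logs, stays quiet for -EPROBE_DEFER, and returns the error unchanged), and probe must return a real errno rather than -1. A hedged sketch of the pattern (illustrative probe function, real APIs):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

/* Sketch of a deferred-probe-friendly error path, not the driver code. */
static int demo_probe(struct spi_device *spi)
{
    struct gpio_desc *reset;

    reset = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(reset))
        /* Logs unless the error is -EPROBE_DEFER, then returns it. */
        return dev_err_probe(&spi->dev, PTR_ERR(reset),
                             "Failed to get gpio 'reset'\n");
    return 0;
}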
@@ -92,7 +92,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
     */
    if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
        page_flags |= TTM_TT_FLAG_DECRYPTED;
-       drm_info(ddev, "TT memory decryption enabled.");
+       drm_info_once(ddev, "TT memory decryption enabled.");
    }

    bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
...
@@ -204,6 +204,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                      VMW_BO_DOMAIN_VRAM,
                      VMW_BO_DOMAIN_VRAM);
    buf->places[0].lpfn = PFN_UP(bo->resource->size);
+   buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
    ret = ttm_bo_validate(bo, &buf->placement, &ctx);

    /* For some reason we didn't end up at the start of vram */
...
@@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
    }

    event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-   event->event.base.length = sizeof(*event);
+   event->event.base.length = sizeof(event->event);
    event->event.user_data = user_data;

    ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
...
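The fence fix is a classic sizeof slip: event->event.base.length tells userspace how many bytes the DRM event carries, but sizeof(*event) measured the whole wrapper, driver-private parts included, so readers were directed past the end of the actual event payload. A self-contained illustration with stand-in types (the real vmwgfx structs differ):

#include <stdio.h>

struct inner_event { unsigned int type, length, user_data; };

struct wrapper {
    struct inner_event event;   /* what userspace should read */
    void *driver_private;       /* kernel-side bookkeeping only */
};

int main(void)
{
    static struct wrapper w;
    struct wrapper *event = &w;

    printf("sizeof(*event)       = %zu  (wrapper: too long)\n", sizeof(*event));
    printf("sizeof(event->event) = %zu  (payload: correct)\n", sizeof(event->event));
    return 0;
}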
@@ -84,7 +84,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
 #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
+#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
+                                  IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
 #define IS_XEHPSDV(dev_priv) (dev_priv && 0)
 #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
 #define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
...
@@ -1606,6 +1606,9 @@ static void vm_destroy_work_func(struct work_struct *w)
    /* xe_vm_close_and_put was not called? */
    xe_assert(xe, !vm->size);

+   if (xe_vm_in_preempt_fence_mode(vm))
+       flush_work(&vm->preempt.rebind_work);
+
    mutex_destroy(&vm->snap_mutex);

    if (!(vm->flags & XE_VM_FLAG_MIGRATION))
...
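The xe fix applies the usual rule against worker use-after-free: any work item that dereferences an object must be flushed before that object is torn down. A hedged kernel-style sketch of the rule (illustrative structure, real workqueue API):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct vm_like {
    struct work_struct rebind_work;
    /* ... state that rebind_work dereferences ... */
};

static void vm_like_destroy(struct vm_like *vm)
{
    /*
     * If rebind_work is queued or running, it still touches *vm;
     * wait for it to finish, then it is safe to free.
     */
    flush_work(&vm->rebind_work);
    kfree(vm);
}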