Commit ac44ff7c authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-10-10' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Fix error checking with xa_store() (Matthew Auld); the corrected pattern is sketched below
- Fix missing freq restore on GSC load error (Vinay)
- Fix wedged_mode file permission (Matt Roper)
- Fix use-after-free in ct communication (Matthew Auld)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/jri65tmv3bjbhqhxs5smv45nazssxzhtwphojem4uufwtjuliy@gsdhlh6kzsdy
parents b634acb2 1badf482
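
For background on the xa_store() fixes in the diff (this note is not part of the commit itself): xa_store() does not return an ERR_PTR() on failure. It returns the entry previously stored at the index (usually NULL), or an XArray-internal error entry that only xa_err()/xa_is_err() can decode, so the old IS_ERR()/PTR_ERR() checks could never observe an allocation failure. A minimal sketch of the corrected pattern, using an illustrative xarray and helper rather than the driver's own:

#include <linux/xarray.h>

static DEFINE_XARRAY(lookup);

/* Store @entry at @index; returns 0 on success or a negative errno. */
static int store_entry(unsigned long index, void *entry)
{
	/*
	 * xa_store() hands back the *previous* entry at @index on
	 * success and an internal error entry on failure; xa_err()
	 * converts either into 0 or -errno. IS_ERR() never matches
	 * the internal encoding, which is the bug fixed below.
	 */
	return xa_err(xa_store(&lookup, index, entry, GFP_KERNEL));
}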
@@ -187,7 +187,7 @@ void xe_debugfs_register(struct xe_device *xe)
 	debugfs_create_file("forcewake_all", 0400, root, xe,
 			    &forcewake_all_fops);
-	debugfs_create_file("wedged_mode", 0400, root, xe,
+	debugfs_create_file("wedged_mode", 0600, root, xe,
 			    &wedged_mode_fops);
 
 	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
...
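A note on the permission fix above: debugfs mode bits map directly onto the created file's permissions, so 0400 made wedged_mode owner read-only and writes failed at open() despite the fops providing a write handler; 0600 also grants the owner write access. A minimal sketch with illustrative names, not the driver's code:

#include <linux/debugfs.h>

static u32 wedged_mode_val;

static void register_knob(struct dentry *root)
{
	/*
	 * 0600: owner may read and write. With 0400 the VFS rejects
	 * open(O_WRONLY) with -EACCES before any fops ever run.
	 */
	debugfs_create_u32("wedged_mode", 0600, root, &wedged_mode_val);
}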
@@ -874,7 +874,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
 	int ret = 0;
 
 	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
-	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
+	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
+	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
+	    XE_WA(gt, 22019338487))
 		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
 
 	return ret;
...
@@ -667,16 +667,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
 		num_g2h = 1;
 
 		if (g2h_fence_needs_alloc(g2h_fence)) {
-			void *ptr;
-
 			g2h_fence->seqno = next_ct_seqno(ct, true);
-			ptr = xa_store(&ct->fence_lookup,
-				       g2h_fence->seqno,
-				       g2h_fence, GFP_ATOMIC);
-			if (IS_ERR(ptr)) {
-				ret = PTR_ERR(ptr);
+			ret = xa_err(xa_store(&ct->fence_lookup,
+					      g2h_fence->seqno, g2h_fence,
+					      GFP_ATOMIC));
+			if (ret)
 				goto out;
-			}
 		}
 
 		seqno = g2h_fence->seqno;
@@ -879,14 +875,11 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 retry_same_fence:
 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
 	if (unlikely(ret == -ENOMEM)) {
-		void *ptr;
-
 		/* Retry allocation /w GFP_KERNEL */
-		ptr = xa_store(&ct->fence_lookup,
-			       g2h_fence.seqno,
-			       &g2h_fence, GFP_KERNEL);
-		if (IS_ERR(ptr))
-			return PTR_ERR(ptr);
+		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+				      &g2h_fence, GFP_KERNEL));
+		if (ret)
+			return ret;
 
 		goto retry_same_fence;
 	} else if (unlikely(ret)) {
@@ -903,16 +896,26 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 	}
 
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
+	/*
+	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+	 * the stack, since we have no clue if it will fire after the timeout before we can erase
+	 * from the xa. Also we have some dependent loads and stores below for which we need the
+	 * correct ordering, and we lack the needed barriers.
+	 */
+	mutex_lock(&ct->lock);
 	if (!ret) {
-		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
-			  g2h_fence.seqno, action[0]);
+		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
+			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
 		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+		mutex_unlock(&ct->lock);
 		return -ETIME;
 	}
 
 	if (g2h_fence.retry) {
 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
 			  action[0], g2h_fence.reason);
+		mutex_unlock(&ct->lock);
 		goto retry;
 	}
 
 	if (g2h_fence.fail) {
@@ -921,7 +924,12 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 		ret = -EIO;
 	}
 
-	return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
+	if (ret > 0)
+		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+	mutex_unlock(&ct->lock);
+
+	return ret;
 }
 
 /**
...
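A note on the locking added above: g2h_fence lives on guc_ct_send_recv()'s stack, so once the waiter times out and returns, any late completion still holding a pointer to the fence would dereference dead stack memory. Taking ct->lock on the timeout path and, per the in-diff comment, on the completion side serializes the two, and the lock's acquire/release semantics also order the dependent loads and stores the comment mentions. Below is a minimal self-contained sketch of the pattern, with illustrative names and a simplified completer that erases the lookup entry itself (the driver's completion side is not shown in this diff):

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/xarray.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_fence {
	bool done;
};

static DEFINE_MUTEX(demo_lock);
static DEFINE_XARRAY(demo_lookup);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

/* Completion side, e.g. message-handler context: only touches the
 * fence under demo_lock, so it can never race a waiter that already
 * erased the entry and unwound its stack frame. */
static void demo_complete(unsigned long seqno)
{
	struct demo_fence *fence;

	mutex_lock(&demo_lock);
	fence = xa_erase(&demo_lookup, seqno);
	if (fence)
		fence->done = true;
	mutex_unlock(&demo_lock);
	wake_up_all(&demo_wq);
}

/* Waiter side: the fence is stack memory. The timeout path takes the
 * same lock before deciding, so a completion that slipped in after the
 * timeout is still observed rather than reported as -ETIME. */
static int demo_send_recv(unsigned long seqno)
{
	struct demo_fence fence = { .done = false };
	long ret;

	ret = xa_err(xa_store(&demo_lookup, seqno, &fence, GFP_KERNEL));
	if (ret)
		return ret;

	ret = wait_event_timeout(demo_wq, fence.done, HZ);

	mutex_lock(&demo_lock);
	if (!ret && !fence.done) {
		xa_erase(&demo_lookup, seqno);
		mutex_unlock(&demo_lock);
		return -ETIME;
	}
	mutex_unlock(&demo_lock);
	return 0;
}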
@@ -320,7 +320,6 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	int ret;
-	void *ptr;
 	int i;
 
 	/*
@@ -340,12 +339,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 	q->guc->id = ret;
 
 	for (i = 0; i < q->width; ++i) {
-		ptr = xa_store(&guc->submission_state.exec_queue_lookup,
-			       q->guc->id + i, q, GFP_NOWAIT);
-		if (IS_ERR(ptr)) {
-			ret = PTR_ERR(ptr);
+		ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
+				      q->guc->id + i, q, GFP_NOWAIT));
+		if (ret)
 			goto err_release;
-		}
 	}
 
 	return 0;
...