Commit 55b72855 authored by Dave Airlie

Merge tag 'drm-intel-gt-next-2023-10-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Driver Changes:

Fixes/improvements/new stuff:

- Retry gtt fault when out of fence registers (Ville Syrjälä)
- Determine context valid in OA reports [perf] (Umesh Nerlige Ramappa)

Future platform enablement:

- GuC based TLB invalidation for Meteorlake (Jonathan Cavitt, Prathap Kumar Valsan)
- Don't set PIPE_CONTROL_FLUSH_L3 [mtl] (Vinay Belgaumkar)

Miscellaneous:

- Clean up zero initializers [guc,pxp] (Ville Syrjälä)
- Prevent potential null-ptr-deref in engine_init_common (Nirmoy Das)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZTFDFSbd/U7YP+hI@tursulin-desk
parents 3ac5fa3f 7eeaedf7
......@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
case -ENOBUFS: /* temporarily out of fences? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
......
......@@ -278,7 +278,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
* deals with Protected Memory which is not needed for
* AUX CCS invalidation and lead to unwanted side effects.
*/
if (mode & EMIT_FLUSH)
if ((mode & EMIT_FLUSH) &&
GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
......@@ -812,12 +813,14 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
u32 flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_FLUSH_L3 |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
if (GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
flags |= PIPE_CONTROL_FLUSH_L3;
/* Wa_14016712196 */
if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
/* dummy PIPE_CONTROL + depth flush */
......
......@@ -1491,7 +1491,8 @@ static int engine_init_common(struct intel_engine_cs *engine)
return 0;
err_bce_context:
intel_engine_destroy_pinned_context(bce);
if (bce)
intel_engine_destroy_pinned_context(bce);
err_ce_context:
intel_engine_destroy_pinned_context(ce);
return ret;
......
......@@ -206,22 +206,36 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
struct intel_guc *guc = &gt->uc.guc;
intel_guc_invalidate_tlb_guc(guc);
}
}
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_gt *gt;
gen8_ggtt_invalidate(ggtt);
if (GRAPHICS_VER(i915) >= 12) {
struct intel_gt *gt;
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc)) {
guc_ggtt_ct_invalidate(gt);
} else if (GRAPHICS_VER(i915) >= 12) {
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
} else {
intel_uncore_write_fw(ggtt->vm.gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
} else {
intel_uncore_write_fw(gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
}
}
......@@ -1243,7 +1257,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
}
if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
ggtt->invalidate = guc_ggtt_invalidate;
else
ggtt->invalidate = gen8_ggtt_invalidate;
......
......@@ -12,6 +12,7 @@
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_tlb.h"
#include "uc/intel_guc.h"
/*
* HW architecture suggest typical invalidation time at 40us,
......@@ -131,11 +132,24 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
struct intel_guc *guc = &gt->uc.guc;
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;
mmio_invalidate_full(gt);
if (HAS_GUC_TLB_INVALIDATION(gt->i915)) {
/*
* Only perform GuC TLB invalidation if GuC is ready.
* The only time GuC could not be ready is on GT reset,
* which would clobber all the TLBs anyways, making
* any TLB invalidation path here unnecessary.
*/
if (intel_guc_is_ready(guc))
intel_guc_invalidate_tlb_engines(guc);
} else {
mmio_invalidate_full(gt);
}
write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
......
......@@ -136,8 +136,15 @@ pte_tlbinv(struct intel_context *ce,
i915_request_get(rq);
i915_request_add(rq);
/* Short sleep to sanitycheck the batch is spinning before we begin */
msleep(10);
/*
* Short sleep to sanitycheck the batch is spinning before we begin.
* FIXME: Why is GSC so slow?
*/
if (ce->engine->class == OTHER_CLASS)
msleep(200);
else
msleep(10);
if (va == vb) {
if (!i915_request_completed(rq)) {
pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
......
......@@ -138,6 +138,8 @@ enum intel_guc_action {
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
......@@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
#define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
#define INTEL_GUC_TLB_INVAL_TYPE_MASK REG_GENMASK(7, 0)
#define INTEL_GUC_TLB_INVAL_MODE_MASK REG_GENMASK(11, 8)
#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE REG_BIT(31)
enum intel_guc_tlb_invalidation_type {
INTEL_GUC_TLB_INVAL_ENGINES = 0x0,
INTEL_GUC_TLB_INVAL_GUC = 0x3,
};
/*
* 0: Heavy mode of Invalidation:
* The pipeline of the engine(s) for which the invalidation is targeted to is
* blocked, and all the in-flight transactions are guaranteed to be Globally
* Observed before completing the TLB invalidation
* 1: Lite mode of Invalidation:
* TLBs of the targeted engine(s) are immediately invalidated.
* In-flight transactions are NOT guaranteed to be Globally Observed before
* completing TLB invalidation.
* Light Invalidation Mode is to be used only when
* it can be guaranteed (by SW) that the address translations remain invariant
* for the in-flight transactions across the TLB invalidation. In other words,
* this mode can be used when the TLB invalidation is intended to clear out the
* stale cached translations that are no longer in use. Light Invalidation Mode
* is much faster than the Heavy Invalidation Mode, as it does not wait for the
* in-flight transactions to be GOd.
*/
enum intel_guc_tlb_inval_mode {
INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
};
#endif /* _ABI_GUC_ACTIONS_ABI_H */
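
To make the new bit layout concrete, here is a hedged, userspace-only sketch in plain C. The macro names below are illustrative stand-ins for the kernel's REG_FIELD_PREP()/REG_BIT() helpers, not the driver's own definitions; the layout (type in bits 7:0, mode in bits 11:8, flush-cache flag in bit 31) is taken from the masks added above. It computes the third element of the action[] array (after the action code and the seqno) the way guc_send_invalidate_tlb() assembles it further down in this series, for a GuC-internal invalidation (type 0x3) in heavy mode with the flush-cache bit set.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for REG_FIELD_PREP()/REG_BIT() with the field
 * layout added to the GuC ABI above.
 */
#define GUC_TLB_INVAL_TYPE(x)     ((uint32_t)(x) & 0xffu)       /* bits 7:0  */
#define GUC_TLB_INVAL_MODE(x)     (((uint32_t)(x) & 0xfu) << 8) /* bits 11:8 */
#define GUC_TLB_INVAL_FLUSH_CACHE (UINT32_C(1) << 31)           /* bit 31    */

int main(void)
{
	/* Type 0x3 (INTEL_GUC_TLB_INVAL_GUC), mode 0x0 (HEAVY),
	 * plus the flush-cache flag.
	 */
	uint32_t dw = GUC_TLB_INVAL_TYPE(0x3) |
		      GUC_TLB_INVAL_MODE(0x0) |
		      GUC_TLB_INVAL_FLUSH_CACHE;

	printf("0x%08" PRIx32 "\n", dw); /* prints 0x80000003 */
	return 0;
}

An engine-level invalidation (type 0x0) with the same mode and flag would come out as 0x80000000; only the low byte changes with the invalidation type.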
......@@ -79,6 +79,18 @@ struct intel_guc {
*/
atomic_t outstanding_submission_g2h;
/** @tlb_lookup: xarray to store all pending TLB invalidation requests */
struct xarray tlb_lookup;
/**
* @serial_slot: id to the initial waiter created in tlb_lookup,
* which is used only when failed to allocate new waiter.
*/
u32 serial_slot;
/** @next_seqno: the next id (sequence number) to allocate. */
u32 next_seqno;
/** @interrupts: pointers to GuC interrupt-managing functions. */
struct {
bool enabled;
......@@ -288,6 +300,11 @@ struct intel_guc {
#endif
};
struct intel_guc_tlb_wait {
struct wait_queue_head wq;
bool busy;
};
/*
* GuC version number components are only 8-bit, so converting to a 32bit 8.8.8
* integer works.
......@@ -515,4 +532,10 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc);
int intel_guc_invalidate_tlb_engines(struct intel_guc *guc);
int intel_guc_invalidate_tlb_guc(struct intel_guc *guc);
int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
const u32 *payload, u32 len);
void wake_up_all_tlb_invalidate(struct intel_guc *guc);
#endif
......@@ -1101,8 +1101,8 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc)
static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
struct guc_state_capture_group_header_t ghdr = {0};
struct guc_state_capture_header_t hdr = {0};
struct guc_state_capture_group_header_t ghdr = {};
struct guc_state_capture_header_t hdr = {};
struct __guc_capture_parsed_output *node = NULL;
struct guc_mmio_reg *regs = NULL;
int i, numlists, numregs, ret = 0;
......
......@@ -103,6 +103,33 @@ enum { CTB_SEND = 0, CTB_RECV = 1 };
enum { CTB_OWNER_HOST = 0 };
/*
* Some H2G commands involve a synchronous response that the driver needs
* to wait for. In such cases, a timeout is required to prevent the driver
* from waiting forever in the case of an error (either no error response
* is defined in the protocol or something has died and requires a reset).
* The specific command may be defined as having a time bound response but
* the CT is a queue and that time guarantee only starts from the point
* when the command reaches the head of the queue and is processed by GuC.
*
* Ideally there would be a helper to report the progress of a given
* command through the CT. However, that would require a significant
* amount of work in the CT layer. In the meantime, provide a reasonable
* estimation of the worst case latency it should take for the entire
* queue to drain. And therefore, how long a caller should wait before
* giving up on their request. The current estimate is based on empirical
* measurement of a test that fills the buffer with context creation and
* destruction requests as they seem to be the slowest operation.
*/
long intel_guc_ct_max_queue_time_jiffies(void)
{
/*
* A 4KB buffer full of context destroy commands takes a little
* over a second to process so bump that to 2s to be super safe.
*/
return (CTB_H2G_BUFFER_SIZE * HZ) / SZ_2K;
}
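
For reference, assuming CTB_H2G_BUFFER_SIZE is the 4 KiB buffer the comment above describes, (SZ_4K * HZ) / SZ_2K evaluates to 2 * HZ, i.e. the two-second worst-case bound that guc_send_invalidate_tlb() later passes to must_wait_woken().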
static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);
......@@ -1115,6 +1142,9 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r
case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
ret = intel_guc_crash_process_msg(guc, action);
break;
case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
ret = intel_guc_tlb_invalidation_done(guc, payload, len);
break;
default:
ret = -EOPNOTSUPP;
break;
......@@ -1186,9 +1216,17 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
switch (action) {
case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
g2h_release_space(ct, request->size);
}
/*
* TLB invalidation responses must be handled immediately as processing
* of other G2H notifications may be blocked by an invalidation request.
*/
if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE)
return ct_process_request(ct, request);
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
......
......@@ -104,6 +104,8 @@ struct intel_guc_ct {
#endif
};
long intel_guc_ct_max_queue_time_jiffies(void);
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
int intel_guc_ct_init(struct intel_guc_ct *ct);
void intel_guc_ct_fini(struct intel_guc_ct *ct);
......
......@@ -22,6 +22,7 @@
/* Payload length only i.e. don't include G2H header length */
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
#define G2H_LEN_DW_DEREGISTER_CONTEXT 1
#define G2H_LEN_DW_INVALIDATE_TLB 1
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
......
......@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_irq.h"
#include "i915_trace.h"
/**
......@@ -1796,6 +1797,20 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
intel_context_put(parent);
}
void wake_up_all_tlb_invalidate(struct intel_guc *guc)
{
struct intel_guc_tlb_wait *wait;
unsigned long i;
if (!intel_guc_tlb_invalidation_is_available(guc))
return;
xa_lock_irq(&guc->tlb_lookup);
xa_for_each(&guc->tlb_lookup, i, wait)
wake_up(&wait->wq);
xa_unlock_irq(&guc->tlb_lookup);
}
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{
struct intel_context *ce;
......@@ -1921,6 +1936,12 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
/*
* Wedged GT won't respond to any TLB invalidation request. Simply
* release all the blocked waiters.
*/
wake_up_all_tlb_invalidate(guc);
}
void intel_guc_submission_reset_finish(struct intel_guc *guc)
......@@ -1943,11 +1964,65 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
intel_guc_global_policies_update(guc);
enable_submission(guc);
intel_gt_unpark_heartbeats(guc_to_gt(guc));
/*
* The full GT reset will have cleared the TLB caches and flushed the
* G2H message queue; we can release all the blocked waiters.
*/
wake_up_all_tlb_invalidate(guc);
}
static void destroyed_worker_func(struct work_struct *w);
static void reset_fail_worker_func(struct work_struct *w);
bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc)
{
return HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915) &&
intel_guc_is_ready(guc);
}
static int init_tlb_lookup(struct intel_guc *guc)
{
struct intel_guc_tlb_wait *wait;
int err;
if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
return 0;
xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
wait = kzalloc(sizeof(*wait), GFP_KERNEL);
if (!wait)
return -ENOMEM;
init_waitqueue_head(&wait->wq);
/* Preallocate a shared id for use under memory pressure. */
err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
if (err < 0) {
kfree(wait);
return err;
}
return 0;
}
static void fini_tlb_lookup(struct intel_guc *guc)
{
struct intel_guc_tlb_wait *wait;
if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
return;
wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
if (wait && wait->busy)
guc_err(guc, "Unexpected busy item in tlb_lookup on fini\n");
kfree(wait);
xa_destroy(&guc->tlb_lookup);
}
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
......@@ -1966,11 +2041,15 @@ int intel_guc_submission_init(struct intel_guc *guc)
return ret;
}
ret = init_tlb_lookup(guc);
if (ret)
goto destroy_pool;
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap) {
ret = -ENOMEM;
goto destroy_pool;
goto destroy_tlb;
}
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
......@@ -1979,9 +2058,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
destroy_tlb:
fini_tlb_lookup(guc);
destroy_pool:
guc_lrc_desc_pool_destroy_v69(guc);
return ret;
}
......@@ -1994,6 +2074,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
fini_tlb_lookup(guc);
guc->submission_initialized = false;
}
......@@ -4624,6 +4705,154 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
return ce;
}
static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
{
struct intel_guc_tlb_wait *wait;
unsigned long flags;
xa_lock_irqsave(&guc->tlb_lookup, flags);
wait = xa_load(&guc->tlb_lookup, seqno);
if (wait)
wake_up(&wait->wq);
else
guc_dbg(guc,
"Stale TLB invalidation response with seqno %d\n", seqno);
xa_unlock_irqrestore(&guc->tlb_lookup, flags);
}
int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
const u32 *payload, u32 len)
{
if (len < 1)
return -EPROTO;
wait_wake_outstanding_tlb_g2h(guc, payload[0]);
return 0;
}
static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
{
/*
* This is equivalent to wait_woken() with the exception that
* we do not wake up early if the kthread task has been completed.
* As we are called from page reclaim in any task context,
* we may be invoked from stopped kthreads, but we *must*
* complete the wait from the HW.
*/
do {
set_current_state(TASK_UNINTERRUPTIBLE);
if (wq_entry->flags & WQ_FLAG_WOKEN)
break;
timeout = schedule_timeout(timeout);
} while (timeout);
/* See wait_woken() and woken_wake_function() */
__set_current_state(TASK_RUNNING);
smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
return timeout;
}
static bool intel_gt_is_enabled(const struct intel_gt *gt)
{
/* Check if GT is wedged or suspended */
if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
return false;
return true;
}
static int guc_send_invalidate_tlb(struct intel_guc *guc,
enum intel_guc_tlb_invalidation_type type)
{
struct intel_guc_tlb_wait _wq, *wq = &_wq;
struct intel_gt *gt = guc_to_gt(guc);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
u32 seqno;
u32 action[] = {
INTEL_GUC_ACTION_TLB_INVALIDATION,
0,
REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
};
u32 size = ARRAY_SIZE(action);
/*
* Early guard against GT enablement. TLB invalidation should not be
* attempted if the GT is disabled due to suspend/wedge.
*/
if (!intel_gt_is_enabled(gt))
return -EINVAL;
init_waitqueue_head(&_wq.wq);
if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
xa_limit_32b, &guc->next_seqno,
GFP_ATOMIC | __GFP_NOWARN) < 0) {
/* Under severe memory pressure? Serialise TLB allocations */
xa_lock_irq(&guc->tlb_lookup);
wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
wait_event_lock_irq(wq->wq,
!READ_ONCE(wq->busy),
guc->tlb_lookup.xa_lock);
/*
* Update wq->busy under lock to ensure only one waiter can
* issue the TLB invalidation command using the serial slot at a
* time. The condition is set to true before releasing the lock
* so that other caller continue to wait until woken up again.
*/
wq->busy = true;
xa_unlock_irq(&guc->tlb_lookup);
seqno = guc->serial_slot;
}
action[1] = seqno;
add_wait_queue(&wq->wq, &wait);
/* This is a critical reclaim path and thus we must loop here. */
err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true);
if (err)
goto out;
/*
* Late guard against GT enablement. It is not an error for the TLB
* invalidation to time out if the GT is disabled during the process
* due to suspend/wedge. In fact, the TLB invalidation is cancelled
* in this case.
*/
if (!must_wait_woken(&wait, intel_guc_ct_max_queue_time_jiffies()) &&
intel_gt_is_enabled(gt)) {
guc_err(guc,
"TLB invalidation response timed out for seqno %u\n", seqno);
err = -ETIME;
}
out:
remove_wait_queue(&wq->wq, &wait);
if (seqno != guc->serial_slot)
xa_erase_irq(&guc->tlb_lookup, seqno);
return err;
}
/* Send a H2G command to invalidate the TLBs at engine level and beyond. */
int intel_guc_invalidate_tlb_engines(struct intel_guc *guc)
{
return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES);
}
/* Send a H2G command to invalidate the GuC's internal TLB. */
int intel_guc_invalidate_tlb_guc(struct intel_guc *guc)
{
return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
}
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
const u32 *msg,
u32 len)
......
......@@ -688,6 +688,8 @@ void intel_uc_suspend(struct intel_uc *uc)
/* flush the GSC worker */
intel_gsc_uc_flush_work(&uc->gsc);
wake_up_all_tlb_invalidate(guc);
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
return;
......@@ -736,6 +738,11 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
intel_gsc_uc_resume(&uc->gsc);
if (intel_guc_tlb_invalidation_is_available(guc)) {
intel_guc_invalidate_tlb_engines(guc);
intel_guc_invalidate_tlb_guc(guc);
}
return 0;
}
......
......@@ -794,6 +794,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GUC_DEPRIVILEGE(i915) \
(INTEL_INFO(i915)->has_guc_deprivilege)
#define HAS_GUC_TLB_INVALIDATION(i915) (INTEL_INFO(i915)->has_guc_tlb_invalidation)
#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
......
......@@ -829,6 +829,7 @@ static const struct intel_device_info mtl_info = {
.has_flat_ccs = 0,
.has_gmd_id = 1,
.has_guc_deprivilege = 1,
.has_guc_tlb_invalidation = 1,
.has_llc = 0,
.has_mslice_steering = 0,
.has_snoop = 1,
......
......@@ -483,8 +483,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
{
return !(oa_report_id(stream, report) &
stream->perf->gen8_valid_ctx_bit) &&
GRAPHICS_VER(stream->perf->i915) <= 11;
stream->perf->gen8_valid_ctx_bit);
}
static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
......@@ -5047,6 +5046,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
break;
case 12:
perf->gen8_valid_ctx_bit = BIT(16);
/*
* Calculate offset at runtime in oa_pin_context for gen12 and
* cache the value in perf->ctx_oactxctrl_offset.
......
......@@ -153,6 +153,7 @@ enum intel_ppgtt_type {
func(has_heci_pxp); \
func(has_heci_gscfi); \
func(has_guc_deprivilege); \
func(has_guc_tlb_invalidation); \
func(has_l3_ccs_read); \
func(has_l3_dpf); \
func(has_llc); \
......
......@@ -209,8 +209,8 @@ int intel_pxp_gsccs_create_session(struct intel_pxp *pxp,
int arb_session_id)
{
struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct pxp43_create_arb_in msg_in = {0};
struct pxp43_create_arb_out msg_out = {0};
struct pxp43_create_arb_in msg_in = {};
struct pxp43_create_arb_out msg_out = {};
int ret;
msg_in.header.api_version = PXP_APIVER(4, 3);
......@@ -247,8 +247,8 @@ int intel_pxp_gsccs_create_session(struct intel_pxp *pxp,
void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
{
struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct pxp42_inv_stream_key_in msg_in = {0};
struct pxp42_inv_stream_key_out msg_out = {0};
struct pxp42_inv_stream_key_in msg_in = {};
struct pxp42_inv_stream_key_out msg_out = {};
int ret = 0;
/*
......
......@@ -18,8 +18,8 @@ int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
{
struct intel_gt *gt;
struct intel_huc *huc;
struct pxp43_start_huc_auth_in huc_in = {0};
struct pxp43_huc_auth_out huc_out = {0};
struct pxp43_start_huc_auth_in huc_in = {};
struct pxp43_huc_auth_out huc_out = {};
dma_addr_t huc_phys_addr;
u8 client_id = 0;
u8 fence_id = 0;
......
......@@ -327,8 +327,8 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
int arb_session_id)
{
struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct pxp42_create_arb_in msg_in = {0};
struct pxp42_create_arb_out msg_out = {0};
struct pxp42_create_arb_in msg_in = {};
struct pxp42_create_arb_out msg_out = {};
int ret;
msg_in.header.api_version = PXP_APIVER(4, 2);
......@@ -365,8 +365,8 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
{
struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct pxp42_inv_stream_key_in msg_in = {0};
struct pxp42_inv_stream_key_out msg_out = {0};
struct pxp42_inv_stream_key_in msg_in = {};
struct pxp42_inv_stream_key_out msg_out = {};
int ret, trials = 0;
try_again:
......