Commit 275fee9d authored by Dave Airlie

Merge tag 'drm-xe-next-2024-06-26' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

UAPI Changes:
- New uapi adding OA functionality to Xe (Ashutosh)

Cross-subsystem Changes:
- devcoredump: Add dev_coredumpm_timeout (Jose)

Driver Changes:
- More SRIOV preparation, including GuC communication improvements (Michal)
- Kconfig update: do not select ACPI_BUTTON (Jani)
- Rework GPU page fault handling (Brost)
- Forcewake clean-up and fixes (Himal, Michal)
- Drop EXEC_QUEUE_FLAG_BANNED (Brost)
- Xe/Xe2 Workarounds fixes and additions (Tejas, Akshata, Sai, Vinay)
- Xe devcoredump changes (Jose)
- Tracing cleanup and add mmio tracing (RK)
- Add BMG PCI IDs (Roper)
- Scheduler fixes and improvements (Brost)
- Some overall driver clean-up around headers and print macros (Michal)
- Rename xe_exec_queue::compute to xe_exec_queue::lr (Francois)
- Improve RTP rules to allow easier 'OR' conditions in WA declaration (Lucas)
- Use ttm_uncached for BO with NEEDS_UC flag (Michal)
- Other OA related work and fixes (Ashutosh, Michal, Jose)
- Simplify locking in new_vma (Brost)
- Remove xe_irq_shutdown (Ilia)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZnyW9RdC_aWSla_q@intel.com
parents a78313bb 406d058d
......@@ -18,9 +18,6 @@ static struct class devcd_class;
/* global disable flag, for security purposes */
static bool devcd_disabled;
/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT (HZ * 60 * 5)
struct devcd_entry {
struct device devcd_dev;
void *data;
......@@ -328,7 +325,8 @@ void dev_coredump_put(struct device *dev)
EXPORT_SYMBOL_GPL(dev_coredump_put);
/**
* dev_coredumpm - create device coredump with read/free methods
* dev_coredumpm_timeout - create device coredump with read/free methods and a
* custom timeout.
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
* @data: data cookie for the @read/@free functions
......@@ -336,17 +334,20 @@ EXPORT_SYMBOL_GPL(dev_coredump_put);
* @gfp: allocation flags
* @read: function to read from the given buffer
* @free: function to free the given buffer
* @timeout: time in jiffies to remove coredump
*
* Creates a new device coredump for the given device. If a previous one hasn't
* been read yet, the new coredump is discarded. The data lifetime is determined
* by the device coredump framework and when it is no longer needed the @free
* function will be called to free the data.
*/
void dev_coredumpm(struct device *dev, struct module *owner,
void *data, size_t datalen, gfp_t gfp,
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen),
void (*free)(void *data))
void dev_coredumpm_timeout(struct device *dev, struct module *owner,
void *data, size_t datalen, gfp_t gfp,
ssize_t (*read)(char *buffer, loff_t offset,
size_t count, void *data,
size_t datalen),
void (*free)(void *data),
unsigned long timeout)
{
static atomic_t devcd_count = ATOMIC_INIT(0);
struct devcd_entry *devcd;
......@@ -403,7 +404,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
schedule_delayed_work(&devcd->del_wk, timeout);
mutex_unlock(&devcd->mutex);
return;
put_device:
......@@ -414,7 +415,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
free:
free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm);
EXPORT_SYMBOL_GPL(dev_coredumpm_timeout);
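For reference, a minimal usage sketch of the new dev_coredumpm_timeout() API (not part of this patch): the my_coredump_read()/my_coredump_free() callbacks, the buf/len variables and the 10-minute timeout are hypothetical, and memory_read_from_buffer() is just one possible way to implement @read.

static ssize_t my_coredump_read(char *buffer, loff_t offset, size_t count,
                                void *data, size_t datalen)
{
        /* @data is the buffer handed to dev_coredumpm_timeout() below */
        return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}

static void my_coredump_free(void *data)
{
        kfree(data);
}

/* in the driver's error handler, with buf/len describing the captured state */
dev_coredumpm_timeout(dev, THIS_MODULE, buf, len, GFP_KERNEL,
                      my_coredump_read, my_coredump_free, 10 * 60 * HZ);

The xe change later in this series passes XE_COREDUMP_TIMEOUT_JIFFIES (one hour) through the same parameter.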
/**
* dev_coredumpsg - create device coredump that uses scatterlist as data
......
......@@ -25,7 +25,6 @@ config DRM_XE
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
select ACPI_BUTTON if ACPI
select X86_PLATFORM_DEVICES if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
......
......@@ -24,9 +24,12 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(call cmd,wa_oob)
uses_generated_oob := \
$(obj)/xe_ggtt.o \
$(obj)/xe_gsc.o \
$(obj)/xe_gt.o \
$(obj)/xe_guc.o \
$(obj)/xe_guc_ads.o \
$(obj)/xe_guc_pc.o \
$(obj)/xe_migrate.o \
$(obj)/xe_ring_ops.o \
$(obj)/xe_vm.o \
......@@ -92,9 +95,11 @@ xe-y += xe_bb.o \
xe_mmio.o \
xe_mocs.o \
xe_module.o \
xe_oa.o \
xe_pat.o \
xe_pci.o \
xe_pcode.o \
xe_perf.o \
xe_pm.o \
xe_preempt_fence.o \
xe_pt.o \
......@@ -112,6 +117,8 @@ xe-y += xe_bb.o \
xe_tile.o \
xe_tile_sysfs.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
xe_ttm_sys_mgr.o \
xe_ttm_stolen_mgr.o \
xe_ttm_vram_mgr.o \
......
......@@ -128,7 +128,6 @@ enum xe_guc_action {
XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
XE_GUC_ACTION_SETUP_PC_GUCRC = 0x3004,
XE_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
XE_GUC_ACTION_GET_HWCONFIG = 0x4100,
XE_GUC_ACTION_REGISTER_CONTEXT = 0x4502,
......@@ -153,11 +152,6 @@ enum xe_guc_action {
XE_GUC_ACTION_LIMIT
};
enum xe_guc_rc_options {
XE_GUCRC_HOST_CONTROL,
XE_GUCRC_FIRMWARE_CONTROL,
};
enum xe_guc_preempt_options {
XE_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
......
......@@ -246,4 +246,26 @@ struct slpc_shared_data {
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
/**
* DOC: SETUP_PC_GUCRC
*
* +---+-------+--------------------------------------------------------------+
* | | Bits | Description |
* +===+=======+==============================================================+
* | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
* | +-------+--------------------------------------------------------------+
* | | 30:28 | TYPE = GUC_HXG_TYPE_FAST_REQUEST_ |
* | +-------+--------------------------------------------------------------+
* | | 27:16 | DATA0 = MBZ |
* | +-------+--------------------------------------------------------------+
* | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC` = 0x3004 |
* +---+-------+--------------------------------------------------------------+
* | 1 | 31:0 | **MODE** = GUCRC_HOST_CONTROL(0), GUCRC_FIRMWARE_CONTROL(1) |
* +---+-------+--------------------------------------------------------------+
*/
#define GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC 0x3004u
#define GUCRC_HOST_CONTROL 0u
#define GUCRC_FIRMWARE_CONTROL 1u
#endif
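As an aside, a minimal sketch (not taken from this diff) of how a host driver might issue this action over the GuC CT channel; the xe_guc_ct_send() call site and the ct pointer are assumptions, and the CT layer is expected to compose the HXG header around action[0].

u32 action[] = {
        GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
        GUCRC_FIRMWARE_CONTROL,         /* MODE: let the GuC firmware own RC6 */
};

return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);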
......@@ -8,10 +8,41 @@
enum xe_guc_response_status {
XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
XE_GUC_RESPONSE_ERROR_PROTOCOL = 0x04,
XE_GUC_RESPONSE_INVALID_STATE = 0x0A,
XE_GUC_RESPONSE_UNSUPPORTED_VERSION = 0x0B,
XE_GUC_RESPONSE_INVALID_VFID = 0x0C,
XE_GUC_RESPONSE_UNPROVISIONED_VF = 0x0D,
XE_GUC_RESPONSE_INVALID_EVENT = 0x0E,
XE_GUC_RESPONSE_NOT_SUPPORTED = 0x20,
XE_GUC_RESPONSE_UNKNOWN_ACTION = 0x30,
XE_GUC_RESPONSE_ACTION_ABORTED = 0x31,
XE_GUC_RESPONSE_NO_PERMISSION = 0x40,
XE_GUC_RESPONSE_CANNOT_COMPLETE_ACTION = 0x41,
XE_GUC_RESPONSE_INVALID_KLV_DATA = 0x50,
XE_GUC_RESPONSE_INVALID_PARAMS = 0x60,
XE_GUC_RESPONSE_INVALID_BUFFER_RANGE = 0x70,
XE_GUC_RESPONSE_INVALID_BUFFER = 0x71,
XE_GUC_RESPONSE_INVALID_GGTT_ADDRESS = 0x80,
XE_GUC_RESPONSE_PENDING_ACTION = 0x90,
XE_GUC_RESPONSE_INVALID_SIZE = 0x102,
XE_GUC_RESPONSE_MALFORMED_KLV = 0x103,
XE_GUC_RESPONSE_INVALID_KLV_KEY = 0x105,
XE_GUC_RESPONSE_DATA_TOO_LARGE = 0x106,
XE_GUC_RESPONSE_VF_MIGRATED = 0x107,
XE_GUC_RESPONSE_NO_ATTRIBUTE_TABLE = 0x201,
XE_GUC_RESPONSE_NO_DECRYPTION_KEY = 0x202,
XE_GUC_RESPONSE_DECRYPTION_FAILED = 0x204,
XE_GUC_RESPONSE_VGT_DISABLED = 0x300,
XE_GUC_RESPONSE_CTB_FULL = 0x301,
XE_GUC_RESPONSE_VGT_UNAUTHORIZED_REQUEST = 0x302,
XE_GUC_RESPONSE_CTB_INVALID = 0x303,
XE_GUC_RESPONSE_CTB_NOT_REGISTERED = 0x304,
XE_GUC_RESPONSE_CTB_IN_USE = 0x305,
XE_GUC_RESPONSE_CTB_INVALID_DESC = 0x306,
XE_GUC_RESPONSE_CTB_SOURCE_INVALID_DESCRIPTOR = 0x30D,
XE_GUC_RESPONSE_CTB_DESTINATION_INVALID_DESCRIPTOR = 0x30E,
XE_GUC_RESPONSE_INVALID_CONFIG_STATE = 0x30F,
XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};
......
......@@ -91,6 +91,34 @@
#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
* DOC: HXG Fast Request
*
* The `HXG Fast Request`_ message should be used to initiate asynchronous activity
* for which confirmation or return data is not expected.
*
* If confirmation is required then `HXG Request`_ shall be used instead.
*
* The recipient of this message may only use the `HXG Failure`_ message if it
* was unable to accept this request (e.g. invalid data).
*
* The format of the `HXG Fast Request`_ message is the same as that of the
* `HXG Request`_ message, except for @TYPE.
*
* +---+-------+--------------------------------------------------------------+
* | | Bits | Description |
* +===+=======+==============================================================+
* | 0 | 31 | ORIGIN - see `HXG Message`_ |
* | +-------+--------------------------------------------------------------+
* | | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_ |
* | +-------+--------------------------------------------------------------+
* | | 27:16 | DATA0 - see `HXG Request`_ |
* | +-------+--------------------------------------------------------------+
* | | 15:0 | ACTION - see `HXG Request`_ |
* +---+-------+--------------------------------------------------------------+
* |...| | DATAn - see `HXG Request`_ |
* +---+-------+--------------------------------------------------------------+
*/
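Purely for illustration, the header dword of a Fast Request could be composed with the FIELD_PREP() helpers as sketched below; the chosen action is only an example, and real senders normally leave header composition to the CT layer.

u32 action = GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC;        /* example Fast Request action */
u32 hxg0 = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
           FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
           FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, 0) |
           FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action);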
/**
* DOC: HXG Event
*
......@@ -220,17 +248,4 @@
#define GUC_HXG_RESPONSE_MSG_0_DATA0 GUC_HXG_MSG_0_AUX
#define GUC_HXG_RESPONSE_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/* deprecated */
#define INTEL_GUC_MSG_TYPE_SHIFT 28
#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT 16
#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
#define INTEL_GUC_MSG_CODE_SHIFT 0
#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
enum intel_guc_msg_type {
INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
};
#endif
......@@ -171,7 +171,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
xe->pat.idx[XE_CACHE_NONE]);
xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
*ggtt_ofs += XE_PAGE_SIZE;
src_idx -= src_stride;
}
......@@ -217,7 +217,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
xe->pat.idx[XE_CACHE_NONE]);
xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node.start + x, pte);
}
} else {
u32 i, ggtt_ofs;
......
......@@ -45,6 +45,7 @@
#define MI_LRI_MMIO_REMAP_EN REG_BIT(17)
#define MI_LRI_NUM_REGS(x) XE_INSTR_NUM_DW(2 * (x) + 1)
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
......@@ -59,6 +60,10 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5))
#define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21)
#define MI_BATCH_BUFFER_START __MI_INSTR(0x31)
#endif
......@@ -129,6 +129,8 @@
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
#define CTX_CTRL_RUN_ALONE REG_BIT(7)
#define CTX_CTRL_INDIRECT_RING_STATE_ENABLE REG_BIT(4)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
......
......@@ -170,6 +170,8 @@
#define SQCNT1 XE_REG_MCR(0x8718)
#define XELPMP_SQCNT1 XE_REG(0x8718)
#define SQCNT1_PMON_ENABLE REG_BIT(30)
#define SQCNT1_OABPC REG_BIT(29)
#define ENFORCE_RAR REG_BIT(23)
#define XEHP_SQCM XE_REG_MCR(0x8724)
......@@ -432,6 +434,7 @@
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
#define STALL_DOP_GATING_DISABLE REG_BIT(5)
#define EARLY_EOT_DIS REG_BIT(1)
#define ROW_CHICKEN2 XE_REG_MCR(0xe4f4, XE_REG_OPTION_MASKED)
......@@ -490,9 +493,11 @@
((ccs) << ((cslice) * CCS_MODE_CSLICE_WIDTH))
#define FORCEWAKE_ACK_GT XE_REG(0x130044)
#define FORCEWAKE_KERNEL BIT(0)
#define FORCEWAKE_USER BIT(1)
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
/* Applicable for all FORCEWAKE_DOMAIN and FORCEWAKE_ACK_DOMAIN regs */
#define FORCEWAKE_KERNEL 0
#define FORCEWAKE_MT(bit) BIT(bit)
#define FORCEWAKE_MT_MASK(bit) BIT((bit) + 16)
#define MTL_MEDIA_PERF_LIMIT_REASONS XE_REG(0x138030)
#define MTL_MEDIA_MC6 XE_REG(0x138048)
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __XE_OA_REGS__
#define __XE_OA_REGS__
#define RPM_CONFIG1 XE_REG(0xd04)
#define GT_NOA_ENABLE REG_BIT(9)
#define EU_PERF_CNTL0 XE_REG(0xe458)
#define EU_PERF_CNTL4 XE_REG(0xe45c)
#define EU_PERF_CNTL1 XE_REG(0xe558)
#define EU_PERF_CNTL5 XE_REG(0xe55c)
#define EU_PERF_CNTL2 XE_REG(0xe658)
#define EU_PERF_CNTL6 XE_REG(0xe65c)
#define EU_PERF_CNTL3 XE_REG(0xe758)
#define OA_TLB_INV_CR XE_REG(0xceec)
/* OAR unit */
#define OAR_OACONTROL XE_REG(0x2960)
#define OAR_OACONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
#define OAR_OACONTROL_COUNTER_ENABLE REG_BIT(0)
#define OACTXCONTROL(base) XE_REG((base) + 0x360)
#define OAR_OASTATUS XE_REG(0x2968)
#define OA_COUNTER_RESUME REG_BIT(0)
/* OAG unit */
#define OAG_OAGLBCTXCTRL XE_REG(0x2b28)
#define OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK REG_GENMASK(7, 2)
#define OAG_OAGLBCTXCTRL_TIMER_ENABLE REG_BIT(1)
#define OAG_OAGLBCTXCTRL_COUNTER_RESUME REG_BIT(0)
#define OAG_OAHEADPTR XE_REG(0xdb00)
#define OAG_OAHEADPTR_MASK REG_GENMASK(31, 6)
#define OAG_OATAILPTR XE_REG(0xdb04)
#define OAG_OATAILPTR_MASK REG_GENMASK(31, 6)
#define OAG_OABUFFER XE_REG(0xdb08)
#define OABUFFER_SIZE_MASK REG_GENMASK(5, 3)
#define OABUFFER_SIZE_128K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 0)
#define OABUFFER_SIZE_256K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 1)
#define OABUFFER_SIZE_512K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 2)
#define OABUFFER_SIZE_1M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 3)
#define OABUFFER_SIZE_2M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 4)
#define OABUFFER_SIZE_4M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 5)
#define OABUFFER_SIZE_8M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 6)
#define OABUFFER_SIZE_16M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 7)
#define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */
#define OAG_OACONTROL XE_REG(0xdaf4)
#define OAG_OACONTROL_OA_CCS_SELECT_MASK REG_GENMASK(18, 16)
#define OAG_OACONTROL_OA_COUNTER_SEL_MASK REG_GENMASK(4, 2)
#define OAG_OACONTROL_OA_COUNTER_ENABLE REG_BIT(0)
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
#define OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL REG_BIT(13)
#define OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL REG_BIT(8)
#define OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL REG_BIT(7)
#define OAG_OA_DEBUG_INCLUDE_CLK_RATIO REG_BIT(6)
#define OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS REG_BIT(5)
#define OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS REG_BIT(1)
#define OAG_OASTATUS XE_REG(0xdafc)
#define OASTATUS_MMIO_TRG_Q_FULL REG_BIT(6)
#define OASTATUS_COUNTER_OVERFLOW REG_BIT(2)
#define OASTATUS_BUFFER_OVERFLOW REG_BIT(1)
#define OASTATUS_REPORT_LOST REG_BIT(0)
#define OAG_MMIOTRIGGER XE_REG(0xdb1c)
/* OAC unit */
#define OAC_OACONTROL XE_REG(0x15114)
/* OAM unit */
#define OAM_HEAD_POINTER_OFFSET (0x1a0)
#define OAM_TAIL_POINTER_OFFSET (0x1a4)
#define OAM_BUFFER_OFFSET (0x1a8)
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
#define OAM_HEAD_POINTER(base) XE_REG((base) + OAM_HEAD_POINTER_OFFSET)
#define OAM_TAIL_POINTER(base) XE_REG((base) + OAM_TAIL_POINTER_OFFSET)
#define OAM_BUFFER(base) XE_REG((base) + OAM_BUFFER_OFFSET)
#define OAM_CONTEXT_CONTROL(base) XE_REG((base) + OAM_CONTEXT_CONTROL_OFFSET)
#define OAM_CONTROL(base) XE_REG((base) + OAM_CONTROL_OFFSET)
#define OAM_DEBUG(base) XE_REG((base) + OAM_DEBUG_OFFSET)
#define OAM_STATUS(base) XE_REG((base) + OAM_STATUS_OFFSET)
#define OAM_MMIO_TRG(base) XE_REG((base) + OAM_MMIO_TRG_OFFSET)
#endif
......@@ -90,6 +90,59 @@ static const struct rtp_test_case cases[] = {
{}
},
},
{
.name = "match-or",
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
.expected_count = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("first"),
XE_RTP_RULES(FUNC(match_yes), OR, FUNC(match_no)),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
},
{ XE_RTP_NAME("middle"),
XE_RTP_RULES(FUNC(match_no), FUNC(match_no), OR,
FUNC(match_yes), OR,
FUNC(match_no)),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
},
{ XE_RTP_NAME("last"),
XE_RTP_RULES(FUNC(match_no), OR, FUNC(match_yes)),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(2)))
},
{ XE_RTP_NAME("no-match"),
XE_RTP_RULES(FUNC(match_no), OR, FUNC(match_no)),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(3)))
},
{}
},
},
{
.name = "match-or-xfail",
.expected_reg = REGULAR_REG1,
.expected_count = 0,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("leading-or"),
XE_RTP_RULES(OR, FUNC(match_yes)),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
},
{ XE_RTP_NAME("trailing-or"),
/*
* First condition is match_no, otherwise the failure
* wouldn't really trigger as RTP stops processing as
* soon as it has a matching set of rules
*/
XE_RTP_RULES(FUNC(match_no), OR),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
},
{ XE_RTP_NAME("no-or-or-yes"),
XE_RTP_RULES(FUNC(match_no), OR, OR, FUNC(match_yes)),
XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(2)))
},
{}
},
},
{
.name = "no-match-no-add-multiple-rules",
.expected_reg = REGULAR_REG1,
......@@ -255,9 +308,14 @@ static void xe_rtp_process_tests(struct kunit *test)
}
KUNIT_EXPECT_EQ(test, count, param->expected_count);
KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
if (count) {
KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
} else {
KUNIT_EXPECT_NULL(test, sr_entry);
}
KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
}
......
......@@ -25,7 +25,7 @@
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
......@@ -378,6 +378,15 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
(xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
caching = ttm_write_combined;
if (bo->flags & XE_BO_FLAG_NEEDS_UC) {
/*
* Valid only for internally-created buffers, for
* which cpu_caching is never initialized.
*/
xe_assert(xe, bo->cpu_caching == 0);
caching = ttm_uncached;
}
err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
if (err) {
kfree(tt);
......
......@@ -53,6 +53,9 @@
#ifdef CONFIG_DEV_COREDUMP
/* 1 hour timeout */
#define XE_COREDUMP_TIMEOUT_JIFFIES (60 * 60 * HZ)
static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
{
return container_of(coredump, struct xe_device, devcoredump);
......@@ -247,8 +250,9 @@ void xe_devcoredump(struct xe_sched_job *job)
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
xe->drm.primary->index);
dev_coredumpm(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
xe_devcoredump_read, xe_devcoredump_free);
dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
xe_devcoredump_read, xe_devcoredump_free,
XE_COREDUMP_TIMEOUT_JIFFIES);
}
static void xe_driver_devcoredump_fini(void *arg)
......
......@@ -44,6 +44,7 @@
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_perf.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_sriov.h"
......@@ -141,6 +142,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_PERF, xe_perf_ioctl, DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
......@@ -484,6 +486,17 @@ static int wait_for_lmem_ready(struct xe_device *xe)
return 0;
}
static void update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
xe->info.enable_display = 0;
xe->info.has_heci_gscfi = 0;
xe->info.skip_guc_pc = 1;
xe->info.skip_pcode = 1;
}
}
/**
* xe_device_probe_early: Device early probe
* @xe: xe device instance
......@@ -504,6 +517,8 @@ int xe_device_probe_early(struct xe_device *xe)
xe_sriov_probe_early(xe);
update_device_info(xe);
err = xe_pcode_probe_early(xe);
if (err)
return err;
......@@ -619,16 +634,16 @@ int xe_device_probe(struct xe_device *xe)
err = xe_device_set_has_flat_ccs(xe);
if (err)
goto err_irq_shutdown;
goto err;
err = xe_vram_probe(xe);
if (err)
goto err_irq_shutdown;
goto err;
for_each_tile(tile, xe, id) {
err = xe_tile_init_noalloc(tile);
if (err)
goto err_irq_shutdown;
goto err;
}
/* Allocate and map stolen after potential VRAM resize */
......@@ -642,7 +657,7 @@ int xe_device_probe(struct xe_device *xe)
*/
err = xe_display_init_noaccel(xe);
if (err)
goto err_irq_shutdown;
goto err;
for_each_gt(gt, xe, id) {
last_gt = id;
......@@ -654,25 +669,37 @@ int xe_device_probe(struct xe_device *xe)
xe_heci_gsc_init(xe);
err = xe_display_init(xe);
err = xe_oa_init(xe);
if (err)
goto err_fini_gt;
err = xe_display_init(xe);
if (err)
goto err_fini_oa;
err = drm_dev_register(&xe->drm, 0);
if (err)
goto err_fini_display;
xe_display_register(xe);
xe_oa_register(xe);
xe_debugfs_register(xe);
xe_hwmon_register(xe);
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_fini_display:
xe_display_driver_remove(xe);
err_fini_oa:
xe_oa_fini(xe);
err_fini_gt:
for_each_gt(gt, xe, id) {
if (id < last_gt)
......@@ -681,8 +708,6 @@ int xe_device_probe(struct xe_device *xe)
break;
}
err_irq_shutdown:
xe_irq_shutdown(xe);
err:
xe_display_fini(xe);
return err;
......@@ -701,16 +726,18 @@ void xe_device_remove(struct xe_device *xe)
struct xe_gt *gt;
u8 id;
xe_oa_unregister(xe);
xe_device_remove_display(xe);
xe_display_fini(xe);
xe_oa_fini(xe);
xe_heci_gsc_fini(xe);
for_each_gt(gt, xe, id)
xe_gt_remove(gt);
xe_irq_shutdown(xe);
}
void xe_device_shutdown(struct xe_device *xe)
......
......@@ -17,6 +17,7 @@
#include "xe_gt_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
......@@ -462,6 +463,9 @@ struct xe_device {
/** @heci_gsc: graphics security controller */
struct xe_heci_gsc heci_gsc;
/** @oa: oa perf counter subsystem */
struct xe_oa oa;
/** @needs_flr_on_fini: requests function-reset on fini */
bool needs_flr_on_fini;
......
......@@ -141,7 +141,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
q->width != args->num_batch_buffer))
return -EINVAL;
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
err = -ECANCELED;
goto err_exec_queue;
}
......@@ -259,9 +259,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
/* Wait behind rebinds */
if (!xe_vm_in_lr_mode(vm)) {
err = drm_sched_job_add_resv_dependencies(&job->drm,
xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL);
err = xe_sched_job_add_deps(job,
xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL);
if (err)
goto err_put_job;
}
......
......@@ -67,7 +67,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->lr.link);
INIT_LIST_HEAD(&q->multi_gt_link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
......@@ -633,8 +633,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(q);
if (xe_vm_in_preempt_fence_mode(vm)) {
q->compute.context = dma_fence_context_alloc(1);
spin_lock_init(&q->compute.lock);
q->lr.context = dma_fence_context_alloc(1);
spin_lock_init(&q->lr.lock);
err = xe_vm_add_compute_exec_queue(vm, q);
if (XE_IOCTL_DBG(xe, err))
......@@ -677,7 +677,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
switch (args->property) {
case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
args->value = q->ops->reset_status(q);
ret = 0;
break;
default:
......
......@@ -70,18 +70,16 @@ struct xe_exec_queue {
*/
struct dma_fence *last_fence;
/* queue no longer allowed to submit */
#define EXEC_QUEUE_FLAG_BANNED BIT(0)
/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
#define EXEC_QUEUE_FLAG_KERNEL BIT(0)
/* kernel engine only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT BIT(2)
#define EXEC_QUEUE_FLAG_PERMANENT BIT(1)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
#define EXEC_QUEUE_FLAG_VM BIT(3)
#define EXEC_QUEUE_FLAG_VM BIT(2)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(4)
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(5)
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
/**
* @flags: flags for this exec queue, should be statically set up aside from ban
......@@ -115,19 +113,19 @@ struct xe_exec_queue {
enum xe_exec_queue_priority priority;
} sched_props;
/** @compute: compute exec queue state */
/** @lr: long-running exec queue state */
struct {
/** @compute.pfence: preemption fence */
/** @lr.pfence: preemption fence */
struct dma_fence *pfence;
/** @compute.context: preemption fence context */
/** @lr.context: preemption fence context */
u64 context;
/** @compute.seqno: preemption fence seqno */
/** @lr.seqno: preemption fence seqno */
u32 seqno;
/** @compute.link: link into VM's list of exec queues */
/** @lr.link: link into VM's list of exec queues */
struct list_head link;
/** @compute.lock: preemption fences lock */
/** @lr.lock: preemption fences lock */
spinlock_t lock;
} compute;
} lr;
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
......
......@@ -10,31 +10,26 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#define XE_FORCE_WAKE_ACK_TIMEOUT_MS 50
static struct xe_gt *
fw_to_gt(struct xe_force_wake *fw)
static const char *str_wake_sleep(bool wake)
{
return fw->gt;
}
static struct xe_device *
fw_to_xe(struct xe_force_wake *fw)
{
return gt_to_xe(fw_to_gt(fw));
return wake ? "wake" : "sleep";
}
static void domain_init(struct xe_force_wake_domain *domain,
enum xe_force_wake_domain_id id,
struct xe_reg reg, struct xe_reg ack, u32 val, u32 mask)
struct xe_reg reg, struct xe_reg ack)
{
domain->id = id;
domain->reg_ctl = reg;
domain->reg_ack = ack;
domain->val = val;
domain->mask = mask;
domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
}
void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
......@@ -51,14 +46,12 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
FORCEWAKE_ACK_GT_MTL,
BIT(0), BIT(16));
FORCEWAKE_ACK_GT_MTL);
} else {
domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
FORCEWAKE_ACK_GT,
BIT(0), BIT(16));
FORCEWAKE_ACK_GT);
}
}
......@@ -73,8 +66,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
FORCEWAKE_ACK_RENDER,
BIT(0), BIT(16));
FORCEWAKE_ACK_RENDER);
for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
if (!(gt->info.engine_mask & BIT(i)))
......@@ -83,8 +75,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
FORCEWAKE_MEDIA_VDBOX(j),
FORCEWAKE_ACK_MEDIA_VDBOX(j),
BIT(0), BIT(16));
FORCEWAKE_ACK_MEDIA_VDBOX(j));
}
for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
......@@ -94,42 +85,63 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
FORCEWAKE_MEDIA_VEBOX(j),
FORCEWAKE_ACK_MEDIA_VEBOX(j),
BIT(0), BIT(16));
FORCEWAKE_ACK_MEDIA_VEBOX(j));
}
if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
XE_FW_DOMAIN_ID_GSC,
FORCEWAKE_GSC,
FORCEWAKE_ACK_GSC,
BIT(0), BIT(16));
FORCEWAKE_ACK_GSC);
}
static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
u32 value;
int ret;
if (IS_SRIOV_VF(gt_to_xe(gt)))
return 0;
ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
domain->id, str_wake_sleep(wake), ERR_PTR(ret),
domain->reg_ack.addr, value);
return ret;
}
static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
xe_mmio_write32(gt, domain->reg_ctl, domain->mask | domain->val);
__domain_ctl(gt, domain, true);
}
static int domain_wake_wait(struct xe_gt *gt,
struct xe_force_wake_domain *domain)
{
return xe_mmio_wait32(gt, domain->reg_ack, domain->val, domain->val,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
NULL, true);
return __domain_wait(gt, domain, true);
}
static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
xe_mmio_write32(gt, domain->reg_ctl, domain->mask);
__domain_ctl(gt, domain, false);
}
static int domain_sleep_wait(struct xe_gt *gt,
struct xe_force_wake_domain *domain)
{
return xe_mmio_wait32(gt, domain->reg_ack, domain->val, 0,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
NULL, true);
return __domain_wait(gt, domain, false);
}
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
......@@ -141,12 +153,11 @@ static int domain_sleep_wait(struct xe_gt *gt,
int xe_force_wake_get(struct xe_force_wake *fw,
enum xe_force_wake_domains domains)
{
struct xe_device *xe = fw_to_xe(fw);
struct xe_gt *gt = fw_to_gt(fw);
struct xe_gt *gt = fw->gt;
struct xe_force_wake_domain *domain;
enum xe_force_wake_domains tmp, woken = 0;
unsigned long flags;
int ret, ret2 = 0;
int ret = 0;
spin_lock_irqsave(&fw->lock, flags);
for_each_fw_domain_masked(domain, domains, fw, tmp) {
......@@ -156,27 +167,22 @@ int xe_force_wake_get(struct xe_force_wake *fw,
}
}
for_each_fw_domain_masked(domain, woken, fw, tmp) {
ret = domain_wake_wait(gt, domain);
ret2 |= ret;
if (ret)
drm_notice(&xe->drm, "Force wake domain (%d) failed to ack wake, ret=%d\n",
domain->id, ret);
ret |= domain_wake_wait(gt, domain);
}
fw->awake_domains |= woken;
spin_unlock_irqrestore(&fw->lock, flags);
return ret2;
return ret;
}
int xe_force_wake_put(struct xe_force_wake *fw,
enum xe_force_wake_domains domains)
{
struct xe_device *xe = fw_to_xe(fw);
struct xe_gt *gt = fw_to_gt(fw);
struct xe_gt *gt = fw->gt;
struct xe_force_wake_domain *domain;
enum xe_force_wake_domains tmp, sleep = 0;
unsigned long flags;
int ret, ret2 = 0;
int ret = 0;
spin_lock_irqsave(&fw->lock, flags);
for_each_fw_domain_masked(domain, domains, fw, tmp) {
......@@ -186,14 +192,10 @@ int xe_force_wake_put(struct xe_force_wake *fw,
}
}
for_each_fw_domain_masked(domain, sleep, fw, tmp) {
ret = domain_sleep_wait(gt, domain);
ret2 |= ret;
if (ret)
drm_notice(&xe->drm, "Force wake domain (%d) failed to ack sleep, ret=%d\n",
domain->id, ret);
ret |= domain_sleep_wait(gt, domain);
}
fw->awake_domains &= ~sleep;
spin_unlock_irqrestore(&fw->lock, flags);
return ret2;
return ret;
}
......@@ -24,14 +24,25 @@ static inline int
xe_force_wake_ref(struct xe_force_wake *fw,
enum xe_force_wake_domains domain)
{
xe_gt_assert(fw->gt, domain);
xe_gt_assert(fw->gt, domain != XE_FORCEWAKE_ALL);
return fw->domains[ffs(domain) - 1].ref;
}
/**
* xe_force_wake_assert_held - asserts domain is awake
* @fw: xe_force_wake structure
* @domain: xe_force_wake_domains apart from XE_FORCEWAKE_ALL
*
* xe_force_wake_assert_held() is designed to confirm a particular
* forcewake domain's wakefulness; it doesn't verify the wakefulness of
* multiple domains. Make sure the caller doesn't input multiple
* domains (XE_FORCEWAKE_ALL) as a parameter.
*/
static inline void
xe_force_wake_assert_held(struct xe_force_wake *fw,
enum xe_force_wake_domains domain)
{
xe_gt_assert(fw->gt, domain != XE_FORCEWAKE_ALL);
xe_gt_assert(fw->gt, fw->awake_domains & domain);
}
......
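A brief usage sketch of the get/assert/put pattern this helper supports (illustrative only; err, gt and val are assumed to be in scope, and SOME_GT_REG is a placeholder register):

err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
        return err;

xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
val = xe_mmio_read32(gt, SOME_GT_REG);          /* placeholder register */

XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));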
......@@ -11,6 +11,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <generated/xe_wa_oob.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
......@@ -23,8 +24,10 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
......@@ -69,7 +72,22 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
return ggms ? SZ_1M << ggms : 0;
}
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
/*
* Wa_22019338487: GMD_ID is an RO register; a dummy write forces the gunit
* to wait for completion of prior GTT writes before letting this through.
* This needs to be done for all GGTT writes originating from the CPU.
*/
lockdep_assert_held(&ggtt->lock);
if ((++ggtt->access_count % 63) == 0) {
xe_mmio_write32(ggtt->tile->media_gt, GMD_ID, 0x0);
ggtt->access_count = 0;
}
}
static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
xe_tile_assert(ggtt->tile, addr < ggtt->size);
......@@ -77,6 +95,12 @@ void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}
static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
xe_ggtt_set_pte(ggtt, addr, pte);
ggtt_update_access_counter(ggtt);
}
static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
......@@ -92,7 +116,7 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
scratch_pte = 0;
while (start < end) {
xe_ggtt_set_pte(ggtt, start, scratch_pte);
ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
start += XE_PAGE_SIZE;
}
}
......@@ -124,10 +148,17 @@ static void primelockdep(struct xe_ggtt *ggtt)
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
.pte_encode_bo = xelp_ggtt_pte_encode_bo,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
/*
......@@ -187,7 +218,8 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->size = GUC_GGTT_TOP;
if (GRAPHICS_VERx100(xe) >= 1270)
ggtt->pt_ops = &xelpg_pt_ops;
ggtt->pt_ops = ggtt->tile->media_gt && XE_WA(ggtt->tile->media_gt, 22019338487) ?
&xelpg_pt_wa_ops : &xelpg_pt_ops;
else
ggtt->pt_ops = &xelp_pt_ops;
......@@ -394,7 +426,7 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
xe_ggtt_set_pte(ggtt, start + offset, pte);
ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
}
}
......@@ -502,7 +534,7 @@ static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node
return;
while (start < end) {
xe_ggtt_set_pte(ggtt, start, pte);
ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
start += XE_PAGE_SIZE;
}
......
......@@ -10,7 +10,6 @@
struct drm_printer;
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
......
......@@ -13,10 +13,6 @@
struct xe_bo;
struct xe_gt;
struct xe_ggtt_pt_ops {
u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
};
struct xe_ggtt {
struct xe_tile *tile;
......@@ -34,6 +30,14 @@ struct xe_ggtt {
const struct xe_ggtt_pt_ops *pt_ops;
struct drm_mm mm;
/** @access_count: counts GGTT writes */
unsigned int access_count;
};
struct xe_ggtt_pt_ops {
u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
#endif
......@@ -22,6 +22,7 @@
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
#include "xe_huc.h"
#include "xe_map.h"
#include "xe_mmio.h"
......@@ -284,6 +285,10 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
return ret;
xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
/* GSC load is done, restore expected GT frequencies */
xe_gt_sanitize_freq(gt);
xe_gt_dbg(gt, "GSC FW async load completed\n");
/* HuC auth failure is not fatal */
......
......@@ -9,6 +9,7 @@
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
......@@ -54,6 +55,7 @@
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
......@@ -678,6 +680,9 @@ static int do_gt_restart(struct xe_gt *gt)
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
/* Restore GT freq to expected values */
xe_gt_sanitize_freq(gt);
return 0;
}
......@@ -801,6 +806,25 @@ int xe_gt_suspend(struct xe_gt *gt)
return err;
}
/**
* xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
* @gt: the GT object
*
* Called after driver init/GSC load completes to restore GT frequencies if we
* limited them for any WAs.
*/
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
int ret = 0;
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) &&
XE_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
}
int xe_gt_resume(struct xe_gt *gt)
{
int err;
......
......@@ -56,6 +56,7 @@ int xe_gt_suspend(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
......
......@@ -12,6 +12,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
{
......@@ -75,7 +76,7 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
void xe_gt_apply_ccs_mode(struct xe_gt *gt)
{
if (!gt->ccs_mode)
if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
return;
__xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
......@@ -110,6 +111,12 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
u32 num_engines, num_slices;
int ret;
if (IS_SRIOV(xe)) {
xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
return -EOPNOTSUPP;
}
ret = kstrtou32(buff, 0, &num_engines);
if (ret)
return ret;
......
......@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
#include <linux/math64.h>
#include "xe_gt_clock.h"
#include "regs/xe_gt_regs.h"
......@@ -79,3 +81,21 @@ int xe_gt_clock_init(struct xe_gt *gt)
gt->info.reference_clock = freq;
return 0;
}
static u64 div_u64_roundup(u64 n, u32 d)
{
return div_u64(n + d - 1, d);
}
/**
* xe_gt_clock_interval_to_ms - Convert sampled GT clock ticks to msec
*
* @gt: the &xe_gt
* @count: count of GT clock ticks
*
* Returns: time in msec
*/
u64 xe_gt_clock_interval_to_ms(struct xe_gt *gt, u64 count)
{
return div_u64_roundup(count * MSEC_PER_SEC, gt->info.reference_clock);
}
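A worked example, assuming (purely for illustration) a 19.2 MHz reference clock:

/* 38,400,000 ticks * 1000 / 19,200,000 Hz = 2000 ms (rounded up) */
u64 ms = xe_gt_clock_interval_to_ms(gt, 38400000ull);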
......@@ -11,5 +11,6 @@
struct xe_gt;
int xe_gt_clock_init(struct xe_gt *gt);
u64 xe_gt_clock_interval_to_ms(struct xe_gt *gt, u64 count);
#endif
......@@ -15,6 +15,7 @@
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
/**
* DOC: Xe GT Idle
......@@ -100,6 +101,9 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
u32 pg_enable;
int i, j;
if (IS_SRIOV_VF(xe))
return;
/* Disable CPG for PVC */
if (xe->info.platform == XE_PVC)
return;
......@@ -130,6 +134,9 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
void xe_gt_idle_disable_pg(struct xe_gt *gt)
{
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
xe_device_assert_mem_access(gt_to_xe(gt));
XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
......@@ -214,6 +221,9 @@ int xe_gt_idle_init(struct xe_gt_idle *gtidle)
struct kobject *kobj;
int err;
if (IS_SRIOV_VF(xe))
return 0;
kobj = kobject_create_and_add("gtidle", gt->sysfs);
if (!kobj)
return -ENOMEM;
......@@ -246,6 +256,9 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
xe_device_assert_mem_access(gt_to_xe(gt));
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
/* Units of 1280 ns for a total of 5s */
xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA);
/* Enable RC6 */
......@@ -258,6 +271,9 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt)
xe_device_assert_mem_access(gt_to_xe(gt));
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
xe_mmio_write32(gt, RC_CONTROL, 0);
xe_mmio_write32(gt, RC_STATE, 0);
}
......@@ -19,7 +19,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_trace.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
struct pagefault {
......@@ -125,126 +125,108 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
return 0;
}
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
static int handle_vma_pagefault(struct xe_tile *tile, struct pagefault *pf,
struct xe_vma *vma)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_vm *vm = xe_vma_vm(vma);
struct drm_exec exec;
struct xe_vm *vm;
struct xe_vma *vma = NULL;
struct dma_fence *fence;
bool write_locked;
int ret = 0;
ktime_t end = 0;
int err;
bool atomic;
/* SW isn't expected to handle TRTT faults */
if (pf->trva_fault)
return -EFAULT;
/* ASID to VM */
mutex_lock(&xe->usm.lock);
vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
if (vm && xe_vm_in_fault_mode(vm))
xe_vm_get(vm);
else
vm = NULL;
mutex_unlock(&xe->usm.lock);
if (!vm)
return -EINVAL;
retry_userptr:
/*
* TODO: Avoid exclusive lock if VM doesn't have userptrs, or
* start out read-locked?
*/
down_write(&vm->lock);
write_locked = true;
vma = lookup_vma(vm, pf->page_addr);
if (!vma) {
ret = -EINVAL;
goto unlock_vm;
}
if (!xe_vma_is_userptr(vma) ||
!xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
downgrade_write(&vm->lock);
write_locked = false;
}
trace_xe_vma_pagefault(vma);
atomic = access_is_atomic(pf->access_type);
/* Check if VMA is valid */
if (vma_is_valid(tile, vma) && !atomic)
goto unlock_vm;
/* TODO: Validate fault */
return 0;
if (xe_vma_is_userptr(vma) && write_locked) {
retry_userptr:
if (xe_vma_is_userptr(vma) &&
xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
struct xe_userptr_vma *uvma = to_userptr_vma(vma);
spin_lock(&vm->userptr.invalidated_lock);
list_del_init(&uvma->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
ret = xe_vma_userptr_pin_pages(uvma);
if (ret)
goto unlock_vm;
downgrade_write(&vm->lock);
write_locked = false;
err = xe_vma_userptr_pin_pages(uvma);
if (err)
return err;
}
/* Lock VM and BOs dma-resv */
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
ret = xe_pf_begin(&exec, vma, atomic, tile->id);
err = xe_pf_begin(&exec, vma, atomic, tile->id);
drm_exec_retry_on_contention(&exec);
if (ret)
if (xe_vm_validate_should_retry(&exec, err, &end))
err = -EAGAIN;
if (err)
goto unlock_dma_resv;
/* Bind VMA only to the GT that has faulted */
trace_xe_vma_pf_bind(vma);
fence = xe_vma_rebind(vm, vma, BIT(tile->id));
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
err = PTR_ERR(fence);
if (xe_vm_validate_should_retry(&exec, err, &end))
err = -EAGAIN;
goto unlock_dma_resv;
}
}
/*
* XXX: Should we drop the lock before waiting? This only helps if doing
* GPU binds which is currently only done if we have to wait for more
* than 10ms on a move.
*/
dma_fence_wait(fence, false);
dma_fence_put(fence);
if (xe_vma_is_userptr(vma))
ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
unlock_vm:
if (!ret)
vm->usm.last_fault_vma = vma;
if (write_locked)
up_write(&vm->lock);
else
up_read(&vm->lock);
if (ret == -EAGAIN)
if (err == -EAGAIN)
goto retry_userptr;
if (!ret) {
ret = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
if (ret >= 0)
ret = 0;
return err;
}
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_vm *vm;
struct xe_vma *vma = NULL;
int err;
/* SW isn't expected to handle TRTT faults */
if (pf->trva_fault)
return -EFAULT;
/* ASID to VM */
mutex_lock(&xe->usm.lock);
vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
if (vm && xe_vm_in_fault_mode(vm))
xe_vm_get(vm);
else
vm = NULL;
mutex_unlock(&xe->usm.lock);
if (!vm)
return -EINVAL;
/*
* TODO: Change to read lock? Using write lock for simplicity.
*/
down_write(&vm->lock);
vma = lookup_vma(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
goto unlock_vm;
}
err = handle_vma_pagefault(tile, pf, vma);
unlock_vm:
if (!err)
vm->usm.last_fault_vma = vma;
up_write(&vm->lock);
xe_vm_put(vm);
return ret;
return err;
}
static int send_pagefault_reply(struct xe_guc *guc,
......
......@@ -1290,6 +1290,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
for_each_tile(tile, xe, tid) {
lmtt = &tile->sriov.pf.lmtt;
xe_lmtt_drop_pages(lmtt, vfid);
......@@ -1308,6 +1311,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
total = 0;
for_each_tile(tile, xe, tid)
total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
......@@ -1353,6 +1359,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
......@@ -1371,6 +1378,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, IS_DGFX(xe));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
......@@ -1838,11 +1846,14 @@ u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
if (!xe_gt_is_media_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
pf_release_vf_config_lmem(gt, config);
pf_update_vf_lmtt(gt_to_xe(gt), vfid);
if (IS_DGFX(xe)) {
pf_release_vf_config_lmem(gt, config);
pf_update_vf_lmtt(xe, vfid);
}
}
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
......
......@@ -129,6 +129,27 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
return pf_send_vf_stop(gt, vfid);
}
/**
* xe_gt_sriov_pf_control_trigger_flr - Start a VF FLR sequence.
* @gt: the &xe_gt
* @vfid: the VF identifier
*
* This function is for PF only.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
int err;
/* XXX pf_send_vf_flr_start() expects ct->lock */
mutex_lock(&gt->uc.guc.ct.lock);
err = pf_send_vf_flr_start(gt, vfid);
mutex_unlock(&gt->uc.guc.ct.lock);
return err;
}
/**
* DOC: The VF FLR Flow with GuC
*
......
......@@ -14,6 +14,7 @@ struct xe_gt;
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid);
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len);
......
......@@ -13,6 +13,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"
......@@ -22,6 +23,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
struct xe_gt *gt = container_of(work, struct xe_gt,
tlb_invalidation.fence_tdr.work);
struct xe_device *xe = gt_to_xe(gt);
struct xe_gt_tlb_invalidation_fence *fence, *next;
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
......@@ -33,7 +35,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
if (msecs_to_jiffies(since_inval_ms) < TLB_TIMEOUT)
break;
trace_xe_gt_tlb_invalidation_fence_timeout(fence);
trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
fence->seqno, gt->tlb_invalidation.seqno_recv);
......@@ -71,18 +73,18 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
}
static void
__invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
trace_xe_gt_tlb_invalidation_fence_signal(fence);
trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
dma_fence_signal(&fence->base);
dma_fence_put(&fence->base);
}
static void
invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
list_del(&fence->link);
__invalidation_fence_signal(fence);
__invalidation_fence_signal(xe, fence);
}
/**
......@@ -121,7 +123,7 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link)
invalidation_fence_signal(fence);
invalidation_fence_signal(gt_to_xe(gt), fence);
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
mutex_unlock(&gt->uc.guc.ct.lock);
}
......@@ -144,6 +146,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
u32 *action, int len)
{
struct xe_gt *gt = guc_to_gt(guc);
struct xe_device *xe = gt_to_xe(gt);
int seqno;
int ret;
......@@ -157,7 +160,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
seqno = gt->tlb_invalidation.seqno;
if (fence) {
fence->seqno = seqno;
trace_xe_gt_tlb_invalidation_fence_send(fence);
trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
}
action[1] = seqno;
ret = xe_guc_ct_send_locked(&guc->ct, action, len,
......@@ -171,7 +174,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
* we can just go ahead and signal the fence here.
*/
if (tlb_invalidation_seqno_past(gt, seqno)) {
__invalidation_fence_signal(fence);
__invalidation_fence_signal(xe, fence);
} else {
fence->invalidation_time = ktime_get();
list_add_tail(&fence->link,
......@@ -184,7 +187,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
}
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
} else if (ret < 0 && fence) {
__invalidation_fence_signal(fence);
__invalidation_fence_signal(xe, fence);
}
if (!ret) {
gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
......@@ -247,6 +250,9 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
xe_gt_tlb_invalidation_wait(gt, seqno);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
if (IS_SRIOV_VF(xe))
return 0;
xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
......@@ -294,7 +300,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist) {
if (fence)
__invalidation_fence_signal(fence);
__invalidation_fence_signal(xe, fence);
return 0;
}
......@@ -432,6 +438,7 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_gt_tlb_invalidation_fence *fence, *next;
unsigned long flags;
......@@ -468,12 +475,12 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
trace_xe_gt_tlb_invalidation_fence_recv(fence);
trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);
if (!tlb_invalidation_seqno_past(gt, fence->seqno))
break;
invalidation_fence_signal(fence);
invalidation_fence_signal(xe, fence);
}
if (!list_empty(&gt->tlb_invalidation.pending_fences))
......
......@@ -12,6 +12,7 @@
#include "xe_gt_sriov_vf_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_oa.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
#include "xe_uc_types.h"
......@@ -387,6 +388,9 @@ struct xe_gt {
*/
u8 instances_per_class[XE_ENGINE_CLASS_MAX];
} user_engines;
/** @oa: oa perf counter subsystem per gt info */
struct xe_oa_gt oa;
};
#endif
......@@ -476,6 +476,9 @@ static void guc_prepare_xfer(struct xe_guc *guc)
xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
/* Make sure GuC receives ARAT interrupts */
xe_mmio_rmw32(gt, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}
/*
......@@ -699,6 +702,9 @@ static int __xe_guc_upload(struct xe_guc *guc)
{
int ret;
/* Raise GT freq to speed up HuC/GuC load */
xe_guc_pc_raise_unslice(&guc->pc);
guc_write_params(guc);
guc_prepare_xfer(guc);
......@@ -784,7 +790,6 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
xe_guc_ads_populate_minimal(&guc->ads);
/* Raise GT freq to speed up HuC/GuC load */
xe_guc_pc_init_early(&guc->pc);
ret = __xe_guc_upload(guc);
......@@ -854,8 +859,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_device *xe = guc_to_xe(guc);
int err;
guc_enable_irq(guc);
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
......@@ -863,11 +866,10 @@ int xe_guc_enable_communication(struct xe_guc *guc)
err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
if (err)
return err;
} else {
guc_enable_irq(guc);
}
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
ARAT_EXPIRED_INTRMSK, 0);
err = xe_guc_ct_enable(&guc->ct);
if (err)
return err;
......@@ -1094,7 +1096,7 @@ void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
guc->submission_state.enabled = false;
}
......@@ -1111,7 +1113,13 @@ void xe_guc_reset_wait(struct xe_guc *guc)
void xe_guc_stop_prepare(struct xe_guc *guc)
{
XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
if (!IS_SRIOV_VF(guc_to_xe(guc))) {
int err;
err = xe_guc_pc_stop(&guc->pc);
xe_gt_WARN(guc_to_gt(guc), err, "Failed to stop GuC PC: %pe\n",
ERR_PTR(err));
}
}
void xe_guc_stop(struct xe_guc *guc)
......@@ -1123,10 +1131,13 @@ void xe_guc_stop(struct xe_guc *guc)
int xe_guc_start(struct xe_guc *guc)
{
int ret;
if (!IS_SRIOV_VF(guc_to_xe(guc))) {
int err;
ret = xe_guc_pc_start(&guc->pc);
XE_WARN_ON(ret);
err = xe_guc_pc_start(&guc->pc);
xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n",
ERR_PTR(err));
}
return xe_guc_submit_start(guc);
}
......
......@@ -29,7 +29,7 @@
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace.h"
#include "xe_trace_guc.h"
/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
......@@ -126,7 +126,9 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
xa_destroy(&ct->fence_lookup);
}
static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);
static void primelockdep(struct xe_guc_ct *ct)
{
......@@ -155,6 +157,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
......@@ -321,6 +324,42 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
mutex_unlock(&ct->lock);
}
static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
{
return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
}
static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
{
if (!ct_needs_safe_mode(ct))
return false;
queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
return true;
}
static void safe_mode_worker_func(struct work_struct *w)
{
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
receive_g2h(ct);
if (!ct_restart_safe_mode_worker(ct))
xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
}
static void ct_enter_safe_mode(struct xe_guc_ct *ct)
{
if (ct_restart_safe_mode_worker(ct))
xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
}
static void ct_exit_safe_mode(struct xe_guc_ct *ct)
{
if (cancel_delayed_work_sync(&ct->safe_mode_worker))
xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
......@@ -350,6 +389,9 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
wake_up_all(&ct->wq);
xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
return 0;
err_out:
......@@ -373,6 +415,7 @@ static void stop_g2h_handler(struct xe_guc_ct *ct)
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
ct_exit_safe_mode(ct);
stop_g2h_handler(ct);
}
......@@ -528,7 +571,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
/* Update descriptor */
desc_write(xe, h2g, tail, h2g->info.tail);
trace_xe_guc_ctb_h2g(gt->info.id, *(action - 1), full_len,
trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
desc_read(xe, h2g, head), h2g->info.tail);
return 0;
......@@ -641,6 +684,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
struct drm_printer p = xe_gt_info_printer(gt);
unsigned int sleep_period_ms = 1;
......@@ -668,7 +712,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
if (sleep_period_ms == 1024)
goto broken;
trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
h2g->info.size,
h2g->info.space,
len + GUC_CTB_HDR_LEN);
......@@ -680,7 +724,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
struct xe_device *xe = ct_to_xe(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
desc_read(xe, g2h, tail),
g2h->info.size,
g2h->info.space,
......@@ -833,12 +877,12 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
}
if (g2h_fence.retry) {
xe_gt_warn(gt, "H2G retry, action 0x%04x, reason %u",
action[0], g2h_fence.reason);
xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
action[0], g2h_fence.reason);
goto retry;
}
if (g2h_fence.fail) {
xe_gt_err(gt, "H2G send failed, action 0x%04x, error %d, hint %u",
xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
action[0], g2h_fence.error, g2h_fence.hint);
ret = -EIO;
}
......@@ -1170,8 +1214,8 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
g2h->info.head = (head + avail) % g2h->info.size;
desc_write(xe, g2h, head, g2h->info.head);
trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
g2h->info.head, tail);
trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
action, len, g2h->info.head, tail);
return len;
}
......@@ -1260,9 +1304,8 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
return 1;
}
static void g2h_worker_func(struct work_struct *w)
static void receive_g2h(struct xe_guc_ct *ct)
{
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
struct xe_gt *gt = ct_to_gt(ct);
bool ongoing;
int ret;
......@@ -1311,6 +1354,13 @@ static void g2h_worker_func(struct work_struct *w)
xe_pm_runtime_put(ct_to_xe(ct));
}
static void g2h_worker_func(struct work_struct *w)
{
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
receive_g2h(ct);
}
static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
struct guc_ctb_snapshot *snapshot,
bool atomic)
......
......@@ -110,6 +110,8 @@ struct xe_guc_ct {
u32 g2h_outstanding;
/** @g2h_worker: worker to process G2H messages */
struct work_struct g2h_worker;
/** @safe_mode_worker: worker to check G2H messages with IRQ disabled */
struct delayed_work safe_mode_worker;
/** @state: CT state */
enum xe_guc_ct_state state;
/** @fence_seqno: G2H fence seqno - 16 bits used by CT */
......
......@@ -8,8 +8,8 @@
#include <linux/delay.h>
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
......@@ -18,12 +18,15 @@
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_wa.h"
#define MCHBAR_MIRROR_BASE_SNB 0x140000
......@@ -41,6 +44,8 @@
#define GT_FREQUENCY_MULTIPLIER 50
#define GT_FREQUENCY_SCALER 3
#define LNL_MERT_FREQ_CAP 800
/**
* DOC: GuC Power Conservation (PC)
*
......@@ -67,29 +72,27 @@
*
*/
static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
return container_of(pc, struct xe_guc, pc);
}
static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
struct xe_guc *guc = pc_to_guc(pc);
struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
return &pc_to_guc(pc)->ct;
}
return gt_to_xe(gt);
static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
return guc_to_gt(pc_to_guc(pc));
}
static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
return container_of(pc, struct xe_gt, uc.guc.pc);
return guc_to_xe(pc_to_guc(pc));
}
static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
return &pc->bo->vmap;
}
......@@ -130,32 +133,33 @@ static int wait_for_pc_state(struct xe_guc_pc *pc,
static int pc_action_reset(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
struct xe_guc_ct *ct = pc_to_ct(pc);
u32 action[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_RESET, 2),
xe_bo_ggtt_addr(pc->bo),
0,
};
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
if (ret)
drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));
xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
ERR_PTR(ret));
return ret;
}
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
struct xe_guc_ct *ct = pc_to_ct(pc);
u32 action[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
xe_bo_ggtt_addr(pc->bo),
0,
};
int ret;
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
return -EAGAIN;
......@@ -163,47 +167,68 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
/* Blocking here to ensure the results are ready before reading them */
ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
if (ret)
drm_err(&pc_to_xe(pc)->drm,
"GuC PC query task state failed: %pe", ERR_PTR(ret));
xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
ERR_PTR(ret));
return ret;
}
static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
struct xe_guc_ct *ct = pc_to_ct(pc);
u32 action[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
id,
value,
};
int ret;
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
if (ret)
drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
ERR_PTR(ret));
xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
id, value, ERR_PTR(ret));
return ret;
}
static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
u32 action[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
id,
};
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
if (ret)
xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
ERR_PTR(ret));
return ret;
}
static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
struct xe_guc_ct *ct = pc_to_ct(pc);
u32 action[] = {
XE_GUC_ACTION_SETUP_PC_GUCRC,
GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
mode,
};
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
if (ret)
drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
ERR_PTR(ret));
xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
mode, ERR_PTR(ret));
return ret;
}
......@@ -674,18 +699,39 @@ static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
tgl_init_fused_rp_values(pc);
}
static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
if (XE_WA(gt, 22019338487))
return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
else
return pc->rp0_freq;
}
/**
* xe_guc_pc_init_early - Initialize RPx values and request a higher GT
* xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
* frequency to allow faster GuC load times
* @pc: Xe_GuC_PC instance
*/
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}
/**
* xe_guc_pc_init_early - Initialize RPx values
* @pc: Xe_GuC_PC instance
*/
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
pc_init_fused_rp_values(pc);
pc_set_cur_freq(pc, pc->rp0_freq);
}
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
......@@ -741,6 +787,53 @@ static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
return ret;
}
static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
int ret = 0;
if (XE_WA(pc_to_gt(pc), 22019338487)) {
/*
* Get updated min/max and stash them.
*/
ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
if (!ret)
ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
if (ret)
return ret;
/*
* Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
*/
mutex_lock(&pc->freq_lock);
ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
if (!ret)
ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
mutex_unlock(&pc->freq_lock);
}
return ret;
}
/**
* xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
* @pc: The GuC PC
*
* Returns: 0 on success,
* error code on failure
*/
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
int ret = 0;
mutex_lock(&pc->freq_lock);
ret = pc_set_max_freq(pc, pc->stashed_max_freq);
if (!ret)
ret = pc_set_min_freq(pc, pc->stashed_min_freq);
mutex_unlock(&pc->freq_lock);
return ret;
}
/**
* xe_guc_pc_gucrc_disable - Disable GuC RC
* @pc: Xe_GuC_PC instance
......@@ -758,7 +851,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (xe->info.skip_guc_pc)
return 0;
ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
if (ret)
return ret;
......@@ -773,6 +866,41 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
return 0;
}
/**
* xe_guc_pc_override_gucrc_mode - override GUCRC mode
* @pc: Xe_GuC_PC instance
* @mode: new value of the mode.
*
* Return: 0 on success, negative error code on error
*/
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
int ret;
xe_pm_runtime_get(pc_to_xe(pc));
ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
xe_pm_runtime_put(pc_to_xe(pc));
return ret;
}
/**
* xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
* @pc: Xe_GuC_PC instance
*
* Return: 0 on success, negative error code on error
*/
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
int ret;
xe_pm_runtime_get(pc_to_xe(pc));
ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
xe_pm_runtime_put(pc_to_xe(pc));
return ret;
}
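A minimal caller sketch (not part of this patch): the expected pairing is to override the GuC RC mode for the duration of some work and then drop the override so SLPC resumes control. The wrapper name below is hypothetical; real callers such as the OA stream code pass an enum slpc_gucrc_mode value from the SLPC ABI.

/* Hypothetical pairing sketch, not from this series. */
static int with_gucrc_override(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	ret = xe_guc_pc_override_gucrc_mode(pc, mode);	/* takes a runtime PM ref internally */
	if (ret)
		return ret;

	/* ... work that depends on the overridden RC mode ... */

	return xe_guc_pc_unset_gucrc_mode(pc);		/* restore SLPC-managed RC mode */
}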
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
......@@ -846,7 +974,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
xe_gt_err(gt, "GuC PC Start failed\n");
ret = -EIO;
goto out;
}
......@@ -855,13 +983,17 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
if (ret)
goto out;
ret = pc_set_mert_freq_cap(pc);
if (ret)
goto out;
if (xe->info.platform == XE_PVC) {
xe_guc_pc_gucrc_disable(pc);
ret = 0;
goto out;
}
ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);
ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
......@@ -903,6 +1035,10 @@ static void xe_guc_pc_fini_hw(void *arg)
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
XE_WARN_ON(xe_guc_pc_stop(pc));
/* Bind requested freq to mert_freq_cap before unload */
pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}
......
......@@ -9,11 +9,14 @@
#include <linux/types.h>
struct xe_guc_pc;
enum slpc_gucrc_mode;
int xe_guc_pc_init(struct xe_guc_pc *pc);
int xe_guc_pc_start(struct xe_guc_pc *pc);
int xe_guc_pc_stop(struct xe_guc_pc *pc);
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc);
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode);
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
......@@ -29,5 +32,7 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc);
void xe_guc_pc_init_early(struct xe_guc_pc *pc);
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc);
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc);
#endif /* _XE_GUC_PC_H_ */
......@@ -25,6 +25,10 @@ struct xe_guc_pc {
u32 user_requested_min;
/** @user_requested_max: Stash the maximum requested freq by user */
u32 user_requested_max;
/** @stashed_min_freq: Stash the current minimum freq */
u32 stashed_min_freq;
/** @stashed_max_freq: Stash the current maximum freq */
u32 stashed_max_freq;
/** @freq_lock: Let's protect the frequencies */
struct mutex freq_lock;
/** @freq_ready: Only handle freq changes, if they are really ready */
......
......@@ -18,9 +18,11 @@
#include "xe_force_wake.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
static struct xe_gt *
......@@ -92,6 +94,9 @@ int xe_huc_init(struct xe_huc *huc)
if (!xe_uc_fw_is_enabled(&huc->fw))
return 0;
if (IS_SRIOV_VF(xe))
return 0;
if (huc->fw.has_gsc_headers) {
ret = huc_alloc_gsc_pkt(huc);
if (ret)
......@@ -103,7 +108,7 @@ int xe_huc_init(struct xe_huc *huc)
return 0;
out:
drm_err(&xe->drm, "HuC init failed with %d", ret);
xe_gt_err(gt, "HuC: initialization failed: %pe\n", ERR_PTR(ret));
return ret;
}
......@@ -191,14 +196,14 @@ static int huc_auth_via_gsccs(struct xe_huc *huc)
} while (--retry && err == -EBUSY);
if (err) {
drm_err(&xe->drm, "failed to submit GSC request to auth: %d\n", err);
xe_gt_err(gt, "HuC: failed to submit GSC request to auth: %pe\n", ERR_PTR(err));
return err;
}
err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
sizeof(struct pxp43_huc_auth_out), &rd_offset);
if (err) {
drm_err(&xe->drm, "HuC: invalid GSC reply for auth (err=%d)\n", err);
xe_gt_err(gt, "HuC: invalid GSC reply for auth: %pe\n", ERR_PTR(err));
return err;
}
......@@ -209,7 +214,7 @@ static int huc_auth_via_gsccs(struct xe_huc *huc)
*/
out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
drm_err(&xe->drm, "auth failed with GSC error = 0x%x\n", out_status);
xe_gt_err(gt, "HuC: authentication failed with GSC error = %#x\n", out_status);
return -EIO;
}
......@@ -238,7 +243,6 @@ bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
struct xe_device *xe = huc_to_xe(huc);
struct xe_gt *gt = huc_to_gt(huc);
struct xe_guc *guc = huc_to_guc(huc);
int ret;
......@@ -268,26 +272,26 @@ int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
return -EINVAL;
}
if (ret) {
drm_err(&xe->drm, "Failed to trigger HuC auth via %s: %d\n",
huc_auth_modes[type].name, ret);
xe_gt_err(gt, "HuC: failed to trigger auth via %s: %pe\n",
huc_auth_modes[type].name, ERR_PTR(ret));
goto fail;
}
ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
huc_auth_modes[type].val, 100000, NULL, false);
if (ret) {
drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
goto fail;
}
xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
drm_dbg(&xe->drm, "HuC authenticated via %s\n", huc_auth_modes[type].name);
xe_gt_dbg(gt, "HuC: authenticated via %s\n", huc_auth_modes[type].name);
return 0;
fail:
drm_err(&xe->drm, "HuC: Auth via %s failed: %d\n",
huc_auth_modes[type].name, ret);
xe_gt_err(gt, "HuC: authentication via %s failed: %pe\n",
huc_auth_modes[type].name, ERR_PTR(ret));
xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
return ret;
......@@ -295,9 +299,7 @@ int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
void xe_huc_sanitize(struct xe_huc *huc)
{
if (!xe_uc_fw_is_loadable(&huc->fw))
return;
xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
xe_uc_fw_sanitize(&huc->fw);
}
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
......
......@@ -26,6 +26,7 @@
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
......@@ -546,7 +547,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
if (hwe->class == XE_ENGINE_CLASS_OTHER)
hwe->irq_handler = xe_gsc_hwe_irq_handler;
xe_hw_engine_enable_ring(hwe);
if (!IS_SRIOV_VF(xe))
xe_hw_engine_enable_ring(hwe);
}
/* We reserve the highest BCS instance for USM */
......
......@@ -148,6 +148,8 @@ struct xe_hw_engine {
enum xe_hw_engine_id engine_id;
/** @eclass: pointer to per hw engine class interface */
struct xe_hw_engine_class_intf *eclass;
/** @oa_unit: oa unit for this hw engine */
struct xe_oa_unit *oa_unit;
};
/**
......
......@@ -134,6 +134,9 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
u32 gsc_mask = 0;
u32 heci_mask = 0;
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
return;
if (xe_device_uc_enabled(xe)) {
irqs = GT_RENDER_USER_INTERRUPT |
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
......@@ -735,11 +738,6 @@ int xe_irq_install(struct xe_device *xe)
return err;
}
void xe_irq_shutdown(struct xe_device *xe)
{
irq_uninstall(xe);
}
void xe_irq_suspend(struct xe_device *xe)
{
int irq = to_pci_dev(xe->drm.dev)->irq;
......
......@@ -11,7 +11,6 @@ struct xe_tile;
struct xe_gt;
int xe_irq_install(struct xe_device *xe);
void xe_irq_shutdown(struct xe_device *xe);
void xe_irq_suspend(struct xe_device *xe);
void xe_irq_resume(struct xe_device *xe);
void xe_irq_enable_hwe(struct xe_gt *gt);
......
......@@ -49,6 +49,8 @@ struct xe_lrc_snapshot {
} tail;
u32 start_seqno;
u32 seqno;
u32 ctx_timestamp;
u32 ctx_job_timestamp;
};
static struct xe_device *
......@@ -649,12 +651,19 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
/* Make the magic macros work */
#define __xe_lrc_pphwsp_offset xe_lrc_pphwsp_offset
#define __xe_lrc_regs_offset xe_lrc_regs_offset
#define LRC_SEQNO_PPHWSP_OFFSET 512
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
#define LRC_PPHWSP_SIZE SZ_4K
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
return xe_lrc_pphwsp_offset(lrc) + LRC_PPHWSP_SIZE;
}
static size_t lrc_reg_size(struct xe_device *xe)
{
if (GRAPHICS_VERx100(xe) >= 1250)
......@@ -680,15 +689,21 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_START_SEQNO_PPHWSP_OFFSET;
}
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
/* The ctx job timestamp is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
{
/* The parallel is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
}
static inline u32 __xe_lrc_regs_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{
return xe_lrc_pphwsp_offset(lrc) + LRC_PPHWSP_SIZE;
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32);
}
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
......@@ -716,11 +731,65 @@ DECL_MAP_ADDR_HELPERS(pphwsp)
DECL_MAP_ADDR_HELPERS(seqno)
DECL_MAP_ADDR_HELPERS(regs)
DECL_MAP_ADDR_HELPERS(start_seqno)
DECL_MAP_ADDR_HELPERS(ctx_job_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp)
DECL_MAP_ADDR_HELPERS(parallel)
DECL_MAP_ADDR_HELPERS(indirect_ring)
#undef DECL_MAP_ADDR_HELPERS
/**
* xe_lrc_ctx_timestamp_ggtt_addr() - Get ctx timestamp GGTT address
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp GGTT address
*/
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
{
return __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
}
/**
* xe_lrc_ctx_timestamp() - Read ctx timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp value
*/
u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
map = __xe_lrc_ctx_timestamp_map(lrc);
return xe_map_read32(xe, &map);
}
/**
* xe_lrc_ctx_job_timestamp_ggtt_addr() - Get ctx job timestamp GGTT address
* @lrc: Pointer to the lrc.
*
* Returns: ctx job timestamp GGTT address
*/
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc)
{
return __xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
}
/**
* xe_lrc_ctx_job_timestamp() - Read ctx job timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx job timestamp value
*/
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
map = __xe_lrc_ctx_job_timestamp_map(lrc);
return xe_map_read32(xe, &map);
}
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc)
{
return __xe_lrc_pphwsp_ggtt_addr(lrc);
......@@ -1576,6 +1645,8 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
snapshot->lrc_snapshot = NULL;
snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
return snapshot;
}
......@@ -1624,6 +1695,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
snapshot->tail.internal, snapshot->tail.memory);
drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
drm_printf(p, "\tTimestamp: 0x%08x\n", snapshot->ctx_timestamp);
drm_printf(p, "\tJob Timestamp: 0x%08x\n", snapshot->ctx_job_timestamp);
if (!snapshot->lrc_snapshot)
return;
......@@ -1659,11 +1732,21 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
kfree(snapshot);
}
/**
* xe_lrc_update_timestamp() - Update ctx timestamp
* @lrc: Pointer to the lrc.
* @old_ts: Old timestamp value
*
* Populate @old_ts with the current saved ctx timestamp, read the new ctx
* timestamp and update the saved value.
*
* Returns: New ctx timestamp value
*/
u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
{
*old_ts = lrc->ctx_timestamp;
lrc->ctx_timestamp = xe_lrc_read_ctx_reg(lrc, CTX_TIMESTAMP);
lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
return lrc->ctx_timestamp;
}
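As a usage illustration (an assumption, not code from this patch), a caller that owns an xe_lrc can derive how many timestamp ticks the context accumulated since the previous sample from the pair of values the helper exposes:

/* Hypothetical caller sketch: ticks elapsed since the last sample. */
static u32 lrc_ticks_since_last_sample(struct xe_lrc *lrc)
{
	u32 old_ts, new_ts;

	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);

	/* CTX_TIMESTAMP is a 32-bit counter; unsigned arithmetic handles wrap. */
	return new_ts - old_ts;
}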
......@@ -52,6 +52,7 @@ static inline void xe_lrc_put(struct xe_lrc *lrc)
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
u32 xe_lrc_regs_offset(struct xe_lrc *lrc);
void xe_lrc_set_ring_tail(struct xe_lrc *lrc, u32 tail);
u32 xe_lrc_ring_tail(struct xe_lrc *lrc);
......@@ -94,6 +95,11 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot);
void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p);
void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
/**
* xe_lrc_update_timestamp - readout LRC timestamp and update cached value
* @lrc: logical ring context for this exec queue
......
......@@ -32,7 +32,7 @@
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
/**
......@@ -647,12 +647,6 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
bb->cs[bb->len++] = upper_32_bits(src_ofs);
}
static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
enum dma_resv_usage usage)
{
return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
}
static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
......@@ -849,11 +843,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, flush_flags);
if (!fence) {
err = job_add_deps(job, src_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
if (!err && src_bo != dst_bo)
err = job_add_deps(job, dst_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
if (err)
goto err_job;
}
......@@ -1091,8 +1085,8 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
* fences, which are always tracked as
* DMA_RESV_USAGE_KERNEL.
*/
err = job_add_deps(job, bo->ttm.base.resv,
DMA_RESV_USAGE_KERNEL);
err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
DMA_RESV_USAGE_KERNEL);
if (err)
goto err_job;
}
......@@ -1417,8 +1411,8 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
/* Wait on BO move */
if (bo) {
err = job_add_deps(job, bo->ttm.base.resv,
DMA_RESV_USAGE_KERNEL);
err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
DMA_RESV_USAGE_KERNEL);
if (err)
goto err_job;
}
......@@ -1428,8 +1422,8 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
* trigger preempts before moving forward
*/
if (first_munmap_rebind) {
err = job_add_deps(job, xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP);
err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP);
if (err)
goto err_job;
}
......
......@@ -21,6 +21,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"
static void tiles_fini(void *arg)
{
......@@ -124,16 +125,24 @@ u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
u8 val;
return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
return val;
}
u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
u16 val;
val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
return val;
}
void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
......@@ -141,6 +150,7 @@ void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
struct xe_tile *tile = gt_to_tile(gt);
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
......@@ -148,11 +158,16 @@ u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
u32 val;
if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
return xe_gt_sriov_vf_read32(gt, reg);
val = xe_gt_sriov_vf_read32(gt, reg);
else
val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
return val;
}
u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
......
......@@ -11,6 +11,7 @@
#include "xe_drv.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_perf.h"
#include "xe_sched_job.h"
struct xe_modparam xe_modparam = {
......@@ -78,6 +79,10 @@ static const struct init_funcs init_funcs[] = {
.init = xe_register_pci_driver,
.exit = xe_unregister_pci_driver,
},
{
.init = xe_perf_sysctl_register,
.exit = xe_perf_sysctl_unregister,
},
};
static int __init xe_init(void)
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023-2024 Intel Corporation
*/
#ifndef _XE_OA_H_
#define _XE_OA_H_
#include "xe_oa_types.h"
struct drm_device;
struct drm_file;
struct xe_device;
struct xe_gt;
struct xe_hw_engine;
int xe_oa_init(struct xe_device *xe);
void xe_oa_fini(struct xe_device *xe);
void xe_oa_register(struct xe_device *xe);
void xe_oa_unregister(struct xe_device *xe);
int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
u32 xe_oa_timestamp_frequency(struct xe_gt *gt);
u16 xe_oa_unit_id(struct xe_hw_engine *hwe);
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023-2024 Intel Corporation
*/
#ifndef _XE_OA_TYPES_H_
#define _XE_OA_TYPES_H_
#include <linux/bitops.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <drm/xe_drm.h>
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
#define XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
HDR_32_BIT = 0,
HDR_64_BIT,
};
enum xe_oa_format_name {
XE_OA_FORMAT_C4_B8,
/* Gen8+ */
XE_OA_FORMAT_A12,
XE_OA_FORMAT_A12_B8_C8,
XE_OA_FORMAT_A32u40_A4u32_B8_C8,
/* DG2 */
XE_OAR_FORMAT_A32u40_A4u32_B8_C8,
XE_OA_FORMAT_A24u40_A14u32_B8_C8,
/* DG2/MTL OAC */
XE_OAC_FORMAT_A24u64_B8_C8,
XE_OAC_FORMAT_A22u32_R2u32_B8_C8,
/* MTL OAM */
XE_OAM_FORMAT_MPEC8u64_B8_C8,
XE_OAM_FORMAT_MPEC8u32_B8_C8,
/* Xe2+ */
XE_OA_FORMAT_PEC64u64,
XE_OA_FORMAT_PEC64u64_B8_C8,
XE_OA_FORMAT_PEC64u32,
XE_OA_FORMAT_PEC32u64_G1,
XE_OA_FORMAT_PEC32u32_G1,
XE_OA_FORMAT_PEC32u64_G2,
XE_OA_FORMAT_PEC32u32_G2,
XE_OA_FORMAT_PEC36u64_G1_32_G2_4,
XE_OA_FORMAT_PEC36u64_G1_4_G2_32,
__XE_OA_FORMAT_MAX,
};
/**
* struct xe_oa_format - Format fields for supported OA formats. OA format
* properties are specified in PRM/Bspec 52198 and 60942
*/
struct xe_oa_format {
/** @counter_select: counter select value (see Bspec 52198/60942) */
u32 counter_select;
/** @size: record size as written by HW (multiple of 64 byte cachelines) */
int size;
/** @type: type, one of enum @drm_xe_oa_format_type */
int type;
/** @header: 32 or 64 bit report headers */
enum xe_oa_report_header header;
/** @counter_size: counter size value (see Bspec 60942) */
u16 counter_size;
/** @bc_report: BC report value (see Bspec 60942) */
u16 bc_report;
};
/** struct xe_oa_regs - Registers for each OA unit */
struct xe_oa_regs {
u32 base;
struct xe_reg oa_head_ptr;
struct xe_reg oa_tail_ptr;
struct xe_reg oa_buffer;
struct xe_reg oa_ctx_ctrl;
struct xe_reg oa_ctrl;
struct xe_reg oa_debug;
struct xe_reg oa_status;
u32 oa_ctrl_counter_select_mask;
};
/**
* struct xe_oa_unit - Hardware OA unit
*/
struct xe_oa_unit {
/** @oa_unit_id: identifier for the OA unit */
u16 oa_unit_id;
/** @type: Type of OA unit - OAM, OAG etc. */
enum drm_xe_oa_unit_type type;
/** @regs: OA registers for programming the OA unit */
struct xe_oa_regs regs;
/** @num_engines: number of engines attached to this OA unit */
u32 num_engines;
/** @exclusive_stream: The stream currently using the OA unit */
struct xe_oa_stream *exclusive_stream;
};
/**
* struct xe_oa_gt - OA per-gt information
*/
struct xe_oa_gt {
/** @gt_lock: lock protecting create/destroy OA streams */
struct mutex gt_lock;
/** @num_oa_units: number of oa units for each gt */
u32 num_oa_units;
/** @oa_unit: array of oa_units */
struct xe_oa_unit *oa_unit;
};
/**
* struct xe_oa - OA device level information
*/
struct xe_oa {
/** @xe: back pointer to xe device */
struct xe_device *xe;
/** @metrics_kobj: kobj for metrics sysfs */
struct kobject *metrics_kobj;
/** @metrics_lock: lock protecting add/remove configs */
struct mutex metrics_lock;
/** @metrics_idr: List of dynamic configurations (struct xe_oa_config) */
struct idr metrics_idr;
/** @ctx_oactxctrl_offset: offset of OACTXCONTROL register in context image */
u32 ctx_oactxctrl_offset[XE_ENGINE_CLASS_MAX];
/** @oa_formats: tracks all OA formats across platforms */
const struct xe_oa_format *oa_formats;
/** @format_mask: tracks valid OA formats for a platform */
unsigned long format_mask[BITS_TO_LONGS(__XE_OA_FORMAT_MAX)];
/** @oa_unit_ids: tracks OA unit ids assigned across GTs */
u16 oa_unit_ids;
};
/** struct xe_oa_buffer - State of the stream OA buffer */
struct xe_oa_buffer {
/** @format: data format */
const struct xe_oa_format *format;
/** @bo: xe_bo backing the OA buffer */
struct xe_bo *bo;
/** @vaddr: mapped vaddr of the OA buffer */
u8 *vaddr;
/** @ptr_lock: Lock protecting reads/writes to head/tail pointers */
spinlock_t ptr_lock;
/** @head: Cached head to read from */
u32 head;
/** @tail: The last verified cached tail where HW has completed writing */
u32 tail;
/** @circ_size: The effective circular buffer size, for Xe2+ */
u32 circ_size;
};
/**
* struct xe_oa_stream - state for a single open stream FD
*/
struct xe_oa_stream {
/** @oa: xe_oa backpointer */
struct xe_oa *oa;
/** @gt: gt associated with the oa stream */
struct xe_gt *gt;
/** @hwe: hardware engine associated with this oa stream */
struct xe_hw_engine *hwe;
/** @stream_lock: Lock serializing stream operations */
struct mutex stream_lock;
/** @sample: true if DRM_XE_OA_PROP_SAMPLE_OA is provided */
bool sample;
/** @exec_q: Exec queue corresponding to DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID */
struct xe_exec_queue *exec_q;
/** @k_exec_q: kernel exec_q used for OA programming batch submissions */
struct xe_exec_queue *k_exec_q;
/** @enabled: Whether the stream is currently enabled */
bool enabled;
/** @oa_config: OA configuration used by the stream */
struct xe_oa_config *oa_config;
/** @oa_config_bos: List of struct @xe_oa_config_bo's */
struct llist_head oa_config_bos;
/** @poll_check_timer: Timer to periodically check for data in the OA buffer */
struct hrtimer poll_check_timer;
/** @poll_wq: Wait queue for waiting for OA data to be available */
wait_queue_head_t poll_wq;
/** @pollin: Whether there is data available to read */
bool pollin;
/** @periodic: Whether periodic sampling is currently enabled */
bool periodic;
/** @period_exponent: OA unit sampling frequency is derived from this */
int period_exponent;
/** @oa_buffer: OA buffer for the stream */
struct xe_oa_buffer oa_buffer;
/** @poll_period_ns: hrtimer period for checking OA buffer for available data */
u64 poll_period_ns;
/** @override_gucrc: GuC RC has been overridden for the OA stream */
bool override_gucrc;
/** @oa_status: temporary storage for oa_status register value */
u32 oa_status;
/** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
u32 no_preempt;
};
#endif
......@@ -340,7 +340,7 @@ static const struct xe_device_desc lnl_desc = {
.require_force_probe = true,
};
static const struct xe_device_desc bmg_desc __maybe_unused = {
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
.has_display = true,
......@@ -390,6 +390,7 @@ static const struct pci_device_id pciidlist[] = {
XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
......@@ -747,6 +748,11 @@ static void xe_pci_remove(struct pci_dev *pdev)
if (!xe) /* driver load aborted, nothing to cleanup */
return;
#ifdef CONFIG_PCI_IOV
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
#endif
xe_device_remove(xe);
xe_pm_runtime_fini(xe);
pci_set_drvdata(pdev, NULL);
......
......@@ -6,6 +6,7 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
......@@ -37,6 +38,17 @@ static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
xe_gt_sriov_pf_config_release(gt, n, true);
}
static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
struct xe_gt *gt;
unsigned int id;
unsigned int n;
for_each_gt(gt, xe, id)
for (n = 1; n <= num_vfs; n++)
xe_gt_sriov_pf_control_trigger_flr(gt, n);
}
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
......@@ -94,6 +106,8 @@ static int pf_disable_vfs(struct xe_device *xe)
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
pf_unprovision_vfs(xe, num_vfs);
/* not needed anymore - see pf_enable_vfs() */
......
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023-2024 Intel Corporation
*/
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_perf.h"
u32 xe_perf_stream_paranoid = true;
static struct ctl_table_header *sysctl_header;
static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_perf_param *arg,
struct drm_file *file)
{
switch (arg->perf_op) {
case DRM_XE_PERF_OP_STREAM_OPEN:
return xe_oa_stream_open_ioctl(dev, arg->param, file);
case DRM_XE_PERF_OP_ADD_CONFIG:
return xe_oa_add_config_ioctl(dev, arg->param, file);
case DRM_XE_PERF_OP_REMOVE_CONFIG:
return xe_oa_remove_config_ioctl(dev, arg->param, file);
default:
return -EINVAL;
}
}
/**
* xe_perf_ioctl - The top level perf layer ioctl
* @dev: @drm_device
* @data: pointer to struct @drm_xe_perf_param
* @file: @drm_file
*
* The function is called for different perf stream types and allows execution
* of the different operations supported by those stream types.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_perf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_xe_perf_param *arg = data;
if (arg->extensions)
return -EINVAL;
switch (arg->perf_type) {
case DRM_XE_PERF_TYPE_OA:
return xe_oa_ioctl(dev, arg, file);
default:
return -EINVAL;
}
}
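From userspace, the dispatch above is reached through a single perf ioctl whose argument selects the stream type and operation. A hedged sketch of how that call could look is below; the struct fields (extensions, perf_type, perf_op, param) mirror the kernel side shown here, while the ioctl macro name and the layout behind oa_open_props are assumptions about the uapi this series adds.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/*
 * Hypothetical userspace sketch: opening an OA stream via the perf ioctl.
 * oa_open_props is assumed to point at the OA open properties in memory.
 */
static int xe_perf_open_oa_stream(int drm_fd, uint64_t oa_open_props)
{
	struct drm_xe_perf_param arg = {
		.extensions = 0,			/* must be 0, see the check above */
		.perf_type = DRM_XE_PERF_TYPE_OA,
		.perf_op = DRM_XE_PERF_OP_STREAM_OPEN,
		.param = oa_open_props,
	};

	/* DRM_IOCTL_XE_PERF is an assumed name for the perf ioctl number. */
	return ioctl(drm_fd, DRM_IOCTL_XE_PERF, &arg);
}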
static struct ctl_table perf_ctl_table[] = {
{
.procname = "perf_stream_paranoid",
.data = &xe_perf_stream_paranoid,
.maxlen = sizeof(xe_perf_stream_paranoid),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{}
};
/**
* xe_perf_sysctl_register - Register "perf_stream_paranoid" sysctl
*
* Normally only superuser/root can access perf counter data. However,
* superuser can set perf_stream_paranoid sysctl to 0 to allow non-privileged
* users to also access perf data.
*
* Return: always returns 0
*/
int xe_perf_sysctl_register(void)
{
sysctl_header = register_sysctl("dev/xe", perf_ctl_table);
return 0;
}
/**
* xe_perf_sysctl_unregister - Unregister "perf_stream_paranoid" sysctl
*/
void xe_perf_sysctl_unregister(void)
{
unregister_sysctl_table(sysctl_header);
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023-2024 Intel Corporation
*/
#ifndef _XE_PERF_H_
#define _XE_PERF_H_
#include <linux/types.h>
struct drm_device;
struct drm_file;
extern u32 xe_perf_stream_paranoid;
int xe_perf_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int xe_perf_sysctl_register(void);
void xe_perf_sysctl_unregister(void);
#endif
......@@ -129,7 +129,7 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
list_del_init(&pfence->link);
pfence->q = xe_exec_queue_get(q);
dma_fence_init(&pfence->base, &preempt_fence_ops,
&q->compute.lock, context, seqno);
&q->lr.lock, context, seqno);
return &pfence->base;
}
......
......@@ -1137,8 +1137,9 @@ static void invalidation_fence_cb(struct dma_fence *fence,
{
struct invalidation_fence *ifence =
container_of(cb, struct invalidation_fence, cb);
struct xe_device *xe = gt_to_xe(ifence->gt);
trace_xe_gt_tlb_invalidation_fence_cb(&ifence->base);
trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
if (!ifence->fence->error) {
queue_work(system_wq, &ifence->work);
} else {
......@@ -1153,8 +1154,9 @@ static void invalidation_fence_work_func(struct work_struct *w)
{
struct invalidation_fence *ifence =
container_of(w, struct invalidation_fence, work);
struct xe_device *xe = gt_to_xe(ifence->gt);
trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
ifence->end, ifence->asid);
}
......@@ -1166,7 +1168,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
{
int ret;
trace_xe_gt_tlb_invalidation_fence_create(&ifence->base);
trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
spin_lock_irq(&gt->tlb_invalidation.lock);
dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
......
......@@ -602,6 +602,82 @@ query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
return 0;
}
static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
size_t size = sizeof(struct drm_xe_query_oa_units);
struct xe_gt *gt;
int i, id;
for_each_gt(gt, xe, id) {
for (i = 0; i < gt->oa.num_oa_units; i++) {
size += sizeof(struct drm_xe_oa_unit);
size += gt->oa.oa_unit[i].num_engines *
sizeof(struct drm_xe_engine_class_instance);
}
}
return size;
}
static int query_oa_units(struct xe_device *xe,
struct drm_xe_device_query *query)
{
void __user *query_ptr = u64_to_user_ptr(query->data);
size_t size = calc_oa_unit_query_size(xe);
struct drm_xe_query_oa_units *qoa;
enum xe_hw_engine_id hwe_id;
struct drm_xe_oa_unit *du;
struct xe_hw_engine *hwe;
struct xe_oa_unit *u;
int gt_id, i, j, ret;
struct xe_gt *gt;
u8 *pdu;
if (query->size == 0) {
query->size = size;
return 0;
} else if (XE_IOCTL_DBG(xe, query->size != size)) {
return -EINVAL;
}
qoa = kzalloc(size, GFP_KERNEL);
if (!qoa)
return -ENOMEM;
pdu = (u8 *)&qoa->oa_units[0];
for_each_gt(gt, xe, gt_id) {
for (i = 0; i < gt->oa.num_oa_units; i++) {
u = &gt->oa.oa_unit[i];
du = (struct drm_xe_oa_unit *)pdu;
du->oa_unit_id = u->oa_unit_id;
du->oa_unit_type = u->type;
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
du->capabilities = DRM_XE_OA_CAPS_BASE;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
if (!xe_hw_engine_is_reserved(hwe) &&
xe_oa_unit_id(hwe) == u->oa_unit_id) {
du->eci[j].engine_class =
xe_to_user_engine_class[hwe->class];
du->eci[j].engine_instance = hwe->logical_instance;
du->eci[j].gt_id = gt->info.id;
j++;
}
}
du->num_engines = j;
pdu += sizeof(*du) + j * sizeof(du->eci[0]);
qoa->num_oa_units++;
}
}
ret = copy_to_user(query_ptr, qoa, size);
kfree(qoa);
return ret ? -EFAULT : 0;
}
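The size handling above follows the usual two-pass xe query convention: a first call with size == 0 reports the required buffer size, and a second call with a matching size fills the buffer. A hedged userspace sketch of that pattern follows; struct drm_xe_device_query and DRM_IOCTL_XE_DEVICE_QUERY are existing xe uapi, while the OA query id name is an assumption about what this series adds.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Hypothetical two-pass query sketch; returns a malloc'ed drm_xe_query_oa_units. */
static struct drm_xe_query_oa_units *query_oa_units(int drm_fd)
{
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_OA_UNITS,	/* query id name assumed */
	};
	struct drm_xe_query_oa_units *qoa;

	if (ioctl(drm_fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))	/* pass 1: get required size */
		return NULL;

	qoa = malloc(q.size);
	if (!qoa)
		return NULL;

	q.data = (uintptr_t)qoa;
	if (ioctl(drm_fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {	/* pass 2: copy the data */
		free(qoa);
		return NULL;
	}

	return qoa;
}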
static int (* const xe_query_funcs[])(struct xe_device *xe,
struct drm_xe_device_query *query) = {
query_engines,
......@@ -612,6 +688,7 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
query_gt_topology,
query_engine_cycles,
query_uc_fw_version,
query_oa_units,
};
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
......
......@@ -7,6 +7,7 @@
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_oa_regs.h"
#include "regs/xe_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
......@@ -63,7 +64,28 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(WHITELIST(CSBE_DEBUG_STATUS(RENDER_RING_BASE), 0))
},
{ XE_RTP_NAME("oa_reg_render"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(WHITELIST(OAG_MMIOTRIGGER,
RING_FORCE_TO_NONPRIV_ACCESS_RW),
WHITELIST(OAG_OASTATUS,
RING_FORCE_TO_NONPRIV_ACCESS_RD),
WHITELIST(OAG_OAHEADPTR,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
{ XE_RTP_NAME("oa_reg_compute"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
ENGINE_CLASS(COMPUTE)),
XE_RTP_ACTIONS(WHITELIST(OAG_MMIOTRIGGER,
RING_FORCE_TO_NONPRIV_ACCESS_RW),
WHITELIST(OAG_OASTATUS,
RING_FORCE_TO_NONPRIV_ACCESS_RD),
WHITELIST(OAG_OAHEADPTR,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
{}
};
......
......@@ -224,6 +224,19 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
return job->q->vm ? BIT(8) : 0;
}
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
MI_COPY_MEM_MEM_DST_GGTT;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
dw[i++] = MI_NOOP;
return i;
}
/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
u64 batch_addr, u32 seqno)
......@@ -232,6 +245,8 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->q->gt;
i = emit_copy_timestamp(lrc, dw, i);
if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
......@@ -283,6 +298,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
/* hsdes: 1809175790 */
......@@ -332,6 +349,8 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 mask_flags = 0;
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
if (lacks_render)
mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
......@@ -375,6 +394,8 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
i = emit_copy_timestamp(lrc, dw, i);
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, dw, i);
......
......@@ -13,6 +13,7 @@
#include "xe_gt_topology.h"
#include "xe_macros.h"
#include "xe_reg_sr.h"
#include "xe_sriov.h"
/**
* DOC: Register Table Processing
......@@ -35,11 +36,18 @@ static bool rule_matches(const struct xe_device *xe,
unsigned int n_rules)
{
const struct xe_rtp_rule *r;
unsigned int i;
unsigned int i, rcount = 0;
bool match;
for (r = rules, i = 0; i < n_rules; r = &rules[++i]) {
switch (r->match_type) {
case XE_RTP_MATCH_OR:
/*
* This is only reached if a complete set of
* rules passed or none were evaluated. For both cases,
* shortcut the other rules and return the proper value.
*/
goto done;
case XE_RTP_MATCH_PLATFORM:
match = xe->info.platform == r->platform;
break;
......@@ -56,6 +64,9 @@ static bool rule_matches(const struct xe_device *xe,
xe->info.graphics_verx100 <= r->ver_end &&
(!has_samedia(xe) || !xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT:
match = xe->info.graphics_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_GRAPHICS_STEP:
match = xe->info.step.graphics >= r->step_start &&
xe->info.step.graphics < r->step_end &&
......@@ -75,6 +86,9 @@ static bool rule_matches(const struct xe_device *xe,
xe->info.step.media < r->step_end &&
(!has_samedia(xe) || xe_gt_is_media_type(gt));
break;
case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT:
match = xe->info.media_verx100 == r->ver_start;
break;
case XE_RTP_MATCH_INTEGRATED:
match = !xe->info.is_dgfx;
break;
......@@ -102,10 +116,27 @@ static bool rule_matches(const struct xe_device *xe,
match = false;
}
if (!match)
return false;
if (!match) {
/*
* Advance rules until we find XE_RTP_MATCH_OR to see
* if there's another set of conditions to check
*/
while (i < n_rules && rules[++i].match_type != XE_RTP_MATCH_OR)
;
if (i >= n_rules)
return false;
rcount = 0;
} else {
rcount++;
}
}
done:
if (drm_WARN_ON(&xe->drm, !rcount))
return false;
return true;
}
......@@ -227,6 +258,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
rtp_get_context(ctx, &hwe, &gt, &xe);
if (IS_SRIOV_VF(xe))
return;
for (entry = entries; entry && entry->name; entry++) {
bool match = false;
......@@ -324,8 +358,3 @@ bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
return dss >= dss_per_gslice;
}
bool xe_rtp_match_when_media2000(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
return (gt_to_xe(gt))->info.media_verx100 == 2000;
}
......@@ -140,9 +140,23 @@ struct xe_reg_sr;
.ver_start = ver_start__, .ver_end = ver_end__, }
/**
* XE_RTP_RULE_MEDIA_VERSION - Create rule matching media version
* XE_RTP_RULE_GRAPHICS_VERSION_ANY_GT - Create rule matching graphics version on any GT
* @ver__: Graphics IP version to match
*
* Like XE_RTP_RULE_GRAPHICS_VERSION, but it matches even if the current GT
* being checked is not of the graphics type. It allows adding RTP entries to
* another GT when the device contains a Graphics IP with that version.
*
* Refer to XE_RTP_RULES() for expected usage.
*/
#define XE_RTP_RULE_GRAPHICS_VERSION_ANY_GT(ver__) \
{ .match_type = XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT, \
.ver_start = ver__, }
/**
* XE_RTP_RULE_MEDIA_VERSION - Create rule matching media version
* @ver__: Media IP version to match
*
* Refer to XE_RTP_RULES() for expected usage.
*/
#define XE_RTP_RULE_MEDIA_VERSION(ver__) \
......@@ -163,6 +177,20 @@ struct xe_reg_sr;
{ .match_type = XE_RTP_MATCH_MEDIA_VERSION_RANGE, \
.ver_start = ver_start__, .ver_end = ver_end__, }
/**
* XE_RTP_RULE_MEDIA_VERSION_ANY_GT - Create rule matching media version on any GT
* @ver__: Media IP version to match
*
* Like XE_RTP_RULE_MEDIA_VERSION, but it matches even if the current GT being
* checked is not of the media type. It allows adding RTP entries to another
* GT when the device contains a Media IP with that version.
*
* Refer to XE_RTP_RULES() for expected usage.
*/
#define XE_RTP_RULE_MEDIA_VERSION_ANY_GT(ver__) \
{ .match_type = XE_RTP_MATCH_MEDIA_VERSION_ANY_GT, \
.ver_start = ver__, }
/**
* XE_RTP_RULE_IS_INTEGRATED - Create a rule matching integrated graphics devices
*
......@@ -179,6 +207,27 @@ struct xe_reg_sr;
#define XE_RTP_RULE_IS_DISCRETE \
{ .match_type = XE_RTP_MATCH_DISCRETE }
/**
* XE_RTP_RULE_OR - Create an OR condition for rtp rules
*
* RTP rules are AND'ed when evaluated and all of them need to match.
* XE_RTP_RULE_OR allows creating a set of rules where any one of them matching is
* sufficient for the action to trigger. Example:
*
* .. code-block:: c
*
* const struct xe_rtp_entry_sr entries[] = {
* ...
* { XE_RTP_NAME("test-entry"),
* XE_RTP_RULES(PLATFORM(DG2), OR, PLATFORM(TIGERLAKE)),
* ...
* },
* ...
* };
*/
#define XE_RTP_RULE_OR \
{ .match_type = XE_RTP_MATCH_OR }
/**
* XE_RTP_ACTION_WR - Helper to write a value to the register, overriding all
* the bits
......@@ -325,7 +374,7 @@ struct xe_reg_sr;
* XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
* @...: Rules
*
* At least one rule is needed and up to 4 are supported. Multiple rules are
* At least one rule is needed and up to 6 are supported. Multiple rules are
* AND'ed together, i.e. all the rules must evaluate to true for the entry to
* be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
*
......@@ -350,7 +399,7 @@ struct xe_reg_sr;
* XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
* @...: Actions to be taken
*
* At least one action is needed and up to 4 are supported. See XE_RTP_ACTION_*
* At least one action is needed and up to 6 are supported. See XE_RTP_ACTION_*
* for the possible actions. Example:
*
* .. code-block:: c
......@@ -427,18 +476,4 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
* xe_rtp_match_when_media2000 - Match when media GT version 2000
*
* @gt: GT structure
* @hwe: Engine instance
*
* Its one of the case where we need to apply workaround on primary GT
* based on if media GT version 2000 is present. Thus this API will help
* us to match media version 2000.
*
* Returns: true if media GT version 2000, false otherwise.
*/
bool xe_rtp_match_when_media2000(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
#endif
......@@ -58,6 +58,8 @@
#define XE_RTP_PASTE_2(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_1(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_3(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_2(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_5(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_4(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_6(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_5(prefix_, sep_, _XE_TUPLE_TAIL args_)
/*
* XE_RTP_DROP_CAST - Drop cast to convert a compound statement to a initializer
......
......@@ -42,15 +42,18 @@ enum {
XE_RTP_MATCH_SUBPLATFORM,
XE_RTP_MATCH_GRAPHICS_VERSION,
XE_RTP_MATCH_GRAPHICS_VERSION_RANGE,
XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT,
XE_RTP_MATCH_GRAPHICS_STEP,
XE_RTP_MATCH_MEDIA_VERSION,
XE_RTP_MATCH_MEDIA_VERSION_RANGE,
XE_RTP_MATCH_MEDIA_VERSION_ANY_GT,
XE_RTP_MATCH_MEDIA_STEP,
XE_RTP_MATCH_INTEGRATED,
XE_RTP_MATCH_DISCRETE,
XE_RTP_MATCH_ENGINE_CLASS,
XE_RTP_MATCH_NOT_ENGINE_CLASS,
XE_RTP_MATCH_FUNC,
XE_RTP_MATCH_OR,
};
/** struct xe_rtp_rule - match rule for processing entry */
......
......@@ -363,3 +363,9 @@ xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot,
for (i = 0; i < snapshot->batch_addr_len; i++)
drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
}
int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
enum dma_resv_usage usage)
{
return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
}
......@@ -90,4 +90,7 @@ struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job
void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
enum dma_resv_usage usage);
#endif
......@@ -19,18 +19,18 @@ void xe_sriov_probe_early(struct xe_device *xe);
void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
int xe_sriov_init(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe)
static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe)
{
xe_assert(xe, xe->sriov.__mode);
return xe->sriov.__mode;
}
static inline bool xe_device_is_sriov_pf(struct xe_device *xe)
static inline bool xe_device_is_sriov_pf(const struct xe_device *xe)
{
return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_PF;
}
static inline bool xe_device_is_sriov_vf(struct xe_device *xe)
static inline bool xe_device_is_sriov_vf(const struct xe_device *xe)
{
return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_VF;
}
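Constifying these helpers lets read-only callers take a const device pointer. A minimal sketch with a hypothetical caller name:

	/* Hypothetical caller: only queries the mode, so const is enough */
	static bool needs_vf_quirk(const struct xe_device *xe)
	{
		return xe_device_is_sriov_vf(xe);
	}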
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2024 Intel Corporation
*/
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "xe_trace_bo.h"
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2024 Intel Corporation
*/
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "xe_trace_guc.h"
#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2024 Intel Corporation
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe
#if !defined(_XE_TRACE_GUC_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_GUC_H_
#include <linux/tracepoint.h>
#include <linux/types.h>
#include "xe_device_types.h"
#include "xe_guc_exec_queue_types.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
TP_PROTO(struct xe_device *xe, u32 _head, u32 _tail, u32 size, u32 space, u32 len),
TP_ARGS(xe, _head, _tail, size, space, len),
TP_STRUCT__entry(
__string(dev, __dev_name_xe(xe))
__field(u32, _head)
__field(u32, _tail)
__field(u32, size)
__field(u32, space)
__field(u32, len)
),
TP_fast_assign(
__assign_str(dev);
__entry->_head = _head;
__entry->_tail = _tail;
__entry->size = size;
__entry->space = space;
__entry->len = len;
),
TP_printk("h2g flow control: dev=%s, head=%u, tail=%u, size=%u, space=%u, len=%u",
__get_str(dev), __entry->_head, __entry->_tail, __entry->size,
__entry->space, __entry->len)
);
DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
TP_PROTO(struct xe_device *xe, u32 _head, u32 _tail, u32 size, u32 space, u32 len),
TP_ARGS(xe, _head, _tail, size, space, len)
);
DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
TP_PROTO(struct xe_device *xe, u32 _head, u32 _tail, u32 size, u32 space, u32 len),
TP_ARGS(xe, _head, _tail, size, space, len),
TP_printk("g2h flow control: dev=%s, head=%u, tail=%u, size=%u, space=%u, len=%u",
__get_str(dev), __entry->_head, __entry->_tail, __entry->size,
__entry->space, __entry->len)
);
DECLARE_EVENT_CLASS(xe_guc_ctb,
TP_PROTO(struct xe_device *xe, u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
TP_ARGS(xe, gt_id, action, len, _head, tail),
TP_STRUCT__entry(
__string(dev, __dev_name_xe(xe))
__field(u8, gt_id)
__field(u32, action)
__field(u32, len)
__field(u32, tail)
__field(u32, _head)
),
TP_fast_assign(
__assign_str(dev);
__entry->gt_id = gt_id;
__entry->action = action;
__entry->len = len;
__entry->tail = tail;
__entry->_head = _head;
),
TP_printk("H2G CTB: dev=%s, gt%d: action=0x%x, len=%d, tail=%d, head=%d\n",
__get_str(dev), __entry->gt_id, __entry->action, __entry->len,
__entry->tail, __entry->_head)
);
DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
TP_PROTO(struct xe_device *xe, u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
TP_ARGS(xe, gt_id, action, len, _head, tail)
);
DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
TP_PROTO(struct xe_device *xe, u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
TP_ARGS(xe, gt_id, action, len, _head, tail),
TP_printk("G2H CTB: dev=%s, gt%d: action=0x%x, len=%d, tail=%d, head=%d\n",
__get_str(dev), __entry->gt_id, __entry->action, __entry->len,
__entry->tail, __entry->_head)
);
#endif
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace_guc
#include <trace/define_trace.h>
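The DEFINE_EVENT/DEFINE_EVENT_PRINT declarations above generate trace_xe_guc_ct_h2g_flow_control() and trace_xe_guc_ct_g2h_flow_control(), which the CT layer can emit from its send/receive paths. A hedged sketch; the local variable names are illustrative:

	/* Illustrative only: emitted when an H2G message is queued */
	trace_xe_guc_ct_h2g_flow_control(xe, head, tail, size, space, len);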
......@@ -158,7 +158,7 @@ static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
static inline void xe_uc_fw_sanitize(struct xe_uc_fw *uc_fw)
{
if (xe_uc_fw_is_loaded(uc_fw))
if (xe_uc_fw_is_loadable(uc_fw))
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_LOADABLE);
}
......
......@@ -21,6 +21,7 @@
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_rtp.h"
#include "xe_sriov.h"
#include "xe_step.h"
/**
......@@ -629,7 +630,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
},
{ XE_RTP_NAME("14019877138"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
},
......@@ -678,9 +679,19 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_2, TBIMR_FAST_CLIP))
},
{ XE_RTP_NAME("14020756599"),
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER), OR,
MEDIA_VERSION_ANY_GT(2000), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS))
},
{ XE_RTP_NAME("14021490052"),
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(FF_MODE,
DIS_MESH_PARTIAL_AUTOSTRIP |
DIS_MESH_AUTOSTRIP),
SET(VFLSKPD,
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
/* Xe2_HPG */
{ XE_RTP_NAME("15010599737"),
......@@ -705,13 +716,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_AUTOSTRIP))
},
/* Xe2_LPM */
{ XE_RTP_NAME("14020756599"),
XE_RTP_RULES(ENGINE_CLASS(RENDER), FUNC(xe_rtp_match_when_media2000)),
XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS))
},
{}
};
......@@ -862,6 +866,9 @@ void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
{
struct xe_gt *mmio = tile->primary_gt;
if (IS_SRIOV_VF(tile->xe))
return;
if (XE_WA(mmio, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
......@@ -17,8 +17,6 @@ void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe);
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
......
......@@ -27,3 +27,4 @@
16022287689 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)