Commit a5edc7cd, authored by Matt Roper, committed by Rodrigo Vivi

drm/xe: Introduce xe_tile

Create a new xe_tile structure to begin separating the concept of "tile"
from "GT."  A tile is effectively a complete GPU, and a GT is just one
part of that.  On platforms like MTL, there's only a single full GPU
(tile) which has its IP blocks provided by two GTs.  In contrast, a
"multi-tile" platform like PVC is basically multiple complete GPUs
packed behind a single PCI device.

For now, just create xe_tile as a simple wrapper around xe_gt.  The
items in xe_gt that are truly tied to the tile rather than the GT will
be moved in future patches.  Support for multiple GTs per tile (i.e.,
the MTL standalone media case) will also be re-introduced in a future
patch.

v2:
 - Fix kunit test build
 - Move hunk from next patch to use local tile variable rather than
   direct xe->tiles[id] accesses.  (Lucas)
 - Mention compute in kerneldoc.  (Rodrigo)
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-3-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent dbc4f5d1
......@@ -174,7 +174,7 @@ static int evict_test_run_gt(struct xe_device *xe, struct xe_gt *gt, struct kuni
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
XE_BO_CREATE_VRAM_IF_DGFX(gt);
struct xe_vm *vm = xe_migrate_get_vm(xe->gt[0].migrate);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->primary_gt.migrate);
struct ww_acquire_ctx ww;
int err, i;
......
......@@ -13,6 +13,7 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
......@@ -236,9 +237,10 @@ static void xe_rtp_process_tests(struct kunit *test)
{
const struct rtp_test_case *param = test->param_value;
struct xe_device *xe = test->priv;
struct xe_reg_sr *reg_sr = &xe->gt[0].reg_sr;
struct xe_gt *gt = &xe_device_get_root_tile(xe)->primary_gt;
struct xe_reg_sr *reg_sr = &gt->reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(&xe->gt[0]);
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
unsigned long idx, count = 0;
xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
......
......@@ -48,12 +48,17 @@ static inline struct xe_file *to_xe_file(const struct drm_file *file)
return file->driver_priv;
}
/**
 * xe_device_get_root_tile() - Return the device's root tile
 * @xe: xe device
 *
 * The root tile is always tile 0; on multi-tile platforms it is the tile
 * responsible for external PCI communication.
 *
 * Return: pointer to the root &struct xe_tile
 */
static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
{
	/* Array decay: xe->tiles == &xe->tiles[0] */
	return xe->tiles;
}
static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
struct xe_gt *gt;
XE_BUG_ON(gt_id > XE_MAX_GT);
gt = xe->gt + gt_id;
XE_BUG_ON(gt_id > XE_MAX_TILES_PER_DEVICE);
gt = &xe->tiles[gt_id].primary_gt;
XE_BUG_ON(gt->info.id != gt_id);
XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
......@@ -65,7 +70,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
*/
static inline struct xe_gt *to_gt(struct xe_device *xe)
{
return xe->gt;
return &xe_device_get_root_tile(xe)->primary_gt;
}
static inline bool xe_device_guc_submission_enabled(struct xe_device *xe)
......
......@@ -29,7 +29,7 @@
#define XE_GT0 0
#define XE_GT1 1
#define XE_MAX_GT (XE_GT1 + 1)
#define XE_MAX_TILES_PER_DEVICE (XE_GT1 + 1)
#define XE_MAX_ASID (BIT(20))
......@@ -43,6 +43,40 @@
(_xe)->info.step.graphics >= (min_step) && \
(_xe)->info.step.graphics < (max_step))
/*
 * tile_to_xe() - Obtain the xe device backpointer from a tile.
 *
 * Implemented with _Generic so that const-ness is preserved: passing a
 * const tile pointer yields a const device pointer, while a mutable tile
 * pointer yields a mutable device pointer.
 *
 * Note: the line-continuation backslashes were lost in transit; restored
 * here so the multi-line macro definition is syntactically valid again.
 */
#define tile_to_xe(tile__)							\
	_Generic(tile__,							\
		 const struct xe_tile *: (const struct xe_device *)((tile__)->xe), \
		 struct xe_tile *: (tile__)->xe)
/**
 * struct xe_tile - hardware tile structure
 *
 * From a driver perspective, a "tile" is effectively a complete GPU, containing
 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
 *
 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
 * device and designate one "root" tile as being responsible for external PCI
 * communication. PCI BAR0 exposes the GGTT and MMIO register space for each
 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
 * with each tile similarly. Device-wide interrupts can be enabled/disabled
 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
 * have interrupts that need servicing.
 */
struct xe_tile {
	/** @xe: Backpointer to tile's PCI device */
	struct xe_device *xe;
	/** @id: ID of the tile */
	u8 id;
	/**
	 * @primary_gt: Primary GT
	 *
	 * For now this single GT supplies all of the tile's graphics,
	 * compute, and media functionality; platforms with a standalone
	 * media GT will gain a second GT here in a future patch (see the
	 * TODO below).
	 */
	struct xe_gt primary_gt;
	/* TODO: Add media GT here */
};
/**
* struct xe_device - Top level struct of XE device
*/
......@@ -193,8 +227,8 @@ struct xe_device {
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
/** @gt: graphics tile */
struct xe_gt gt[XE_MAX_GT];
/** @tiles: device tiles */
struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
/**
* @mem_access: keep track of memory access in the device, possibly
......
......@@ -77,12 +77,17 @@ enum xe_steering_type {
};
/**
* struct xe_gt - Top level struct of a graphics tile
* struct xe_gt - A "Graphics Technology" unit of the GPU
*
* A graphics tile may be a physical split (duplicate pieces of silicon,
* different GGTT + VRAM) or a virtual split (shared GGTT + VRAM). Either way
* this structure encapsulates of everything a GT is (MMIO, VRAM, memory
* management, microcontrols, and a hardware set of engines).
* A GT ("Graphics Technology") is the subset of a GPU primarily responsible
* for implementing the graphics, compute, and/or media IP. It encapsulates
* the hardware engines, programmable execution units, and GuC. Each GT has
* its own handling of power management (RC6+forcewake) and multicast register
* steering.
*
* A GPU/tile may have a single GT that supplies all graphics, compute, and
* media functionality, or the graphics/compute and media may be split into
* separate GTs within a tile.
*/
struct xe_gt {
/** @xe: backpointer to XE device */
......
......@@ -438,6 +438,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_gt *gt = xe_device_get_gt(xe, 0);
struct drm_xe_mmio *args = data;
unsigned int bits_flag, bytes;
struct xe_reg reg;
......@@ -480,7 +481,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
*/
reg = XE_REG(args->addr);
xe_force_wake_get(gt_to_fw(&xe->gt[0]), XE_FORCEWAKE_ALL);
xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (args->flags & DRM_XE_MMIO_WRITE) {
switch (bits_flag) {
......@@ -489,10 +490,10 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto exit;
}
xe_mmio_write32(to_gt(xe), reg, args->value);
xe_mmio_write32(gt, reg, args->value);
break;
case DRM_XE_MMIO_64BIT:
xe_mmio_write64(to_gt(xe), reg, args->value);
xe_mmio_write64(gt, reg, args->value);
break;
default:
drm_dbg(&xe->drm, "Invalid MMIO bit size");
......@@ -507,10 +508,10 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
if (args->flags & DRM_XE_MMIO_READ) {
switch (bits_flag) {
case DRM_XE_MMIO_32BIT:
args->value = xe_mmio_read32(to_gt(xe), reg);
args->value = xe_mmio_read32(gt, reg);
break;
case DRM_XE_MMIO_64BIT:
args->value = xe_mmio_read64(to_gt(xe), reg);
args->value = xe_mmio_read64(gt, reg);
break;
default:
drm_dbg(&xe->drm, "Invalid MMIO bit size");
......@@ -522,7 +523,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
}
exit:
xe_force_wake_put(gt_to_fw(&xe->gt[0]), XE_FORCEWAKE_ALL);
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
return ret;
}
......@@ -478,6 +478,7 @@ static int xe_info_init(struct xe_device *xe,
const struct xe_graphics_desc *graphics_desc = NULL;
const struct xe_media_desc *media_desc = NULL;
u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
struct xe_tile *tile;
struct xe_gt *gt;
u8 id;
......@@ -537,7 +538,11 @@ static int xe_info_init(struct xe_device *xe,
xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
for (id = 0; id < xe->info.tile_count; ++id) {
gt = xe->gt + id;
tile = &xe->tiles[id];
tile->xe = xe;
tile->id = id;
gt = &tile->primary_gt;
gt->info.id = id;
gt->xe = xe;
......
......@@ -249,7 +249,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->engine->gt;
struct xe_device *xe = gt_to_xe(gt);
bool lacks_render = !(xe->gt[0].info.engine_mask & XE_HW_ENGINE_RCS_MASK);
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 mask_flags = 0;
dw[i++] = preparser_disable(true);
......
......@@ -3389,7 +3389,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
struct xe_device *xe = vma->vm->xe;
struct xe_gt *gt;
u32 gt_needs_invalidate = 0;
int seqno[XE_MAX_GT];
int seqno[XE_MAX_TILES_PER_DEVICE];
u8 id;
int ret;
......
......@@ -159,7 +159,7 @@ struct xe_vm {
struct kref refcount;
/* engine used for (un)binding vma's */
struct xe_engine *eng[XE_MAX_GT];
struct xe_engine *eng[XE_MAX_TILES_PER_DEVICE];
/** Protects @rebind_list and the page-table structures */
struct dma_resv resv;
......@@ -167,9 +167,9 @@ struct xe_vm {
u64 size;
struct rb_root vmas;
struct xe_pt *pt_root[XE_MAX_GT];
struct xe_bo *scratch_bo[XE_MAX_GT];
struct xe_pt *scratch_pt[XE_MAX_GT][XE_VM_MAX_LEVEL];
struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
struct xe_bo *scratch_bo[XE_MAX_TILES_PER_DEVICE];
struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];
/** @flags: flags for this VM, statically setup a creation time */
#define XE_VM_FLAGS_64K BIT(0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment