Commit f6929e80 authored by Matt Roper, committed by Rodrigo Vivi

drm/xe: Allocate GT dynamically

In preparation for re-adding media GT support, switch the primary GT
within the tile to a dynamic allocation.
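
The conversion is mechanical and repeated across the driver: tile->primary_gt
changes from an embedded struct to a pointer, allocation moves out of
xe_device_probe() and into xe_info_init() via the reworked xe_gt_alloc(), and
call sites simply drop the address-of operator. A condensed before/after
sketch of the pattern (simplified from the hunks below; xe_gt_alloc() is
abbreviated here, see the full version in the xe_gt.c hunk):

	/* Before: GT embedded in the tile, referenced as &tile->primary_gt. */
	struct xe_tile {
		struct xe_gt primary_gt;
	};

	/* After: GT allocated dynamically; drmm_kzalloc() ties the
	 * allocation's lifetime to the DRM device, so teardown needs no
	 * explicit kfree(). */
	struct xe_tile {
		struct xe_gt *primary_gt;
	};

	struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
	{
		struct xe_gt *gt;

		gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt)
			return ERR_PTR(-ENOMEM);

		gt->tile = tile;
		return gt;
	}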
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-19-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 1e6c20be
...
@@ -286,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 		goto free_pt;
 	}
 
-	bb = xe_bb_new(&tile->primary_gt, 32, xe->info.supports_usm);
+	bb = xe_bb_new(tile->primary_gt, 32, xe->info.supports_usm);
 	if (IS_ERR(bb)) {
 		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
 			   PTR_ERR(bb));
@@ -323,7 +323,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
 	expected = 0;
-	emit_clear(&tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
+	emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
 		   IS_DGFX(xe));
 	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
 		       test);
...
@@ -237,7 +237,7 @@ static void xe_rtp_process_tests(struct kunit *test)
 {
 	const struct rtp_test_case *param = test->param_value;
 	struct xe_device *xe = test->priv;
-	struct xe_gt *gt = &xe_device_get_root_tile(xe)->primary_gt;
+	struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
 	struct xe_reg_sr *reg_sr = &gt->reg_sr;
 	const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
 	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
...
@@ -249,10 +249,6 @@ int xe_device_probe(struct xe_device *xe)
 		err = xe_tile_alloc(tile);
 		if (err)
 			return err;
-
-		err = xe_gt_alloc(xe, &tile->primary_gt);
-		if (err)
-			return err;
 	}
 
 	err = xe_mmio_init(xe);
...
@@ -58,7 +58,11 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
 	struct xe_gt *gt;
 
 	XE_BUG_ON(gt_id > XE_MAX_TILES_PER_DEVICE);
-	gt = &xe->tiles[gt_id].primary_gt;
+
+	gt = xe->tiles[gt_id].primary_gt;
+	if (drm_WARN_ON(&xe->drm, !gt))
+		return NULL;
+
 	XE_BUG_ON(gt->info.id != gt_id);
 	XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
@@ -79,7 +83,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
  */
 static inline struct xe_gt *xe_root_mmio_gt(struct xe_device *xe)
 {
-	return &xe_device_get_root_tile(xe)->primary_gt;
+	return xe_device_get_root_tile(xe)->primary_gt;
 }
 
 static inline bool xe_device_guc_submission_enabled(struct xe_device *xe)
...
@@ -74,7 +74,7 @@ struct xe_tile {
 	/**
 	 * @primary_gt: Primary GT
 	 */
-	struct xe_gt primary_gt;
+	struct xe_gt *primary_gt;
 
 	/* TODO: Add media GT here */
...
@@ -196,7 +196,7 @@ void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
 	 * TODO: Loop over each GT in tile once media GT support is
 	 * re-added
 	 */
-	struct xe_gt *gt = &ggtt->tile->primary_gt;
+	struct xe_gt *gt = ggtt->tile->primary_gt;
 
 	/* TODO: vfunc for GuC vs. non-GuC */
...
@@ -43,13 +43,18 @@
 #include "xe_wa.h"
 #include "xe_wopcm.h"
 
-int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
+struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
 {
-	XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
+	struct xe_gt *gt;
+
+	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
+	if (!gt)
+		return ERR_PTR(-ENOMEM);
 
+	gt->tile = tile;
 	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
 
-	return 0;
+	return gt;
 }
 
 void xe_gt_sanitize(struct xe_gt *gt)
...
@@ -16,7 +16,7 @@
 	for_each_if (((hwe__) = (gt__)->hw_engines + (id__)) && \
 		     xe_hw_engine_is_valid((hwe__)))
 
-int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt);
+struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
 int xe_gt_init_early(struct xe_gt *gt);
 int xe_gt_init(struct xe_gt *gt);
 int xe_gt_record_default_lrcs(struct xe_gt *gt);
...
@@ -229,7 +229,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
 	if (xe->info.supports_usm) {
-		batch = tile->primary_gt.usm.bb_pool->bo;
+		batch = tile->primary_gt->usm.bb_pool->bo;
 		batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE,
 					&is_vram);
 		m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
@@ -313,7 +313,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 {
 	struct xe_device *xe = tile_to_xe(tile);
-	struct xe_gt *primary_gt = &tile->primary_gt;
+	struct xe_gt *primary_gt = tile->primary_gt;
 	struct xe_migrate *m;
 	struct xe_vm *vm;
 	struct ww_acquire_ctx ww;
@@ -546,7 +546,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
 			       u64 dst_ofs, bool dst_is_vram, u32 dst_size,
 			       u64 ccs_ofs, bool copy_ccs)
 {
-	struct xe_gt *gt = &m->tile->primary_gt;
+	struct xe_gt *gt = m->tile->primary_gt;
 	u32 flush_flags = 0;
 
 	if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_vram) {
@@ -610,7 +610,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 				  struct ttm_resource *src,
 				  struct ttm_resource *dst)
 {
-	struct xe_gt *gt = &m->tile->primary_gt;
+	struct xe_gt *gt = m->tile->primary_gt;
 	struct xe_device *xe = gt_to_xe(gt);
 	struct dma_fence *fence = NULL;
 	u64 size = src_bo->size;
@@ -873,7 +873,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 				   struct ttm_resource *dst)
 {
 	bool clear_vram = mem_type_is_vram(dst->mem_type);
-	struct xe_gt *gt = &m->tile->primary_gt;
+	struct xe_gt *gt = m->tile->primary_gt;
 	struct xe_device *xe = gt_to_xe(gt);
 	struct dma_fence *fence = NULL;
 	u64 size = bo->size;
@@ -1148,7 +1148,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 {
 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
 	struct xe_tile *tile = m->tile;
-	struct xe_gt *gt = &tile->primary_gt;
+	struct xe_gt *gt = tile->primary_gt;
 	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_sched_job *job;
 	struct dma_fence *fence;
...
@@ -209,7 +209,7 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
 int xe_mmio_tile_vram_size(struct xe_tile *tile, u64 *vram_size, u64 *tile_size, u64 *tile_offset)
 {
 	struct xe_device *xe = tile_to_xe(tile);
-	struct xe_gt *gt = &tile->primary_gt;
+	struct xe_gt *gt = tile->primary_gt;
 	u64 offset;
 	int err;
 	u32 reg;
...
@@ -18,6 +18,7 @@
 #include "regs/xe_gt_regs.h"
 #include "xe_device.h"
 #include "xe_drv.h"
+#include "xe_gt.h"
 #include "xe_macros.h"
 #include "xe_module.h"
 #include "xe_pci_types.h"
@@ -529,9 +530,12 @@ static int xe_info_init(struct xe_device *xe,
 		tile->xe = xe;
 		tile->id = id;
 
-		gt = &tile->primary_gt;
+		tile->primary_gt = xe_gt_alloc(tile);
+		if (IS_ERR(tile->primary_gt))
+			return PTR_ERR(tile->primary_gt);
+
+		gt = tile->primary_gt;
 		gt->info.id = id;	/* FIXME: Determine sensible numbering */
-		gt->tile = tile;
 		gt->info.type = XE_GT_TYPE_MAIN;
 		gt->info.__engine_mask = graphics_desc->hw_engine_mask;
 		if (MEDIA_VER(xe) < 13 && media_desc)
...
@@ -1314,7 +1314,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
 
 		/* TLB invalidation must be done before signaling rebind */
 		if (rebind && !xe_vm_no_dma_fences(vma->vm)) {
-			int err = invalidation_fence_init(&tile->primary_gt, ifence, fence,
+			int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
							  vma);
 			if (err) {
 				dma_fence_put(fence);
@@ -1634,7 +1634,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
 	int err;
 
 	/* TLB invalidation must be done before signaling unbind */
-	err = invalidation_fence_init(&tile->primary_gt, ifence, fence, vma);
+	err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
 	if (err) {
 		dma_fence_put(fence);
 		kfree(ifence);
...
@@ -1260,7 +1260,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	/* Kernel migration VM shouldn't have a circular loop.. */
 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
 		for_each_tile(tile, xe, id) {
-			struct xe_gt *gt = &tile->primary_gt;
+			struct xe_gt *gt = tile->primary_gt;
 			struct xe_vm *migrate_vm;
 			struct xe_engine *eng;
@@ -3410,7 +3410,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 			/*
 			 * FIXME: We potentially need to invalidate multiple
 			 * GTs within the tile
 			 */
-			seqno[id] = xe_gt_tlb_invalidation_vma(&tile->primary_gt, NULL, vma);
+			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
 			if (seqno[id] < 0)
 				return seqno[id];
 		}
@@ -3418,7 +3418,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 	for_each_tile(tile, xe, id) {
 		if (tile_needs_invalidate & BIT(id)) {
-			ret = xe_gt_tlb_invalidation_wait(&tile->primary_gt, seqno[id]);
+			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
 			if (ret < 0)
 				return ret;
 		}
...