Commit e183910a authored by Thomas Hellström

drm/xe: Split lrc seqno fence creation up

Since a lock is sometimes required to initialize a seqno fence,
and it might be desirable not to hold that lock while performing
memory allocations, split lrc seqno fence creation into an
allocation phase and an initialization phase.

Since lrc seqno fences are hw_fences under the hood, split hw
fence creation the same way, and remove the xe_hw_fence_create()
function now that it is no longer used.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240527135912.152156-3-thomas.hellstrom@linux.intel.com
parent 08f72008
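The split enables a two-phase calling pattern: allocate while unlocked,
initialize while locked. A minimal sketch (the caller and the lock name
are hypothetical; the xe_lrc_* calls are the functions added below):

        struct dma_fence *fence;

        /* Allocation phase: may allocate memory, no lock held. */
        fence = xe_lrc_alloc_seqno_fence();
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* Initialization phase: assigns the next seqno; no allocation. */
        spin_lock(&submit_lock);                /* hypothetical lock */
        xe_lrc_init_seqno_fence(lrc, fence);
        spin_unlock(&submit_lock);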
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -208,23 +208,58 @@ static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence)
 	return container_of(fence, struct xe_hw_fence, dma);
 }
 
-struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
-				       struct iosys_map seqno_map)
+/**
+ * xe_hw_fence_alloc() - Allocate an hw fence.
+ *
+ * Allocate but don't initialize an hw fence.
+ *
+ * Return: Pointer to the allocated fence or
+ * negative error pointer on error.
+ */
+struct dma_fence *xe_hw_fence_alloc(void)
 {
-	struct xe_hw_fence *fence;
+	struct xe_hw_fence *hw_fence = fence_alloc();
 
-	fence = fence_alloc();
-	if (!fence)
+	if (!hw_fence)
 		return ERR_PTR(-ENOMEM);
 
-	fence->ctx = ctx;
-	fence->seqno_map = seqno_map;
-	INIT_LIST_HEAD(&fence->irq_link);
+	return &hw_fence->dma;
+}
 
-	dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
-		       ctx->dma_fence_ctx, ctx->next_seqno++);
+/**
+ * xe_hw_fence_free() - Free an hw fence.
+ * @fence: Pointer to the fence to free.
+ *
+ * Frees an hw fence that hasn't yet been
+ * initialized.
+ */
+void xe_hw_fence_free(struct dma_fence *fence)
+{
+	fence_free(&fence->rcu);
+}
 
-	trace_xe_hw_fence_create(fence);
+/**
+ * xe_hw_fence_init() - Initialize an hw fence.
+ * @fence: Pointer to the fence to initialize.
+ * @ctx: Pointer to the struct xe_hw_fence_ctx fence context.
+ * @seqno_map: Pointer to the map into where the seqno is blitted.
+ *
+ * Initializes a pre-allocated hw fence.
+ * After initialization, the fence is subject to normal
+ * dma-fence refcounting.
+ */
+void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
+		      struct iosys_map seqno_map)
+{
+	struct xe_hw_fence *hw_fence =
+		container_of(fence, typeof(*hw_fence), dma);
 
-	return fence;
+	hw_fence->ctx = ctx;
+	hw_fence->seqno_map = seqno_map;
+	INIT_LIST_HEAD(&hw_fence->irq_link);
+
+	dma_fence_init(fence, &xe_hw_fence_ops, &ctx->irq->lock,
+		       ctx->dma_fence_ctx, ctx->next_seqno++);
+
+	trace_xe_hw_fence_create(hw_fence);
 }
--- a/drivers/gpu/drm/xe/xe_hw_fence.h
+++ b/drivers/gpu/drm/xe/xe_hw_fence.h
@@ -24,7 +24,10 @@ void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
 			  struct xe_hw_fence_irq *irq, const char *name);
 void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx);
 
-struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
-				       struct iosys_map seqno_map);
+struct dma_fence *xe_hw_fence_alloc(void);
+
+void xe_hw_fence_free(struct dma_fence *fence);
+void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
+		      struct iosys_map seqno_map);
 
 #endif
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1033,10 +1033,54 @@ u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc)
 	return __xe_lrc_seqno_ggtt_addr(lrc);
 }
 
+/**
+ * xe_lrc_alloc_seqno_fence() - Allocate an lrc seqno fence.
+ *
+ * Allocate but don't initialize an lrc seqno fence.
+ *
+ * Return: Pointer to the allocated fence or
+ * negative error pointer on error.
+ */
+struct dma_fence *xe_lrc_alloc_seqno_fence(void)
+{
+	return xe_hw_fence_alloc();
+}
+
+/**
+ * xe_lrc_free_seqno_fence() - Free an lrc seqno fence.
+ * @fence: Pointer to the fence to free.
+ *
+ * Frees an lrc seqno fence that hasn't yet been
+ * initialized.
+ */
+void xe_lrc_free_seqno_fence(struct dma_fence *fence)
+{
+	xe_hw_fence_free(fence);
+}
+
+/**
+ * xe_lrc_init_seqno_fence() - Initialize an lrc seqno fence.
+ * @lrc: Pointer to the lrc.
+ * @fence: Pointer to the fence to initialize.
+ *
+ * Initializes a pre-allocated lrc seqno fence.
+ * After initialization, the fence is subject to normal
+ * dma-fence refcounting.
+ */
+void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence)
+{
+	xe_hw_fence_init(fence, &lrc->fence_ctx, __xe_lrc_seqno_map(lrc));
+}
+
 struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc)
 {
-	return &xe_hw_fence_create(&lrc->fence_ctx,
-				   __xe_lrc_seqno_map(lrc))->dma;
+	struct dma_fence *fence = xe_lrc_alloc_seqno_fence();
+
+	if (IS_ERR(fence))
+		return fence;
+
+	xe_lrc_init_seqno_fence(lrc, fence);
+	return fence;
 }
 
 s32 xe_lrc_seqno(struct xe_lrc *lrc)
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -44,6 +44,9 @@ void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val);
 u64 xe_lrc_descriptor(struct xe_lrc *lrc);
 u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc);
 
+struct dma_fence *xe_lrc_alloc_seqno_fence(void);
+void xe_lrc_free_seqno_fence(struct dma_fence *fence);
+void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence);
 struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc);
 
 s32 xe_lrc_seqno(struct xe_lrc *lrc);
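The free functions cover the window between allocation and initialization:
per the kerneldoc above, a fence only becomes subject to normal dma-fence
refcounting once initialized, so an uninitialized fence is released with
the free function rather than dma_fence_put(). A sketch of that error path
(the resource-reservation step is hypothetical):

        struct dma_fence *fence = xe_lrc_alloc_seqno_fence();
        int err;

        if (IS_ERR(fence))
                return PTR_ERR(fence);

        err = reserve_submission_resources();   /* hypothetical step that may fail */
        if (err) {
                /* Never initialized, so free directly, no dma_fence_put(). */
                xe_lrc_free_seqno_fence(fence);
                return err;
        }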