Commit bf67014d authored by Huang Rui, committed by Alex Deucher

drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence

The job embedded fence doesn't have its flags initialized at
dma_fence_init() time. As a result, we take the wrong path in the
amdgpu_fence_get_timeline_name callback and trigger a NULL pointer
panic once the trace event is enabled. So introduce a new amdgpu_fence
ops object to indicate the job embedded fence.

[  156.131790] BUG: kernel NULL pointer dereference, address: 00000000000002a0
[  156.131804] #PF: supervisor read access in kernel mode
[  156.131811] #PF: error_code(0x0000) - not-present page
[  156.131817] PGD 0 P4D 0
[  156.131824] Oops: 0000 [#1] PREEMPT SMP PTI
[  156.131832] CPU: 6 PID: 1404 Comm: sdma0 Tainted: G           OE     5.16.0-rc1-custom #1
[  156.131842] Hardware name: Gigabyte Technology Co., Ltd. Z170XP-SLI/Z170XP-SLI-CF, BIOS F20 11/04/2016
[  156.131848] RIP: 0010:strlen+0x0/0x20
[  156.131859] Code: 89 c0 c3 0f 1f 80 00 00 00 00 48 01 fe eb 0f 0f b6 07 38 d0 74 10 48 83 c7 01 84 c0 74 05 48 39 f7 75 ec 31 c0 c3 48 89 f8 c3 <80> 3f 00 74 10 48 89 f8 48 83 c0 01 80 38 00 75 f7 48 29 f8 c3 31
[  156.131872] RSP: 0018:ffff9bd0018dbcf8 EFLAGS: 00010206
[  156.131880] RAX: 00000000000002a0 RBX: ffff8d0305ef01b0 RCX: 000000000000000b
[  156.131888] RDX: ffff8d03772ab924 RSI: ffff8d0305ef01b0 RDI: 00000000000002a0
[  156.131895] RBP: ffff9bd0018dbd60 R08: ffff8d03002094d0 R09: 0000000000000000
[  156.131901] R10: 000000000000005e R11: 0000000000000065 R12: ffff8d03002094d0
[  156.131907] R13: 000000000000001f R14: 0000000000070018 R15: 0000000000000007
[  156.131914] FS:  0000000000000000(0000) GS:ffff8d062ed80000(0000) knlGS:0000000000000000
[  156.131923] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  156.131929] CR2: 00000000000002a0 CR3: 000000001120a005 CR4: 00000000003706e0
[  156.131937] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[  156.131942] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[  156.131949] Call Trace:
[  156.131953]  <TASK>
[  156.131957]  ? trace_event_raw_event_dma_fence+0xcc/0x200
[  156.131973]  ? ring_buffer_unlock_commit+0x23/0x130
[  156.131982]  dma_fence_init+0x92/0xb0
[  156.131993]  amdgpu_fence_emit+0x10d/0x2b0 [amdgpu]
[  156.132302]  amdgpu_ib_schedule+0x2f9/0x580 [amdgpu]
[  156.132586]  amdgpu_job_run+0xed/0x220 [amdgpu]
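
The trace shows dma_fence_init() itself crashing: trace_dma_fence_init()
fires inside dma_fence_init() and calls ops->get_timeline_name() while the
job marker flag is still clear, because the old amdgpu_fence_emit() only set
AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT after dma_fence_init() returned. A
simplified sketch of the broken ordering (condensed from the code removed
below, not the exact driver source):

	/* old flow: trace_dma_fence_init() fires in here and calls
	 * ops->get_timeline_name(), which still takes the non-job path */
	dma_fence_init(fence, &amdgpu_fence_ops, &ring->fence_drv.lock,
		       adev->fence_context + ring->idx, seq);

	/* too late: the trace event has already dereferenced
	 * to_amdgpu_fence(f)->ring on a job embedded fence */
	if (job != NULL)
		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);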

v2: fix mismatch warning between the prototype and function name (Ray, kernel test robot)
Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a2fbfd51
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4461,7 +4461,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 				 struct amdgpu_reset_context *reset_context)
 {
-	int i, j, r = 0;
+	int i, r = 0;
 	struct amdgpu_job *job = NULL;
 	bool need_full_reset =
 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4483,15 +4483,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		/*clear job fence from fence drv to avoid force_completion
 		 *leave NULL and vm flush fence in fence drv */
-		for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
-			struct dma_fence *old, **ptr;
-
-			ptr = &ring->fence_drv.fences[j];
-			old = rcu_dereference_protected(*ptr, 1);
-			if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
-				RCU_INIT_POINTER(*ptr, NULL);
-			}
-		}
+		amdgpu_fence_driver_clear_job_fences(ring);
+
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
  * Cast helper
  */
 static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
-	if (__f->base.ops == &amdgpu_fence_ops)
+	if (__f->base.ops == &amdgpu_fence_ops ||
+	    __f->base.ops == &amdgpu_job_fence_ops)
 		return __f;
 
 	return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
 	}
 
 	seq = ++ring->fence_drv.sync_seq;
-	if (job != NULL && job->job_run_counter) {
+	if (job && job->job_run_counter) {
 		/* reinit seq for resubmitted jobs */
 		fence->seqno = seq;
 	} else {
-		dma_fence_init(fence, &amdgpu_fence_ops,
-			       &ring->fence_drv.lock,
-			       adev->fence_context + ring->idx,
-			       seq);
-	}
-
-	if (job != NULL) {
-		/* mark this fence has a parent job */
-		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+		if (job)
+			dma_fence_init(fence, &amdgpu_job_fence_ops,
+				       &ring->fence_drv.lock,
+				       adev->fence_context + ring->idx, seq);
+		else
+			dma_fence_init(fence, &amdgpu_fence_ops,
+				       &ring->fence_drv.lock,
+				       adev->fence_context + ring->idx, seq);
 	}
 
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
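
With the ops table chosen up front, dma_fence_init() publishes the fence's
type before any tracepoint can look at it, and every callback can recover
its container from the ops pointer alone. A sketch of the two layouts the
container_of() calls rely on (struct bodies abbreviated here; see the driver
sources for the full definitions):

	struct amdgpu_fence {
		struct dma_fence base;		/* base.ops == &amdgpu_fence_ops */
		struct amdgpu_ring *ring;	/* only valid for this layout */
	};

	struct amdgpu_job {
		struct drm_sched_job base;
		/* ... */
		struct dma_fence hw_fence;	/* hw_fence.ops == &amdgpu_job_fence_ops */
	};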
@@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 	}
 }
 
+/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of a ring
+ *
+ * @ring: the ring whose job embedded fences are to be cleared
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+	int i;
+	struct dma_fence *old, **ptr;
+
+	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+		ptr = &ring->fence_drv.fences[i];
+		old = rcu_dereference_protected(*ptr, 1);
+		if (old && old->ops == &amdgpu_job_fence_ops)
+			RCU_INIT_POINTER(*ptr, NULL);
+	}
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-	struct amdgpu_ring *ring;
+	return (const char *)to_amdgpu_fence(f)->ring->name;
+}
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
-
-		ring = to_amdgpu_ring(job->base.sched);
-	} else {
-		ring = to_amdgpu_fence(f)->ring;
-	}
-
-	return (const char *)ring->name;
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+	return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
 
 /**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-	struct amdgpu_ring *ring;
+	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+	return true;
+}
 
-		ring = to_amdgpu_ring(job->base.sched);
-	} else {
-		ring = to_amdgpu_fence(f)->ring;
-	}
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above; it only
+ * handles the job embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-	if (!timer_pending(&ring->fence_drv.fallback_timer))
-		amdgpu_fence_schedule_fallback(ring);
+	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
 
 	return true;
 }
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 {
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		/* free job if fence has a parent job */
-		struct amdgpu_job *job;
-
-		job = container_of(f, struct amdgpu_job, hw_fence);
-		kfree(job);
-	} else {
-		/* free fence_slab if it's separated fence */
-		struct amdgpu_fence *fence;
-
-		fence = to_amdgpu_fence(f);
-		kmem_cache_free(amdgpu_fence_slab, fence);
-	}
+	/* free fence_slab if it's separated fence */
+	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
+
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+	/* free job if fence has a parent job */
+	kfree(container_of(f, struct amdgpu_job, hw_fence));
 }
 
 /**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above; it only
+ * handles the job embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+	call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
 static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
 	.release = amdgpu_fence_release,
 };
 
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+	.get_driver_name = amdgpu_fence_get_driver_name,
+	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
+	.enable_signaling = amdgpu_job_fence_enable_signaling,
+	.release = amdgpu_job_fence_release,
+};
+
 /*
  * Fence debugfs
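
Since dma_fence_init() stores the ops pointer before the init tracepoint
fires, a fence's flavour can be tested safely at any point in its lifetime
with a plain pointer comparison, which is exactly what
amdgpu_fence_driver_clear_job_fences() above does. A minimal illustration of
the idiom (hypothetical helper, not part of this patch):

	/* hypothetical helper: true iff @f is a job embedded fence */
	static bool amdgpu_fence_is_job_embedded(struct dma_fence *f)
	{
		/* f->ops is assigned inside dma_fence_init(), so this is
		 * valid as soon as the fence is visible to tracepoints */
		return f->ops == &amdgpu_job_fence_ops;
	}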
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
-/* fence flag bit to indicate the fence is embedded in job */
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
-
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
 	struct dma_fence		**fences;
 };
 
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,