Commit 0bb5d5b0 authored by Luben Tuikov, committed by Alex Deucher

drm/amdgpu: Move to a per-IB secure flag (TMZ)

Move from a per-CS secure flag (TMZ) to a per-IB
secure flag.
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5888f07a
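With this change, a command submission no longer carries a single secure bit; each IB chunk is flagged individually with AMDGPU_IB_FLAGS_SECURE, and the ring code emits the FRAME_CONTROL TMZ start/stop packets around runs of secure IBs. A minimal userspace sketch of flagging one IB as secure follows; the chunk is still passed through the usual AMDGPU_CHUNK_ID_IB path, and ib_gpu_addr / ib_size_bytes are hypothetical placeholders, not part of this commit.

	/* Hypothetical sketch: request TMZ for this IB only. */
	struct drm_amdgpu_cs_chunk_ib ib_info = {
		.flags    = AMDGPU_IB_FLAGS_SECURE,	/* per-IB secure flag introduced below */
		.ip_type  = AMDGPU_HW_IP_GFX,		/* target the GFX ring */
		.va_start = ib_gpu_addr,		/* hypothetical: GPU VA of the IB */
		.ib_bytes = ib_size_bytes,		/* hypothetical: IB size in bytes */
	};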
@@ -232,8 +232,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 	if (ret)
 		goto free_all_kdata;
 
-	p->job->secure = cs->in.flags & AMDGPU_CS_FLAGS_SECURE;
-
 	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
 		ret = -ECANCELED;
 		goto free_all_kdata;
......
@@ -133,6 +133,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	uint64_t fence_ctx;
 	uint32_t status = 0, alloc_size;
 	unsigned fence_flags = 0;
+	bool secure;
 	unsigned i;
 	int r = 0;
@@ -214,9 +215,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job && ring->funcs->emit_cntxcntl) {
 		status |= job->preamble_status;
 		status |= job->preemption_status;
-		amdgpu_ring_emit_cntxcntl(ring, status, job->secure);
+		amdgpu_ring_emit_cntxcntl(ring, status);
 	}
 
+	secure = false;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
@@ -228,12 +230,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
+		/* If this IB is TMZ, add frame TMZ start packet,
+		 * else, turn off TMZ.
+		 */
+		if (ib->flags & AMDGPU_IB_FLAGS_SECURE && ring->funcs->emit_tmz) {
+			if (!secure) {
+				secure = true;
+				amdgpu_ring_emit_tmz(ring, true);
+			}
+		} else if (secure) {
+			secure = false;
+			amdgpu_ring_emit_tmz(ring, false);
+		}
+
 		amdgpu_ring_emit_ib(ring, job, ib, status);
 		status &= ~AMDGPU_HAVE_CTX_SWITCH;
 	}
 
-	if (ring->funcs->emit_tmz)
-		amdgpu_ring_emit_tmz(ring, false, job ? job->secure : false);
+	if (secure) {
+		secure = false;
+		amdgpu_ring_emit_tmz(ring, false);
+	}
 
 #ifdef CONFIG_X86_64
 	if (!(adev->flags & AMD_IS_APU))
......
@@ -62,9 +62,6 @@ struct amdgpu_job {
 	/* user fence handling */
 	uint64_t		uf_addr;
 	uint64_t		uf_sequence;
-
-	/* the job is due to a secure command submission */
-	bool			secure;
 };
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
......
@@ -168,8 +168,7 @@ struct amdgpu_ring_funcs {
 	void (*begin_use)(struct amdgpu_ring *ring);
 	void (*end_use)(struct amdgpu_ring *ring);
 	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
-	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags,
-			       bool trusted);
+	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
 	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
 			  uint32_t reg_val_offs);
 	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
@@ -178,7 +177,7 @@ struct amdgpu_ring_funcs {
 	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
 					uint32_t reg0, uint32_t reg1,
 					uint32_t ref, uint32_t mask);
-	void (*emit_tmz)(struct amdgpu_ring *ring, bool start, bool trusted);
+	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
 	/* Try to soft recover the ring to make the fence signal */
 	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
 	int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -252,12 +251,12 @@ struct amdgpu_ring {
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
-#define amdgpu_ring_emit_cntxcntl(r, d, s) (r)->funcs->emit_cntxcntl((r), (d), (s))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b, s) (r)->funcs->emit_tmz((r), (b), (s))
+#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
......
@@ -3037,8 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
-				    bool trusted);
+static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -7436,8 +7435,7 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
 }
 
 static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
-					 uint32_t flags,
-					 bool trusted)
+					 uint32_t flags)
 {
 	uint32_t dw2 = 0;
@@ -7445,8 +7443,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
 		gfx_v10_0_ring_emit_ce_meta(ring,
 			    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
-	gfx_v10_0_ring_emit_tmz(ring, true, trusted);
-
 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
 		/* set load_global_config & load_global_uconfig */
@@ -7603,17 +7599,12 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
 					   sizeof(de_payload) >> 2);
 }
 
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
-				    bool trusted)
+static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
 {
-	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-	/*
-	 * cmd = 0: frame begin
-	 * cmd = 1: frame end
-	 */
-	amdgpu_ring_write(ring,
-			  ((amdgpu_is_tmz(ring->adev) && trusted) ? FRAME_TMZ : 0)
-			  | FRAME_CMD(start ? 0 : 1));
+	if (amdgpu_is_tmz(ring->adev)) {
+		amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+		amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
+	}
 }
 
 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
......
@@ -2969,8 +2969,7 @@ static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 	return clock;
 }
 
-static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
-				      bool trusted)
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
 	if (flags & AMDGPU_HAVE_CTX_SWITCH)
 		gfx_v6_0_ring_emit_vgt_flush(ring);
......
@@ -2320,8 +2320,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, control);
 }
 
-static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
-				      bool trusted)
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
 	uint32_t dw2 = 0;
......
@@ -6329,8 +6329,7 @@ static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0);
 }
 
-static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
-				      bool trusted)
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
 	uint32_t dw2 = 0;
......
@@ -5442,29 +5442,21 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
 }
 
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
-				   bool trusted)
+static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
 {
-	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-	/*
-	 * cmd = 0: frame begin
-	 * cmd = 1: frame end
-	 */
-	amdgpu_ring_write(ring,
-			  ((amdgpu_is_tmz(ring->adev) && trusted) ? FRAME_TMZ : 0)
-			  | FRAME_CMD(start ? 0 : 1));
+	if (amdgpu_is_tmz(ring->adev)) {
+		amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+		amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
+	}
 }
 
-static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
-				      bool trusted)
+static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
 	uint32_t dw2 = 0;
 
 	if (amdgpu_sriov_vf(ring->adev))
 		gfx_v9_0_ring_emit_ce_meta(ring);
 
-	gfx_v9_0_ring_emit_tmz(ring, true, trusted);
-
 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
 		/* set load_global_config & load_global_uconfig */
......
@@ -558,9 +558,6 @@ struct drm_amdgpu_cs_chunk {
 	__u64		chunk_data;
 };
 
-/* Flag the command submission as secure */
-#define AMDGPU_CS_FLAGS_SECURE          (1 << 0)
-
 struct drm_amdgpu_cs_in {
 	/** Rendering context id */
 	__u32		ctx_id;
@@ -601,6 +598,10 @@ union drm_amdgpu_cs {
  */
 #define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
 
+/* Flag the IB as secure (TMZ)
+ */
+#define AMDGPU_IB_FLAGS_SECURE  (1 << 5)
+
 struct drm_amdgpu_cs_chunk_ib {
 	__u32 _pad;
 	/** AMDGPU_IB_FLAG_* */
......