Commit d0f1a853 authored by Jay Cornwall, committed by Alex Deucher

drm/amdkfd: Support newer assemblers in gfx10 trap handler

The contents of macros are parsed by the assembler before conditions
have been tested. This causes assembly errors when using IP-specific
instructions in the IP-unified trap handler.

Add a preprocessing step to filter IP-specific code.
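
A minimal sketch of the failure mode (the function name is hypothetical,
for illustration only): sp3 parses the contents of a macro before its
conditions are tested, so an IP-specific instruction inside a guarded
block still has to parse on every target, even when the condition is
false:

    function example_save_xnack(s)
    if ASIC_TARGET_NAVI1X                                // evaluated later...
        s_getreg_b32 s, hwreg(HW_REG_SHADER_XNACK_MASK)  // ...but parsed now;
                                                         // not a valid hwreg on gfx10.3
    end
    end

Running the source through cpp first (-DASIC_TARGET_NAVI1X=0 -P) removes
the guarded lines textually, so the target-specific assembler never sees
them.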

Also guard a Navi1x-specific instruction (no effect on Sienna_Cichlid).
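
The guard uses the same #if form as the rest of the filtered code, so cpp
strips it entirely for non-Navi1x builds (see the L_RESTORE_HWREG hunk
below):

    #if ASIC_TARGET_NAVI1X
        s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
    #endif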
Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
Reviewed-by: Yong Zhao <Yong.Zhao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 80b6cfed
@@ -23,13 +23,15 @@
 /* To compile this assembly code:
  *
  * Navi1x:
- *   PROJECT=gfx10 ./sp3 ASIC_TARGET_NAVI1X=1 cwsr_trap_handler_gfx10.asm -hex tmp.hex
+ *   cpp -DASIC_TARGET_NAVI1X=1 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
+ *   sp3-nv1x nv1x.sp3 -hex nv1x.hex
  *
  * Others:
- *   PROJECT=gfx10 ./sp3 ASIC_TARGET_NAVI1X=0 cwsr_trap_handler_gfx10.asm -hex tmp.hex
+ *   cpp -DASIC_TARGET_NAVI1X=0 cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
+ *   sp3-gfx10 gfx10.sp3 -hex gfx10.hex
  */
 
-var NO_SQC_STORE = !ASIC_TARGET_NAVI1X
+#define NO_SQC_STORE !ASIC_TARGET_NAVI1X
 
 var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
 var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
@@ -182,7 +184,7 @@ L_SKIP_RESTORE:
 L_FETCH_2ND_TRAP:
-if ASIC_TARGET_NAVI1X
+#if ASIC_TARGET_NAVI1X
         // Preserve and clear scalar XNACK state before issuing scalar loads.
         // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
         // unused space ttmp11[31:24].
@@ -196,7 +198,7 @@ if ASIC_TARGET_NAVI1X
         s_or_b32        ttmp11, ttmp11, ttmp3
         s_andn2_b32     ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
         s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
-end
+#endif
         // Read second-level TBA/TMA from first-level TMA and jump if available.
         // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
@@ -221,7 +223,7 @@ L_NO_NEXT_TRAP:
 L_EXCP_CASE:
         s_and_b32       ttmp1, ttmp1, 0xFFFF
-if ASIC_TARGET_NAVI1X
+#if ASIC_TARGET_NAVI1X
         // Restore SQ_WAVE_IB_STS.
         s_lshr_b32      ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
         s_and_b32       ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
@@ -229,7 +231,7 @@ if ASIC_TARGET_NAVI1X
         s_and_b32       ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
         s_or_b32        ttmp2, ttmp2, ttmp3
         s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
-end
+#endif
         // Restore SQ_WAVE_STATUS.
         s_and_b64       exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -255,9 +257,9 @@ L_NO_PC_REWIND:
         s_mov_b32       s_save_tmp, 0
         s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
 
+#if ASIC_TARGET_NAVI1X
         s_getreg_b32    s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
 
-if ASIC_TARGET_NAVI1X
         s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)
         s_lshl_b32      s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
         s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
@@ -271,7 +273,7 @@ if ASIC_TARGET_NAVI1X
         s_and_b32       s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
         s_setreg_b32    hwreg(HW_REG_IB_STS), s_save_tmp
-end
+#endif
 /* inform SPI the readiness and wait for SPI's go signal */
         s_mov_b32       s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
@@ -280,16 +282,16 @@ end
         s_sendmsg       sendmsg(MSG_SAVEWAVE)   //send SPI a message and wait for SPI's write to EXEC
 
-if ASIC_TARGET_NAVI1X
+#if ASIC_TARGET_NAVI1X
 L_SLEEP:
         // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause
         // SQ hang, since the 7,8th wave could not get arbit to exec inst, while
         // other waves are stuck into the sleep-loop and waiting for wrexec!=0
         s_sleep 0x2
         s_cbranch_execz L_SLEEP
-else
+#else
         s_waitcnt       lgkmcnt(0)
-end
+#endif
 
 /* setup Resource Contants */
         s_mov_b32       s_save_buf_rsrc0, s_save_spi_init_lo    //base_addr_lo
@@ -355,12 +357,12 @@ L_SAVE_HWREG:
         s_mov_b32       s_save_buf_rsrc2, 0x1000000     //NUM_RECORDS in bytes
 
-if NO_SQC_STORE
+#if NO_SQC_STORE
         v_mov_b32       v0, 0x0 //Offset[31:0] from buffer resource
         v_mov_b32       v1, 0x0 //Offset[63:32] from buffer resource
         v_mov_b32       v2, 0x0 //Set of SGPRs for TCP store
         s_mov_b32       m0, 0x0 //Next lane of v2 to write to
-end
+#endif
 
         write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
         write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
@@ -371,6 +373,8 @@ end
         s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
         write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset)
+
+        // Not used on Sienna_Cichlid but keep layout same for debugger.
         write_hwreg_to_mem(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset)
 
         s_getreg_b32    s_save_m0, hwreg(HW_REG_MODE)
@@ -382,11 +386,11 @@ end
         s_getreg_b32    s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
         write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
 
-if NO_SQC_STORE
+#if NO_SQC_STORE
         // Write HWREG/SGPRs with 32 VGPR lanes, wave32 is common case.
         s_mov_b32       exec_hi, 0x0
         buffer_store_dword      v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
-end
+#endif
 
 /* save SGPRs */
         // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save...
@@ -397,14 +401,14 @@ end
         s_add_u32       s_save_mem_offset, s_save_mem_offset, s_save_tmp
         s_mov_b32       s_save_buf_rsrc2, 0x1000000     //NUM_RECORDS in bytes
 
-if NO_SQC_STORE
+#if NO_SQC_STORE
         s_mov_b32       ttmp13, 0x0     //next VGPR lane to copy SGPR into
-else
+#else
         // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
         s_mov_b32       s_save_xnack_mask, s_save_buf_rsrc0
         s_add_u32       s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
         s_addc_u32      s_save_buf_rsrc1, s_save_buf_rsrc1, 0
-end
+#endif
 
         s_mov_b32       m0, 0x0 //SGPR initial index value =0
         s_nop   0x0     //Manually inserted wait states
@@ -421,7 +425,7 @@ L_SAVE_SGPR_LOOP:
         write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)
 
-if NO_SQC_STORE
+#if NO_SQC_STORE
         s_cmp_eq_u32    ttmp13, 0x20    //have 32 VGPR lanes filled?
         s_cbranch_scc0  L_SAVE_SGPR_SKIP_TCP_STORE
@@ -430,7 +434,7 @@ if NO_SQC_STORE
         s_mov_b32       ttmp13, 0x0
         v_mov_b32       v2, 0x0
 L_SAVE_SGPR_SKIP_TCP_STORE:
-end
+#endif
 
         s_add_u32       m0, m0, 16      //next sgpr index
         s_cmp_lt_u32    m0, 96  //scc = (m0 < first 96 SGPR) ? 1 : 0
@@ -445,12 +449,12 @@ end
         s_movrels_b64   s10, s10        //s10 = s[10+m0], s11 = s[11+m0]
         write_12sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)
 
-if NO_SQC_STORE
+#if NO_SQC_STORE
         buffer_store_dword      v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
-else
+#else
         // restore s_save_buf_rsrc0,1
         s_mov_b32       s_save_buf_rsrc0, s_save_xnack_mask
-end
+#endif
 
 /* save LDS */
@@ -899,13 +903,17 @@ L_RESTORE_HWREG:
         s_and_b32       s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
         s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
+
+#if ASIC_TARGET_NAVI1X
         s_setreg_b32    hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
+#endif
+
         s_and_b32       s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
         s_lshr_b32      s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
         s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
         s_setreg_b32    hwreg(HW_REG_MODE), s_restore_mode
 
-if ASIC_TARGET_NAVI1X
+#if ASIC_TARGET_NAVI1X
         s_and_b32       s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_RCNT_MASK
         s_lshr_b32      s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
         s_lshl_b32      s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
@@ -923,7 +931,7 @@ if ASIC_TARGET_NAVI1X
         s_and_b32       s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
         s_lshr_b32      s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
         s_setreg_b32    hwreg(HW_REG_IB_STS), s_restore_mode
-end
+#endif
 
         s_and_b64       exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
         s_and_b64       vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
@@ -938,51 +946,51 @@ L_END_PGM:
 end
 
 function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
-if NO_SQC_STORE
+#if NO_SQC_STORE
         // Copy into VGPR for later TCP store.
         v_writelane_b32 v2, s, m0
         s_add_u32       m0, m0, 0x1
-else
+#else
         s_mov_b32       exec_lo, m0
         s_mov_b32       m0, s_mem_offset
         s_buffer_store_dword    s, s_rsrc, m0 glc:1
         s_add_u32       s_mem_offset, s_mem_offset, 4
         s_mov_b32       m0, exec_lo
-end
+#endif
 end
 
 function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
-if NO_SQC_STORE
+#if NO_SQC_STORE
         // Copy into VGPR for later TCP store.
         for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
                 v_writelane_b32 v2, s[sgpr_idx], ttmp13
                 s_add_u32       ttmp13, ttmp13, 0x1
         end
-else
+#else
         s_buffer_store_dwordx4  s[0], s_rsrc, 0 glc:1
         s_buffer_store_dwordx4  s[4], s_rsrc, 16 glc:1
        s_buffer_store_dwordx4  s[8], s_rsrc, 32 glc:1
         s_buffer_store_dwordx4  s[12], s_rsrc, 48 glc:1
         s_add_u32       s_rsrc[0], s_rsrc[0], 4*16
         s_addc_u32      s_rsrc[1], s_rsrc[1], 0x0
-end
+#endif
 end
 
 function write_12sgpr_to_mem(s, s_rsrc, s_mem_offset)
-if NO_SQC_STORE
+#if NO_SQC_STORE
         // Copy into VGPR for later TCP store.
         for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
                 v_writelane_b32 v2, s[sgpr_idx], ttmp13
                 s_add_u32       ttmp13, ttmp13, 0x1
         end
-else
+#else
         s_buffer_store_dwordx4  s[0], s_rsrc, 0 glc:1
         s_buffer_store_dwordx4  s[4], s_rsrc, 16 glc:1
         s_buffer_store_dwordx4  s[8], s_rsrc, 32 glc:1
         s_add_u32       s_rsrc[0], s_rsrc[0], 4*12
         s_addc_u32      s_rsrc[1], s_rsrc[1], 0x0
-end
+#endif
 end
 
 function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
...