Commit ea196c54 authored by Akira Tsukamoto, committed by Palmer Dabbelt

riscv: __asm_copy_to-from_user: Fix: Typos in comments

Fix typos and grammar mistakes, and use more intuitive label names.
Signed-off-by: Akira Tsukamoto <akira.tsukamoto@gmail.com>
Fixes: ca6eaaa2 ("riscv: __asm_copy_to-from_user: Optimize unaligned memory access and pipeline stall")
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent d4b3e010
...@@ -33,19 +33,20 @@ ENTRY(__asm_copy_from_user) ...@@ -33,19 +33,20 @@ ENTRY(__asm_copy_from_user)
/* /*
* Use byte copy only if too small. * Use byte copy only if too small.
* SZREG holds 4 for RV32 and 8 for RV64
*/ */
li a3, 9*SZREG /* size must be larger than size in word_copy */ li a3, 9*SZREG /* size must be larger than size in word_copy */
bltu a2, a3, .Lbyte_copy_tail bltu a2, a3, .Lbyte_copy_tail
/* /*
* Copy first bytes until dst is align to word boundary. * Copy first bytes until dst is aligned to word boundary.
* a0 - start of dst * a0 - start of dst
* t1 - start of aligned dst * t1 - start of aligned dst
*/ */
addi t1, a0, SZREG-1 addi t1, a0, SZREG-1
andi t1, t1, ~(SZREG-1) andi t1, t1, ~(SZREG-1)
/* dst is already aligned, skip */ /* dst is already aligned, skip */
beq a0, t1, .Lskip_first_bytes beq a0, t1, .Lskip_align_dst
1: 1:
/* a5 - one byte for copying data */ /* a5 - one byte for copying data */
fixup lb a5, 0(a1), 10f fixup lb a5, 0(a1), 10f
...@@ -54,7 +55,7 @@ ENTRY(__asm_copy_from_user) ...@@ -54,7 +55,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */ addi a0, a0, 1 /* dst */
bltu a0, t1, 1b /* t1 - start of aligned dst */ bltu a0, t1, 1b /* t1 - start of aligned dst */
.Lskip_first_bytes: .Lskip_align_dst:
/* /*
* Now dst is aligned. * Now dst is aligned.
* Use shift-copy if src is misaligned. * Use shift-copy if src is misaligned.
...@@ -71,7 +72,6 @@ ENTRY(__asm_copy_from_user) ...@@ -71,7 +72,6 @@ ENTRY(__asm_copy_from_user)
* *
* a0 - start of aligned dst * a0 - start of aligned dst
* a1 - start of aligned src * a1 - start of aligned src
* a3 - a1 & mask:(SZREG-1)
* t0 - end of aligned dst * t0 - end of aligned dst
*/ */
addi t0, t0, -(8*SZREG) /* not to over run */ addi t0, t0, -(8*SZREG) /* not to over run */
...@@ -106,7 +106,7 @@ ENTRY(__asm_copy_from_user) ...@@ -106,7 +106,7 @@ ENTRY(__asm_copy_from_user)
* For misaligned copy we still perform aligned word copy, but * For misaligned copy we still perform aligned word copy, but
* we need to use the value fetched from the previous iteration and * we need to use the value fetched from the previous iteration and
* do some shifts. * do some shifts.
* This is safe because reading less than a word size. * This is safe because reading is less than a word size.
* *
* a0 - start of aligned dst * a0 - start of aligned dst
* a1 - start of src * a1 - start of src
...@@ -116,7 +116,7 @@ ENTRY(__asm_copy_from_user) ...@@ -116,7 +116,7 @@ ENTRY(__asm_copy_from_user)
*/ */
/* calculating aligned word boundary for dst */ /* calculating aligned word boundary for dst */
andi t1, t0, ~(SZREG-1) andi t1, t0, ~(SZREG-1)
/* Converting unaligned src to aligned arc */ /* Converting unaligned src to aligned src */
andi a1, a1, ~(SZREG-1) andi a1, a1, ~(SZREG-1)
/* /*
...@@ -128,7 +128,7 @@ ENTRY(__asm_copy_from_user) ...@@ -128,7 +128,7 @@ ENTRY(__asm_copy_from_user)
li a5, SZREG*8 li a5, SZREG*8
sub t4, a5, t3 sub t4, a5, t3
/* Load the first word to combine with seceond word */ /* Load the first word to combine with second word */
fixup REG_L a5, 0(a1), 10f fixup REG_L a5, 0(a1), 10f
3: 3:
...@@ -160,7 +160,7 @@ ENTRY(__asm_copy_from_user) ...@@ -160,7 +160,7 @@ ENTRY(__asm_copy_from_user)
* a1 - start of remaining src * a1 - start of remaining src
* t0 - end of remaining dst * t0 - end of remaining dst
*/ */
bgeu a0, t0, 5f bgeu a0, t0, .Lout_copy_user /* check if end of copy */
4: 4:
fixup lb a5, 0(a1), 10f fixup lb a5, 0(a1), 10f
addi a1, a1, 1 /* src */ addi a1, a1, 1 /* src */
...@@ -168,7 +168,7 @@ ENTRY(__asm_copy_from_user) ...@@ -168,7 +168,7 @@ ENTRY(__asm_copy_from_user)
addi a0, a0, 1 /* dst */ addi a0, a0, 1 /* dst */
bltu a0, t0, 4b /* t0 - end of dst */ bltu a0, t0, 4b /* t0 - end of dst */
5: .Lout_copy_user:
/* Disable access to user memory */ /* Disable access to user memory */
csrc CSR_STATUS, t6 csrc CSR_STATUS, t6
li a0, 0 li a0, 0
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment