Commit fe6510b5 authored by Jussi Kivilinna, committed by Herbert Xu

crypto: aesni_intel - fix accessing of unaligned memory

The new XTS code for aesni_intel uses input buffers directly as memory operands
for pxor instructions, which causes a crash if those buffers are not aligned to
16 bytes.

This patch changes the XTS code to handle unaligned memory correctly by loading
the input with movdqu, which permits unaligned access, and applying pxor to the
register instead.
Reported-by: Dave Jones <davej@redhat.com>
Tested-by: Dave Jones <davej@redhat.com>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 68be0b1a
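For illustration only (not part of the commit): a minimal sketch of the two access patterns, with a placeholder source pointer in %rdi and %xmm0/%xmm1 standing in for the STATE and INC registers used in the diff below. Legacy SSE instructions such as pxor require a 16-byte aligned address when given a memory operand, while movdqu accepts any alignment, which is the substitution the patch makes.

	/* Old pattern: pxor with a memory source operand requires the
	 * address to be 16-byte aligned; an unaligned buffer raises
	 * #GP and crashes.
	 */
	pxor	(%rdi), %xmm0

	/* New pattern: movdqu performs an unaligned 16-byte load into
	 * a scratch register, and pxor then works register-to-register,
	 * so the alignment requirement disappears.
	 */
	movdqu	(%rdi), %xmm1
	pxor	%xmm1, %xmm0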
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
 	addq %rcx, KEYP
 
 	movdqa IV, STATE1
-	pxor 0x00(INP), STATE1
+	movdqu 0x00(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x00(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x10(INP), STATE2
+	movdqu 0x10(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x10(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x20(INP), STATE3
+	movdqu 0x20(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x20(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x30(INP), STATE4
+	movdqu 0x30(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
 	call *%r11
 
-	pxor 0x00(OUTP), STATE1
+	movdqu 0x00(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x00(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE1
-	pxor 0x40(INP), STATE1
+	movdqu 0x40(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x40(OUTP)
 
-	pxor 0x10(OUTP), STATE2
+	movdqu 0x10(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x10(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x50(INP), STATE2
+	movdqu 0x50(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x50(OUTP)
 
-	pxor 0x20(OUTP), STATE3
+	movdqu 0x20(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x20(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x60(INP), STATE3
+	movdqu 0x60(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x60(OUTP)
 
-	pxor 0x30(OUTP), STATE4
+	movdqu 0x30(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x30(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x70(INP), STATE4
+	movdqu 0x70(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x70(OUTP)
 
 	_aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)
 
 	call *%r11
 
-	pxor 0x40(OUTP), STATE1
+	movdqu 0x40(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x40(OUTP)
 
-	pxor 0x50(OUTP), STATE2
+	movdqu 0x50(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x50(OUTP)
 
-	pxor 0x60(OUTP), STATE3
+	movdqu 0x60(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x60(OUTP)
 
-	pxor 0x70(OUTP), STATE4
+	movdqu 0x70(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x70(OUTP)
 
 	ret