Commit 19940ebb authored by Eric Biggers's avatar Eric Biggers Committed by Herbert Xu

crypto: x86/sha256 - fix possible crash with CFI enabled

sha256_transform_ssse3(), sha256_transform_avx(),
sha256_transform_rorx(), and sha256_ni_transform() are called via
indirect function calls.  Therefore they need to use
SYM_TYPED_FUNC_START instead of SYM_FUNC_START to cause their type
hashes to be emitted when the kernel is built with CONFIG_CFI_CLANG=y.
Otherwise, the code crashes with a CFI failure (if the compiler didn't
happen to optimize out the indirect calls).

Fixes: ccace936 ("x86: Add types to indirectly called assembly functions")
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 32f34bf7
@@ -48,6 +48,7 @@
 ########################################################################
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>

 ## assume buffers not aligned
 #define VMOVDQ vmovdqu
@@ -346,7 +347,7 @@ a = TMP_
 ## arg 3 : Num blocks
 ########################################################################
 .text
-SYM_FUNC_START(sha256_transform_avx)
+SYM_TYPED_FUNC_START(sha256_transform_avx)
 .align 32
 pushq %rbx
 pushq %r12
...
@@ -49,6 +49,7 @@
 ########################################################################
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>

 ## assume buffers not aligned
 #define VMOVDQ vmovdqu
@@ -523,7 +524,7 @@ STACK_SIZE = _CTX + _CTX_SIZE
 ## arg 3 : Num blocks
 ########################################################################
 .text
-SYM_FUNC_START(sha256_transform_rorx)
+SYM_TYPED_FUNC_START(sha256_transform_rorx)
 .align 32
 pushq %rbx
 pushq %r12
...
@@ -47,6 +47,7 @@
 ########################################################################
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>

 ## assume buffers not aligned
 #define MOVDQ movdqu
@@ -355,7 +356,7 @@ a = TMP_
 ## arg 3 : Num blocks
 ########################################################################
 .text
-SYM_FUNC_START(sha256_transform_ssse3)
+SYM_TYPED_FUNC_START(sha256_transform_ssse3)
 .align 32
 pushq %rbx
 pushq %r12
...
@@ -54,6 +54,7 @@
 */
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>

 #define DIGEST_PTR %rdi /* 1st arg */
 #define DATA_PTR %rsi /* 2nd arg */
@@ -97,7 +98,7 @@
 .text
 .align 32
-SYM_FUNC_START(sha256_ni_transform)
+SYM_TYPED_FUNC_START(sha256_ni_transform)
 shl $6, NUM_BLKS /* convert to bytes */
 jz .Ldone_hash
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.