Commit 93235e3d authored by Linus Torvalds

Merge tag 'v5.18-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

 - Missing Kconfig dependency on arm that leads to boot failure

 - x86 SLS fixes

 - Reference leak in the stm32 driver

* tag 'v5.18-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: x86/sm3 - Fixup SLS
  crypto: x86/poly1305 - Fixup SLS
  crypto: x86/chacha20 - Avoid spurious jumps to other functions
  crypto: stm32 - fix reference leak in stm32_crc_remove
  crypto: arm/aes-neonbs-cbc - Select generic cbc and aes
parents 787af64d aa8e73ee

@@ -102,6 +102,8 @@ config CRYPTO_AES_ARM_BS
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_SKCIPHER
 	select CRYPTO_LIB_AES
+	select CRYPTO_AES
+	select CRYPTO_CBC
 	select CRYPTO_SIMD
 	help
 	  Use a faster and more secure NEON based implementation of AES in CBC,
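
As the aes-neonbs-cbc patch in the shortlog suggests, the bit-sliced NEON CBC code relies on a plain "cbc(aes)" skcipher as a fallback; without the generic CBC template and AES cipher built in, that allocation fails and the arm boot failure mentioned in the pull request results. A minimal sketch of the kind of allocation involved, with illustrative names rather than the driver's actual code:

#include <crypto/skcipher.h>
#include <linux/err.h>

/* Illustrative holder for the generic "cbc(aes)" fallback. */
static struct crypto_skcipher *cbc_fallback;

static int neonbs_cbc_fallback_init(void)
{
	/*
	 * A synchronous cbc(aes) implementation only exists if CRYPTO_CBC
	 * and CRYPTO_AES (or another provider) are available; the two new
	 * selects above guarantee that.
	 */
	cbc_fallback = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(cbc_fallback))
		return PTR_ERR(cbc_fallback);
	return 0;
}
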
@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
 	# xor remaining bytes from partial register into output
 	mov		%rcx,%rax
 	and		$0xf,%rcx
-	jz		.Ldone8
+	jz		.Ldone2
 	mov		%rax,%r9
 	and		$~0xf,%r9

@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
 	# xor remaining bytes from partial register into output
 	mov		%rcx,%rax
 	and		$0xf,%rcx
-	jz		.Ldone8
+	jz		.Ldone4
 	mov		%rax,%r9
 	and		$~0xf,%r9
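
The two hunks above replace jumps to .Ldone8, a label that lives in chacha_8block_xor_avx512vl(), with per-function labels, so chacha_2block_xor_avx512vl() and chacha_4block_xor_avx512vl() no longer branch into and return from a different function. The ret -> RET conversions in the poly1305 and sm3 hunks that follow are part of the same straight-line-speculation (SLS) hardening: with CONFIG_SLS the x86 RET assembler macro places a speculation trap after the actual return, and objtool warns about any ret that is not followed by one. Roughly, as a sketch of the idea rather than a verbatim copy of arch/x86/include/asm/linkage.h:

/*
 * Simplified sketch of the mechanism behind ret -> RET: when CONFIG_SLS
 * is set, RET appends an int3 so a mispredicted straight-line path past
 * the return hits a trap instead of falling into whatever comes next.
 */
#ifdef CONFIG_SLS
#define RET	ret; int3
#else
#define RET	ret
#endif
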
@@ -297,7 +297,7 @@ ___
 $code.=<<___;
 	mov	\$1,%eax
 .Lno_key:
-	ret
+	RET
 ___
 &end_function("poly1305_init_x86_64");

@@ -373,7 +373,7 @@ $code.=<<___;
 .cfi_adjust_cfa_offset	-48
 .Lno_data:
 .Lblocks_epilogue:
-	ret
+	RET
 .cfi_endproc
 ___
 &end_function("poly1305_blocks_x86_64");

@@ -399,7 +399,7 @@ $code.=<<___;
 	mov	%rax,0($mac)	# write result
 	mov	%rcx,8($mac)
-	ret
+	RET
 ___
 &end_function("poly1305_emit_x86_64");
 if ($avx) {

@@ -429,7 +429,7 @@ ___
 &poly1305_iteration();
 $code.=<<___;
 	pop $ctx
-	ret
+	RET
 .size	__poly1305_block,.-__poly1305_block
 .type	__poly1305_init_avx,\@abi-omnipotent

@@ -594,7 +594,7 @@ __poly1305_init_avx:
 	lea	-48-64($ctx),$ctx	# size [de-]optimization
 	pop %rbp
-	ret
+	RET
 .size	__poly1305_init_avx,.-__poly1305_init_avx
 ___

@@ -747,7 +747,7 @@ $code.=<<___;
 .cfi_restore	%rbp
 .Lno_data_avx:
 .Lblocks_avx_epilogue:
-	ret
+	RET
 .cfi_endproc
 .align	32

@@ -1452,7 +1452,7 @@ $code.=<<___ if (!$win64);
 ___
 $code.=<<___;
 	vzeroupper
-	ret
+	RET
 .cfi_endproc
 ___
 &end_function("poly1305_blocks_avx");

@@ -1508,7 +1508,7 @@ $code.=<<___;
 	mov	%rax,0($mac)	# write result
 	mov	%rcx,8($mac)
-	ret
+	RET
 ___
 &end_function("poly1305_emit_avx");

@@ -1675,7 +1675,7 @@ $code.=<<___;
 .cfi_restore	%rbp
 .Lno_data_avx2$suffix:
 .Lblocks_avx2_epilogue$suffix:
-	ret
+	RET
 .cfi_endproc
 .align	32

@@ -2201,7 +2201,7 @@ $code.=<<___ if (!$win64);
 ___
 $code.=<<___;
 	vzeroupper
-	ret
+	RET
 .cfi_endproc
 ___
 if($avx > 2 && $avx512) {

@@ -2792,7 +2792,7 @@ $code.=<<___ if (!$win64);
 .cfi_def_cfa_register	%rsp
 ___
 $code.=<<___;
-	ret
+	RET
 .cfi_endproc
 ___

@@ -2893,7 +2893,7 @@ $code.=<<___ if ($flavour =~ /elf32/);
 ___
 $code.=<<___;
 	mov	\$1,%eax
-	ret
+	RET
 .size	poly1305_init_base2_44,.-poly1305_init_base2_44
 ___
 {

@@ -3010,7 +3010,7 @@ poly1305_blocks_vpmadd52:
 	jnz	.Lblocks_vpmadd52_4x
 .Lno_data_vpmadd52:
-	ret
+	RET
 .size	poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
 ___
 }

@@ -3451,7 +3451,7 @@ poly1305_blocks_vpmadd52_4x:
 	vzeroall
 .Lno_data_vpmadd52_4x:
-	ret
+	RET
 .size	poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
 ___
 }

@@ -3824,7 +3824,7 @@ $code.=<<___;
 	vzeroall
 .Lno_data_vpmadd52_8x:
-	ret
+	RET
 .size	poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
 ___
 }

@@ -3861,7 +3861,7 @@ poly1305_emit_base2_44:
 	mov	%rax,0($mac)	# write result
 	mov	%rcx,8($mac)
-	ret
+	RET
 .size	poly1305_emit_base2_44,.-poly1305_emit_base2_44
 ___
 } } }

@@ -3916,7 +3916,7 @@ xor128_encrypt_n_pad:
 .Ldone_enc:
 	mov	$otp,%rax
-	ret
+	RET
 .size	xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
 .globl	xor128_decrypt_n_pad

@@ -3967,7 +3967,7 @@ xor128_decrypt_n_pad:
 .Ldone_dec:
 	mov	$otp,%rax
-	ret
+	RET
 .size	xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
 ___
 }

@@ -4109,7 +4109,7 @@ avx_handler:
 	pop %rbx
 	pop %rdi
 	pop %rsi
-	ret
+	RET
 .size	avx_handler,.-avx_handler
 .section	.pdata

@@ -513,5 +513,5 @@ SYM_FUNC_START(sm3_transform_avx)
 	movq %rbp, %rsp;
 	popq %rbp;
-	ret;
+	RET;
 SYM_FUNC_END(sm3_transform_avx)

@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
 	struct stm32_crc *crc = platform_get_drvdata(pdev);
 	int ret = pm_runtime_get_sync(crc->dev);
 
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_noidle(crc->dev);
 		return ret;
+	}
 
 	spin_lock(&crc_list.lock);
 	list_del(&crc->list);
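
The stm32 hunk deals with an asymmetry in runtime PM: pm_runtime_get_sync() increments the device's usage counter even when the resume fails, so returning early without a put leaked a reference on every failed remove. The added pm_runtime_put_noidle() drops that reference. A simplified sketch of the pattern (the function below is illustrative, not the full stm32_crc_remove()):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync() bumped the usage count even on failure: drop it. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... tear the device down while it is powered ... */

	pm_runtime_put_sync(dev);	/* drop the reference taken above */
	return 0;
}

Newer code often avoids this pitfall with pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails.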