Commit 2a8ba8f0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (46 commits)
  random: simplify fips mode
  crypto: authenc - Fix cryptlen calculation
  crypto: talitos - add support for sha224
  crypto: talitos - add hash algorithms
  crypto: talitos - second prepare step for adding ahash algorithms
  crypto: talitos - prepare for adding ahash algorithms
  crypto: n2 - Add Niagara2 crypto driver
  crypto: skcipher - Add ablkcipher_walk interfaces
  crypto: testmgr - Add testing for async hashing and update/final
  crypto: tcrypt - Add speed tests for async hashing
  crypto: scatterwalk - Fix scatterwalk_done() test
  crypto: hifn_795x - Rename ablkcipher_walk to hifn_cipher_walk
  padata: Use get_online_cpus/put_online_cpus in padata_free
  padata: Add some code comments
  padata: Flush the padata queues actively
  padata: Use a timer to handle remaining objects in the reorder queues
  crypto: shash - Remove usage of CRYPTO_MINALIGN
  crypto: mv_cesa - Use resource_size
  crypto: omap - OMAP macros corrected
  padata: Use get_online_cpus/put_online_cpus
  ...

Fix up conflicts in arch/arm/mach-omap2/devices.c
parents ec2a7587 e954bc91
......@@ -1836,7 +1836,7 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
CLK(NULL, "des_ick", &des_ick, CK_242X),
CLK(NULL, "sha_ick", &sha_ick, CK_242X),
CLK("omap-sham", "ick", &sha_ick, CK_242X),
CLK("omap_rng", "ick", &rng_ick, CK_242X),
CLK(NULL, "aes_ick", &aes_ick, CK_242X),
CLK(NULL, "pka_ick", &pka_ick, CK_242X),
......
......@@ -1924,7 +1924,7 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "sdma_ick", &sdma_ick, CK_243X),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
CLK(NULL, "des_ick", &des_ick, CK_243X),
CLK(NULL, "sha_ick", &sha_ick, CK_243X),
CLK("omap-sham", "ick", &sha_ick, CK_243X),
CLK("omap_rng", "ick", &rng_ick, CK_243X),
CLK(NULL, "aes_ick", &aes_ick, CK_243X),
CLK(NULL, "pka_ick", &pka_ick, CK_243X),
......
......@@ -3284,7 +3284,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX),
CLK(NULL, "icr_ick", &icr_ick, CK_343X),
CLK(NULL, "aes2_ick", &aes2_ick, CK_343X),
CLK(NULL, "sha12_ick", &sha12_ick, CK_343X),
CLK("omap-sham", "ick", &sha12_ick, CK_343X),
CLK(NULL, "des2_ick", &des2_ick, CK_343X),
CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX),
CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_3XXX),
......
......@@ -28,6 +28,7 @@
#include <plat/mux.h>
#include <mach/gpio.h>
#include <plat/mmc.h>
#include <plat/dma.h>
#include "mux.h"
......@@ -486,8 +487,10 @@ static void omap_init_pmu(void)
}
#ifdef CONFIG_OMAP_SHA1_MD5
static struct resource sha1_md5_resources[] = {
#if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE)
#ifdef CONFIG_ARCH_OMAP2
static struct resource omap2_sham_resources[] = {
{
.start = OMAP24XX_SEC_SHA1MD5_BASE,
.end = OMAP24XX_SEC_SHA1MD5_BASE + 0x64,
......@@ -498,20 +501,55 @@ static struct resource sha1_md5_resources[] = {
.flags = IORESOURCE_IRQ,
}
};
static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources);
#else
#define omap2_sham_resources NULL
#define omap2_sham_resources_sz 0
#endif
static struct platform_device sha1_md5_device = {
.name = "OMAP SHA1/MD5",
#ifdef CONFIG_ARCH_OMAP3
static struct resource omap3_sham_resources[] = {
{
.start = OMAP34XX_SEC_SHA1MD5_BASE,
.end = OMAP34XX_SEC_SHA1MD5_BASE + 0x64,
.flags = IORESOURCE_MEM,
},
{
.start = INT_34XX_SHA1MD52_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = OMAP34XX_DMA_SHA1MD5_RX,
.flags = IORESOURCE_DMA,
}
};
static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources);
#else
#define omap3_sham_resources NULL
#define omap3_sham_resources_sz 0
#endif
static struct platform_device sham_device = {
.name = "omap-sham",
.id = -1,
.num_resources = ARRAY_SIZE(sha1_md5_resources),
.resource = sha1_md5_resources,
};
static void omap_init_sha1_md5(void)
static void omap_init_sham(void)
{
platform_device_register(&sha1_md5_device);
if (cpu_is_omap24xx()) {
sham_device.resource = omap2_sham_resources;
sham_device.num_resources = omap2_sham_resources_sz;
} else if (cpu_is_omap34xx()) {
sham_device.resource = omap3_sham_resources;
sham_device.num_resources = omap3_sham_resources_sz;
} else {
pr_err("%s: platform not supported\n", __func__);
return;
}
platform_device_register(&sham_device);
}
#else
static inline void omap_init_sha1_md5(void) { }
static inline void omap_init_sham(void) { }
#endif
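
The rename matters because the platform bus binds devices to drivers by name: the "omap-sham" device registered above pairs with the "omap-sham" clock entries in the clock tables and with the new drivers/crypto/omap-sham.c driver selected by CONFIG_CRYPTO_DEV_OMAP_SHAM. A minimal sketch of the driver-side declaration this implies (assumed for illustration; the probe routine name is hypothetical):

/* Assumed counterpart, needs <linux/platform_device.h>: registering a
 * platform driver under the same "omap-sham" name is what lets
 * platform_device_register(&sham_device) above bind it to the
 * OMAP2/OMAP3 resources chosen in omap_init_sham(). */
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,	/* hypothetical probe routine */
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};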
/*-------------------------------------------------------------------------*/
......@@ -869,7 +907,7 @@ static int __init omap2_init_devices(void)
omap_init_pmu();
omap_hdq_init();
omap_init_sti();
omap_init_sha1_md5();
omap_init_sham();
omap_init_vout();
return 0;
......
......@@ -82,5 +82,10 @@
#define OMAP34XX_MAILBOX_BASE (L4_34XX_BASE + 0x94000)
/* Security */
#define OMAP34XX_SEC_BASE (L4_34XX_BASE + 0xA0000)
#define OMAP34XX_SEC_SHA1MD5_BASE (OMAP34XX_SEC_BASE + 0x23000)
#define OMAP34XX_SEC_AES_BASE (OMAP34XX_SEC_BASE + 0x25000)
#endif /* __ASM_ARCH_OMAP3_H */
......@@ -32,6 +32,9 @@
#define IN IN1
#define KEY %xmm2
#define IV %xmm3
#define BSWAP_MASK %xmm10
#define CTR %xmm11
#define INC %xmm12
#define KEYP %rdi
#define OUTP %rsi
......@@ -42,6 +45,7 @@
#define T1 %r10
#define TKEYP T1
#define T2 %r11
#define TCTR_LOW T2
_key_expansion_128:
_key_expansion_256a:
......@@ -724,3 +728,114 @@ ENTRY(aesni_cbc_dec)
movups IV, (IVP)
.Lcbc_dec_just_ret:
ret
.align 16
.Lbswap_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/*
* _aesni_inc_init: internal ABI
* setup registers used by _aesni_inc
* input:
* IV
* output:
* CTR: == IV, in little endian
* TCTR_LOW: == lower qword of CTR
* INC: == 1, in little endian
* BSWAP_MASK == endian swapping mask
*/
_aesni_inc_init:
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
PSHUFB_XMM BSWAP_MASK CTR
mov $1, TCTR_LOW
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
ret
/*
* _aesni_inc: internal ABI
* Increase IV by 1, IV is in big endian
* input:
* IV
* CTR: == IV, in little endian
* TCTR_LOW: == lower qword of CTR
* INC: == 1, in little endian
* BSWAP_MASK == endian swapping mask
* output:
* IV: Increase by 1
* changed:
* CTR: == output IV, in little endian
* TCTR_LOW: == lower qword of CTR
*/
_aesni_inc:
paddq INC, CTR
add $1, TCTR_LOW
jnc .Linc_low
pslldq $8, INC
paddq INC, CTR
psrldq $8, INC
.Linc_low:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
ret
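
In C terms, _aesni_inc performs the same big-endian 128-bit increment that the generic CTR code gets from crypto_inc(), just kept in byte-swapped XMM registers so the common case is a single paddq. A minimal sketch of the equivalent byte-wise logic (helper name hypothetical, for illustration only):

/* Increment a big-endian 128-bit counter block by one. */
static void ctr128_inc_be(unsigned char ctr[16])
{
	int i;

	for (i = 15; i >= 0; i--) {
		ctr[i]++;
		if (ctr[i])	/* stop once a byte did not wrap to zero */
			break;
	}
}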
/*
* void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len, u8 *iv)
*/
ENTRY(aesni_ctr_enc)
cmp $16, LEN
jb .Lctr_enc_just_ret
mov 480(KEYP), KLEN
movups (IVP), IV
call _aesni_inc_init
cmp $64, LEN
jb .Lctr_enc_loop1
.align 4
.Lctr_enc_loop4:
movaps IV, STATE1
call _aesni_inc
movups (INP), IN1
movaps IV, STATE2
call _aesni_inc
movups 0x10(INP), IN2
movaps IV, STATE3
call _aesni_inc
movups 0x20(INP), IN3
movaps IV, STATE4
call _aesni_inc
movups 0x30(INP), IN4
call _aesni_enc4
pxor IN1, STATE1
movups STATE1, (OUTP)
pxor IN2, STATE2
movups STATE2, 0x10(OUTP)
pxor IN3, STATE3
movups STATE3, 0x20(OUTP)
pxor IN4, STATE4
movups STATE4, 0x30(OUTP)
sub $64, LEN
add $64, INP
add $64, OUTP
cmp $64, LEN
jge .Lctr_enc_loop4
cmp $16, LEN
jb .Lctr_enc_ret
.align 4
.Lctr_enc_loop1:
movaps IV, STATE
call _aesni_inc
movups (INP), IN
call _aesni_enc1
pxor IN, STATE
movups STATE, (OUTP)
sub $16, LEN
add $16, INP
add $16, OUTP
cmp $16, LEN
jge .Lctr_enc_loop1
.Lctr_enc_ret:
movups IV, (IVP)
.Lctr_enc_just_ret:
ret
......@@ -18,6 +18,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
......@@ -58,6 +59,8 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
......@@ -321,6 +324,72 @@ static struct crypto_alg blk_cbc_alg = {
},
};
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
struct blkcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[AES_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
aesni_enc(ctx, keystream, ctrblk);
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
static int ctr_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
if (walk.nbytes) {
ctr_crypt_final(ctx, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
}
kernel_fpu_end();
return err;
}
static struct crypto_alg blk_ctr_alg = {
.cra_name = "__ctr-aes-aesni",
.cra_driver_name = "__driver-ctr-aes-aesni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
},
},
};
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
......@@ -467,13 +536,11 @@ static struct crypto_alg ablk_cbc_alg = {
},
};
#ifdef HAS_CTR
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
0, 0);
cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
......@@ -500,11 +567,50 @@ static struct crypto_alg ablk_ctr_alg = {
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
.decrypt = ablk_encrypt,
.geniv = "chainiv",
},
},
};
#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher(
"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_rfc3686_ctr_alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
.cra_init = ablk_rfc3686_ctr_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
.geniv = "seqiv",
},
},
};
#endif
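
The enlarged key sizes reflect RFC 3686 keying: the caller appends a 4-byte nonce (CTR_RFC3686_NONCE_SIZE) to the AES key, the per-request IV shrinks to CTR_RFC3686_IV_SIZE (8 bytes), and the rfc3686 template forms each counter block as nonce || IV || 32-bit counter starting at 1. A setkey sketch (function name and key bytes are hypothetical):

/* Hypothetical example: key material for "rfc3686(ctr(aes))" is the
 * 16-byte AES key immediately followed by the 4-byte nonce. */
static int example_rfc3686_setkey(struct crypto_ablkcipher *tfm)
{
	static const u8 key[16 + CTR_RFC3686_NONCE_SIZE] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,	/* AES-128 key */
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		0xde, 0xad, 0xbe, 0xef,					/* nonce */
	};

	return crypto_ablkcipher_setkey(tfm, key, sizeof(key));
}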
#ifdef HAS_LRW
......@@ -640,13 +746,17 @@ static int __init aesni_init(void)
goto blk_ecb_err;
if ((err = crypto_register_alg(&blk_cbc_alg)))
goto blk_cbc_err;
if ((err = crypto_register_alg(&blk_ctr_alg)))
goto blk_ctr_err;
if ((err = crypto_register_alg(&ablk_ecb_alg)))
goto ablk_ecb_err;
if ((err = crypto_register_alg(&ablk_cbc_alg)))
goto ablk_cbc_err;
#ifdef HAS_CTR
if ((err = crypto_register_alg(&ablk_ctr_alg)))
goto ablk_ctr_err;
#ifdef HAS_CTR
if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
goto ablk_rfc3686_ctr_err;
#endif
#ifdef HAS_LRW
if ((err = crypto_register_alg(&ablk_lrw_alg)))
......@@ -675,13 +785,17 @@ static int __init aesni_init(void)
ablk_lrw_err:
#endif
#ifdef HAS_CTR
crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
#endif
crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
crypto_unregister_alg(&blk_ecb_alg);
......@@ -705,10 +819,12 @@ static void __exit aesni_exit(void)
crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef HAS_CTR
crypto_unregister_alg(&ablk_ctr_alg);
crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
crypto_unregister_alg(&ablk_ctr_alg);
crypto_unregister_alg(&ablk_cbc_alg);
crypto_unregister_alg(&ablk_ecb_alg);
crypto_unregister_alg(&blk_ctr_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);
crypto_unregister_alg(&__aesni_alg);
......
......@@ -7,7 +7,66 @@
#ifdef __ASSEMBLY__
#define REG_NUM_INVALID 100
#define REG_TYPE_R64 0
#define REG_TYPE_XMM 1
#define REG_TYPE_INVALID 100
.macro R64_NUM opd r64
\opd = REG_NUM_INVALID
.ifc \r64,%rax
\opd = 0
.endif
.ifc \r64,%rcx
\opd = 1
.endif
.ifc \r64,%rdx
\opd = 2
.endif
.ifc \r64,%rbx
\opd = 3
.endif
.ifc \r64,%rsp
\opd = 4
.endif
.ifc \r64,%rbp
\opd = 5
.endif
.ifc \r64,%rsi
\opd = 6
.endif
.ifc \r64,%rdi
\opd = 7
.endif
.ifc \r64,%r8
\opd = 8
.endif
.ifc \r64,%r9
\opd = 9
.endif
.ifc \r64,%r10
\opd = 10
.endif
.ifc \r64,%r11
\opd = 11
.endif
.ifc \r64,%r12
\opd = 12
.endif
.ifc \r64,%r13
\opd = 13
.endif
.ifc \r64,%r14
\opd = 14
.endif
.ifc \r64,%r15
\opd = 15
.endif
.endm
.macro XMM_NUM opd xmm
\opd = REG_NUM_INVALID
.ifc \xmm,%xmm0
\opd = 0
.endif
......@@ -58,13 +117,25 @@
.endif
.endm
.macro REG_TYPE type reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
.elseif reg_type_xmm <> REG_NUM_INVALID
\type = REG_TYPE_XMM
.else
\type = REG_TYPE_INVALID
.endif
.endm
.macro PFX_OPD_SIZE
.byte 0x66
.endm
.macro PFX_REX opd1 opd2
.if (\opd1 | \opd2) & 8
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
.macro PFX_REX opd1 opd2 W=0
.if ((\opd1 | \opd2) & 8) || \W
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
.endif
.endm
......@@ -145,6 +216,25 @@
.byte 0x0f, 0x38, 0xdf
MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
.endm
.macro MOVQ_R64_XMM opd1 opd2
REG_TYPE movq_r64_xmm_opd1_type \opd1
.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
XMM_NUM movq_r64_xmm_opd1 \opd1
R64_NUM movq_r64_xmm_opd2 \opd2
.else
R64_NUM movq_r64_xmm_opd1 \opd1
XMM_NUM movq_r64_xmm_opd2 \opd2
.endif
PFX_OPD_SIZE
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
.byte 0x0f, 0x7e
.else
.byte 0x0f, 0x6e
.endif
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
.endm
#endif
#endif
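
A worked example of what MOVQ_R64_XMM expands to, derived by hand from the pieces above and therefore illustrative rather than authoritative: MOVQ_R64_XMM %r11 %xmm12 takes the general-purpose-register path, so it emits the 0x66 operand-size prefix, a REX byte of 0x4d from PFX_REX (W forced to 1, R extending %xmm12, B extending %r11), the 0x0f 0x6e opcode and a mod=11 ModRM byte naming the two registers — i.e. movq %r11, %xmm12. With an XMM register as the first operand the 0x0f 0x7e form is chosen instead and the data moves from the XMM register to the general-purpose register.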
......@@ -24,10 +24,287 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <crypto/scatterwalk.h>
#include "internal.h"
static const char *skcipher_default_geniv __read_mostly;
struct ablkcipher_buffer {
struct list_head entry;
struct scatter_walk dst;
unsigned int len;
void *data;
};
enum {
ABLKCIPHER_WALK_SLOW = 1 << 0,
};
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
struct ablkcipher_buffer *p, *tmp;
list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
ablkcipher_buffer_write(p);
list_del(&p->entry);
kfree(p);
}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
struct ablkcipher_buffer *p)
{
p->dst = walk->out;
list_add_tail(&p->entry, &walk->buffers);
}
/* Get a spot of the specified length that does not straddle a page.
* The caller needs to ensure that there is enough space for this operation.
*/
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
return max(start, end_page);
}
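
A worked example with hypothetical values and a PAGE_SIZE of 4096: for start = 0x...0ff8 and len = 16, start + len - 1 is 0x...1007, so end_page is 0x...1000 and max() returns 0x...1000 — the spot is pushed up to the page boundary and the len bytes no longer straddle a page. If the range already fits inside the page that start points into, end_page falls at or below start and start is returned unchanged.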
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
unsigned int bsize)
{
unsigned int n = bsize;
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
if (len_this_page > n)
len_this_page = n;
scatterwalk_advance(&walk->out, n);
if (n == len_this_page)
break;
n -= len_this_page;
scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
}
return bsize;
}
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
struct ablkcipher_walk *walk);
int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
unsigned int nbytes = 0;
if (likely(err >= 0)) {
unsigned int n = walk->nbytes - err;
if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
n = ablkcipher_done_fast(walk, n);
else if (WARN_ON(err)) {
err = -EINVAL;
goto err;
} else
n = ablkcipher_done_slow(walk, n);
nbytes = walk->total - n;
err = 0;
}
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
err:
walk->total = nbytes;
walk->nbytes = nbytes;
if (nbytes) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
if (walk->iv_buffer)
kfree(walk->iv_buffer);
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
struct ablkcipher_walk *walk,
unsigned int bsize,
unsigned int alignmask,
void **src_p, void **dst_p)
{
unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
struct ablkcipher_buffer *p;
void *src, *dst, *base;
unsigned int n;
n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
n += (aligned_bsize * 3 - (alignmask + 1) +
(alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
p = kmalloc(n, GFP_ATOMIC);
if (!p)
return ablkcipher_walk_done(req, walk, -ENOMEM);
base = p + 1;
dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
src = dst = ablkcipher_get_spot(dst, bsize);
p->len = bsize;
p->data = dst;
scatterwalk_copychunks(src, &walk->in, bsize, 0);
ablkcipher_queue_write(walk, p);
walk->nbytes = bsize;
walk->flags |= ABLKCIPHER_WALK_SLOW;
*src_p = src;
*dst_p = dst;
return 0;
}
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
struct crypto_tfm *tfm,
unsigned int alignmask)
{
unsigned bs = walk->blocksize;
unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
unsigned aligned_bs = ALIGN(bs, alignmask + 1);
unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
(alignmask + 1);
u8 *iv;
size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
if (!walk->iv_buffer)
return -ENOMEM;
iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
iv = ablkcipher_get_spot(iv, ivsize);
walk->iv = memcpy(iv, walk->iv, ivsize);
return 0;
}
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
struct ablkcipher_walk *walk)
{
walk->src.page = scatterwalk_page(&walk->in);
walk->src.offset = offset_in_page(walk->in.offset);
walk->dst.page = scatterwalk_page(&walk->out);
walk->dst.offset = offset_in_page(walk->out.offset);
return 0;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
struct ablkcipher_walk *walk)
{
struct crypto_tfm *tfm = req->base.tfm;
unsigned int alignmask, bsize, n;
void *src, *dst;
int err;
alignmask = crypto_tfm_alg_alignmask(tfm);
n = walk->total;
if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return ablkcipher_walk_done(req, walk, -EINVAL);
}
walk->flags &= ~ABLKCIPHER_WALK_SLOW;
src = dst = NULL;
bsize = min(walk->blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
if (n < bsize ||
!scatterwalk_aligned(&walk->in, alignmask) ||
!scatterwalk_aligned(&walk->out, alignmask)) {
err = ablkcipher_next_slow(req, walk, bsize, alignmask,
&src, &dst);
goto set_phys_lowmem;
}
walk->nbytes = n;
return ablkcipher_next_fast(req, walk);
set_phys_lowmem:
if (err >= 0) {
walk->src.page = virt_to_page(src);
walk->dst.page = virt_to_page(dst);
walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
}
return err;
}
static int ablkcipher_walk_first(struct ablkcipher_request *req,
struct ablkcipher_walk *walk)
{
struct crypto_tfm *tfm = req->base.tfm;
unsigned int alignmask;
alignmask = crypto_tfm_alg_alignmask(tfm);
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->iv_buffer = NULL;
walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
if (err)
return err;
}
scatterwalk_start(&walk->in, walk->in.sg);
scatterwalk_start(&walk->out, walk->out.sg);
return ablkcipher_walk_next(req, walk);
}
int ablkcipher_walk_phys(struct ablkcipher_request *req,
struct ablkcipher_walk *walk)
{
walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
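
For context, a consumer of the exported walk would look roughly like the sketch below. It is hypothetical; ablkcipher_walk_init() is assumed to be the companion inline helper that seeds walk->in.sg, walk->out.sg and walk->total from the request before the first call here.

/* Hypothetical consumer of ablkcipher_walk_phys()/ablkcipher_walk_done(). */
static int example_walk_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_walk walk;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (err >= 0 && walk.nbytes) {
		/* walk.src.page/offset and walk.dst.page/offset describe an
		 * aligned chunk of walk.nbytes bytes that does not straddle a
		 * page; process it, then report that nothing was left over. */
		err = ablkcipher_walk_done(req, &walk, 0);
	}

	/* Misaligned chunks are bounced through queued buffers; once the
	 * operation really completes, __ablkcipher_walk_complete() writes
	 * them back to the destination scatterlist. */
	return err;
}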
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
......
......@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
{
int err = -EINVAL;
if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
goto out;
spawn->frontend = frontend;
......
......@@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
......@@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
goto out;
authsize = crypto_aead_authsize(authenc);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
......@@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
req->cryptlen, req->iv);
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
......@@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
authsize = crypto_aead_authsize(authenc);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
......@@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
req->cryptlen, req->iv);
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
......
......@@ -6,7 +6,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
......
......@@ -315,16 +315,13 @@ static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
goto out;
}
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
u32 type, u32 mask)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
alg = crypto_get_attr_alg(tb, algt->type,
(algt->mask & CRYPTO_ALG_TYPE_MASK));
alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
if (IS_ERR(alg))
return ERR_CAST(alg);
......@@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
return pcrypt_alloc_aead(tb);
return pcrypt_alloc_aead(tb, algt->type, algt->mask);
}
return ERR_PTR(-EINVAL);
......
......@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
if (!offset_in_page(walk->offset) || !more)
if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
......
......@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
u8 *buffer, *alignbuffer;
int err;
absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
......
......@@ -394,6 +394,17 @@ static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
return 0;
}
static void test_hash_sg_init(struct scatterlist *sg)
{
int i;
sg_init_table(sg, TVMEMSIZE);
for (i = 0; i < TVMEMSIZE; i++) {
sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
memset(tvmem[i], 0xff, PAGE_SIZE);
}
}
static void test_hash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
......@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
sg_init_table(sg, TVMEMSIZE);
for (i = 0; i < TVMEMSIZE; i++) {
sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
memset(tvmem[i], 0xff, PAGE_SIZE);
}
test_hash_sg_init(sg);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
printk(KERN_ERR
......@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
if (speed[i].klen)
crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
printk(KERN_INFO "test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
......@@ -458,6 +467,250 @@ static void test_hash_speed(const char *algo, unsigned int sec,
crypto_free_hash(tfm);
}
struct tcrypt_result {
struct completion completion;
int err;
};
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
struct tcrypt_result *res = req->data;
if (err == -EINPROGRESS)
return;
res->err = err;
complete(&res->completion);
}
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
INIT_COMPLETION(tr->completion);
}
return ret;
}
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int sec)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
return ret;
}
printk("%6u opers/sec, %9lu bytes/sec\n",
bcount / sec, ((long)bcount * blen) / sec);
return 0;
}
static int test_ahash_jiffies(struct ahash_request *req, int blen,
int plen, char *out, int sec)
{
unsigned long start, end;
int bcount, pcount;
int ret;
if (plen == blen)
return test_ahash_jiffies_digest(req, blen, out, sec);
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = crypto_ahash_init(req);
if (ret)
return ret;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
return ret;
}
/* we assume there is enough space in 'out' for the result */
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
return ret;
}
pr_cont("%6u opers/sec, %9lu bytes/sec\n",
bcount / sec, ((long)bcount * blen) / sec);
return 0;
}
static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
char *out)
{
unsigned long cycles = 0;
int ret, i;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
if (ret)
return ret;
pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static int test_ahash_cycles(struct ahash_request *req, int blen,
int plen, char *out)
{
unsigned long cycles = 0;
int i, pcount, ret;
if (plen == blen)
return test_ahash_cycles_digest(req, blen, out);
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = crypto_ahash_init(req);
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
goto out;
}
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = crypto_ahash_init(req);
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
goto out;
}
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
if (ret)
return ret;
pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static void test_ahash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
struct scatterlist sg[TVMEMSIZE];
struct tcrypt_result tresult;
struct ahash_request *req;
struct crypto_ahash *tfm;
static char output[1024];
int i, ret;
printk(KERN_INFO "\ntesting speed of async %s\n", algo);
tfm = crypto_alloc_ahash(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n",
algo, PTR_ERR(tfm));
return;
}
if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
pr_err("digestsize(%u) > outputbuffer(%zu)\n",
crypto_ahash_digestsize(tfm), sizeof(output));
goto out;
}
test_hash_sg_init(sg);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("ahash request allocation failure\n");
goto out;
}
init_completion(&tresult.completion);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &tresult);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
speed[i].blen, TVMEMSIZE * PAGE_SIZE);
break;
}
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
ahash_request_set_crypt(req, sg, output, speed[i].plen);
if (sec)
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, sec);
else
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
break;
}
}
ahash_request_free(req);
out:
crypto_free_ahash(tfm);
}
static void test_available(void)
{
char **name = check;
......@@ -881,9 +1134,87 @@ static int do_test(int m)
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
case 399:
break;
case 400:
/* fall through */
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 499:
break;
case 1000:
test_available();
break;
......
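With these entries in place the async tests are driven the same way as the existing ones: for example, modprobe tcrypt mode=403 sec=1 times async sha1 in one-second intervals through test_ahash_jiffies(), while sec=0 selects the cycle-counting path in test_ahash_cycles(). Case 318 above pairs the new keyed hash_speed_template_16 entries (declared in tcrypt.h below) with ghash-generic.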
......@@ -25,6 +25,7 @@ struct cipher_speed_template {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
unsigned int klen; /* key length */
};
/*
......@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
static struct hash_speed hash_speed_template_16[] = {
{ .blen = 16, .plen = 16, .klen = 16, },
{ .blen = 64, .plen = 16, .klen = 16, },
{ .blen = 64, .plen = 64, .klen = 16, },
{ .blen = 256, .plen = 16, .klen = 16, },
{ .blen = 256, .plen = 64, .klen = 16, },
{ .blen = 256, .plen = 256, .klen = 16, },
{ .blen = 1024, .plen = 16, .klen = 16, },
{ .blen = 1024, .plen = 256, .klen = 16, },
{ .blen = 1024, .plen = 1024, .klen = 16, },
{ .blen = 2048, .plen = 16, .klen = 16, },
{ .blen = 2048, .plen = 256, .klen = 16, },
{ .blen = 2048, .plen = 1024, .klen = 16, },
{ .blen = 2048, .plen = 2048, .klen = 16, },
{ .blen = 4096, .plen = 16, .klen = 16, },
{ .blen = 4096, .plen = 256, .klen = 16, },
{ .blen = 4096, .plen = 1024, .klen = 16, },
{ .blen = 4096, .plen = 4096, .klen = 16, },
{ .blen = 8192, .plen = 16, .klen = 16, },
{ .blen = 8192, .plen = 256, .klen = 16, },
{ .blen = 8192, .plen = 1024, .klen = 16, },
{ .blen = 8192, .plen = 4096, .klen = 16, },
{ .blen = 8192, .plen = 8192, .klen = 16, },
/* End marker */
{ .blen = 0, .plen = 0, .klen = 0, }
};
#endif /* _CRYPTO_TCRYPT_H */
......@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
static int do_one_async_hash_op(struct ahash_request *req,
struct tcrypt_result *tr,
int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
INIT_COMPLETION(tr->completion);
}
return ret;
}
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
unsigned int tcount)
unsigned int tcount, bool use_digest)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
unsigned int i, j, k, temp;
......@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
ret = crypto_ahash_digest(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&tresult.completion);
if (!ret && !(ret = tresult.err)) {
INIT_COMPLETION(tresult.completion);
break;
if (use_digest) {
ret = do_one_async_hash_op(req, &tresult,
crypto_ahash_digest(req));
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
ret = do_one_async_hash_op(req, &tresult,
crypto_ahash_init(req));
if (ret) {
pr_err("alt: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
ret = do_one_async_hash_op(req, &tresult,
crypto_ahash_update(req));
if (ret) {
pr_err("alt: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
ret = do_one_async_hash_op(req, &tresult,
crypto_ahash_final(req));
if (ret) {
pr_err("alt: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
/* fall through */
default:
printk(KERN_ERR "alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
if (memcmp(result, template[i].digest,
......@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
return PTR_ERR(tfm);
}
err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
err = test_hash(tfm, desc->suite.hash.vecs,
desc->suite.hash.count, true);
if (!err)
err = test_hash(tfm, desc->suite.hash.vecs,
desc->suite.hash.count, false);
crypto_free_ahash(tfm);
return err;
......
......@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
#define VMAC_AES_TEST_VECTORS 1
static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
#define VMAC_AES_TEST_VECTORS 8
static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
'\x04', '\x01', '\x04', '\x03',};
static char vmac_string2[128] = {'a', 'b', 'c',};
static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
};
static struct hash_testvec aes_vmac128_tv_template[] = {
{
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = NULL,
.digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
.psize = 0,
.ksize = 16,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string1,
.digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
.psize = 128,
.ksize = 16,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string2,
.digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
.psize = 128,
.ksize = 16,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string,
.digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
.plaintext = vmac_string3,
.digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
.psize = 128,
.ksize = 16,
}, {
.key = "abcdefghijklmnop",
.plaintext = NULL,
.digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
.psize = 0,
.ksize = 16,
}, {
.key = "abcdefghijklmnop",
.plaintext = vmac_string1,
.digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
.psize = 128,
.ksize = 16,
}, {
.key = "abcdefghijklmnop",
.plaintext = vmac_string2,
.digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
.psize = 128,
.ksize = 16,
}, {
.key = "abcdefghijklmnop",
.plaintext = vmac_string3,
.digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
.psize = 128,
.ksize = 16,
},
......
......@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
......@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
......@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
......@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
......@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
......@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
t1 = le64_to_cpup(mp+i) + kp[i]; \
t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
t1 = pe64_to_cpup(mp+i) + kp[i]; \
t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
......@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
ctx->first_block_processed = 0;
}
static u64 l3hash(u64 p1, u64 p2,
u64 k1, u64 k2, u64 len)
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
......@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
return p + h;
return le64_to_cpu(p + h);
}
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
......@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
static int vmac_init(struct shash_desc *pdesc)
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}
......
......@@ -257,6 +257,7 @@
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
/*
* The minimum number of bits of entropy before we wake up a read on
......@@ -414,7 +415,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
__u8 *last_data;
__u8 last_data[EXTRACT_SIZE];
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
......@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk)
}
#endif
#define EXTRACT_SIZE 10
/*********************************************************************
*
* Entropy extraction routines
......@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
while (nbytes) {
extract_buf(r, tmp);
if (r->last_data) {
if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
......@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
/* Enable continuous test in fips mode */
if (fips_enabled)
r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
......
......@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
Currently the driver supports AES in ECB and CBC mode without DMA.
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_ALGAPI
depends on SPARC64
help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
used for SSL offload. The other set provides the Cipher
Group, which can perform encryption, decryption, hashing,
checksumming, and raw copies.
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
......@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP SHA1/MD5 hw accelerator"
depends on ARCH_OMAP2 || ARCH_OMAP3
select CRYPTO_SHA1
select CRYPTO_MD5
help
OMAP processors have a SHA1/MD5 hw accelerator. Select this if you
want to use the OMAP module for SHA1/MD5 algorithms.
endif # CRYPTO_HW
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-objs := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
......@@ -15,14 +15,14 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <linux/io.h>
#include <linux/delay.h>
#include "geode-aes.h"
/* Static structures */
static void __iomem * _iobase;
static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
......@@ -30,7 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
for(i = 0; i < 4; i++)
for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
......@@ -39,7 +39,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
for(i = 0; i < 4; i++)
for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
......@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
do {
status = ioread32(_iobase + AES_INTR_REG);
cpu_relax();
} while(!(status & AES_INTRA_PENDING) && --counter);
} while (!(status & AES_INTRA_PENDING) && --counter);
/* Clear the event */
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
......@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
while((nbytes = walk.nbytes)) {
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
......@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
while((nbytes = walk.nbytes)) {
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
......@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while((nbytes = walk.nbytes)) {
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
......@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while((nbytes = walk.nbytes)) {
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
......@@ -518,11 +518,12 @@ static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
if ((ret = pci_enable_device(dev)))
ret = pci_enable_device(dev);
if (ret)
return ret;
if ((ret = pci_request_regions(dev, "geode-aes")))
ret = pci_request_regions(dev, "geode-aes");
if (ret)
goto eenable;
_iobase = pci_iomap(dev, 0, 0);
......@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
if ((ret = crypto_register_alg(&geode_alg)))
ret = crypto_register_alg(&geode_alg);
if (ret)
goto eiomap;
if ((ret = crypto_register_alg(&geode_ecb_alg)))
ret = crypto_register_alg(&geode_ecb_alg);
if (ret)
goto ealg;
if ((ret = crypto_register_alg(&geode_cbc_alg)))
ret = crypto_register_alg(&geode_cbc_alg);
if (ret)
goto eecb;
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
......
......@@ -638,7 +638,7 @@ struct hifn_crypto_alg
#define ASYNC_FLAGS_MISALIGNED (1<<0)
struct ablkcipher_walk
struct hifn_cipher_walk
{
struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
u32 flags;
......@@ -657,7 +657,7 @@ struct hifn_request_context
u8 *iv;
unsigned int ivsize;
u8 op, type, mode, unused;
struct ablkcipher_walk walk;
struct hifn_cipher_walk walk;
};
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
......@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
return 0;
}
static int ablkcipher_walk_init(struct ablkcipher_walk *w,
static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
int num, gfp_t gfp_flags)
{
int i;
......@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
return i;
}
static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
{
int i;
......@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
static int ablkcipher_walk(struct ablkcipher_request *req,
struct ablkcipher_walk *w)
static int hifn_cipher_walk(struct ablkcipher_request *req,
struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
unsigned int nbytes = req->nbytes, offset, copy, diff;
......@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
}
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
if (err < 0)
return err;
}
sg_num = ablkcipher_walk(req, &rctx->walk);
sg_num = hifn_cipher_walk(req, &rctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
......@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
kunmap_atomic(saddr, KM_SOFTIRQ0);
}
ablkcipher_walk_exit(&rctx->walk);
hifn_cipher_walk_exit(&rctx->walk);
}
req->base.complete(&req->base, error);
......
#ifndef __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
#define DIGEST_INITIAL_VAL_B 0xdd04
#define DIGEST_INITIAL_VAL_C 0xdd08
#define DIGEST_INITIAL_VAL_D 0xdd0c
#define DIGEST_INITIAL_VAL_E 0xdd10
#define DES_CMD_REG 0xdd58
#define SEC_ACCEL_CMD 0xde00
......@@ -70,6 +74,10 @@ struct sec_accel_config {
#define CFG_AES_LEN_128 (0 << 24)
#define CFG_AES_LEN_192 (1 << 24)
#define CFG_AES_LEN_256 (2 << 24)
#define CFG_NOT_FRAG (0 << 30)
#define CFG_FIRST_FRAG (1 << 30)
#define CFG_LAST_FRAG (2 << 30)
#define CFG_MID_FRAG (3 << 30)
u32 enc_p;
#define ENC_P_SRC(x) (x)
......@@ -90,7 +98,11 @@ struct sec_accel_config {
#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
u32 mac_digest;
#define MAC_DIGEST_P(x) (x)
#define MAC_FRAG_LEN(x) ((x) << 16)
u32 mac_iv;
#define MAC_INNER_IV_P(x) (x)
#define MAC_OUTER_IV_P(x) ((x) << 16)
}__attribute__ ((packed));
/*
* /-----------\ 0
......@@ -101,19 +113,37 @@ struct sec_accel_config {
* | IV IN | 4 * 4
* |-----------| 0x40 (inplace)
* | IV BUF | 4 * 4
* |-----------| 0x50
* |-----------| 0x80
* | DATA IN | 16 * x (max ->max_req_size)
* |-----------| 0x50 (inplace operation)
* |-----------| 0x80 (inplace operation)
* | DATA OUT | 16 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
/* Hashing memory map:
* /-----------\ 0
* | ACCEL CFG | 4 * 8
* |-----------| 0x20
* | Inner IV | 5 * 4
* |-----------| 0x34
* | Outer IV | 5 * 4
* |-----------| 0x48
* | Output BUF| 5 * 4
* |-----------| 0x80
* | DATA IN | 64 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
#define SRAM_CONFIG 0x00
#define SRAM_DATA_KEY_P 0x20
#define SRAM_DATA_IV 0x40
#define SRAM_DATA_IV_BUF 0x40
#define SRAM_DATA_IN_START 0x50
#define SRAM_DATA_OUT_START 0x50
#define SRAM_DATA_IN_START 0x80
#define SRAM_DATA_OUT_START 0x80
#define SRAM_HMAC_IV_IN 0x20
#define SRAM_HMAC_IV_OUT 0x34
#define SRAM_DIGEST_BUF 0x48
#define SRAM_CFG_SPACE 0x50
#define SRAM_CFG_SPACE 0x80
#endif
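
The new CFG_*_FRAG bits let the CESA driver hash messages larger than the SRAM data window in several passes. A sketch of how a driver might select the mode for one pass (assumed logic, for illustration; the helper is hypothetical):

/* Choose the fragmentation mode bits for one pass of a hash operation. */
static u32 mv_cesa_frag_mode(bool first, bool last)
{
	if (first && last)
		return CFG_NOT_FRAG;	/* whole message fits in a single pass */
	if (first)
		return CFG_FIRST_FRAG;
	if (last)
		return CFG_LAST_FRAG;
	return CFG_MID_FRAG;
}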
/* n2_asm.S: Hypervisor calls for NCS support.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2_core.h"
/* o0: queue type
* o1: RA of queue
* o2: num entries in queue
* o3: address of queue handle return
*/
ENTRY(sun4v_ncs_qconf)
mov HV_FAST_NCS_QCONF, %o5
ta HV_FAST_TRAP
stx %o1, [%o3]
retl
nop
ENDPROC(sun4v_ncs_qconf)
/* %o0: queue handle
* %o1: address of queue type return
* %o2: address of queue base address return
* %o3: address of queue num entries return
*/
ENTRY(sun4v_ncs_qinfo)
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_NCS_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ncs_qinfo)
/* %o0: queue handle
* %o1: address of head offset return
*/
ENTRY(sun4v_ncs_gethead)
mov %o1, %o2
mov HV_FAST_NCS_GETHEAD, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_gethead)
/* %o0: queue handle
* %o1: address of tail offset return
*/
ENTRY(sun4v_ncs_gettail)
mov %o1, %o2
mov HV_FAST_NCS_GETTAIL, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_gettail)
/* %o0: queue handle
* %o1: new tail offset
*/
ENTRY(sun4v_ncs_settail)
mov HV_FAST_NCS_SETTAIL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_settail)
/* %o0: queue handle
* %o1: address of devino return
*/
ENTRY(sun4v_ncs_qhandle_to_devino)
mov %o1, %o2
mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_qhandle_to_devino)
/* %o0: queue handle
* %o1: new head offset
*/
ENTRY(sun4v_ncs_sethead_marker)
mov HV_FAST_NCS_SETHEAD_MARKER, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_sethead_marker)
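Each wrapper above follows the same pattern: load the fast-trap function number into %o5, issue `ta HV_FAST_TRAP`, and store return values through the caller-supplied pointers (the pointers are stashed in %g or spare %o registers first, because the trap overwrites %o1-%o3). From C these wrappers are reached through the prototypes in n2_core.h; a hedged example of checking whether a configured queue has drained is sketched below, assuming HV_EOK as the usual sparc64 hypervisor success status.

/* Illustrative: has the hardware caught up with everything submitted to
 * this queue?  HEAD advances as entries are consumed, TAIL as they are
 * submitted. */
static int n2_queue_empty(unsigned long qhandle, bool *empty)
{
	unsigned long head, tail, hv_ret;

	hv_ret = sun4v_ncs_gethead(qhandle, &head);
	if (hv_ret != HV_EOK)
		return -EIO;
	hv_ret = sun4v_ncs_gettail(qhandle, &tail);
	if (hv_ret != HV_EOK)
		return -EIO;

	*empty = (head == tail);
	return 0;
}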
#ifndef _N2_CORE_H
#define _N2_CORE_H
#ifndef __ASSEMBLY__
struct ino_blob {
u64 intr;
u64 ino;
};
struct spu_mdesc_info {
u64 cfg_handle;
struct ino_blob *ino_table;
int num_intrs;
};
struct n2_crypto {
struct spu_mdesc_info cwq_info;
struct list_head cwq_list;
};
struct n2_mau {
struct spu_mdesc_info mau_info;
struct list_head mau_list;
};
#define CWQ_ENTRY_SIZE 64
#define CWQ_NUM_ENTRIES 64
#define MAU_ENTRY_SIZE 64
#define MAU_NUM_ENTRIES 64
struct cwq_initial_entry {
u64 control;
u64 src_addr;
u64 auth_key_addr;
u64 auth_iv_addr;
u64 final_auth_state_addr;
u64 enc_key_addr;
u64 enc_iv_addr;
u64 dest_addr;
};
struct cwq_ext_entry {
u64 len;
u64 src_addr;
u64 resv1;
u64 resv2;
u64 resv3;
u64 resv4;
u64 resv5;
u64 resv6;
};
struct cwq_final_entry {
u64 control;
u64 src_addr;
u64 resv1;
u64 resv2;
u64 resv3;
u64 resv4;
u64 resv5;
u64 resv6;
};
#define CONTROL_LEN 0x000000000000ffffULL
#define CONTROL_LEN_SHIFT 0
#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
#define CONTROL_HMAC_KEY_LEN_SHIFT 16
#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
#define CONTROL_ENC_TYPE_SHIFT 24
#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
#define ENC_TYPE_ALG_DES 0x08ULL
#define ENC_TYPE_ALG_3DES 0x0cULL
#define ENC_TYPE_ALG_AES128 0x10ULL
#define ENC_TYPE_ALG_AES192 0x14ULL
#define ENC_TYPE_ALG_AES256 0x18ULL
#define ENC_TYPE_ALG_RESERVED 0x1cULL
#define ENC_TYPE_ALG_MASK 0x1cULL
#define ENC_TYPE_CHAINING_ECB 0x00ULL
#define ENC_TYPE_CHAINING_CBC 0x01ULL
#define ENC_TYPE_CHAINING_CFB 0x02ULL
#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
#define ENC_TYPE_CHAINING_MASK 0x03ULL
#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
#define CONTROL_AUTH_TYPE_SHIFT 32
#define AUTH_TYPE_RESERVED 0x00ULL
#define AUTH_TYPE_MD5 0x01ULL
#define AUTH_TYPE_SHA1 0x02ULL
#define AUTH_TYPE_SHA256 0x03ULL
#define AUTH_TYPE_CRC32 0x04ULL
#define AUTH_TYPE_HMAC_MD5 0x05ULL
#define AUTH_TYPE_HMAC_SHA1 0x06ULL
#define AUTH_TYPE_HMAC_SHA256 0x07ULL
#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
#define CONTROL_STRAND 0x000000e000000000ULL
#define CONTROL_STRAND_SHIFT 37
#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
#define CONTROL_HASH_LEN_SHIFT 40
#define CONTROL_INTERRUPT 0x0001000000000000ULL
#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
#define CONTROL_RESERVED 0x001c000000000000ULL
#define CONTROL_HV_DONE 0x0004000000000000ULL
#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
#define CONTROL_ENCRYPT 0x0080000000000000ULL
#define CONTROL_OPCODE 0xff00000000000000ULL
#define CONTROL_OPCODE_SHIFT 56
#define OPCODE_INPLACE_BIT 0x80ULL
#define OPCODE_SSL_KEYBLOCK 0x10ULL
#define OPCODE_COPY 0x20ULL
#define OPCODE_ENCRYPT 0x40ULL
#define OPCODE_AUTH_MAC 0x41ULL
#endif /* !(__ASSEMBLY__) */
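The CONTROL_* masks above describe the 64-bit control word of a cwq_initial_entry. As a worked illustration (an assumption about intent, not code from the driver), a control word for a single self-contained, in-place AES-128/CBC encrypt entry could be assembled like this:

/* Illustrative: build the control word for one stand-alone encrypt entry.
 * "len" is the payload length in bytes; the LEN field is assumed to be
 * encoded as length - 1. */
static u64 cwq_encrypt_control(unsigned int len)
{
	u64 control;

	control  = (OPCODE_ENCRYPT | OPCODE_INPLACE_BIT) << CONTROL_OPCODE_SHIFT;
	control |= (ENC_TYPE_ALG_AES128 | ENC_TYPE_CHAINING_CBC) << CONTROL_ENC_TYPE_SHIFT;
	control |= (u64)(len - 1) << CONTROL_LEN_SHIFT;
	control |= CONTROL_START_OF_BLOCK | CONTROL_END_OF_BLOCK | CONTROL_ENCRYPT;

	return control;
}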
/* NCS v2.0 hypervisor interfaces */
#define HV_NCS_QTYPE_MAU 0x01
#define HV_NCS_QTYPE_CWQ 0x02
/* ncs_qconf()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_QCONF
* ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
* ARG1: Real address of queue, or handle for unconfigure
* ARG2: Number of entries in queue, zero for unconfigure
* RET0: status
* RET1: queue handle
*
* Configure a queue in the stream processing unit.
*
* The real address given as the base must be 64-byte
* aligned.
*
* The queue size can range from a minimum of 2 to a maximum
* of 64. The queue size must be a power of two.
*
* To unconfigure a queue, specify a length of zero and place
* the queue handle into ARG1.
*
* On configure success the hypervisor will set the FIRST, HEAD,
* and TAIL registers to the address of the first entry in the
* queue. The LAST register will be set to point to the last
* entry in the queue.
*/
#define HV_FAST_NCS_QCONF 0x111
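Given the rules spelled out above (64-byte-aligned real address, power-of-two entry count between 2 and 64), a caller would typically allocate the queue from page-aligned memory and hand its real address to the wrapper. A hedged sketch, with the allocation strategy assumed rather than taken from the driver:

/* Illustrative queue setup: one page trivially satisfies the 64-byte
 * alignment rule, and 64 entries of 64 bytes fill 4K exactly. */
static int ncs_cwq_setup(unsigned long *qhandle)
{
	unsigned long ra, hv_ret;
	void *q = (void *) __get_free_page(GFP_KERNEL);

	if (!q)
		return -ENOMEM;

	ra = __pa(q);	/* hypervisor expects a real (physical) address */
	hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, ra, 64, qhandle);
	if (hv_ret != HV_EOK) {
		free_page((unsigned long) q);
		return -ENODEV;
	}
	return 0;
}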
/* ncs_qinfo()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_QINFO
* ARG0: Queue handle
* RET0: status
* RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
* RET2: Queue base address
* RET3: Number of entries
*/
#define HV_FAST_NCS_QINFO 0x112
/* ncs_gethead()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_GETHEAD
* ARG0: Queue handle
* RET0: status
* RET1: queue head offset
*/
#define HV_FAST_NCS_GETHEAD 0x113
/* ncs_gettail()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_GETTAIL
* ARG0: Queue handle
* RET0: status
* RET1: queue tail offset
*/
#define HV_FAST_NCS_GETTAIL 0x114
/* ncs_settail()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_SETTAIL
* ARG0: Queue handle
* ARG1: New tail offset
* RET0: status
*/
#define HV_FAST_NCS_SETTAIL 0x115
/* ncs_qhandle_to_devino()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
* ARG0: Queue handle
* RET0: status
* RET1: devino
*/
#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
/* ncs_sethead_marker()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
* ARG0: Queue handle
* ARG1: New head offset
* RET0: status
*/
#define HV_FAST_NCS_SETHEAD_MARKER 0x117
#ifndef __ASSEMBLY__
extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
unsigned long queue_ra,
unsigned long num_entries,
unsigned long *qhandle);
extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
unsigned long *queue_type,
unsigned long *queue_ra,
unsigned long *num_entries);
extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
unsigned long *head);
extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
unsigned long *tail);
extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
unsigned long tail);
extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
unsigned long *devino);
extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
unsigned long head);
#endif /* !(__ASSEMBLY__) */
#endif /* _N2_CORE_H */
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
* Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
* Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -130,6 +130,9 @@
#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
#define TALITOS_CRCUISR_LO 0xf034
#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28
#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48
/*
* talitos descriptor header (hdr) bits
*/
......@@ -157,12 +160,16 @@
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000)
#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000)
#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000)
#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000)
#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000)
#define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000)
#define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
......@@ -181,9 +188,12 @@
#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000)
#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800)
#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400)
#define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300)
#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100)
#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000)
#define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000)
#define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
......
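The new MDEU/MDEUB mode bits above let talitos describe SHA-224 (primary MDEU) and SHA-384/512 (MDEU-B) in the descriptor header, while DESC_HDR_MODE0_MDEU_CONT marks a continuation of a previously started hash. A hedged illustration of composing a hash descriptor header from these bits; the helper and its arguments are invented for the example, and the real driver derives them from the ahash request state:

/* Illustrative: primary-EU (MODE0) header bits for one SHA-224 hash pass. */
static __be32 talitos_sha224_hdr(bool first, bool last)
{
	__be32 hdr = DESC_HDR_MODE0_MDEU_SHA224;

	if (first)
		hdr |= DESC_HDR_MODE0_MDEU_INIT;	/* load standard initial digest */
	else
		hdr |= DESC_HDR_MODE0_MDEU_CONT;	/* continue from stored context */
	if (last)
		hdr |= DESC_HDR_MODE0_MDEU_PAD;		/* engine appends final padding */

	return hdr;
}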
......@@ -103,6 +103,23 @@ struct blkcipher_walk {
unsigned int blocksize;
};
struct ablkcipher_walk {
struct {
struct page *page;
unsigned int offset;
} src, dst;
struct scatter_walk in;
unsigned int nbytes;
struct scatter_walk out;
unsigned int total;
struct list_head buffers;
u8 *iv_buffer;
u8 *iv;
int flags;
unsigned int blocksize;
};
extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;
......@@ -173,6 +190,12 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
unsigned int blocksize);
int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
return PTR_ALIGN(crypto_tfm_ctx(tfm),
......@@ -283,6 +306,23 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
walk->total = nbytes;
}
static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
walk->in.sg = src;
walk->out.sg = dst;
walk->total = nbytes;
INIT_LIST_HEAD(&walk->buffers);
}
static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
if (unlikely(!list_empty(&walk->buffers)))
__ablkcipher_walk_complete(walk);
}
static inline struct crypto_async_request *crypto_get_backlog(
struct crypto_queue *queue)
{
......
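The ablkcipher_walk interface added above mirrors blkcipher_walk for async requests: a driver initializes the walk over the request's scatterlists, maps the next chunk with ablkcipher_walk_phys(), reports how much is left via ablkcipher_walk_done(), and finally flushes any bounce buffers with ablkcipher_walk_complete(). A hedged sketch of that driver-side loop, with the hardware submission reduced to a stub:

/* Stand-in for the hardware-specific DMA submission of one chunk. */
static void submit_chunk(struct ablkcipher_walk *walk)
{
	/* walk->src.page/offset and walk->dst.page/offset describe the
	 * current chunk; a real driver would queue it to the engine here. */
}

/* Illustrative driver loop over an ablkcipher request, based on the
 * interfaces declared above. */
static int drv_walk_request(struct ablkcipher_request *req)
{
	struct ablkcipher_walk walk;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (walk.nbytes) {
		submit_chunk(&walk);

		/* 0 = the whole chunk was consumed */
		err = ablkcipher_walk_done(req, &walk, 0);
		if (err)
			break;
	}

	ablkcipher_walk_complete(&walk);
	return err;
}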
......@@ -24,7 +24,19 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/timer.h>
/**
 * struct padata_priv - Embedded in the user's data structure.
*
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
 * @cb_cpu: Callback cpu for serialization.
* @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function.
* @serial: Serial complete function.
*/
struct padata_priv {
struct list_head list;
struct parallel_data *pd;
......@@ -35,11 +47,29 @@ struct padata_priv {
void (*serial)(struct padata_priv *padata);
};
/**
* struct padata_list
*
* @list: List head.
* @lock: List lock.
*/
struct padata_list {
struct list_head list;
spinlock_t lock;
};
/**
* struct padata_queue - The percpu padata queues.
*
* @parallel: List to wait for parallelization.
* @reorder: List to wait for reordering after parallel processing.
* @serial: List to wait for serialization after reordering.
* @pwork: work struct for parallelization.
* @swork: work struct for serialization.
* @pd: Backpointer to the internal control structure.
* @num_obj: Number of objects that are processed by this cpu.
* @cpu_index: Index of the cpu.
*/
struct padata_queue {
struct padata_list parallel;
struct padata_list reorder;
......@@ -51,6 +81,20 @@ struct padata_queue {
int cpu_index;
};
/**
* struct parallel_data - Internal control structure, covers everything
* that depends on the cpumask in use.
*
* @pinst: padata instance.
* @queue: percpu padata queues.
* @seq_nr: The sequence number that will be attached to the next object.
* @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @max_seq_nr: Maximal used sequence number.
* @cpumask: cpumask in use.
* @lock: Reorder lock.
* @timer: Reorder timer.
*/
struct parallel_data {
struct padata_instance *pinst;
struct padata_queue *queue;
......@@ -60,8 +104,19 @@ struct parallel_data {
unsigned int max_seq_nr;
cpumask_var_t cpumask;
spinlock_t lock;
struct timer_list timer;
};
/**
* struct padata_instance - The overall control structure.
*
* @cpu_notifier: cpu hotplug notifier.
* @wq: The workqueue in use.
* @pd: The internal control structure.
* @cpumask: User supplied cpumask.
* @lock: padata instance lock.
* @flags: padata flags.
*/
struct padata_instance {
struct notifier_block cpu_notifier;
struct workqueue_struct *wq;
......
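The kernel-doc comments above document the objects a padata user touches: padata_priv is embedded in the user's own per-job structure, and the parallel()/serial() callbacks are invoked by the framework on the worker and callback CPUs respectively. A hedged usage sketch follows; it assumes the padata_do_parallel()/padata_do_serial() entry points of this padata version and invents the surrounding my_job structure purely for illustration.

/* Illustrative padata user: each job embeds a padata_priv, runs through
 * the parallel stage, and is then serialized in submission order. */
struct my_job {
	struct padata_priv padata;	/* must be embedded, see above */
	void *data;
};

static void my_parallel(struct padata_priv *padata)
{
	struct my_job *job = container_of(padata, struct my_job, padata);

	/* heavy per-job work runs here, possibly on another cpu */
	(void)job;
	padata_do_serial(padata);	/* hand the object to the reorder/serial path */
}

static void my_serial(struct padata_priv *padata)
{
	/* called in the original submission order on the callback cpu */
}

static int my_submit(struct padata_instance *pinst, struct my_job *job, int cb_cpu)
{
	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;
	return padata_do_parallel(pinst, &job->padata, cb_cpu);
}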