Commit 44a6b844 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:

 - Fixed algorithm construction hang when self-test fails.
 - Added SHA variants to talitos AEAD list.
 - New driver for Exynos random number generator.
 - Performance enhancements for arc4.
 - Added hwrng support to caam.
 - Added ahash support to caam.
 - Fixed bad kfree in aesni-intel.
 - Allow aesni-intel in FIPS mode.
 - Added atmel driver with support for AES/3DES/SHA.
 - Bug fixes for mv_cesa.
 - CRC hardware driver for BF60x family processors.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
  crypto: twofish-avx - remove useless instruction
  crypto: testmgr - add aead cbc aes hmac sha1,256,512 test vectors
  crypto: talitos - add sha224, sha384 and sha512 to existing AEAD algorithms
  crypto: talitos - export the talitos_submit function
  crypto: talitos - move talitos structures to header file
  crypto: atmel - add new tests to tcrypt
  crypto: atmel - add Atmel SHA1/SHA256 driver
  crypto: atmel - add Atmel DES/TDES driver
  crypto: atmel - add Atmel AES driver
  ARM: AT91SAM9G45: add crypto peripherals
  crypto: testmgr - allow aesni-intel and ghash_clmulni-intel in fips mode
  hwrng: exynos - Add support for Exynos random number generator
  crypto: aesni-intel - fix wrong kfree pointer
  crypto: caam - ERA retrieval and printing for SEC device
  crypto: caam - Using alloc_coherent for caam job rings
  crypto: algapi - Fix hang on crypto allocation
  crypto: arc4 - now arc needs blockcipher support
  crypto: caam - one tasklet per job ring
  crypto: caam - consolidate memory barriers from job ring en/dequeue
  crypto: caam - only query h/w in job ring dequeue path
  ...
parents 945c40c6 a4347886
@@ -183,6 +183,13 @@ static struct clk adc_op_clk = {
.rate_hz = 13200000,
};
/* AES/TDES/SHA clock - Only for sam9m11/sam9g56 */
static struct clk aestdessha_clk = {
.name = "aestdessha_clk",
.pmc_mask = 1 << AT91SAM9G45_ID_AESTDESSHA,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
@@ -212,6 +219,7 @@ static struct clk *periph_clocks[] __initdata = {
&udphs_clk,
&mmc1_clk,
&adc_op_clk,
&aestdessha_clk,
// irq0
};
@@ -232,6 +240,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel-trng", &trng_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel_sha", &aestdessha_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel_tdes", &aestdessha_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel_aes", &aestdessha_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "ffffee00.serial", &mck),
CLKDEV_CON_DEV_ID("usart", "fff8c000.serial", &usart0_clk),
@@ -388,7 +399,7 @@ static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = {
3, /* Ethernet */
0, /* Image Sensor Interface */
2, /* USB Device High speed port */
-0,
+0, /* AESTDESSHA Crypto HW Accelerators */
0, /* Multimedia Card Interface 1 */
0,
0, /* Advanced Interrupt Controller (IRQ0) */
...
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
#include <linux/atmel-mci.h>
#include <linux/platform_data/atmel-aes.h>
#include <linux/platform_data/at91_adc.h>
@@ -1830,6 +1831,130 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
void __init at91_add_device_serial(void) {}
#endif
/* --------------------------------------------------------------------
* SHA1/SHA256
* -------------------------------------------------------------------- */
#if defined(CONFIG_CRYPTO_DEV_ATMEL_SHA) || defined(CONFIG_CRYPTO_DEV_ATMEL_SHA_MODULE)
static struct resource sha_resources[] = {
{
.start = AT91SAM9G45_BASE_SHA,
.end = AT91SAM9G45_BASE_SHA + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9g45_sha_device = {
.name = "atmel_sha",
.id = -1,
.resource = sha_resources,
.num_resources = ARRAY_SIZE(sha_resources),
};
static void __init at91_add_device_sha(void)
{
platform_device_register(&at91sam9g45_sha_device);
}
#else
static void __init at91_add_device_sha(void) {}
#endif
/* --------------------------------------------------------------------
* DES/TDES
* -------------------------------------------------------------------- */
#if defined(CONFIG_CRYPTO_DEV_ATMEL_TDES) || defined(CONFIG_CRYPTO_DEV_ATMEL_TDES_MODULE)
static struct resource tdes_resources[] = {
[0] = {
.start = AT91SAM9G45_BASE_TDES,
.end = AT91SAM9G45_BASE_TDES + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9g45_tdes_device = {
.name = "atmel_tdes",
.id = -1,
.resource = tdes_resources,
.num_resources = ARRAY_SIZE(tdes_resources),
};
static void __init at91_add_device_tdes(void)
{
platform_device_register(&at91sam9g45_tdes_device);
}
#else
static void __init at91_add_device_tdes(void) {}
#endif
/* --------------------------------------------------------------------
* AES
* -------------------------------------------------------------------- */
#if defined(CONFIG_CRYPTO_DEV_ATMEL_AES) || defined(CONFIG_CRYPTO_DEV_ATMEL_AES_MODULE)
static struct aes_platform_data aes_data;
static u64 aes_dmamask = DMA_BIT_MASK(32);
static struct resource aes_resources[] = {
[0] = {
.start = AT91SAM9G45_BASE_AES,
.end = AT91SAM9G45_BASE_AES + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9g45_aes_device = {
.name = "atmel_aes",
.id = -1,
.dev = {
.dma_mask = &aes_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &aes_data,
},
.resource = aes_resources,
.num_resources = ARRAY_SIZE(aes_resources),
};
static void __init at91_add_device_aes(void)
{
struct at_dma_slave *atslave;
struct aes_dma_data *alt_atslave;
alt_atslave = kzalloc(sizeof(struct aes_dma_data), GFP_KERNEL);
/* DMA TX slave channel configuration */
atslave = &alt_atslave->txdata;
atslave->dma_dev = &at_hdmac_device.dev;
atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE | ATC_SRC_H2SEL_HW |
ATC_SRC_PER(AT_DMA_ID_AES_RX);
/* DMA RX slave channel configuration */
atslave = &alt_atslave->rxdata;
atslave->dma_dev = &at_hdmac_device.dev;
atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE | ATC_DST_H2SEL_HW |
ATC_DST_PER(AT_DMA_ID_AES_TX);
aes_data.dma_slave = alt_atslave;
platform_device_register(&at91sam9g45_aes_device);
}
#else
static void __init at91_add_device_aes(void) {}
#endif
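For context, the atmel-aes driver registered through this platform code would pick the DMA configuration back up in its probe routine roughly as sketched below. This is a hypothetical fragment (example_aes_probe is not part of the patch); the field names follow struct aes_platform_data and struct aes_dma_data as used in at91_add_device_aes() above.

#include <linux/platform_device.h>
#include <linux/platform_data/atmel-aes.h>

/* Hypothetical probe fragment: fetch the DMA slave configuration that
 * at91_add_device_aes() attached as platform data. */
static int example_aes_probe(struct platform_device *pdev)
{
	struct aes_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata || !pdata->dma_slave)
		return -ENXIO;

	/* pdata->dma_slave->txdata and ->rxdata carry the at_hdmac slave
	 * settings (dma_dev, cfg) filled in above; the driver would hand
	 * them to the DMA engine when requesting its channels. */
	return 0;
}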
/* -------------------------------------------------------------------- */
/*
@@ -1847,6 +1972,9 @@ static int __init at91_add_standard_devices(void)
at91_add_device_trng();
at91_add_device_watchdog();
at91_add_device_tc();
at91_add_device_sha();
at91_add_device_tdes();
at91_add_device_aes();
return 0;
}
...
@@ -136,6 +136,8 @@
#define AT_DMA_ID_SSC1_RX 8
#define AT_DMA_ID_AC97_TX 9
#define AT_DMA_ID_AC97_RX 10
#define AT_DMA_ID_AES_TX 11
#define AT_DMA_ID_AES_RX 12
#define AT_DMA_ID_MCI1 13
#endif
@@ -149,7 +149,6 @@ core-$(CONFIG_KVM) += arch/powerpc/kvm/
core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
drivers-$(CONFIG_CRYPTO_DEV_NX) += drivers/crypto/nx/
# Default to zImage, override when needed
all: zImage
...
/*
* Cryptographic API.
*
* Function for checking keys for the DES and Triple DES Encryption
* algorithms.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#ifndef __CRYPTO_DES_H__
#define __CRYPTO_DES_H__
extern int crypto_des_check_key(const u8*, unsigned int, u32*);
#endif /*__CRYPTO_DES_H__*/
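A hypothetical caller, only to illustrate the declared signature (the function name example_des_setkey is illustrative, not part of this header):

#include <linux/crypto.h>

/* Sketch: validate a DES key from a setkey callback; the helper reports
 * the failure reason through the tfm's crt_flags word. */
static int example_des_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	return crypto_des_check_key(key, keylen, &tfm->crt_flags);
}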
@@ -2,6 +2,9 @@
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
@@ -12,8 +15,10 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -30,16 +35,11 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o twofish_avx_glue.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
# enable AVX support only when $(AS) can actually assemble the instructions
ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes)
AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT
CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT
endif
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
/*
* Shared async block cipher helpers
*
* Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* Based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <asm/i387.h>
#include <asm/crypto/ablk_helper.h>
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
int err;
crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
& CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(child, key, key_len);
crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
& CRYPTO_TFM_RES_MASK);
return err;
}
EXPORT_SYMBOL_GPL(ablk_set_key);
int __ablk_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct blkcipher_desc desc;
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
desc.info = req->info;
desc.flags = 0;
return crypto_blkcipher_crt(desc.tfm)->encrypt(
&desc, req->dst, req->src, req->nbytes);
}
EXPORT_SYMBOL_GPL(__ablk_encrypt);
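/* Async encrypt entry point: if the FPU cannot be used in the current
 * context, defer the request to cryptd so it runs later in process
 * context; otherwise run the underlying __driver cipher directly. */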
int ablk_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_encrypt(cryptd_req);
} else {
return __ablk_encrypt(req);
}
}
EXPORT_SYMBOL_GPL(ablk_encrypt);
int ablk_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_decrypt(cryptd_req);
} else {
struct blkcipher_desc desc;
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
desc.info = req->info;
desc.flags = 0;
return crypto_blkcipher_crt(desc.tfm)->decrypt(
&desc, req->dst, req->src, req->nbytes);
}
}
EXPORT_SYMBOL_GPL(ablk_decrypt);
void ablk_exit(struct crypto_tfm *tfm)
{
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
EXPORT_SYMBOL_GPL(ablk_exit);
int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
{
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
crypto_ablkcipher_reqsize(&cryptd_tfm->base);
return 0;
}
EXPORT_SYMBOL_GPL(ablk_init_common);
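/* Default cra_init: bind this async wrapper to the matching internal
 * "__driver-<driver-name>" implementation through cryptd. */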
int ablk_init(struct crypto_tfm *tfm)
{
char drv_name[CRYPTO_MAX_ALG_NAME];
snprintf(drv_name, sizeof(drv_name), "__driver-%s",
crypto_tfm_alg_driver_name(tfm));
return ablk_init_common(tfm, drv_name);
}
EXPORT_SYMBOL_GPL(ablk_init);
MODULE_LICENSE("GPL");
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <crypto/aes.h>
-#include <asm/aes.h>
+#include <asm/crypto/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
...
@@ -30,7 +30,8 @@
#include <crypto/ctr.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
-#include <asm/aes.h>
+#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
@@ -52,10 +53,6 @@
#define HAS_XTS
#endif
struct async_aes_ctx {
struct cryptd_ablkcipher *cryptd_tfm;
};
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
@@ -377,87 +374,6 @@ static int ctr_crypt(struct blkcipher_desc *desc,
}
#endif
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
int err;
crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
& CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(child, key, key_len);
crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
& CRYPTO_TFM_RES_MASK);
return err;
}
static int ablk_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_encrypt(cryptd_req);
} else {
struct blkcipher_desc desc;
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
desc.info = req->info;
desc.flags = 0;
return crypto_blkcipher_crt(desc.tfm)->encrypt(
&desc, req->dst, req->src, req->nbytes);
}
}
static int ablk_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_decrypt(cryptd_req);
} else {
struct blkcipher_desc desc;
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
desc.info = req->info;
desc.flags = 0;
return crypto_blkcipher_crt(desc.tfm)->decrypt(
&desc, req->dst, req->src, req->nbytes);
}
}
static void ablk_exit(struct crypto_tfm *tfm)
{
struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
static int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
{
struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
crypto_ablkcipher_reqsize(&cryptd_tfm->base);
return 0;
}
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
@@ -613,7 +529,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
struct aesni_rfc4106_gcm_ctx *child_ctx =
aesni_rfc4106_gcm_ctx_get(cryptd_child);
-u8 *new_key_mem = NULL;
+u8 *new_key_align, *new_key_mem = NULL;
if (key_len < 4) {
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -637,9 +553,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
if (!new_key_mem)
return -ENOMEM;
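/* Keep new_key_mem unmodified so the later kfree() frees the original
 * allocation; only the aligned copy is tracked in new_key_align. */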
-new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
-memcpy(new_key_mem, key, key_len);
-key = new_key_mem;
+new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
+memcpy(new_key_align, key, key_len);
+key = new_key_align;
}
if (!irq_fpu_usable())
@@ -968,7 +884,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
@@ -989,7 +905,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
@@ -1033,7 +949,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
@@ -1098,7 +1014,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
@@ -1126,7 +1042,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
@@ -1150,7 +1066,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
@@ -1174,7 +1090,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
-.cra_ctxsize = sizeof(struct async_aes_ctx),
+.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
...
/*
* Shared glue code for 128bit block ciphers
*
* Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
* CTR part based on code (crypto/ctr.c) by:
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes, i, func_bytes;
bool fpu_enabled = false;
int err;
err = blkcipher_walk_virt(desc, walk);
while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
/* Process multi-block batch */
if (nbytes >= func_bytes) {
do {
gctx->funcs[i].fn_u.ecb(ctx, wdst,
wsrc);
wsrc += func_bytes;
wdst += func_bytes;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);
if (nbytes < bsize)
goto done;
}
}
done:
err = blkcipher_walk_done(desc, walk, nbytes);
}
glue_fpu_end(fpu_enabled);
return err;
}
int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 *iv = (u128 *)walk->iv;
do {
u128_xor(dst, src, iv);
fn(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
return nbytes;
}
int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
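/* CBC decryption is done back to front: ciphertext blocks are decrypted
 * in parallel batches and each result is then XORed with the preceding
 * ciphertext block (or with the IV for the very first block). */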
static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 last_iv;
unsigned int num_blocks, func_bytes;
unsigned int i;
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
last_iv = *src;
for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;
/* Process multi-block batch */
if (nbytes >= func_bytes) {
do {
nbytes -= func_bytes - bsize;
src -= num_blocks - 1;
dst -= num_blocks - 1;
gctx->funcs[i].fn_u.cbc(ctx, dst, src);
nbytes -= bsize;
if (nbytes < bsize)
goto done;
u128_xor(dst, dst, src - 1);
src -= 1;
dst -= 1;
} while (nbytes >= func_bytes);
if (nbytes < bsize)
goto done;
}
}
done:
u128_xor(dst, dst, (u128 *)walk->iv);
*(u128 *)walk->iv = last_iv;
return nbytes;
}
int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled, nbytes);
nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
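/* Handle the final partial block of a CTR request: run the counter-mode
 * function on a temporary u128, copy back only the remaining bytes, and
 * store the updated counter in the IV. */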
static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *src = (u8 *)walk->src.virt.addr;
u8 *dst = (u8 *)walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
u128 ctrblk;
u128 tmp;
be128_to_u128(&ctrblk, (be128 *)walk->iv);
memcpy(&tmp, src, nbytes);
fn_ctr(ctx, &tmp, &tmp, &ctrblk);
memcpy(dst, &tmp, nbytes);
u128_to_be128((be128 *)walk->iv, &ctrblk);
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_final_128bit);
static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
const unsigned int bsize = 128 / 8;
void *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 ctrblk;
unsigned int num_blocks, func_bytes;
unsigned int i;
be128_to_u128(&ctrblk, (be128 *)walk->iv);
/* Process multi-block batch */
for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;
if (nbytes >= func_bytes) {
do {
gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
src += num_blocks;
dst += num_blocks;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);
if (nbytes < bsize)
goto done;
}
}
done:
u128_to_be128((be128 *)walk->iv, &ctrblk);
return nbytes;
}
int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, bsize);
while ((nbytes = walk.nbytes) >= bsize) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled, nbytes);
nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
glue_fpu_end(fpu_enabled);
if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
MODULE_LICENSE("GPL");
@@ -468,7 +468,7 @@ W_PRECALC_SSSE3
*/
SHA1_VECTOR_ASM sha1_transform_ssse3
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
.macro W_PRECALC_AVX
...
@@ -35,7 +35,7 @@
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
unsigned int rounds);
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
unsigned int rounds);
#endif
@@ -184,7 +184,7 @@ static struct shash_alg alg = {
}
};
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
static bool __init avx_usable(void)
{
u64 xcr0;
@@ -209,7 +209,7 @@ static int __init sha1_ssse3_mod_init(void)
if (cpu_has_ssse3)
sha1_transform_asm = sha1_transform_ssse3;
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable())
sha1_transform_asm = sha1_transform_avx;
...
/*
* Twofish Cipher 8-way parallel algorithm (AVX/x86_64)
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "twofish-avx-x86_64-asm_64.S"
.text
/* structure of crypto context */
#define s0 0
#define s1 1024
#define s2 2048
#define s3 3072
#define w 4096
#define k 4128
/**********************************************************************
8-way AVX twofish
**********************************************************************/
#define CTX %rdi
#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3
#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7
#define RX %xmm8
#define RY %xmm9
#define RK1 %xmm10
#define RK2 %xmm11
#define RID1 %rax
#define RID1b %al
#define RID2 %rbx
#define RID2b %bl
#define RGI1 %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2 %rcx
#define RGI2bl %cl
#define RGI2bh %ch
#define RGS1 %r8
#define RGS1d %r8d
#define RGS2 %r9
#define RGS2d %r9d
#define RGS3 %r10
#define RGS3d %r10d
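/*
 * lookup_32bit: use the four bytes of the 32-bit value in 'src' (low 16
 * bits first, then 'src' is shifted right by 16) as indices into the
 * key-dependent tables t0..t3 and XOR the four 32-bit results into 'dst',
 * i.e. one application of the Twofish g/h function.
 */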
#define lookup_32bit(t0, t1, t2, t3, src, dst) \
movb src ## bl, RID1b; \
movb src ## bh, RID2b; \
movl t0(CTX, RID1, 4), dst ## d; \
xorl t1(CTX, RID2, 4), dst ## d; \
shrq $16, src; \
movb src ## bl, RID1b; \
movb src ## bh, RID2b; \
xorl t2(CTX, RID1, 4), dst ## d; \
xorl t3(CTX, RID2, 4), dst ## d;
#define G(a, x, t0, t1, t2, t3) \
vmovq a, RGI1; \
vpsrldq $8, a, x; \
vmovq x, RGI2; \
\
lookup_32bit(t0, t1, t2, t3, RGI1, RGS1); \
shrq $16, RGI1; \
lookup_32bit(t0, t1, t2, t3, RGI1, RGS2); \
shlq $32, RGS2; \
orq RGS1, RGS2; \
\
lookup_32bit(t0, t1, t2, t3, RGI2, RGS1); \
shrq $16, RGI2; \
lookup_32bit(t0, t1, t2, t3, RGI2, RGS3); \
shlq $32, RGS3; \
orq RGS1, RGS3; \
\
vmovq RGS2, x; \
vpinsrq $1, RGS3, x, x;
#define encround(a, b, c, d, x, y) \
G(a, x, s0, s1, s2, s3); \
G(b, y, s1, s2, s3, s0); \
vpaddd x, y, x; \
vpaddd y, x, y; \
vpaddd x, RK1, x; \
vpaddd y, RK2, y; \
vpxor x, c, c; \
vpsrld $1, c, x; \
vpslld $(32 - 1), c, c; \
vpor c, x, c; \
vpslld $1, d, x; \
vpsrld $(32 - 1), d, d; \
vpor d, x, d; \
vpxor d, y, d;
#define decround(a, b, c, d, x, y) \
G(a, x, s0, s1, s2, s3); \
G(b, y, s1, s2, s3, s0); \
vpaddd x, y, x; \
vpaddd y, x, y; \
vpaddd y, RK2, y; \
vpxor d, y, d; \
vpsrld $1, d, y; \
vpslld $(32 - 1), d, d; \
vpor d, y, d; \
vpslld $1, c, y; \
vpsrld $(32 - 1), c, c; \
vpor c, y, c; \
vpaddd x, RK1, x; \
vpxor x, c, c;
#define encrypt_round(n, a, b, c, d) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
encround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \
encround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY);
#define decrypt_round(n, a, b, c, d) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
decround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \
decround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY);
#define encrypt_cycle(n) \
encrypt_round((2*n), RA, RB, RC, RD); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB);
#define decrypt_cycle(n) \
decrypt_round(((2*n) + 1), RC, RD, RA, RB); \
decrypt_round((2*n), RA, RB, RC, RD);
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
vpunpckldq x1, x0, t0; \
vpunpckhdq x1, x0, t2; \
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x3; \
\
vpunpcklqdq t1, t0, x0; \
vpunpckhqdq t1, t0, x1; \
vpunpcklqdq x3, t2, x2; \
vpunpckhqdq x3, t2, x3;
#define inpack_blocks(in, x0, x1, x2, x3, wkey, t0, t1, t2) \
vpxor (0*4*4)(in), wkey, x0; \
vpxor (1*4*4)(in), wkey, x1; \
vpxor (2*4*4)(in), wkey, x2; \
vpxor (3*4*4)(in), wkey, x3; \
\
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
#define outunpack_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
vpxor x0, wkey, x0; \
vmovdqu x0, (0*4*4)(out); \
vpxor x1, wkey, x1; \
vmovdqu x1, (1*4*4)(out); \
vpxor x2, wkey, x2; \
vmovdqu x2, (2*4*4)(out); \
vpxor x3, wkey, x3; \
vmovdqu x3, (3*4*4)(out);
#define outunpack_xor_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
vpxor x0, wkey, x0; \
vpxor (0*4*4)(out), x0, x0; \
vmovdqu x0, (0*4*4)(out); \
vpxor x1, wkey, x1; \
vpxor (1*4*4)(out), x1, x1; \
vmovdqu x1, (1*4*4)(out); \
vpxor x2, wkey, x2; \
vpxor (2*4*4)(out), x2, x2; \
vmovdqu x2, (2*4*4)(out); \
vpxor x3, wkey, x3; \
vpxor (3*4*4)(out), x3, x3; \
vmovdqu x3, (3*4*4)(out);
.align 8
.global __twofish_enc_blk_8way
.type __twofish_enc_blk_8way,@function;
__twofish_enc_blk_8way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
pushq %rbx;
pushq %rcx;
vmovdqu w(CTX), RK1;
leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2);
xorq RID1, RID1;
xorq RID2, RID2;
encrypt_cycle(0);
encrypt_cycle(1);
encrypt_cycle(2);
encrypt_cycle(3);
encrypt_cycle(4);
encrypt_cycle(5);
encrypt_cycle(6);
encrypt_cycle(7);
vmovdqu (w+4*4)(CTX), RK1;
popq %rcx;
popq %rbx;
leaq (4*4*4)(%rsi), %rax;
testb %cl, %cl;
jnz __enc_xor8;
outunpack_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
ret;
__enc_xor8:
outunpack_xor_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
ret;
.align 8
.global twofish_dec_blk_8way
.type twofish_dec_blk_8way,@function;
twofish_dec_blk_8way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
pushq %rbx;
vmovdqu (w+4*4)(CTX), RK1;
leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
xorq RID1, RID1;
xorq RID2, RID2;
decrypt_cycle(7);
decrypt_cycle(6);
decrypt_cycle(5);
decrypt_cycle(4);
decrypt_cycle(3);
decrypt_cycle(2);
decrypt_cycle(1);
decrypt_cycle(0);
vmovdqu (w)(CTX), RK1;
popq %rbx;
leaq (4*4*4)(%rsi), %rax;
outunpack_blocks(%rsi, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2);
ret;
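On the C side these routines are reached through asmlinkage prototypes along the lines sketched below (the actual declarations live in the twofish AVX glue code, which is not reproduced on this page; the wrapper name is illustrative):

#include <linux/linkage.h>
#include <linux/types.h>
#include <crypto/twofish.h>

/* 8-way parallel AVX implementations provided by the assembly above
 * (%rdi = ctx, %rsi = dst, %rdx = src, %rcx = xor flag). */
asmlinkage void __twofish_enc_blk_8way(struct twofish_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_8way(struct twofish_ctx *ctx, u8 *dst,
				     const u8 *src);

/* Illustrative wrapper for plain (non-XOR) encryption of 8 blocks. */
static inline void example_twofish_enc_8way(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_8way(ctx, dst, src, false);
}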
/*
* Shared async block cipher helpers
*/
#ifndef _CRYPTO_ABLK_HELPER_H
#define _CRYPTO_ABLK_HELPER_H
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <crypto/cryptd.h>
struct async_helper_ctx {
struct cryptd_ablkcipher *cryptd_tfm;
};
extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len);
extern int __ablk_encrypt(struct ablkcipher_request *req);
extern int ablk_encrypt(struct ablkcipher_request *req);
extern int ablk_decrypt(struct ablkcipher_request *req);
extern void ablk_exit(struct crypto_tfm *tfm);
extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
extern int ablk_init(struct crypto_tfm *tfm);
#endif /* _CRYPTO_ABLK_HELPER_H */
/*
* Shared glue code for 128bit block ciphers
*/
#ifndef _CRYPTO_GLUE_HELPER_H
#define _CRYPTO_GLUE_HELPER_H
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <asm/i387.h>
#include <crypto/b128ops.h>
typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
u128 *iv);
#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
struct common_glue_func_entry {
unsigned int num_blocks; /* number of blocks that @fn will process */
union {
common_glue_func_t ecb;
common_glue_cbc_func_t cbc;
common_glue_ctr_func_t ctr;
} fn_u;
};
struct common_glue_ctx {
unsigned int num_funcs;
int fpu_blocks_limit; /* -1 means fpu not needed at all */
/*
* First funcs entry must have largest num_blocks and last funcs entry
* must have num_blocks == 1!
*/
struct common_glue_func_entry funcs[];
};
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
struct blkcipher_desc *desc,
bool fpu_enabled, unsigned int nbytes)
{
if (likely(fpu_blocks_limit < 0))
return false;
if (fpu_enabled)
return true;
/*
* Vector-registers are only used when chunk to be processed is large
* enough, so do not enable FPU until it is necessary.
*/
if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
return false;
if (desc) {
/* prevent sleeping if FPU is in use */
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
}
kernel_fpu_begin();
return true;
}
static inline void glue_fpu_end(bool fpu_enabled)
{
if (fpu_enabled)
kernel_fpu_end();
}
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
dst->a = cpu_to_be64(src->a);
dst->b = cpu_to_be64(src->b);
}
static inline void be128_to_u128(u128 *dst, const be128 *src)
{
dst->a = be64_to_cpu(src->a);
dst->b = be64_to_cpu(src->b);
}
static inline void u128_inc(u128 *i)
{
i->b++;
if (!i->b)
i->a++;
}
extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
#endif /* _CRYPTO_GLUE_HELPER_H */
#ifndef ASM_X86_SERPENT_AVX_H
#define ASM_X86_SERPENT_AVX_H
#include <linux/crypto.h>
#include <crypto/serpent.h>
#define SERPENT_PARALLEL_BLOCKS 8
asmlinkage void __serpent_enc_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_8way_avx(ctx, dst, src, false);
}
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_8way_avx(ctx, dst, src, true);
}
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
serpent_dec_blk_8way_avx(ctx, dst, src);
}
#endif
-#ifndef ASM_X86_SERPENT_H
-#define ASM_X86_SERPENT_H
+#ifndef ASM_X86_SERPENT_SSE2_H
+#define ASM_X86_SERPENT_SSE2_H
#include <linux/crypto.h>
#include <crypto/serpent.h>
...
#ifndef ASM_X86_TWOFISH_H
#define ASM_X86_TWOFISH_H
#include <linux/crypto.h>
#include <crypto/twofish.h>
#include <crypto/lrw.h>
#include <crypto/b128ops.h>
struct twofish_lrw_ctx {
struct lrw_table_ctx lrw_table;
struct twofish_ctx twofish_ctx;
};
struct twofish_xts_ctx {
struct twofish_ctx tweak_ctx;
struct twofish_ctx crypt_ctx;
};
/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
/* 3-way parallel cipher functions */
asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
/* helpers from twofish_x86_64-3way module */
extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
u128 *iv);
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
u128 *iv);
extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
#endif /* ASM_X86_TWOFISH_H */
@@ -24,22 +24,6 @@
static LIST_HEAD(crypto_template_list);
void crypto_larval_error(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
alg = crypto_alg_lookup(name, type, mask);
if (alg) {
if (crypto_is_larval(alg)) {
struct crypto_larval *larval = (void *)alg;
complete_all(&larval->completion);
}
crypto_mod_put(alg);
}
}
EXPORT_SYMBOL_GPL(crypto_larval_error);
static inline int crypto_set_driver_name(struct crypto_alg *alg)
{
static const char suffix[] = "-generic";
@@ -295,7 +279,6 @@ void crypto_alg_tested(const char *name, int err)
continue;
larval->adult = alg;
complete_all(&larval->completion);
continue;
}
...
@@ -83,7 +83,6 @@ void crypto_exit_compress_ops(struct crypto_tfm *tfm);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_larval_error(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
...
@@ -263,3 +263,15 @@ config HW_RANDOM_PSERIES
module will be called pseries-rng.
If unsure, say Y.
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on EXYNOS SOCs.
To compile this driver as a module, choose M here: the
module will be called exynos-rng.
If unsure, say Y.
@@ -23,3 +23,4 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
@@ -15,3 +15,8 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o