Commit 737aed94 authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: save ciphertext for CTS IV

The crypto API requires that the last blocks of ciphertext be saved
in req->info for use as the IV in CTS mode. The ccree driver was not
doing this; this patch fixes that.

The bug manifested with the cts(cbc(aes)) mode in the tcrypt tests.

Fixes: 302ef8eb ("Add CryptoCell skcipher support")
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 19dfd881
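
For context before the diff, here is a minimal sketch, in the spirit of this change, of what the crypto API expects a driver to do around a CTS-capable cipher operation. The helper names (save_cts_iv_before_decrypt, set_cts_iv_on_complete) and the backup_iv buffer are illustrative placeholders, not ccree symbols; only scatterwalk_map_and_copy() and the ablkcipher request fields are real kernel API.

#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/scatterwalk.h>

/* Decrypt path: stash the last ivsize bytes of the source up front,
 * since an in-place operation overwrites them before completion. */
static int save_cts_iv_before_decrypt(struct ablkcipher_request *req,
                                      unsigned int ivsize, u8 **backup_iv)
{
        *backup_iv = kmalloc(ivsize, GFP_KERNEL);
        if (!*backup_iv)
                return -ENOMEM;
        scatterwalk_map_and_copy(*backup_iv, req->src,
                                 req->nbytes - ivsize, ivsize, 0);
        return 0;
}

/* Completion path: leave the last ciphertext block in req->info so a
 * chained CTS request can pick it up as its IV. */
static void set_cts_iv_on_complete(struct ablkcipher_request *req,
                                   unsigned int ivsize, bool decrypt,
                                   u8 *backup_iv)
{
        if (decrypt) {
                memcpy(req->info, backup_iv, ivsize);
                kfree(backup_iv);
        } else {
                scatterwalk_map_and_copy(req->info, req->dst,
                                         req->nbytes - ivsize, ivsize, 0);
        }
}

The point mirrored by the patch below: for decryption the last ciphertext block has to be captured from req->src before processing, because an in-place operation destroys it; for encryption it can simply be read back from req->dst at completion time.
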
@@ -24,6 +24,7 @@
 #include <crypto/ctr.h>
 #include <crypto/des.h>
 #include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
 #include "ssi_config.h"
 #include "ssi_driver.h"
@@ -697,6 +698,7 @@ static int ssi_blkcipher_complete(struct device *dev,
 {
        int completion_error = 0;
        u32 inflight_counter;
+       struct ablkcipher_request *req = (struct ablkcipher_request *)areq;

        ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
@@ -707,6 +709,22 @@ static int ssi_blkcipher_complete(struct device *dev,
        ctx_p->drvdata->inflight_counter--;

        if (areq) {
+               /*
+                * The crypto API expects us to set the req->info to the last
+                * ciphertext block. For encrypt, simply copy from the result.
+                * For decrypt, we must copy from a saved buffer since this
+                * could be an in-place decryption operation and the src is
+                * lost by this point.
+                */
+               if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+                       memcpy(req->info, req_ctx->backup_info, ivsize);
+                       kfree(req_ctx->backup_info);
+               } else {
+                       scatterwalk_map_and_copy(req->info, req->dst,
+                                                (req->nbytes - ivsize),
+                                                ivsize, 0);
+               }
+
                ablkcipher_request_complete(areq, completion_error);
                return 0;
        }
@@ -739,11 +757,13 @@ static int ssi_blkcipher_process(
        if (unlikely(validate_data_size(ctx_p, nbytes))) {
                SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto exit_process;
        }
        if (nbytes == 0) {
                /* No data to process is valid */
-               return 0;
+               rc = 0;
+               goto exit_process;
        }
        /*For CTS in case of data size aligned to 16 use CBC mode*/
        if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
@@ -818,6 +838,9 @@ static int ssi_blkcipher_process(
        if (cts_restore_flag != 0)
                ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;

+       if (rc != -EINPROGRESS)
+               kfree(req_ctx->backup_info);
+
        return rc;
 }
@@ -858,7 +881,6 @@ static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
        struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);

-       req_ctx->backup_info = req->info;
        req_ctx->is_giv = false;

        return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
@@ -871,8 +893,18 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
        struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);

-       req_ctx->backup_info = req->info;
+       /*
+        * Allocate and save the last IV sized bytes of the source, which will
+        * be lost in case of in-place decryption and might be needed for CTS.
+        */
+       req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
+       if (!req_ctx->backup_info)
+               return -ENOMEM;
+
+       scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+                                (req->nbytes - ivsize), ivsize, 0);
        req_ctx->is_giv = false;

        return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
 }