Commit 572b6278 authored by Arnd Bergmann, committed by Greg Kroah-Hartman

crypto: ccp - Reduce maximum stack usage

[ Upstream commit 72c8117a ]

Each of the operations in ccp_run_cmd() needs several hundred
bytes of kernel stack. Depending on the inlining, these may
need separate stack slots that add up to more than the warning
limit, as shown in this clang-based build:

drivers/crypto/ccp/ccp-ops.c:871:12: error: stack frame size of 1164 bytes in function 'ccp_run_aes_cmd' [-Werror,-Wframe-larger-than=]
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)

The problem can also occur without triggering the warning, e.g. in the
ccp_run_cmd()->ccp_run_aes_cmd()->ccp_run_aes_gcm_cmd() call chain, where
the nested frames add up to over 2000 bytes.
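
To see why, here is a minimal sketch using hypothetical helpers (not code
from ccp-ops.c): when the compiler inlines several helpers into one
dispatcher, each helper's large locals may get their own slot in the
dispatcher's single frame, so the sizes add up even though only one path
ever executes:

static int do_large_a(void)
{
	char buf[500] = { 0 };	/* stands in for a large on-stack workarea */

	return buf[0];
}

static int do_large_b(void)
{
	char buf[600] = { 0 };

	return buf[0];
}

static int dispatch(int op)
{
	/* If both helpers are inlined, dispatch() may carry ~1100 bytes
	 * of locals and trip -Wframe-larger-than=, even though only one
	 * branch runs per call.
	 */
	return op ? do_large_a() : do_large_b();
}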

Mark each individual function as 'noinline_for_stack' to prevent
this from happening, and move the calls for the two special AES modes
into the top-level function. This keeps the actual combined stack
usage to the minimum: 828 bytes for ccp_run_aes_gcm_cmd() and
at most 524 bytes for each of the other cases.
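
For reference, noinline_for_stack does not change behaviour; it is a
self-documenting alias for the noinline attribute, so each annotated
helper keeps its own frame that only occupies stack while that helper
actually runs:

/* Roughly what current kernels define in include/linux/compiler_types.h:
 * an alias whose only purpose is to document why inlining is suppressed.
 */
#define noinline_for_stack	noinline

Moving the CMAC and GCM dispatch out of ccp_run_aes_cmd() and into
ccp_run_cmd() matters for the same reason: ccp_run_aes_cmd()'s frame is
no longer live while ccp_run_aes_gcm_cmd() runs, which is how the
combined usage stays at 828 bytes on the GCM path.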

Fixes: 63b94509 ("crypto: ccp - CCP device driver and interface support")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 2f27af9f
@@ -458,8 +458,8 @@ static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
 	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
 }
 
-static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
-				struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
 	struct ccp_dm_workarea key, ctx;
@@ -614,8 +614,8 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
-			       struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
 	struct ccp_dm_workarea key, ctx, final_wa, tag;
@@ -897,7 +897,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
 	struct ccp_dm_workarea key, ctx;
@@ -907,12 +908,6 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	bool in_place = false;
 	int ret;
 
-	if (aes->mode == CCP_AES_MODE_CMAC)
-		return ccp_run_aes_cmac_cmd(cmd_q, cmd);
-
-	if (aes->mode == CCP_AES_MODE_GCM)
-		return ccp_run_aes_gcm_cmd(cmd_q, cmd);
-
 	if (!((aes->key_len == AES_KEYSIZE_128) ||
 	      (aes->key_len == AES_KEYSIZE_192) ||
 	      (aes->key_len == AES_KEYSIZE_256)))
@@ -1080,8 +1075,8 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
-			       struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
 	struct ccp_dm_workarea key, ctx;
@@ -1280,7 +1275,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_des3_engine *des3 = &cmd->u.des3;
@@ -1476,7 +1472,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_sha_engine *sha = &cmd->u.sha;
 	struct ccp_dm_workarea ctx;
@@ -1820,7 +1817,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
 	struct ccp_dm_workarea exp, src, dst;
@@ -1951,8 +1949,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
-				struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_passthru_engine *pt = &cmd->u.passthru;
 	struct ccp_dm_workarea mask;
@@ -2083,7 +2081,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+static noinline_for_stack int
+ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
 				      struct ccp_cmd *cmd)
 {
 	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
@@ -2424,7 +2423,8 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
@@ -2461,8 +2461,18 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	switch (cmd->engine) {
 	case CCP_ENGINE_AES:
-		ret = ccp_run_aes_cmd(cmd_q, cmd);
-		break;
+		switch (cmd->u.aes.mode) {
+		case CCP_AES_MODE_CMAC:
+			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
+			break;
+		case CCP_AES_MODE_GCM:
+			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
+			break;
+		default:
+			ret = ccp_run_aes_cmd(cmd_q, cmd);
+			break;
+		}
+		break;
 	case CCP_ENGINE_XTS_AES_128:
 		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
 		break;