Skip to content
/ linux Public

Commit 4be2f4c

Browse files
lin755 authored and Sasha Levin committed
crypto: hisilicon/sec2 - fix for sec spec check
[ Upstream commit f4f353c ]

During encryption and decryption, user requests must be checked first. If a request uses specifications that the hardware does not support, the software (fallback) implementation is used to process it instead.

Fixes: 2f072d7 ("crypto: hisilicon - Add aead support on SEC2")
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Stable-dep-of: e750743 ("crypto: hisilicon/sec2 - support skcipher/aead fallback for hardware queue unavailable")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent b8e7b62 commit 4be2f4c

File tree

2 files changed

+39
-63
lines changed

2 files changed

+39
-63
lines changed

drivers/crypto/hisilicon/sec2/sec.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@ struct sec_aead_req {
3737
u8 *a_ivin;
3838
dma_addr_t a_ivin_dma;
3939
struct aead_request *aead_req;
40-
bool fallback;
4140
};
4241

4342
/* SEC request of Crypto */

drivers/crypto/hisilicon/sec2/sec_crypto.c

Lines changed: 39 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -690,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
690690

691691
c_ctx->fallback = false;
692692

693-
/* Currently, only XTS mode need fallback tfm when using 192bit key */
694-
if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
695-
return 0;
696-
697693
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
698694
CRYPTO_ALG_NEED_FALLBACK);
699695
if (IS_ERR(c_ctx->fbtfm)) {
700-
pr_err("failed to alloc xts mode fallback tfm!\n");
696+
pr_err("failed to alloc fallback tfm for %s!\n", alg);
701697
return PTR_ERR(c_ctx->fbtfm);
702698
}
703699

@@ -859,7 +855,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
859855
}
860856

861857
memcpy(c_ctx->c_key, key, keylen);
862-
if (c_ctx->fallback && c_ctx->fbtfm) {
858+
if (c_ctx->fbtfm) {
863859
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
864860
if (ret) {
865861
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1160,8 +1156,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
11601156
}
11611157

11621158
ret = crypto_authenc_extractkeys(&keys, key, keylen);
1163-
if (ret)
1159+
if (ret) {
1160+
dev_err(dev, "sec extract aead keys err!\n");
11641161
goto bad_key;
1162+
}
11651163

11661164
ret = sec_aead_aes_set_key(c_ctx, &keys);
11671165
if (ret) {
@@ -1175,12 +1173,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
11751173
goto bad_key;
11761174
}
11771175

1178-
if (ctx->a_ctx.a_key_len & WORD_MASK) {
1179-
ret = -EINVAL;
1180-
dev_err(dev, "AUTH key length error!\n");
1181-
goto bad_key;
1182-
}
1183-
11841176
ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
11851177
if (ret) {
11861178
dev_err(dev, "set sec fallback key err!\n");
@@ -2002,8 +1994,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
20021994
return sec_aead_ctx_init(tfm, "sha512");
20031995
}
20041996

2005-
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
2006-
struct sec_req *sreq)
1997+
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
20071998
{
20081999
u32 cryptlen = sreq->c_req.sk_req->cryptlen;
20092000
struct device *dev = ctx->dev;
@@ -2027,10 +2018,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
20272018
case SEC_CMODE_CFB:
20282019
case SEC_CMODE_OFB:
20292020
case SEC_CMODE_CTR:
2030-
if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
2031-
dev_err(dev, "skcipher HW version error!\n");
2032-
ret = -EINVAL;
2033-
}
20342021
break;
20352022
default:
20362023
ret = -EINVAL;
@@ -2039,17 +2026,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
20392026
return ret;
20402027
}
20412028

2042-
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2029+
static int sec_skcipher_param_check(struct sec_ctx *ctx,
2030+
struct sec_req *sreq, bool *need_fallback)
20432031
{
20442032
struct skcipher_request *sk_req = sreq->c_req.sk_req;
20452033
struct device *dev = ctx->dev;
20462034
u8 c_alg = ctx->c_ctx.c_alg;
20472035

2048-
if (unlikely(!sk_req->src || !sk_req->dst ||
2049-
sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
2036+
if (unlikely(!sk_req->src || !sk_req->dst)) {
20502037
dev_err(dev, "skcipher input param error!\n");
20512038
return -EINVAL;
20522039
}
2040+
2041+
if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
2042+
*need_fallback = true;
2043+
20532044
sreq->c_req.c_len = sk_req->cryptlen;
20542045

20552046
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2107,6 +2098,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
21072098
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
21082099
struct sec_req *req = skcipher_request_ctx(sk_req);
21092100
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2101+
bool need_fallback = false;
21102102
int ret;
21112103

21122104
if (!sk_req->cryptlen) {
@@ -2120,11 +2112,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
21202112
req->c_req.encrypt = encrypt;
21212113
req->ctx = ctx;
21222114

2123-
ret = sec_skcipher_param_check(ctx, req);
2115+
ret = sec_skcipher_param_check(ctx, req, &need_fallback);
21242116
if (unlikely(ret))
21252117
return -EINVAL;
21262118

2127-
if (unlikely(ctx->c_ctx.fallback))
2119+
if (unlikely(ctx->c_ctx.fallback || need_fallback))
21282120
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
21292121

21302122
return ctx->req_op->process(ctx, req);
@@ -2257,52 +2249,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
22572249
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
22582250
size_t sz = crypto_aead_authsize(tfm);
22592251
u8 c_mode = ctx->c_ctx.c_mode;
2260-
struct device *dev = ctx->dev;
22612252
int ret;
22622253

2263-
/* Hardware does not handle cases where authsize is not 4 bytes aligned */
2264-
if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) {
2265-
sreq->aead_req.fallback = true;
2254+
if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
22662255
return -EINVAL;
2267-
}
22682256

22692257
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2270-
req->assoclen > SEC_MAX_AAD_LEN)) {
2271-
dev_err(dev, "aead input spec error!\n");
2258+
req->assoclen > SEC_MAX_AAD_LEN))
22722259
return -EINVAL;
2273-
}
22742260

22752261
if (c_mode == SEC_CMODE_CCM) {
2276-
if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
2277-
dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
2262+
if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
22782263
return -EINVAL;
2279-
}
2280-
ret = aead_iv_demension_check(req);
2281-
if (ret) {
2282-
dev_err(dev, "aead input iv param error!\n");
2283-
return ret;
2284-
}
2285-
}
22862264

2287-
if (sreq->c_req.encrypt)
2288-
sreq->c_req.c_len = req->cryptlen;
2289-
else
2290-
sreq->c_req.c_len = req->cryptlen - sz;
2291-
if (c_mode == SEC_CMODE_CBC) {
2292-
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2293-
dev_err(dev, "aead crypto length error!\n");
2265+
ret = aead_iv_demension_check(req);
2266+
if (unlikely(ret))
2267+
return -EINVAL;
2268+
} else if (c_mode == SEC_CMODE_CBC) {
2269+
if (unlikely(sz & WORD_MASK))
2270+
return -EINVAL;
2271+
if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
22942272
return -EINVAL;
2295-
}
22962273
}
22972274

22982275
return 0;
22992276
}
23002277

2301-
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2278+
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
23022279
{
23032280
struct aead_request *req = sreq->aead_req.aead_req;
2304-
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2305-
size_t authsize = crypto_aead_authsize(tfm);
23062281
struct device *dev = ctx->dev;
23072282
u8 c_alg = ctx->c_ctx.c_alg;
23082283

@@ -2311,12 +2286,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
23112286
return -EINVAL;
23122287
}
23132288

2314-
if (ctx->sec->qm.ver == QM_HW_V2) {
2315-
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
2316-
req->cryptlen <= authsize))) {
2317-
sreq->aead_req.fallback = true;
2318-
return -EINVAL;
2319-
}
2289+
if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
2290+
sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2291+
dev_err(dev, "aead cbc mode input data length error!\n");
2292+
return -EINVAL;
23202293
}
23212294

23222295
/* Support AES or SM4 */
@@ -2325,8 +2298,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
23252298
return -EINVAL;
23262299
}
23272300

2328-
if (unlikely(sec_aead_spec_check(ctx, sreq)))
2301+
if (unlikely(sec_aead_spec_check(ctx, sreq))) {
2302+
*need_fallback = true;
23292303
return -EINVAL;
2304+
}
23302305

23312306
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
23322307
SEC_PBUF_SZ)
@@ -2370,17 +2345,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
23702345
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
23712346
struct sec_req *req = aead_request_ctx(a_req);
23722347
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2348+
size_t sz = crypto_aead_authsize(tfm);
2349+
bool need_fallback = false;
23732350
int ret;
23742351

23752352
req->flag = a_req->base.flags;
23762353
req->aead_req.aead_req = a_req;
23772354
req->c_req.encrypt = encrypt;
23782355
req->ctx = ctx;
2379-
req->aead_req.fallback = false;
2356+
req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
23802357

2381-
ret = sec_aead_param_check(ctx, req);
2358+
ret = sec_aead_param_check(ctx, req, &need_fallback);
23822359
if (unlikely(ret)) {
2383-
if (req->aead_req.fallback)
2360+
if (need_fallback)
23842361
return sec_aead_soft_crypto(ctx, a_req, encrypt);
23852362
return -EINVAL;
23862363
}

0 commit comments

Comments (0)