Commit e211506

Ard Biesheuvel authored and Herbert Xu committed
crypto: arm64/aes-blk - add a non-SIMD fallback for synchronous CTR
To accommodate systems that may disallow use of the NEON in kernel mode in some circumstances, introduce a C fallback for synchronous AES in CTR mode, and use it if may_use_simd() returns false.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
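The synchronous "ctr(aes)" skcipher cannot be deferred to a helper thread the way the async crypto_simd wrappers defer work when the NEON is unavailable, so it needs a plain C path it can always take. The sketch below shows the general arm64 gating pattern this commit applies; may_use_simd(), kernel_neon_begin() and kernel_neon_end() are real arm64 kernel interfaces, while struct my_ctx, do_scalar() and do_neon() are hypothetical placeholders, so treat this as a minimal sketch rather than code from the commit:

/*
 * Minimal sketch of the NEON-gating pattern (illustration only).
 * struct my_ctx, do_scalar() and do_neon() are hypothetical.
 */
#include <asm/neon.h>
#include <asm/simd.h>

struct my_ctx;                           /* hypothetical context type */
int do_scalar(struct my_ctx *ctx);       /* plain C fallback */
void do_neon(struct my_ctx *ctx);        /* NEON fast path */

int my_crypt(struct my_ctx *ctx)
{
        if (!may_use_simd())             /* NEON not usable in this context */
                return do_scalar(ctx);

        kernel_neon_begin();             /* claim the FP/SIMD registers */
        do_neon(ctx);
        kernel_neon_end();
        return 0;
}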
1 parent: 5092fcf

File tree: 3 files changed, +101 −17 lines

arch/arm64/crypto/Kconfig (4 additions, 2 deletions)

@@ -64,15 +64,17 @@ config CRYPTO_AES_ARM64_CE_CCM
 
 config CRYPTO_AES_ARM64_CE_BLK
         tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
-        depends on ARM64 && KERNEL_MODE_NEON
+        depends on KERNEL_MODE_NEON
         select CRYPTO_BLKCIPHER
         select CRYPTO_AES_ARM64_CE
+        select CRYPTO_AES_ARM64
         select CRYPTO_SIMD
 
 config CRYPTO_AES_ARM64_NEON_BLK
         tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
-        depends on ARM64 && KERNEL_MODE_NEON
+        depends on KERNEL_MODE_NEON
         select CRYPTO_BLKCIPHER
+        select CRYPTO_AES_ARM64
         select CRYPTO_AES
         select CRYPTO_SIMD
 

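A note on the Kconfig hunk above: both block-mode options now select CRYPTO_AES_ARM64, the scalar AES core that exports the __aes_arm64_encrypt() routine the new fallback code calls into. The `ARM64 &&` half of each dependency is dropped because arch/arm64/crypto/Kconfig is only sourced on arm64 builds, where that condition is redundant.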
arch/arm64/crypto/aes-ctr-fallback.h (new file, 53 additions)

/*
 * Fallback for sync aes(ctr) in contexts where kernel mode NEON
 * is not allowed
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static inline int aes_ctr_encrypt_fallback(struct crypto_aes_ctx *ctx,
                                           struct skcipher_request *req)
{
        struct skcipher_walk walk;
        u8 buf[AES_BLOCK_SIZE];
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        while (walk.nbytes > 0) {
                u8 *dst = walk.dst.virt.addr;
                u8 *src = walk.src.virt.addr;
                int nbytes = walk.nbytes;
                int tail = 0;

                if (nbytes < walk.total) {
                        nbytes = round_down(nbytes, AES_BLOCK_SIZE);
                        tail = walk.nbytes % AES_BLOCK_SIZE;
                }

                do {
                        int bsize = min(nbytes, AES_BLOCK_SIZE);

                        __aes_arm64_encrypt(ctx->key_enc, buf, walk.iv,
                                            6 + ctx->key_length / 4);
                        crypto_xor_cpy(dst, src, buf, bsize);
                        crypto_inc(walk.iv, AES_BLOCK_SIZE);

                        dst += AES_BLOCK_SIZE;
                        src += AES_BLOCK_SIZE;
                        nbytes -= AES_BLOCK_SIZE;
                } while (nbytes > 0);

                err = skcipher_walk_done(&walk, tail);
        }
        return err;
}
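To make the loop above easier to follow, here is a hedged, self-contained userspace illustration of the same CTR structure: encrypt the counter to get a keystream block, XOR only as many bytes as remain (so the final block may be partial, as crypto_xor_cpy(dst, src, buf, bsize) handles above), and increment the counter big-endian as crypto_inc() does. toy_encrypt_block(), ctr_inc() and ctr_crypt() are stand-ins invented for this sketch; the "cipher" is not AES and nothing here is kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Toy block "cipher" (illustration only, NOT AES, NOT secure). */
static void toy_encrypt_block(const uint8_t key[BLOCK_SIZE],
                              uint8_t out[BLOCK_SIZE],
                              const uint8_t in[BLOCK_SIZE])
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                out[i] = (uint8_t)(in[i] ^ key[i] ^ 0x5a);
}

/* Big-endian counter increment, mirroring crypto_inc(). */
static void ctr_inc(uint8_t ctr[BLOCK_SIZE])
{
        for (int i = BLOCK_SIZE - 1; i >= 0; i--)
                if (++ctr[i])
                        break;
}

/* CTR: keystream = E(key, counter); dst = src XOR keystream.
 * Only bsize bytes are XORed, so the final block may be partial. */
static void ctr_crypt(const uint8_t key[BLOCK_SIZE], uint8_t ctr[BLOCK_SIZE],
                      uint8_t *dst, const uint8_t *src, size_t len)
{
        uint8_t buf[BLOCK_SIZE];

        while (len > 0) {
                size_t bsize = len < BLOCK_SIZE ? len : BLOCK_SIZE;

                toy_encrypt_block(key, buf, ctr);
                for (size_t i = 0; i < bsize; i++)
                        dst[i] = src[i] ^ buf[i];
                ctr_inc(ctr);

                dst += bsize;
                src += bsize;
                len -= bsize;
        }
}

int main(void)
{
        uint8_t key[BLOCK_SIZE] = { 1, 2, 3, 4 };   /* rest zero */
        uint8_t iv[BLOCK_SIZE] = { 0 };
        uint8_t iv2[BLOCK_SIZE] = { 0 };
        uint8_t msg[20] = "20 bytes of message";    /* 1 block + 4-byte tail */
        uint8_t ct[20], pt[20];

        ctr_crypt(key, iv, ct, msg, sizeof(msg));
        ctr_crypt(key, iv2, pt, ct, sizeof(ct));    /* CTR inverts itself */

        printf("round trip %s\n",
               memcmp(pt, msg, sizeof(msg)) ? "failed" : "ok");
        return 0;
}

Since CTR encryption and decryption are the same keystream XOR, one routine serves both directions, which is why the aes-glue.c hunks below install ctr_encrypt_sync() as both .encrypt and .decrypt.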

arch/arm64/crypto/aes-glue.c (44 additions, 15 deletions)

@@ -10,6 +10,7 @@
 
 #include <asm/neon.h>
 #include <asm/hwcap.h>
+#include <asm/simd.h>
 #include <crypto/aes.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/simd.h>
@@ -19,6 +20,7 @@
 #include <crypto/xts.h>
 
 #include "aes-ce-setkey.h"
+#include "aes-ctr-fallback.h"
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
 #define MODE "ce"
@@ -249,6 +251,17 @@ static int ctr_encrypt(struct skcipher_request *req)
         return err;
 }
 
+static int ctr_encrypt_sync(struct skcipher_request *req)
+{
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+        if (!may_use_simd())
+                return aes_ctr_encrypt_fallback(ctx, req);
+
+        return ctr_encrypt(req);
+}
+
 static int xts_encrypt(struct skcipher_request *req)
 {
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -355,8 +368,8 @@ static struct skcipher_alg aes_algs[] = { {
         .ivsize         = AES_BLOCK_SIZE,
         .chunksize      = AES_BLOCK_SIZE,
         .setkey         = skcipher_aes_setkey,
-        .encrypt        = ctr_encrypt,
-        .decrypt        = ctr_encrypt,
+        .encrypt        = ctr_encrypt_sync,
+        .decrypt        = ctr_encrypt_sync,
 }, {
         .base = {
                 .cra_name               = "__xts(aes)",
@@ -458,11 +471,35 @@ static int mac_init(struct shash_desc *desc)
         return 0;
 }
 
+static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
+                          u8 dg[], int enc_before, int enc_after)
+{
+        int rounds = 6 + ctx->key_length / 4;
+
+        if (may_use_simd()) {
+                kernel_neon_begin();
+                aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
+                               enc_after);
+                kernel_neon_end();
+        } else {
+                if (enc_before)
+                        __aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds);
+
+                while (blocks--) {
+                        crypto_xor(dg, in, AES_BLOCK_SIZE);
+                        in += AES_BLOCK_SIZE;
+
+                        if (blocks || enc_after)
+                                __aes_arm64_encrypt(ctx->key_enc, dg, dg,
+                                                    rounds);
+                }
+        }
+}
+
 static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
 {
         struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
         struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
-        int rounds = 6 + tctx->key.key_length / 4;
 
         while (len > 0) {
                 unsigned int l;
@@ -474,10 +511,8 @@ static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
 
                 len %= AES_BLOCK_SIZE;
 
-                kernel_neon_begin();
-                aes_mac_update(p, tctx->key.key_enc, rounds, blocks,
-                               ctx->dg, (ctx->len != 0), (len != 0));
-                kernel_neon_end();
+                mac_do_update(&tctx->key, p, blocks, ctx->dg,
+                              (ctx->len != 0), (len != 0));
 
                 p += blocks * AES_BLOCK_SIZE;
 
@@ -505,11 +540,8 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
 {
         struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
         struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
-        int rounds = 6 + tctx->key.key_length / 4;
 
-        kernel_neon_begin();
-        aes_mac_update(NULL, tctx->key.key_enc, rounds, 0, ctx->dg, 1, 0);
-        kernel_neon_end();
+        mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
 
         memcpy(out, ctx->dg, AES_BLOCK_SIZE);
 
@@ -520,17 +552,14 @@ static int cmac_final(struct shash_desc *desc, u8 *out)
 {
         struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
         struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
-        int rounds = 6 + tctx->key.key_length / 4;
         u8 *consts = tctx->consts;
 
         if (ctx->len != AES_BLOCK_SIZE) {
                 ctx->dg[ctx->len] ^= 0x80;
                 consts += AES_BLOCK_SIZE;
         }
 
-        kernel_neon_begin();
-        aes_mac_update(consts, tctx->key.key_enc, rounds, 1, ctx->dg, 0, 1);
-        kernel_neon_end();
+        mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);
 
         memcpy(out, ctx->dg, AES_BLOCK_SIZE);
 
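The scalar branch of mac_do_update() above is a standard CBC-MAC update with lazy encryption of the last block. As a hedged illustration of that logic (reusing the toy_encrypt_block() stand-in from the CTR sketch earlier in place of __aes_arm64_encrypt(); this is not kernel code):

/* Hedged illustration of the scalar CBC-MAC path in mac_do_update(). */
static void toy_mac_update(const uint8_t key[BLOCK_SIZE], const uint8_t *in,
                           int blocks, uint8_t dg[BLOCK_SIZE],
                           int enc_before, int enc_after)
{
        /* A previous update left a block XORed into dg but not yet
         * encrypted; finish it before folding in new blocks. */
        if (enc_before)
                toy_encrypt_block(key, dg, dg);

        while (blocks--) {
                /* CBC chaining: fold the next message block into the digest. */
                for (int i = 0; i < BLOCK_SIZE; i++)
                        dg[i] ^= in[i];
                in += BLOCK_SIZE;

                /* Encrypt the digest, except possibly after the final block,
                 * whose encryption may be deferred (enc_after == 0) to the
                 * next update or to the final() step. */
                if (blocks || enc_after)
                        toy_encrypt_block(key, dg, dg);
        }
}

That lazy last-block encryption is why cbcmac_final() calls mac_do_update() with enc_before = 1: it finishes the block a previous update left pending before the digest is copied out.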
