
Commit 674d40a

ardbiesheuvel authored and herbertx committed
crypto: x86/cast5 - drop dependency on glue helper
Replace the glue helper dependency with implementations of ECB and CBC
based on the new CPP macros, which avoid the need for indirect calls.

Acked-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 9ad58b4 commit 674d40a
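
Note: the ECB_WALK_START/ECB_BLOCK/CBC_* macros used in the diff below are defined in arch/x86/crypto/ecb_cbc_helpers.h, which was introduced earlier in this series and is not part of this diff. As a rough sketch of the idea (simplified here for illustration, not the verbatim kernel header), the START/END macros open-code the skcipher walk and the FPU bracketing, while each *_BLOCK invocation expands to a loop whose cipher routine is a compile-time literal, so every call is direct rather than through a function pointer:

/*
 * Illustrative sketch of the helper-macro pattern; details of the
 * real ecb_cbc_helpers.h may differ.  START opens scopes that END
 * closes, and the caller's trailing semicolon completes the
 * kernel_fpu_begin() statement.
 */
#define ECB_WALK_START(req, bsize, fpu_blocks) do {			\
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));	\
	const int __bsize = (bsize);					\
	struct skcipher_walk walk;					\
	int err = skcipher_walk_virt(&walk, (req), false);		\
	while (walk.nbytes > 0) {					\
		unsigned int nbytes = walk.nbytes;			\
		const u8 *src = walk.src.virt.addr;			\
		u8 *dst = walk.dst.virt.addr;				\
		/* fpu_blocks == -1 means the mode never uses SIMD */	\
		bool do_fpu = (fpu_blocks) != -1 &&			\
			      nbytes >= (fpu_blocks) * __bsize;		\
		if (do_fpu)						\
			kernel_fpu_begin()

#define ECB_BLOCK(blocks, func) do {					\
	/* 'func' is a literal at each expansion: a direct call */	\
	while (nbytes >= (blocks) * __bsize) {				\
		(func)(ctx, dst, src);					\
		src += (blocks) * __bsize;				\
		dst += (blocks) * __bsize;				\
		nbytes -= (blocks) * __bsize;				\
	}								\
} while (0)

#define ECB_WALK_END()							\
		if (do_fpu)						\
			kernel_fpu_end();				\
		err = skcipher_walk_done(&walk, nbytes);		\
	}								\
	return err;							\
} while (0)

Expanded in ecb_encrypt() below, the 16-way AVX routine consumes full batches and __cast5_encrypt mops up the tail, with one kernel_fpu_begin()/kernel_fpu_end() pair around each walk step instead of the fpu_begin/fpu_end state machine that glue_helper provided.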

File tree

1 file changed (+17, −167 lines)


arch/x86/crypto/cast5_avx_glue.c

Lines changed: 17 additions & 167 deletions
@@ -6,7 +6,6 @@
  * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
  */
 
-#include <asm/crypto/glue_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/cast5.h>
 #include <crypto/internal/simd.h>
@@ -15,6 +14,8 @@
 #include <linux/module.h>
 #include <linux/types.h>
 
+#include "ecb_cbc_helpers.h"
+
 #define CAST5_PARALLEL_BLOCKS 16
 
 asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst,
@@ -30,186 +31,35 @@ static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 	return cast5_setkey(&tfm->base, key, keylen);
 }
 
-static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
-				   unsigned int nbytes)
-{
-	return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
-			      walk, fpu_enabled, nbytes);
-}
-
-static inline void cast5_fpu_end(bool fpu_enabled)
-{
-	return glue_fpu_end(fpu_enabled);
-}
-
-static int ecb_crypt(struct skcipher_request *req, bool enc)
-{
-	bool fpu_enabled = false;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	const unsigned int bsize = CAST5_BLOCK_SIZE;
-	unsigned int nbytes;
-	void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((nbytes = walk.nbytes)) {
-		u8 *wsrc = walk.src.virt.addr;
-		u8 *wdst = walk.dst.virt.addr;
-
-		fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
-
-		/* Process multi-block batch */
-		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-			fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
-			do {
-				fn(ctx, wdst, wsrc);
-
-				wsrc += bsize * CAST5_PARALLEL_BLOCKS;
-				wdst += bsize * CAST5_PARALLEL_BLOCKS;
-				nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
-			} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
-
-			if (nbytes < bsize)
-				goto done;
-		}
-
-		fn = (enc) ? __cast5_encrypt : __cast5_decrypt;
-
-		/* Handle leftovers */
-		do {
-			fn(ctx, wdst, wsrc);
-
-			wsrc += bsize;
-			wdst += bsize;
-			nbytes -= bsize;
-		} while (nbytes >= bsize);
-
-done:
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-
-	cast5_fpu_end(fpu_enabled);
-	return err;
-}
-
 static int ecb_encrypt(struct skcipher_request *req)
 {
-	return ecb_crypt(req, true);
+	ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
+	ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_enc_16way);
+	ECB_BLOCK(1, __cast5_encrypt);
+	ECB_WALK_END();
 }
 
 static int ecb_decrypt(struct skcipher_request *req)
 {
-	return ecb_crypt(req, false);
+	ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
+	ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_dec_16way);
+	ECB_BLOCK(1, __cast5_decrypt);
+	ECB_WALK_END();
 }
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	const unsigned int bsize = CAST5_BLOCK_SIZE;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	unsigned int nbytes;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((nbytes = walk.nbytes)) {
-		u64 *src = (u64 *)walk.src.virt.addr;
-		u64 *dst = (u64 *)walk.dst.virt.addr;
-		u64 *iv = (u64 *)walk.iv;
-
-		do {
-			*dst = *src ^ *iv;
-			__cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
-			iv = dst;
-			src++;
-			dst++;
-			nbytes -= bsize;
-		} while (nbytes >= bsize);
-
-		*(u64 *)walk.iv = *iv;
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-
-	return err;
-}
-
-static unsigned int __cbc_decrypt(struct cast5_ctx *ctx,
-				  struct skcipher_walk *walk)
-{
-	const unsigned int bsize = CAST5_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u64 *src = (u64 *)walk->src.virt.addr;
-	u64 *dst = (u64 *)walk->dst.virt.addr;
-	u64 last_iv;
-
-	/* Start of the last block. */
-	src += nbytes / bsize - 1;
-	dst += nbytes / bsize - 1;
-
-	last_iv = *src;
-
-	/* Process multi-block batch */
-	if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-		do {
-			nbytes -= bsize * (CAST5_PARALLEL_BLOCKS - 1);
-			src -= CAST5_PARALLEL_BLOCKS - 1;
-			dst -= CAST5_PARALLEL_BLOCKS - 1;
-
-			cast5_cbc_dec_16way(ctx, (u8 *)dst, (u8 *)src);
-
-			nbytes -= bsize;
-			if (nbytes < bsize)
-				goto done;
-
-			*dst ^= *(src - 1);
-			src -= 1;
-			dst -= 1;
-		} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
-	}
-
-	/* Handle leftovers */
-	for (;;) {
-		__cast5_decrypt(ctx, (u8 *)dst, (u8 *)src);
-
-		nbytes -= bsize;
-		if (nbytes < bsize)
-			break;
-
-		*dst ^= *(src - 1);
-		src -= 1;
-		dst -= 1;
-	}
-
-done:
-	*dst ^= *(u64 *)walk->iv;
-	*(u64 *)walk->iv = last_iv;
-
-	return nbytes;
+	CBC_WALK_START(req, CAST5_BLOCK_SIZE, -1);
+	CBC_ENC_BLOCK(__cast5_encrypt);
+	CBC_WALK_END();
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
-	bool fpu_enabled = false;
-	struct skcipher_walk walk;
-	unsigned int nbytes;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((nbytes = walk.nbytes)) {
-		fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
-		nbytes = __cbc_decrypt(ctx, &walk);
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-
-	cast5_fpu_end(fpu_enabled);
-	return err;
+	CBC_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
+	CBC_DEC_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_cbc_dec_16way);
+	CBC_DEC_BLOCK(1, __cast5_decrypt);
+	CBC_WALK_END();
 }
 
 static struct skcipher_alg cast5_algs[] = {
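
A detail worth calling out in the CBC conversion above: cbc_encrypt() passes -1 as the fpu_blocks argument, while cbc_decrypt() passes CAST5_PARALLEL_BLOCKS. The asymmetry is inherent to the mode, not an artifact of the macros. CBC encryption feeds each ciphertext block into the next encryption, so it cannot be batched; CBC decryption only XORs against the previous ciphertext block, which already exists, so 16 blocks can go through the AVX routine at once. A minimal standalone sketch of the two data flows, using a toy one-block cipher as a stand-in for __cast5_encrypt/__cast5_decrypt (names here are hypothetical, for illustration only):

#include <stddef.h>
#include <stdint.h>

enum { BLK = 8 };	/* CAST5's block size in bytes */

/* Toy stand-in for a one-block cipher (XOR with a fixed pad), used
 * only so the example compiles; the kernel code calls
 * __cast5_encrypt/__cast5_decrypt here. */
static void toy_block(uint8_t *dst, const uint8_t *src)
{
	for (int i = 0; i < BLK; i++)
		dst[i] = src[i] ^ 0x5a;
}

/* CBC encrypt: C[i] = E(P[i] ^ C[i-1]), with C[-1] = IV.  Block i
 * cannot start until block i-1 is finished -- inherently serial,
 * hence the -1 (no SIMD batching) in CBC_WALK_START above. */
static void cbc_enc(uint8_t *out, const uint8_t *in, size_t nblocks,
		    const uint8_t *iv)
{
	const uint8_t *prev = iv;

	for (size_t i = 0; i < nblocks; i++) {
		uint8_t tmp[BLK];

		for (int j = 0; j < BLK; j++)
			tmp[j] = in[i * BLK + j] ^ prev[j];
		toy_block(&out[i * BLK], tmp);
		prev = &out[i * BLK];	/* next block waits on this */
	}
}

/* CBC decrypt: P[i] = D(C[i]) ^ C[i-1].  Every D(C[i]) depends only
 * on input that already exists, so a 16-way routine like
 * cast5_cbc_dec_16way can decrypt a whole batch before the XOR pass.
 * (Assumes out != in; the real helpers also cover in-place buffers,
 * which this sketch ignores.) */
static void cbc_dec(uint8_t *out, const uint8_t *in, size_t nblocks,
		    const uint8_t *iv)
{
	for (size_t i = 0; i < nblocks; i++) {
		const uint8_t *prev = i ? &in[(i - 1) * BLK] : iv;

		toy_block(&out[i * BLK], &in[i * BLK]);	/* D(C[i]) */
		for (int j = 0; j < BLK; j++)
			out[i * BLK + j] ^= prev[j];
	}
}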
