Skip to content

Commit

Permalink
Use separate function to get GCM functions
Browse files Browse the repository at this point in the history
Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
(Merged from #18835)

(cherry picked from commit 92c9086)
  • Loading branch information
tmshort authored and t8m committed Nov 23, 2022
1 parent a66a116 commit 6843c1e
Show file tree
Hide file tree
Showing 3 changed files with 110 additions and 103 deletions.
194 changes: 97 additions & 97 deletions crypto/modes/gcm128.c
Expand Up @@ -84,7 +84,7 @@ typedef size_t size_t_aX;
* Value of 1 is not appropriate for performance reasons.
*/

static void gcm_init_4bit(u128 Htable[16], u64 H[2])
static void gcm_init_4bit(u128 Htable[16], const u64 H[2])
{
u128 V;
# if defined(OPENSSL_SMALL_FOOTPRINT)
Expand Down Expand Up @@ -418,120 +418,126 @@ void gcm_gmult_clmul_rv64i_zbb_zbc(u64 Xi[2], const u128 Htable[16]);
# endif
#endif

void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
static void gcm_get_funcs(struct gcm_funcs_st *ctx)
{
DECLARE_IS_ENDIAN;

memset(ctx, 0, sizeof(*ctx));
ctx->block = block;
ctx->key = key;

(*block) (ctx->H.c, ctx->H.c, key);

if (IS_LITTLE_ENDIAN) {
/* H is stored in host byte order */
#ifdef BSWAP8
ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
/* set defaults -- overridden below as needed */
ctx->ginit = gcm_init_4bit;
#if !defined(GHASH_ASM) || defined(INCLUDE_C_GMULT_4BIT)
ctx->gmult = gcm_gmult_4bit;
#else
u8 *p = ctx->H.c;
u64 hi, lo;
hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
ctx->H.u[0] = hi;
ctx->H.u[1] = lo;
ctx->gmult = NULL;
#endif
}

#if defined(GHASH)
# define CTX__GHASH(f) (ctx->ghash = (f))
#if !defined(GHASH_ASM) && !defined(OPENSSL_SMALL_FOOTPRINT)
ctx->ghash = gcm_ghash_4bit;
#else
# define CTX__GHASH(f) (ctx->ghash = NULL)
ctx->ghash = NULL;
#endif
#if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)

#if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
/* x86_64 */
if (OPENSSL_ia32cap_P[1] & (1 << 1)) { /* check PCLMULQDQ bit */
if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
gcm_init_avx(ctx->Htable, ctx->H.u);
ctx->ginit = gcm_init_avx;
ctx->gmult = gcm_gmult_avx;
CTX__GHASH(gcm_ghash_avx);
ctx->ghash = gcm_ghash_avx;
} else {
gcm_init_clmul(ctx->Htable, ctx->H.u);
ctx->ginit = gcm_init_clmul;
ctx->gmult = gcm_gmult_clmul;
CTX__GHASH(gcm_ghash_clmul);
ctx->ghash = gcm_ghash_clmul;
}
return;
}
# endif
gcm_init_4bit(ctx->Htable, ctx->H.u);
# if defined(GHASH_ASM_X86) /* x86 only */
# if defined(OPENSSL_IA32_SSE2)
# if defined(GHASH_ASM_X86)
/* x86 only */
# if defined(OPENSSL_IA32_SSE2)
if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */
ctx->gmult = gcm_gmult_4bit_mmx;
ctx->ghash = gcm_ghash_4bit_mmx;
return;
}
# else
if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */
# endif
ctx->gmult = gcm_gmult_4bit_mmx;
CTX__GHASH(gcm_ghash_4bit_mmx);
} else {
ctx->gmult = gcm_gmult_4bit_x86;
CTX__GHASH(gcm_ghash_4bit_x86);
ctx->ghash = gcm_ghash_4bit_mmx;
return;
}
# else
ctx->gmult = gcm_gmult_4bit;
CTX__GHASH(gcm_ghash_4bit);
# endif
ctx->gmult = gcm_gmult_4bit_x86;
ctx->ghash = gcm_ghash_4bit_x86;
return;
# endif
#elif defined(GHASH_ASM_ARM)
#elif defined(GHASH_ASM_ARM)
/* ARM */
# ifdef PMULL_CAPABLE
if (PMULL_CAPABLE) {
gcm_init_v8(ctx->Htable, ctx->H.u);
ctx->ginit = (gcm_init_fn)gcm_init_v8;
ctx->gmult = gcm_gmult_v8;
CTX__GHASH(gcm_ghash_v8);
} else
# endif
# ifdef NEON_CAPABLE
ctx->ghash = gcm_ghash_v8;
}
# elif defined(NEON_CAPABLE)
if (NEON_CAPABLE) {
gcm_init_neon(ctx->Htable, ctx->H.u);
ctx->ginit = gcm_init_neon;
ctx->gmult = gcm_gmult_neon;
CTX__GHASH(gcm_ghash_neon);
} else
# endif
{
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
CTX__GHASH(gcm_ghash_4bit);
ctx->ghash = gcm_ghash_neon;
}
#elif defined(GHASH_ASM_SPARC)
# endif
return;
#elif defined(GHASH_ASM_SPARC)
/* SPARC */
if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
gcm_init_vis3(ctx->Htable, ctx->H.u);
ctx->ginit = gcm_init_vis3;
ctx->gmult = gcm_gmult_vis3;
CTX__GHASH(gcm_ghash_vis3);
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
CTX__GHASH(gcm_ghash_4bit);
ctx->ghash = gcm_ghash_vis3;
}
#elif defined(GHASH_ASM_PPC)
return;
#elif defined(GHASH_ASM_PPC)
/* PowerPC */
if (OPENSSL_ppccap_P & PPC_CRYPTO207) {
gcm_init_p8(ctx->Htable, ctx->H.u);
ctx->ginit = gcm_init_p8;
ctx->gmult = gcm_gmult_p8;
CTX__GHASH(gcm_ghash_p8);
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
CTX__GHASH(gcm_ghash_4bit);
ctx->ghash = gcm_ghash_p8;
}
#elif defined(GHASH_ASM_RISCV) && __riscv_xlen == 64
return;
#elif defined(GHASH_ASM_RISCV) && __riscv_xlen == 64
/* RISCV */
ctx->ghash = NULL;
if (RISCV_HAS_ZBB() && RISCV_HAS_ZBC()) {
gcm_init_clmul_rv64i_zbb_zbc(ctx->Htable, ctx->H.u);
ctx->ginit = gcm_init_clmul_rv64i_zbb_zbc;
ctx->gmult = gcm_gmult_clmul_rv64i_zbb_zbc;
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
}
return;
#endif
}

void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
{
DECLARE_IS_ENDIAN;

memset(ctx, 0, sizeof(*ctx));
ctx->block = block;
ctx->key = key;

(*block) (ctx->H.c, ctx->H.c, key);

if (IS_LITTLE_ENDIAN) {
/* H is stored in host byte order */
#ifdef BSWAP8
ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
#else
gcm_init_4bit(ctx->Htable, ctx->H.u);
u8 *p = ctx->H.c;
u64 hi, lo;
hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
ctx->H.u[0] = hi;
ctx->H.u[1] = lo;
#endif
#undef CTX__GHASH
}

gcm_get_funcs(&ctx->funcs);
ctx->funcs.ginit(ctx->Htable, ctx->H.u);
}

void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
Expand All @@ -540,7 +546,7 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
DECLARE_IS_ENDIAN;
unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
#endif

ctx->len.u[0] = 0; /* AAD length */
Expand Down Expand Up @@ -631,10 +637,9 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
unsigned int n;
u64 alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
# ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
# endif
#endif

Expand Down Expand Up @@ -696,10 +701,9 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
block128_f block = ctx->block;
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
# endif
#endif

Expand Down Expand Up @@ -928,10 +932,9 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
block128_f block = ctx->block;
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
# endif
#endif

Expand Down Expand Up @@ -1170,10 +1173,9 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
# ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
# ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
# endif
# endif

Expand Down Expand Up @@ -1331,10 +1333,9 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
# ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
# ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
# endif
# endif

Expand Down Expand Up @@ -1493,10 +1494,9 @@ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
u64 alen = ctx->len.u[0] << 3;
u64 clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult;
# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash;
# endif
#endif

Expand Down
6 changes: 3 additions & 3 deletions include/crypto/aes_platform.h
Expand Up @@ -92,7 +92,7 @@ size_t ppc_aes_gcm_decrypt_wrap(const unsigned char *in, unsigned char *out,
# define AES_gcm_encrypt ppc_aes_gcm_encrypt_wrap
# define AES_gcm_decrypt ppc_aes_gcm_decrypt_wrap
# define AES_GCM_ASM(gctx) ((gctx)->ctr==aes_p8_ctr32_encrypt_blocks && \
(gctx)->gcm.ghash==gcm_ghash_p8)
(gctx)->gcm.funcs.ghash==gcm_ghash_p8)
void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len);
# endif /* PPC */

Expand Down Expand Up @@ -124,7 +124,7 @@ void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len);
# define AES_gcm_encrypt armv8_aes_gcm_encrypt
# define AES_gcm_decrypt armv8_aes_gcm_decrypt
# define AES_GCM_ASM(gctx) ((gctx)->ctr==aes_v8_ctr32_encrypt_blocks && \
(gctx)->gcm.ghash==gcm_ghash_v8)
(gctx)->gcm.funcs.ghash==gcm_ghash_v8)
size_t aes_gcm_enc_128_kernel(const uint8_t * plaintext, uint64_t plaintext_length, uint8_t * ciphertext,
uint64_t *Xi, unsigned char ivec[16], const void *key);
size_t aes_gcm_enc_192_kernel(const uint8_t * plaintext, uint64_t plaintext_length, uint8_t * ciphertext,
Expand Down Expand Up @@ -258,7 +258,7 @@ void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *in, size_t len);
# define AES_gcm_encrypt aesni_gcm_encrypt
# define AES_gcm_decrypt aesni_gcm_decrypt
# define AES_GCM_ASM(ctx) (ctx->ctr == aesni_ctr32_encrypt_blocks && \
ctx->gcm.ghash == gcm_ghash_avx)
ctx->gcm.funcs.ghash == gcm_ghash_avx)
# endif


Expand Down
13 changes: 10 additions & 3 deletions include/crypto/modes.h
Expand Up @@ -107,6 +107,15 @@ _asm mov eax, val _asm bswap eax}
u64 hi, lo;
} u128;

/*
 * Function-pointer types for the GCM primitive implementations that are
 * selected at runtime (platform/CPU-feature dependent).
 */
/* Derive the precomputed table Htable from the hash subkey H. */
typedef void (*gcm_init_fn)(u128 Htable[16], const u64 H[2]);
/* Fold |len| bytes at |inp| into the GHASH accumulator Xi using Htable. */
typedef void (*gcm_ghash_fn)(u64 Xi[2], const u128 Htable[16], const u8 *inp, size_t len);
/* Single-block GHASH multiplication step on Xi using Htable. */
typedef void (*gcm_gmult_fn)(u64 Xi[2], const u128 Htable[16]);
/*
 * Dispatch table of GCM primitives, filled in once at context setup.
 * NOTE(review): ghash may legitimately be NULL on some configurations
 * (callers fall back to gmult-based processing in that case).
 */
struct gcm_funcs_st {
gcm_init_fn ginit;
gcm_ghash_fn ghash;
gcm_gmult_fn gmult;
};

struct gcm128_context {
/* Following 6 names follow names in GCM specification */
union {
Expand All @@ -120,9 +129,7 @@ struct gcm128_context {
* used in some assembler modules, i.e. don't change the order!
*/
u128 Htable[16];
void (*gmult) (u64 Xi[2], const u128 Htable[16]);
void (*ghash) (u64 Xi[2], const u128 Htable[16], const u8 *inp,
size_t len);
struct gcm_funcs_st funcs;
unsigned int mres, ares;
block128_f block;
void *key;
Expand Down

0 comments on commit 6843c1e

Please sign in to comment.