Commit 3645eb0

Update for Zvkb extension.
https://github.com/riscv/riscv-crypto/blob/c8ddeb7e64a3444dda0438316af1238aeed72041/doc/vector/riscv-crypto-vector-zvkb.adoc
Create `RISCV_HAS_ZVKB()` macro.
Use Zvkb for SM4 instead of Zvbb.
Use Zvkb for GHASH instead of Zvbb.
We could just use Zvbb's subset `Zvkb` for flexibility.

Signed-off-by: Jerry Shih <jerry.shih@sifive.com>
Signed-off-by: Phoebe Chen <phoebe.chen@sifive.com>

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Hugo Landau <hlandau@openssl.org>
(Merged from #21923)
JerryShih authored and hlandau committed Oct 26, 2023
1 parent d26d01e commit 3645eb0
Showing 12 changed files with 79 additions and 59 deletions.
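
Before the per-file diffs, the core idea in miniature: Zvkb is the cryptography-oriented subset of Zvbb, so any hardware that advertises Zvbb can also execute Zvkb code, and gating the fast paths on Zvkb alone widens the set of machines that can take them. A minimal C sketch of that fallback logic, assuming hypothetical `hw_*` flags and a `zvkb_capable` helper (the real commit expresses this with the `RISCV_HAS_*()` macros shown in the riscv_arch.h hunk below):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical capability flags; OpenSSL reads the real ones from a
     * capability bit vector via the RISCV_HAS_*() macros. */
    static bool hw_zvbb = false;  /* full Vector Bit-manipulation extension */
    static bool hw_zvkb = true;   /* its cryptography subset */

    /* Zvbb is a superset of Zvkb, so either flag satisfies a Zvkb need. */
    static bool zvkb_capable(void)
    {
        return hw_zvbb || hw_zvkb;
    }

    int main(void)
    {
        printf("take Zvkb-accelerated path: %s\n",
               zvkb_capable() ? "yes" : "no");
        return 0;
    }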
crypto/modes/asm/{ghash-riscv64-zvbb-zvbc.pl → ghash-riscv64-zvkb-zvbc.pl}
@@ -35,9 +35,9 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - Vector Carryless Multiplication ('Zvbc')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector Carryless Multiplication extension ('Zvbc')

use strict;
use warnings;
@@ -59,20 +59,20 @@
___

################################################################################
-# void gcm_init_rv64i_zvbb_zvbc(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 H[2]);
#
# input: H: 128-bit H - secret parameter E(K, 0^128)
-# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvbb_zvbc and
-#                 gcm_ghash_rv64i_zvbb_zvbc
+# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvkb_zvbc and
+#                 gcm_ghash_rv64i_zvkb_zvbc
{
my ($Htable,$H,$TMP0,$TMP1,$TMP2) = ("a0","a1","t0","t1","t2");
my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");

$code .= <<___;
.p2align 3
-.globl gcm_init_rv64i_zvbb_zvbc
-.type gcm_init_rv64i_zvbb_zvbc,\@function
-gcm_init_rv64i_zvbb_zvbc:
+.globl gcm_init_rv64i_zvkb_zvbc
+.type gcm_init_rv64i_zvkb_zvbc,\@function
+gcm_init_rv64i_zvkb_zvbc:
# Load/store data in reverse order.
# This is needed as a part of endianness swap.
add $H, $H, 8
@@ -110,12 +110,12 @@
@{[vse64_v $V1, $Htable]} # vse64.v v1, (a0)
ret
-.size gcm_init_rv64i_zvbb_zvbc,.-gcm_init_rv64i_zvbb_zvbc
+.size gcm_init_rv64i_zvkb_zvbc,.-gcm_init_rv64i_zvkb_zvbc
___
}

################################################################################
-# void gcm_gmult_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16]);
+# void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
#
# input: Xi: current hash value
# Htable: preprocessed H
@@ -127,9 +127,9 @@
$code .= <<___;
.text
.p2align 3
-.globl gcm_gmult_rv64i_zvbb_zvbc
-.type gcm_gmult_rv64i_zvbb_zvbc,\@function
-gcm_gmult_rv64i_zvbb_zvbc:
+.globl gcm_gmult_rv64i_zvkb_zvbc
+.type gcm_gmult_rv64i_zvkb_zvbc,\@function
+gcm_gmult_rv64i_zvkb_zvbc:
ld $TMP0, ($Htable)
ld $TMP1, 8($Htable)
li $TMP2, 63
@@ -228,12 +228,12 @@
@{[vrev8_v $V2, $V2]} # vrev8.v v2, v2
@{[vsse64_v $V2, $Xi, $TMP4]} # vsse64.v v2, (a0), t4
ret
-.size gcm_gmult_rv64i_zvbb_zvbc,.-gcm_gmult_rv64i_zvbb_zvbc
+.size gcm_gmult_rv64i_zvkb_zvbc,.-gcm_gmult_rv64i_zvkb_zvbc
___
}

################################################################################
-# void gcm_ghash_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16],
+# void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
#                                const u8 *inp, size_t len);
#
# input: Xi: current hash value
@@ -247,9 +247,9 @@

$code .= <<___;
.p2align 3
-.globl gcm_ghash_rv64i_zvbb_zvbc
-.type gcm_ghash_rv64i_zvbb_zvbc,\@function
-gcm_ghash_rv64i_zvbb_zvbc:
+.globl gcm_ghash_rv64i_zvkb_zvbc
+.type gcm_ghash_rv64i_zvkb_zvbc,\@function
+gcm_ghash_rv64i_zvkb_zvbc:
ld $TMP0, ($Htable)
ld $TMP1, 8($Htable)
li $TMP2, 63
@@ -361,7 +361,7 @@
@{[vsse64_v $V5, $Xi, $M8]} # vsse64.v v2, (a0), t4
ret
-.size gcm_ghash_rv64i_zvbb_zvbc,.-gcm_ghash_rv64i_zvbb_zvbc
+.size gcm_ghash_rv64i_zvkb_zvbc,.-gcm_ghash_rv64i_zvkb_zvbc
___
}

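For reference, the three routines above implement the standard GHASH recurrence from GCM (this is textbook material, not something this commit changes). With B_1, ..., B_n the 128-bit input blocks, in LaTeX notation:

    X_0 = 0^{128}, \qquad
    X_i = (X_{i-1} \oplus B_i) \cdot H \quad \text{in } \mathrm{GF}(2^{128}), \qquad
    H = E_K(0^{128})

gcm_init precomputes the Htable form of H, gcm_gmult performs one multiplication by H, and gcm_ghash folds an entire buffer through the recurrence.
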
19 changes: 11 additions & 8 deletions crypto/modes/asm/ghash-riscv64-zvkg.pl
@@ -35,8 +35,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - RISC-V vector crypto GHASH extension ('Zvkg')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector GCM/GMAC extension ('Zvkg')
+#
+# Optional:
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')

use strict;
use warnings;
@@ -59,7 +62,7 @@

################################################################################
# void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 H[2]);
-# void gcm_init_rv64i_zvkg_zvbb(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 H[2]);
#
# input: H: 128-bit H - secret parameter E(K, 0^128)
# output: Htable: Copy of secret parameter (in normalized byte order)
@@ -88,15 +91,15 @@

$code .= <<___;
.p2align 3
-.globl gcm_init_rv64i_zvkg_zvbb
-.type gcm_init_rv64i_zvkg_zvbb,\@function
-gcm_init_rv64i_zvkg_zvbb:
-@{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu
+.globl gcm_init_rv64i_zvkg_zvkb
+.type gcm_init_rv64i_zvkg_zvkb,\@function
+gcm_init_rv64i_zvkg_zvkb:
+@{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, ta, ma
@{[vle64_v $V0, $H]} # vle64.v v0, (a1)
@{[vrev8_v $V0, $V0]} # vrev8.v v0, v0
@{[vse64_v $V0, $Htable]} # vse64.v v0, (a0)
ret
-.size gcm_init_rv64i_zvkg_zvbb,.-gcm_init_rv64i_zvkg_zvbb
+.size gcm_init_rv64i_zvkg_zvkb,.-gcm_init_rv64i_zvkg_zvkb
___
}

4 changes: 2 additions & 2 deletions crypto/modes/build.info
@@ -43,7 +43,7 @@ IF[{- !$disabled{asm} -}]
$MODESASM_c64xplus=ghash-c64xplus.s
$MODESDEF_c64xplus=GHASH_ASM

-$MODESASM_riscv64=ghash-riscv64.s ghash-riscv64-zvbb-zvbc.s ghash-riscv64-zvkg.s
+$MODESASM_riscv64=ghash-riscv64.s ghash-riscv64-zvkb-zvbc.s ghash-riscv64-zvkg.s
$MODESDEF_riscv64=GHASH_ASM

# Now that we have defined all the arch specific variables, use the
@@ -91,5 +91,5 @@ GENERATE[ghash-s390x.S]=asm/ghash-s390x.pl
INCLUDE[ghash-s390x.o]=..
GENERATE[ghash-c64xplus.S]=asm/ghash-c64xplus.pl
GENERATE[ghash-riscv64.s]=asm/ghash-riscv64.pl
-GENERATE[ghash-riscv64-zvbb-zvbc.s]=asm/ghash-riscv64-zvbb-zvbc.pl
+GENERATE[ghash-riscv64-zvkb-zvbc.s]=asm/ghash-riscv64-zvkb-zvbc.pl
GENERATE[ghash-riscv64-zvkg.s]=asm/ghash-riscv64-zvkg.pl
22 changes: 11 additions & 11 deletions crypto/modes/gcm128.c
@@ -413,14 +413,14 @@ void gcm_ghash_rv64i_zbc(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
void gcm_ghash_rv64i_zbc__zbkb(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
-/* Zvbb/Zvbc (vector crypto with vclmul) based routines. */
-void gcm_init_rv64i_zvbb_zvbc(u128 Htable[16], const u64 Xi[2]);
-void gcm_gmult_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16],
+/* Zvkb/Zvbc (vector crypto with vclmul) based routines. */
+void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 Xi[2]);
+void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
                               const u8 *inp, size_t len);
/* Zvkg (vector crypto with vgmul.vv and vghsh.vv). */
void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 Xi[2]);
-void gcm_init_rv64i_zvkg_zvbb(u128 Htable[16], const u64 Xi[2]);
+void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_rv64i_zvkg(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zvkg(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
@@ -524,16 +524,16 @@ static void gcm_get_funcs(struct gcm_funcs_st *ctx)
    ctx->ghash = gcm_ghash_4bit;

    if (RISCV_HAS_ZVKG() && riscv_vlen() >= 128) {
-        if (RISCV_HAS_ZVBB())
-            ctx->ginit = gcm_init_rv64i_zvkg_zvbb;
+        if (RISCV_HAS_ZVKB())
+            ctx->ginit = gcm_init_rv64i_zvkg_zvkb;
        else
            ctx->ginit = gcm_init_rv64i_zvkg;
        ctx->gmult = gcm_gmult_rv64i_zvkg;
        ctx->ghash = gcm_ghash_rv64i_zvkg;
-    } else if (RISCV_HAS_ZVBB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
-        ctx->ginit = gcm_init_rv64i_zvbb_zvbc;
-        ctx->gmult = gcm_gmult_rv64i_zvbb_zvbc;
-        ctx->ghash = gcm_ghash_rv64i_zvbb_zvbc;
+    } else if (RISCV_HAS_ZVKB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
+        ctx->ginit = gcm_init_rv64i_zvkb_zvbc;
+        ctx->gmult = gcm_gmult_rv64i_zvkb_zvbc;
+        ctx->ghash = gcm_ghash_rv64i_zvkb_zvbc;
    } else if (RISCV_HAS_ZBC()) {
        if (RISCV_HAS_ZBKB()) {
            ctx->ginit = gcm_init_rv64i_zbc__zbkb;
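The branch order in gcm_get_funcs() encodes a priority: dedicated Zvkg GHASH instructions first, then vector carryless multiply (Zvkb + Zvbc), then scalar Zbc, then the portable table-driven code. A self-contained C sketch of that chain, with the selection reduced to returning the chosen routine's name (the `pick_ghash` helper and boolean parameters are invented for this sketch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the branch order of gcm_get_funcs() in gcm128.c. */
    static const char *pick_ghash(bool zvkg, bool zvkb, bool zvbc, bool zbc,
                                  unsigned vlen)
    {
        if (zvkg && vlen >= 128)
            return "gcm_ghash_rv64i_zvkg";      /* dedicated GHASH instructions */
        if (zvkb && zvbc && vlen >= 128)
            return "gcm_ghash_rv64i_zvkb_zvbc"; /* vector carryless multiply */
        if (zbc)
            return "gcm_ghash_rv64i_zbc";       /* scalar carryless multiply */
        return "gcm_ghash_4bit";                /* portable table-driven code */
    }

    int main(void)
    {
        /* A machine with Zvkb+Zvbc but no Zvkg lands on the vclmul path. */
        printf("selected: %s\n", pick_ghash(false, true, true, true, 128));
        return 0;
    }
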
13 changes: 12 additions & 1 deletion crypto/perlasm/riscv.pm
@@ -746,7 +746,18 @@ sub vxor_vv {

# Vector crypto instructions

-## Zvbb instructions
+## Zvbb and Zvkb instructions
+##
+## vandn (also in zvkb)
+## vbrev
+## vbrev8 (also in zvkb)
+## vrev8 (also in zvkb)
+## vclz
+## vctz
+## vcpop
+## vrol (also in zvkb)
+## vror (also in zvkb)
+## vwsll

sub vrev8_v {
# vrev8.v vd, vs2, vm
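Several of the instructions listed above exist precisely for the byte-order fixups these perlasm files need; vrev8, for example, reverses the bytes within each vector element, which is how the GHASH code converts between memory order and the big-endian order the algorithm is defined in. A scalar C model of the 64-bit case (illustrative only; the real instruction operates element-wise across a whole vector register):

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar model of vrev8 on one 64-bit element: reverse its bytes by
     * swapping adjacent bytes, then 16-bit pairs, then 32-bit halves. */
    static uint64_t rev8(uint64_t x)
    {
        x = ((x & 0x00FF00FF00FF00FFULL) << 8)  | ((x >> 8)  & 0x00FF00FF00FF00FFULL);
        x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x >> 16) & 0x0000FFFF0000FFFFULL);
        return (x << 32) | (x >> 32);
    }

    int main(void)
    {
        /* Prints 0807060504030201: a full byte reversal. */
        printf("%016llx\n", (unsigned long long)rev8(0x0102030405060708ULL));
        return 0;
    }
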
6 changes: 3 additions & 3 deletions crypto/sm4/asm/sm4-riscv64-zvksed.pl
@@ -36,9 +36,9 @@

# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - Vector ShangMi Suite: SM4 Block Cipher ('Zvksed')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector SM4 Block Cipher extension ('Zvksed')

use strict;
use warnings;
13 changes: 7 additions & 6 deletions include/crypto/riscv_arch.def
@@ -35,12 +35,13 @@ RISCV_DEFINE_CAP(ZKT, 0, 13)
RISCV_DEFINE_CAP(V, 0, 14)
RISCV_DEFINE_CAP(ZVBB, 0, 15)
RISCV_DEFINE_CAP(ZVBC, 0, 16)
-RISCV_DEFINE_CAP(ZVKG, 0, 17)
-RISCV_DEFINE_CAP(ZVKNED, 0, 18)
-RISCV_DEFINE_CAP(ZVKNHA, 0, 19)
-RISCV_DEFINE_CAP(ZVKNHB, 0, 20)
-RISCV_DEFINE_CAP(ZVKSED, 0, 21)
-RISCV_DEFINE_CAP(ZVKSH, 0, 22)
+RISCV_DEFINE_CAP(ZVKB, 0, 17)
+RISCV_DEFINE_CAP(ZVKG, 0, 18)
+RISCV_DEFINE_CAP(ZVKNED, 0, 19)
+RISCV_DEFINE_CAP(ZVKNHA, 0, 20)
+RISCV_DEFINE_CAP(ZVKNHB, 0, 21)
+RISCV_DEFINE_CAP(ZVKSED, 0, 22)
+RISCV_DEFINE_CAP(ZVKSH, 0, 23)

/*
* In the future ...
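riscv_arch.def is an X-macro table: each RISCV_DEFINE_CAP(NAME, index, bit) row is expanded several times under different definitions of the macro to generate the capability constants and predicates, which is why inserting ZVKB mid-table renumbers every later bit. A toy version of the pattern (the CAP/CAP_TABLE/HAS_* names are invented for this sketch, not OpenSSL's):

    #include <stdio.h>

    /* X-macro table standing in for riscv_arch.def rows. */
    #define CAP_TABLE \
        CAP(ZVKB, 0, 17) \
        CAP(ZVKG, 0, 18)

    /* Expansion 1: bit positions. */
    #define CAP(name, word, bit) CAP_BIT_##name = (bit),
    enum { CAP_TABLE };
    #undef CAP

    /* Expansion 2: predicates over a capability bit vector. */
    static unsigned long caps[1] = { 1UL << 17 }; /* pretend only Zvkb is set */
    #define CAP(name, word, bit) \
        static int HAS_##name(void) { return (int)((caps[word] >> (bit)) & 1); }
    CAP_TABLE
    #undef CAP

    int main(void)
    {
        printf("ZVKB=%d ZVKG=%d (ZVKG is bit %d)\n",
               HAS_ZVKB(), HAS_ZVKG(), CAP_BIT_ZVKG);
        return 0;
    }
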
13 changes: 9 additions & 4 deletions include/crypto/riscv_arch.h
@@ -60,10 +60,15 @@ static const size_t kRISCVNumCaps =
#define RISCV_HAS_ZBB_AND_ZBC() (RISCV_HAS_ZBB() && RISCV_HAS_ZBC())
#define RISCV_HAS_ZBKB_AND_ZKND_AND_ZKNE() (RISCV_HAS_ZBKB() && RISCV_HAS_ZKND() && RISCV_HAS_ZKNE())
#define RISCV_HAS_ZKND_AND_ZKNE() (RISCV_HAS_ZKND() && RISCV_HAS_ZKNE())
-#define RISCV_HAS_ZVBB_AND_ZVKNHA() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHA())
-#define RISCV_HAS_ZVBB_AND_ZVKNHB() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHB())
-#define RISCV_HAS_ZVBB_AND_ZVKSED() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSED())
-#define RISCV_HAS_ZVBB_AND_ZVKSH() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSH())
+/*
+ * Zvbb is a superset of the Zvkb extension. This macro makes
+ * `RISCV_HAS_ZVKB()` expand to `RISCV_HAS_ZVBB() || RISCV_HAS_ZVKB()`,
+ * so either extension satisfies a Zvkb requirement.
+ */
+#define RISCV_HAS_ZVKB() (RISCV_HAS_ZVBB() || RISCV_HAS_ZVKB())
+#define RISCV_HAS_ZVKB_AND_ZVKNHA() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKNHA())
+#define RISCV_HAS_ZVKB_AND_ZVKNHB() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKNHB())
+#define RISCV_HAS_ZVKB_AND_ZVKSED() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKSED())
+#define RISCV_HAS_ZVKB_AND_ZVKSH() (RISCV_HAS_ZVKB() && RISCV_HAS_ZVKSH())

/*
* Get the size of a vector register in bits (VLEN).
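The new RISCV_HAS_ZVKB() looks self-referential, but it is well-defined C: while a function-like macro is being expanded, its own name is not expanded again, so the inner RISCV_HAS_ZVKB() binds to the underlying capability check. A minimal model of the trick, assuming stub functions in place of OpenSSL's generated capability tests:

    #include <stdio.h>

    /* Stubs standing in for the generated capability checks: pretend the
     * hardware has Zvkb but not the full Zvbb. */
    static int RISCV_HAS_ZVBB(void) { return 0; }
    static int RISCV_HAS_ZVKB(void) { return 1; }

    /* Overlay: from here on, RISCV_HAS_ZVKB() means "Zvbb or Zvkb".
     * The inner RISCV_HAS_ZVKB() is not re-expanded during rescanning
     * (C99 6.10.3.4), so it calls the stub above rather than recursing. */
    #define RISCV_HAS_ZVKB() (RISCV_HAS_ZVBB() || RISCV_HAS_ZVKB())

    int main(void)
    {
        printf("Zvkb usable: %d\n", RISCV_HAS_ZVKB()); /* prints 1 */
        return 0;
    }
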
@@ -34,7 +34,7 @@ static const PROV_CCM_HW rv64i_zvksed_sm4_ccm = {

const PROV_CCM_HW *ossl_prov_sm4_hw_ccm(size_t keybits)
{
-    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+    if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128)
return &rv64i_zvksed_sm4_ccm;
else
return &ccm_sm4;
@@ -35,7 +35,7 @@ static const PROV_GCM_HW rv64i_zvksed_sm4_gcm = {

const PROV_GCM_HW *ossl_prov_sm4_hw_gcm(size_t keybits)
{
-    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+    if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128)
return &rv64i_zvksed_sm4_gcm;
else
return &sm4_gcm;
2 changes: 1 addition & 1 deletion providers/implementations/ciphers/cipher_sm4_hw_rv64i.inc
@@ -48,5 +48,5 @@ static const PROV_CIPHER_HW rv64i_zvksed_sm4_##mode = { \
cipher_hw_sm4_copyctx \
};
#define PROV_CIPHER_HW_select(mode) \
-    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128) \
+    if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128) \
return &rv64i_zvksed_sm4_##mode;
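
cipher_sm4_hw_rv64i.inc leans on token pasting: the mode name passed to the macro is glued into the identifier of the per-mode table and into the selector that returns it. A toy version of the pattern (the DEFINE_HW/SELECT_HW names and the `have_zvkb_zvksed` flag are invented for this sketch):

    #include <stdio.h>

    typedef struct { const char *name; } HW;

    static int have_zvkb_zvksed = 1; /* stand-in for the RISCV_HAS_*() check */

    /* One macro stamps out a per-mode table, another returns it when the
     * hardware check passes -- mirroring PROV_CIPHER_HW_select(mode). */
    #define DEFINE_HW(mode) static const HW rv64i_sm4_##mode = { #mode };
    #define SELECT_HW(mode) \
        if (have_zvkb_zvksed) return &rv64i_sm4_##mode;

    DEFINE_HW(cbc)

    static const HW *pick_cbc(void)
    {
        SELECT_HW(cbc)
        return NULL; /* caller falls back to the generic implementation */
    }

    int main(void)
    {
        const HW *hw = pick_cbc();
        printf("%s\n", hw ? hw->name : "generic");
        return 0;
    }
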
@@ -36,7 +36,7 @@ static const PROV_CIPHER_HW rv64i_zvksed_sm4_xts = {

const PROV_CIPHER_HW *ossl_prov_cipher_hw_sm4_xts(size_t keybits)
{
-    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+    if (RISCV_HAS_ZVKB_AND_ZVKSED() && riscv_vlen() >= 128)
return &rv64i_zvksed_sm4_xts;
else
return &sm4_generic_xts;
