UPSTREAM: crypto: riscv - add vector crypto accelerated SM3
Add an implementation of SM3 using the Zvksh extension.  The assembly
code is derived from OpenSSL code (openssl/openssl#21923) that was
dual-licensed so that it could be reused in the kernel.  Nevertheless,
the assembly has been significantly reworked for integration with the
kernel, for example by using a regular .S file instead of the so-called
perlasm, using the assembler instead of bare '.inst', and greatly
reducing code duplication.
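
As a minimal sketch of that last point (not taken from the commit message; it mirrors the .S file below), declaring the extensions to the assembler lets the instructions be written by mnemonic instead of as hand-encoded '.inst' words:

.option arch, +zvksh, +zvkb
	vsm3c.vi v0, v4, 0	// the assembler emits the instruction encoding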

Co-developed-by: Christoph Müllner <christoph.muellner@vrull.eu>
Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>
Co-developed-by: Heiko Stuebner <heiko.stuebner@vrull.eu>
Signed-off-by: Heiko Stuebner <heiko.stuebner@vrull.eu>
Signed-off-by: Jerry Shih <jerry.shih@sifive.com>
Co-developed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20240122002024.27477-10-ebiggers@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>

(cherry picked from commit 563a525)
Bug: 332317531
Test: For whole topic: booted kernel in QEMU and ran crypto self-tests
Change-Id: Ie6b5cc3e84551b44807e6b962f398c23b7500edf
Signed-off-by: Eric Biggers <ebiggers@google.com>
JerryShih authored and Treehugger Robot committed Apr 10, 2024
1 parent db08176 commit 1f17ffb
Showing 4 changed files with 250 additions and 0 deletions.
12 changes: 12 additions & 0 deletions arch/riscv/crypto/Kconfig
@@ -61,4 +61,16 @@ config CRYPTO_SHA512_RISCV64
	  - Zvknhb vector crypto extension
	  - Zvkb vector crypto extension

config CRYPTO_SM3_RISCV64
	tristate "Hash functions: SM3 (ShangMi 3)"
	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
	select CRYPTO_HASH
	select CRYPTO_SM3
	help
	  SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)

	  Architecture: riscv64 using:
	  - Zvksh vector crypto extension
	  - Zvkb vector crypto extension

endmenu
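
As a sketch of how the new option would be turned on (this fragment is not part of the diff), a kernel config enabling the driver as a module, assuming the 64BIT, RISCV_ISA_V, and TOOLCHAIN_HAS_VECTOR_CRYPTO dependencies are satisfied, would contain:

CONFIG_CRYPTO_SM3_RISCV64=m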
3 changes: 3 additions & 0 deletions arch/riscv/crypto/Makefile
@@ -15,3 +15,6 @@ sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o

obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o

obj-$(CONFIG_CRYPTO_SM3_RISCV64) += sm3-riscv64.o
sm3-riscv64-y := sm3-riscv64-glue.o sm3-riscv64-zvksh-zvkb.o
112 changes: 112 additions & 0 deletions arch/riscv/crypto/sm3-riscv64-glue.c
@@ -0,0 +1,112 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SM3 using the RISC-V vector crypto extensions
 *
 * Copyright (C) 2023 VRULL GmbH
 * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
 *
 * Copyright (C) 2023 SiFive, Inc.
 * Author: Jerry Shih <jerry.shih@sifive.com>
 */

#include <asm/simd.h>
#include <asm/vector.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sm3_base.h>
#include <linux/linkage.h>
#include <linux/module.h>

/*
 * Note: the asm function only uses the 'state' field of struct sm3_state.
 * It is assumed to be the first field.
 */
asmlinkage void sm3_transform_zvksh_zvkb(
	struct sm3_state *state, const u8 *data, int num_blocks);

static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	/*
	 * Ensure struct sm3_state begins directly with the SM3
	 * 256-bit internal state, as this is what the asm function expects.
	 */
	BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);

	if (crypto_simd_usable()) {
		kernel_vector_begin();
		sm3_base_do_update(desc, data, len, sm3_transform_zvksh_zvkb);
		kernel_vector_end();
	} else {
		sm3_update(shash_desc_ctx(desc), data, len);
	}
	return 0;
}

static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *out)
{
	struct sm3_state *ctx;

	if (crypto_simd_usable()) {
		kernel_vector_begin();
		if (len)
			sm3_base_do_update(desc, data, len,
					   sm3_transform_zvksh_zvkb);
		sm3_base_do_finalize(desc, sm3_transform_zvksh_zvkb);
		kernel_vector_end();

		return sm3_base_finish(desc, out);
	}

	ctx = shash_desc_ctx(desc);
	if (len)
		sm3_update(ctx, data, len);
	sm3_final(ctx, out);

	return 0;
}

static int riscv64_sm3_final(struct shash_desc *desc, u8 *out)
{
	return riscv64_sm3_finup(desc, NULL, 0, out);
}

static struct shash_alg riscv64_sm3_alg = {
	.init = sm3_base_init,
	.update = riscv64_sm3_update,
	.final = riscv64_sm3_final,
	.finup = riscv64_sm3_finup,
	.descsize = sizeof(struct sm3_state),
	.digestsize = SM3_DIGEST_SIZE,
	.base = {
		.cra_blocksize = SM3_BLOCK_SIZE,
		.cra_priority = 300,
		.cra_name = "sm3",
		.cra_driver_name = "sm3-riscv64-zvksh-zvkb",
		.cra_module = THIS_MODULE,
	},
};

static int __init riscv64_sm3_mod_init(void)
{
	if (riscv_isa_extension_available(NULL, ZVKSH) &&
	    riscv_isa_extension_available(NULL, ZVKB) &&
	    riscv_vector_vlen() >= 128)
		return crypto_register_shash(&riscv64_sm3_alg);

	return -ENODEV;
}

static void __exit riscv64_sm3_mod_exit(void)
{
	crypto_unregister_shash(&riscv64_sm3_alg);
}

module_init(riscv64_sm3_mod_init);
module_exit(riscv64_sm3_mod_exit);

MODULE_DESCRIPTION("SM3 (RISC-V accelerated)");
MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("sm3");
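
To show how the registered algorithm is reached (a hedged sketch, not part of the commit; the function name is hypothetical), a kernel consumer could hash a buffer through the shash API, and the crypto core would dispatch to "sm3-riscv64-zvksh-zvkb" whenever it is the highest-priority "sm3" driver:

#include <crypto/hash.h>
#include <crypto/sm3.h>
#include <linux/err.h>

static int sm3_digest_example(const u8 *data, unsigned int len,
			      u8 out[SM3_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	/* Request "sm3"; cra_priority 300 beats the generic C driver. */
	tfm = crypto_alloc_shash("sm3", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}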
123 changes: 123 additions & 0 deletions arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
@@ -0,0 +1,123 @@
/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// This file is dual-licensed, meaning that you can use it under your
// choice of either of the following two licenses:
//
// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License 2.0 (the "License"). You can obtain
// a copy in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
//
// or
//
// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
// Copyright 2024 Google LLC
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The generated code of this file depends on the following RISC-V extensions:
// - RV64I
// - RISC-V Vector ('V') with VLEN >= 128
// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')

#include <linux/cfi_types.h>

.text
.option arch, +zvksh, +zvkb

#define STATEP a0
#define DATA a1
#define NUM_BLOCKS a2

#define STATE v0 // LMUL=2
#define PREV_STATE v2 // LMUL=2
#define W0 v4 // LMUL=2
#define W1 v6 // LMUL=2
#define VTMP v8 // LMUL=2

.macro	sm3_8rounds	i, w0, w1
	// Do 4 rounds using W_{0+i}..W_{7+i}.
	vsm3c.vi	STATE, \w0, \i + 0
	vslidedown.vi	VTMP, \w0, 2
	vsm3c.vi	STATE, VTMP, \i + 1

	// Compute W_{4+i}..W_{11+i}.
	vslidedown.vi	VTMP, \w0, 4
	vslideup.vi	VTMP, \w1, 4

	// Do 4 rounds using W_{4+i}..W_{11+i}.
	vsm3c.vi	STATE, VTMP, \i + 2
	vslidedown.vi	VTMP, VTMP, 2
	vsm3c.vi	STATE, VTMP, \i + 3

	.if \i < 28
	// Compute W_{16+i}..W_{23+i}.
	vsm3me.vv	\w0, \w1, \w0
	.endif
	// For the next 8 rounds, w0 and w1 are swapped.
.endm

// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)

	// Load the state and endian-swap each 32-bit word.
	vsetivli	zero, 8, e32, m2, ta, ma
	vle32.v		STATE, (STATEP)
	vrev8.v		STATE, STATE

.Lnext_block:
	addi		NUM_BLOCKS, NUM_BLOCKS, -1

	// Save the previous state, as it's needed later.
	vmv.v.v		PREV_STATE, STATE

	// Load the next 512-bit message block into W0-W1.
	vle32.v		W0, (DATA)
	addi		DATA, DATA, 32
	vle32.v		W1, (DATA)
	addi		DATA, DATA, 32

	// Do the 64 rounds of SM3.
	sm3_8rounds	0, W0, W1
	sm3_8rounds	4, W1, W0
	sm3_8rounds	8, W0, W1
	sm3_8rounds	12, W1, W0
	sm3_8rounds	16, W0, W1
	sm3_8rounds	20, W1, W0
	sm3_8rounds	24, W0, W1
	sm3_8rounds	28, W1, W0

	// XOR in the previous state.
	vxor.vv		STATE, STATE, PREV_STATE

	// Repeat if more blocks remain.
	bnez		NUM_BLOCKS, .Lnext_block

	// Store the new state and return.
	vrev8.v		STATE, STATE
	vse32.v		STATE, (STATEP)
	ret
SYM_FUNC_END(sm3_transform_zvksh_zvkb)
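
A quick check of the round count (a note on the code above, not from the commit): each vsm3c.vi performs two SM3 compression rounds, so one sm3_8rounds invocation does 4 × 2 = 8 rounds, and the eight invocations with i = 0, 4, ..., 28 use immediates 0 through 31, covering all 2 × 32 = 64 rounds of the compression function.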
