
Commit 563a525

JerryShih authored and palmer-dabbelt committed
crypto: riscv - add vector crypto accelerated SM3
Add an implementation of SM3 using the Zvksh extension. The assembly code is
derived from OpenSSL code (openssl/openssl#21923) that was dual-licensed so
that it could be reused in the kernel. Nevertheless, the assembly has been
significantly reworked for integration with the kernel, for example by using
a regular .S file instead of the so-called perlasm, using the assembler
instead of bare '.inst', and greatly reducing code duplication.

Co-developed-by: Christoph Müllner <[email protected]>
Signed-off-by: Christoph Müllner <[email protected]>
Co-developed-by: Heiko Stuebner <[email protected]>
Signed-off-by: Heiko Stuebner <[email protected]>
Signed-off-by: Jerry Shih <[email protected]>
Co-developed-by: Eric Biggers <[email protected]>
Signed-off-by: Eric Biggers <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent: b341592 · commit: 563a525

File tree

4 files changed, +250 -0 lines changed

arch/riscv/crypto/Kconfig
arch/riscv/crypto/Makefile
arch/riscv/crypto/sm3-riscv64-glue.c
arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S

arch/riscv/crypto/Kconfig

Lines changed: 12 additions & 0 deletions
@@ -61,4 +61,16 @@ config CRYPTO_SHA512_RISCV64
 	  - Zvknhb vector crypto extension
 	  - Zvkb vector crypto extension
 
+config CRYPTO_SM3_RISCV64
+	tristate "Hash functions: SM3 (ShangMi 3)"
+	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	select CRYPTO_HASH
+	select CRYPTO_SM3
+	help
+	  SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
+
+	  Architecture: riscv64 using:
+	  - Zvksh vector crypto extension
+	  - Zvkb vector crypto extension
+
 endmenu

arch/riscv/crypto/Makefile

Lines changed: 3 additions & 0 deletions
@@ -15,3 +15,6 @@ sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
 
 obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
 sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SM3_RISCV64) += sm3-riscv64.o
+sm3-riscv64-y := sm3-riscv64-glue.o sm3-riscv64-zvksh-zvkb.o

arch/riscv/crypto/sm3-riscv64-glue.c

Lines changed: 112 additions & 0 deletions
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM3 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <[email protected]>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <[email protected]>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sm3_base.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+/*
+ * Note: the asm function only uses the 'state' field of struct sm3_state.
+ * It is assumed to be the first field.
+ */
+asmlinkage void sm3_transform_zvksh_zvkb(
+	struct sm3_state *state, const u8 *data, int num_blocks);
+
+static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int len)
+{
+	/*
+	 * Ensure struct sm3_state begins directly with the SM3
+	 * 256-bit internal state, as this is what the asm function expects.
+	 */
+	BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);
+
+	if (crypto_simd_usable()) {
+		kernel_vector_begin();
+		sm3_base_do_update(desc, data, len, sm3_transform_zvksh_zvkb);
+		kernel_vector_end();
+	} else {
+		sm3_update(shash_desc_ctx(desc), data, len);
+	}
+	return 0;
+}
+
+static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
+			     unsigned int len, u8 *out)
+{
+	struct sm3_state *ctx;
+
+	if (crypto_simd_usable()) {
+		kernel_vector_begin();
+		if (len)
+			sm3_base_do_update(desc, data, len,
+					   sm3_transform_zvksh_zvkb);
+		sm3_base_do_finalize(desc, sm3_transform_zvksh_zvkb);
+		kernel_vector_end();
+
+		return sm3_base_finish(desc, out);
+	}
+
+	ctx = shash_desc_ctx(desc);
+	if (len)
+		sm3_update(ctx, data, len);
+	sm3_final(ctx, out);
+
+	return 0;
+}
+
+static int riscv64_sm3_final(struct shash_desc *desc, u8 *out)
+{
+	return riscv64_sm3_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg riscv64_sm3_alg = {
+	.init = sm3_base_init,
+	.update = riscv64_sm3_update,
+	.final = riscv64_sm3_final,
+	.finup = riscv64_sm3_finup,
+	.descsize = sizeof(struct sm3_state),
+	.digestsize = SM3_DIGEST_SIZE,
+	.base = {
+		.cra_blocksize = SM3_BLOCK_SIZE,
+		.cra_priority = 300,
+		.cra_name = "sm3",
+		.cra_driver_name = "sm3-riscv64-zvksh-zvkb",
+		.cra_module = THIS_MODULE,
+	},
+};
+
+static int __init riscv64_sm3_mod_init(void)
+{
+	if (riscv_isa_extension_available(NULL, ZVKSH) &&
+	    riscv_isa_extension_available(NULL, ZVKB) &&
+	    riscv_vector_vlen() >= 128)
+		return crypto_register_shash(&riscv64_sm3_alg);
+
+	return -ENODEV;
+}
+
+static void __exit riscv64_sm3_mod_exit(void)
+{
+	crypto_unregister_shash(&riscv64_sm3_alg);
+}
+
+module_init(riscv64_sm3_mod_init);
+module_exit(riscv64_sm3_mod_exit);
+
+MODULE_DESCRIPTION("SM3 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("sm3");
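
For reference, the "sm3" shash registered above is consumed through the standard kernel crypto API like any other hash. The snippet below is a minimal sketch and not part of this commit: sm3_digest_example() is a hypothetical helper name, while crypto_alloc_shash(), crypto_shash_tfm_digest() and crypto_free_shash() are existing shash API calls. On hardware with Zvksh/Zvkb, the cra_priority of 300 means "sm3" typically resolves to "sm3-riscv64-zvksh-zvkb".

#include <crypto/hash.h>
#include <crypto/sm3.h>
#include <linux/err.h>

/* Hypothetical example: hash 'len' bytes at 'data' into 'digest'. */
static int sm3_digest_example(const u8 *data, unsigned int len,
			      u8 digest[SM3_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	/* Request the highest-priority "sm3" implementation. */
	tfm = crypto_alloc_shash("sm3", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One-shot digest over the whole buffer. */
	err = crypto_shash_tfm_digest(tfm, data, len, digest);

	crypto_free_shash(tfm);
	return err;
}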
arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S

Lines changed: 123 additions & 0 deletions

@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <[email protected]>
+// Copyright (c) 2023, Jerry Shih <[email protected]>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/cfi_types.h>
+
+.text
+.option arch, +zvksh, +zvkb
+
+#define STATEP		a0
+#define DATA		a1
+#define NUM_BLOCKS	a2
+
+#define STATE		v0	// LMUL=2
+#define PREV_STATE	v2	// LMUL=2
+#define W0		v4	// LMUL=2
+#define W1		v6	// LMUL=2
+#define VTMP		v8	// LMUL=2
+
+.macro	sm3_8rounds	i, w0, w1
+	// Do 4 rounds using W_{0+i}..W_{7+i}.
+	vsm3c.vi	STATE, \w0, \i + 0
+	vslidedown.vi	VTMP, \w0, 2
+	vsm3c.vi	STATE, VTMP, \i + 1
+
+	// Compute W_{4+i}..W_{11+i}.
+	vslidedown.vi	VTMP, \w0, 4
+	vslideup.vi	VTMP, \w1, 4
+
+	// Do 4 rounds using W_{4+i}..W_{11+i}.
+	vsm3c.vi	STATE, VTMP, \i + 2
+	vslidedown.vi	VTMP, VTMP, 2
+	vsm3c.vi	STATE, VTMP, \i + 3
+
+.if \i < 28
+	// Compute W_{16+i}..W_{23+i}.
+	vsm3me.vv	\w0, \w1, \w0
+.endif
+	// For the next 8 rounds, w0 and w1 are swapped.
+.endm
+
+// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
+SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)
+
+	// Load the state and endian-swap each 32-bit word.
+	vsetivli	zero, 8, e32, m2, ta, ma
+	vle32.v		STATE, (STATEP)
+	vrev8.v		STATE, STATE
+
+.Lnext_block:
+	addi		NUM_BLOCKS, NUM_BLOCKS, -1
+
+	// Save the previous state, as it's needed later.
+	vmv.v.v		PREV_STATE, STATE
+
+	// Load the next 512-bit message block into W0-W1.
+	vle32.v		W0, (DATA)
+	addi		DATA, DATA, 32
+	vle32.v		W1, (DATA)
+	addi		DATA, DATA, 32
+
+	// Do the 64 rounds of SM3.
+	sm3_8rounds	0, W0, W1
+	sm3_8rounds	4, W1, W0
+	sm3_8rounds	8, W0, W1
+	sm3_8rounds	12, W1, W0
+	sm3_8rounds	16, W0, W1
+	sm3_8rounds	20, W1, W0
+	sm3_8rounds	24, W0, W1
+	sm3_8rounds	28, W1, W0
+
+	// XOR in the previous state.
+	vxor.vv		STATE, STATE, PREV_STATE
+
+	// Repeat if more blocks remain.
+	bnez		NUM_BLOCKS, .Lnext_block
+
+	// Store the new state and return.
+	vrev8.v		STATE, STATE
+	vse32.v		STATE, (STATEP)
+	ret
+SYM_FUNC_END(sm3_transform_zvksh_zvkb)
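
As background for the "Compute W_{16+i}..W_{23+i}" comments in the macro above: vsm3me.vv vectorizes the standard SM3 message expansion from GM/T 0004-2012, producing eight consecutive schedule words per invocation. A scalar C sketch of that recurrence follows; it is illustrative only and not part of this commit, and sm3_expand_w() and sm3_p1() are hypothetical helper names.

#include <linux/bitops.h>	/* rol32() */
#include <linux/types.h>

/* P1 permutation from the SM3 specification. */
static inline u32 sm3_p1(u32 x)
{
	return x ^ rol32(x, 15) ^ rol32(x, 23);
}

/*
 * Expand the 16 message words of one 512-bit block (w[0..15], already in
 * host order) to the full schedule w[0..67]; the vector instruction computes
 * eight of these w[j] values at a time.
 */
static void sm3_expand_w(u32 w[68])
{
	int j;

	for (j = 16; j < 68; j++)
		w[j] = sm3_p1(w[j - 16] ^ w[j - 9] ^ rol32(w[j - 3], 15)) ^
		       rol32(w[j - 13], 7) ^ w[j - 6];
}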
