
Commit b8d0635

JerryShih authored and palmer-dabbelt committed
crypto: riscv - add vector crypto accelerated SM4
Add an implementation of SM4 using the Zvksed extension. The assembly
code is derived from OpenSSL code (openssl/openssl#21923) that was
dual-licensed so that it could be reused in the kernel. Nevertheless,
the assembly has been significantly reworked for integration with the
kernel, for example by using a regular .S file instead of the so-called
perlasm, using the assembler instead of bare '.inst', and greatly
reducing code duplication.

Co-developed-by: Christoph Müllner <[email protected]>
Signed-off-by: Christoph Müllner <[email protected]>
Co-developed-by: Heiko Stuebner <[email protected]>
Signed-off-by: Heiko Stuebner <[email protected]>
Signed-off-by: Jerry Shih <[email protected]>
Co-developed-by: Eric Biggers <[email protected]>
Signed-off-by: Eric Biggers <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 563a525 commit b8d0635
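
For context, the cipher registered here is reached through the kernel's single-block cipher API. The following is a minimal, illustrative sketch and not part of this commit: the function name sm4_demo is hypothetical, and the crypto_cipher_* helpers are internal APIs declared in crypto/internal/cipher.h, intended for use within the crypto subsystem.

#include <crypto/internal/cipher.h>
#include <linux/err.h>
#include <linux/types.h>

/* Hypothetical example: encrypt one 16-byte block with whichever "sm4"
 * implementation the crypto core resolves. On hardware with Zvksed and
 * Zvkb, the higher-priority "sm4-riscv64-zvksed-zvkb" driver added by
 * this commit should be selected over the generic C implementation.
 */
static int sm4_demo(const u8 key[16], const u8 in[16], u8 out[16])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("sm4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, 16);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}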

File tree

4 files changed: +244 -0 lines changed

arch/riscv/crypto/Kconfig
arch/riscv/crypto/Makefile
arch/riscv/crypto/sm4-riscv64-glue.c
arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S

arch/riscv/crypto/Kconfig

Lines changed: 17 additions & 0 deletions
@@ -73,4 +73,21 @@ config CRYPTO_SM3_RISCV64
 	  - Zvksh vector crypto extension
 	  - Zvkb vector crypto extension
 
+config CRYPTO_SM4_RISCV64
+	tristate "Ciphers: SM4 (ShangMi 4)"
+	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	select CRYPTO_ALGAPI
+	select CRYPTO_SM4
+	help
+	  SM4 block cipher algorithm (OSCCA GB/T 32907-2016,
+	  ISO/IEC 18033-3:2010/Amd 1:2021)
+
+	  SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+	  Organization of State Commercial Administration of China (OSCCA)
+	  as an authorized cryptographic algorithm for use within China.
+
+	  Architecture: riscv64 using:
+	  - Zvksed vector crypto extension
+	  - Zvkb vector crypto extension
+
 endmenu

arch/riscv/crypto/Makefile

Lines changed: 3 additions & 0 deletions
@@ -18,3 +18,6 @@ sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
 
 obj-$(CONFIG_CRYPTO_SM3_RISCV64) += sm3-riscv64.o
 sm3-riscv64-y := sm3-riscv64-glue.o sm3-riscv64-zvksh-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SM4_RISCV64) += sm4-riscv64.o
+sm4-riscv64-y := sm4-riscv64-glue.o sm4-riscv64-zvksed-zvkb.o

arch/riscv/crypto/sm4-riscv64-glue.c

Lines changed: 107 additions & 0 deletions
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SM4 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <[email protected]>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <[email protected]>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sm4.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
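+/* Assembly routines defined in sm4-riscv64-zvksed-zvkb.S. */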
+asmlinkage void sm4_expandkey_zvksed_zvkb(const u8 user_key[SM4_KEY_SIZE],
+					  u32 rkey_enc[SM4_RKEY_WORDS],
+					  u32 rkey_dec[SM4_RKEY_WORDS]);
+asmlinkage void sm4_crypt_zvksed_zvkb(const u32 rkey[SM4_RKEY_WORDS],
+				      const u8 in[SM4_BLOCK_SIZE],
+				      u8 out[SM4_BLOCK_SIZE]);
+
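+/*
+ * Expand the 128-bit user key into encryption and decryption round keys.
+ * The Zvksed key schedule is used when the vector unit is usable in the
+ * current context; otherwise this falls back to the generic
+ * sm4_expandkey().
+ */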
+static int riscv64_sm4_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen)
+{
+	struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (crypto_simd_usable()) {
+		if (keylen != SM4_KEY_SIZE)
+			return -EINVAL;
+		kernel_vector_begin();
+		sm4_expandkey_zvksed_zvkb(key, ctx->rkey_enc, ctx->rkey_dec);
+		kernel_vector_end();
+		return 0;
+	}
+	return sm4_expandkey(ctx, key, keylen);
+}
+
+static void riscv64_sm4_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (crypto_simd_usable()) {
+		kernel_vector_begin();
+		sm4_crypt_zvksed_zvkb(ctx->rkey_enc, src, dst);
+		kernel_vector_end();
+	} else {
+		sm4_crypt_block(ctx->rkey_enc, dst, src);
+	}
+}
+
+static void riscv64_sm4_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (crypto_simd_usable()) {
+		kernel_vector_begin();
+		sm4_crypt_zvksed_zvkb(ctx->rkey_dec, src, dst);
+		kernel_vector_end();
+	} else {
+		sm4_crypt_block(ctx->rkey_dec, dst, src);
+	}
+}
+
+static struct crypto_alg riscv64_sm4_alg = {
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = SM4_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct sm4_ctx),
+	.cra_priority = 300,
+	.cra_name = "sm4",
+	.cra_driver_name = "sm4-riscv64-zvksed-zvkb",
+	.cra_cipher = {
+		.cia_min_keysize = SM4_KEY_SIZE,
+		.cia_max_keysize = SM4_KEY_SIZE,
+		.cia_setkey = riscv64_sm4_setkey,
+		.cia_encrypt = riscv64_sm4_encrypt,
+		.cia_decrypt = riscv64_sm4_decrypt,
+	},
+	.cra_module = THIS_MODULE,
+};
+
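+/*
+ * Register the cipher only if the Zvksed and Zvkb extensions are
+ * available and the vector unit has VLEN >= 128.
+ */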
+static int __init riscv64_sm4_mod_init(void)
+{
+	if (riscv_isa_extension_available(NULL, ZVKSED) &&
+	    riscv_isa_extension_available(NULL, ZVKB) &&
+	    riscv_vector_vlen() >= 128)
+		return crypto_register_alg(&riscv64_sm4_alg);
+
+	return -ENODEV;
+}
+
+static void __exit riscv64_sm4_mod_exit(void)
+{
+	crypto_unregister_alg(&riscv64_sm4_alg);
+}
+
+module_init(riscv64_sm4_mod_init);
+module_exit(riscv64_sm4_mod_exit);
+
+MODULE_DESCRIPTION("SM4 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("sm4");
arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S

Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <[email protected]>
+// Copyright (c) 2023, Jerry Shih <[email protected]>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector SM4 Block Cipher extension ('Zvksed')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvksed, +zvkb
+
+// void sm4_expandkey_zvksed_zvkb(const u8 user_key[16], u32 rkey_enc[32],
+//				  u32 rkey_dec[32]);
+SYM_FUNC_START(sm4_expandkey_zvksed_zvkb)
+	vsetivli	zero, 4, e32, m1, ta, ma
+
+	// Load the user key.
+	vle32.v		v1, (a0)
+	vrev8.v		v1, v1
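+	// vrev8.v byte-swaps each 32-bit element: SM4 defines its key in
+	// big-endian word order, while vle32.v loads little-endian words.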
+
+	// XOR the user key with the family key.
+	la		t0, FAMILY_KEY
+	vle32.v		v2, (t0)
+	vxor.vv		v1, v1, v2
+
+	// Compute the round keys. Store them in forwards order in rkey_enc
+	// and in reverse order in rkey_dec.
+	addi		a2, a2, 31*4
+	li		t0, -4
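+	// a2 now points to the last word of rkey_dec, and t0 = -4 is the
+	// byte stride for vsse32.v, so each group of 4 round keys is
+	// written into rkey_dec in reverse word order.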
+	.set		i, 0
+.rept 8
+	vsm4k.vi	v1, v1, i
+	vse32.v		v1, (a1)	// Store to rkey_enc.
+	vsse32.v	v1, (a2), t0	// Store to rkey_dec.
+.if i < 7
+	addi		a1, a1, 16
+	addi		a2, a2, -16
+.endif
+	.set		i, i + 1
+.endr
+
+	ret
+SYM_FUNC_END(sm4_expandkey_zvksed_zvkb)
+
+// void sm4_crypt_zvksed_zvkb(const u32 rkey[32], const u8 in[16], u8 out[16]);
+SYM_FUNC_START(sm4_crypt_zvksed_zvkb)
+	vsetivli	zero, 4, e32, m1, ta, ma
+
+	// Load the input data.
+	vle32.v		v1, (a1)
+	vrev8.v		v1, v1
+
+	// Do the 32 rounds of SM4, 4 at a time.
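+	// Each vsm4r.vs executes 4 SM4 rounds using the group of 4 round
+	// keys loaded into v2, so 8 iterations cover all 32 rounds.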
+	.set		i, 0
+.rept 8
+	vle32.v		v2, (a0)
+	vsm4r.vs	v1, v2
+.if i < 7
+	addi		a0, a0, 16
+.endif
+	.set		i, i + 1
+.endr
+
+	// Store the output data (in reverse element order).
+	vrev8.v		v1, v1
+	li		t0, -4
+	addi		a2, a2, 12
+	vsse32.v	v1, (a2), t0
+
+	ret
+SYM_FUNC_END(sm4_crypt_zvksed_zvkb)
+
+.section ".rodata"
+.p2align 2
+.type FAMILY_KEY, @object
+FAMILY_KEY:
+	.word 0xA3B1BAC6, 0x56AA3350, 0x677D9197, 0xB27022DC
+.size FAMILY_KEY, . - FAMILY_KEY
