riscv: SM3: Provide a Zvksh-based implementation

The upcoming RISC-V vector crypto extensions include a Zvksh
extension, which provides SM3-specific instructions.
This patch provides an implementation that uses this
extension when it is available.

Tested on QEMU with no regressions observed.

Signed-off-by: Charalampos Mitrodimas <charalampos.mitrodimas@vrull.eu>
Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Hugo Landau <hlandau@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/21923)
Author: Charalampos Mitrodimas, 2023-01-27 16:47:41 +01:00 (committed by Hugo Landau)
Parent: 7543bb3a69
Commit: f20ee1f490
7 changed files with 289 additions and 0 deletions


@@ -414,6 +414,11 @@ sub vsetivli__x0_4_e64_m1_tu_mu {
return ".word 0xc1827057";
}
sub vsetivli__x0_8_e32_m1_tu_mu {
# vsetivli x0, 8, e32, m1, tu, mu
return ".word 0xc1047057";
}
sub vslidedown_vi {
# vslidedown.vi vd, vs2, uimm
my $template = 0b0011111_00000_00000_011_00000_1010111;
@@ -663,4 +668,24 @@ sub vsm4r_vs {
return ".word ".($template | ($vs2 << 20) | ($vd << 7));
}
## zvksh instructions
sub vsm3c_vi {
# vsm3c.vi vd, vs2, uimm
my $template = 0b1010111_00000_00000_010_00000_1110111;
my $vd = read_vreg shift;
my $vs2 = read_vreg shift;
my $uimm = shift;
return ".word ".($template | ($vs2 << 20) | ($uimm << 15 ) | ($vd << 7));
}
sub vsm3me_vv {
# vsm3me.vv vd, vs2, vs1
my $template = 0b1000001_00000_00000_010_00000_1110111;
my $vd = read_vreg shift;
my $vs2 = read_vreg shift;
my $vs1 = read_vreg shift;
return ".word ".($template | ($vs2 << 20) | ($vs1 << 15 ) | ($vd << 7));
}
1;
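
For reference, each of these helpers assembles the instruction word by OR-ing the operand fields into a fixed template, so the code assembles even with toolchains that do not yet know the Zvksh mnemonics. Below is a standalone C model of vsm3c_vi, for illustration only (the authoritative encoder is the Perl sub above; 0xAE002077 is the binary template written in hex):

#include <stdint.h>
#include <stdio.h>

/* C model of the vsm3c_vi encoder above: OR the fields into the fixed
 * template at their bit positions (vs2 at bit 20, uimm at bit 15,
 * vd at bit 7). */
static uint32_t vsm3c_vi_word(unsigned vd, unsigned vs2, unsigned uimm)
{
    const uint32_t tmpl = 0xAE002077; /* 0b1010111_00000_00000_010_00000_1110111 */
    return tmpl | ((uint32_t)vs2 << 20) | ((uint32_t)uimm << 15)
                | ((uint32_t)vd << 7);
}

int main(void)
{
    /* vsm3c.vi v0, v3, 0 -- the first compression step in the loop below */
    printf(".word 0x%08x\n", (unsigned)vsm3c_vi_word(0, 3, 0)); /* 0xae302077 */
    return 0;
}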


@@ -0,0 +1,223 @@
#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You can obtain
# a copy in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
# - RISC-V vector ('V') with VLEN >= 256
# - Vector Bit-manipulation used in Cryptography ('Zvbb')
# - ShangMi Suite: SM3 Secure Hash ('Zvksh')
use strict;
use warnings;
use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
use riscv;
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
$output and open STDOUT,">$output";
my $code=<<___;
.text
___
################################################################################
# ossl_hwsm3_block_data_order_zvksh(SM3_CTX *c, const void *p, size_t num);
{
my ($CTX, $INPUT, $NUM) = ("a0", "a1", "a2");
my ($V0, $V1, $V2, $V3, $V4) = ("v0", "v1", "v2", "v3", "v4");
$code .= <<___;
.text
.p2align 3
.globl ossl_hwsm3_block_data_order_zvksh
.type ossl_hwsm3_block_data_order_zvksh,\@function
ossl_hwsm3_block_data_order_zvksh:
@{[vsetivli__x0_8_e32_m1_tu_mu]}
# Load initial state of hash context (c->A-H).
@{[vle32_v $V0, $CTX]}
@{[vrev8_v $V0, $V0]}
L_sm3_loop:
# Copy the previous state to v1.
# It will be XOR'ed with the current state at the end of the round.
@{[vmv_v_v $V1, $V0]}
# Load the 64B block in 2x32B chunks.
@{[vle32_v $V3, $INPUT]} # v3 := {w7, ..., w0}
add $INPUT, $INPUT, 32
@{[vle32_v $V4, $INPUT]} # v4 := {w15, ..., w8}
add $INPUT, $INPUT, 32
add $NUM, $NUM, -1
# As vsm3c consumes only w0, w1, w4 and w5, we need to slide the input
# 2 elements down so that we process elements w2, w3, w6 and w7.
# This will be repeated for each odd round.
@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w7, ..., w2}
@{[vsm3c_vi $V0, $V3, 0]}
@{[vsm3c_vi $V0, $V2, 1]}
# Prepare a vector with {w11, ..., w4}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w7, ..., w4}
@{[vslideup_vi $V2, $V4, 4]} # v2 := {w11, w10, w9, w8, w7, w6, w5, w4}
@{[vsm3c_vi $V0, $V2, 2]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w11, w10, w9, w8, w7, w6}
@{[vsm3c_vi $V0, $V2, 3]}
@{[vsm3c_vi $V0, $V4, 4]}
@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w15, w14, w13, w12, w11, w10}
@{[vsm3c_vi $V0, $V2, 5]}
@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w23, w22, w21, w20, w19, w18, w17, w16}
# Prepare a register with {w19, w18, w17, w16, w15, w14, w13, w12}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w15, w14, w13, w12}
@{[vslideup_vi $V2, $V3, 4]} # v2 := {w19, w18, w17, w16, w15, w14, w13, w12}
@{[vsm3c_vi $V0, $V2, 6]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w19, w18, w17, w16, w15, w14}
@{[vsm3c_vi $V0, $V2, 7]}
@{[vsm3c_vi $V0, $V3, 8]}
@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w23, w22, w21, w20, w19, w18}
@{[vsm3c_vi $V0, $V2, 9]}
@{[vsm3me_vv $V4, $V3, $V4]} # v4 := {w31, w30, w29, w28, w27, w26, w25, w24}
# Prepare a register with {w27, w26, w25, w24, w23, w22, w21, w20}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w23, w22, w21, w20}
@{[vslideup_vi $V2, $V4, 4]} # v2 := {w27, w26, w25, w24, w23, w22, w21, w20}
@{[vsm3c_vi $V0, $V2, 10]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w27, w26, w25, w24, w23, w22}
@{[vsm3c_vi $V0, $V2, 11]}
@{[vsm3c_vi $V0, $V4, 12]}
@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w31, w30, w29, w28, w27, w26}
@{[vsm3c_vi $V0, $V2, 13]}
@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w39, w38, w37, w36, w35, w34, w33, w32}
# Prepare a register with {w35, w34, w33, w32, w31, w30, w29, w28}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w31, w30, w29, w28}
@{[vslideup_vi $V2, $V3, 4]} # v2 := {w35, w34, w33, w32, w31, w30, w29, w28}
@{[vsm3c_vi $V0, $V2, 14]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w35, w34, w33, w32, w31, w30}
@{[vsm3c_vi $V0, $V2, 15]}
@{[vsm3c_vi $V0, $V3, 16]}
@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w39, w38, w37, w36, w35, w34}
@{[vsm3c_vi $V0, $V2, 17]}
@{[vsm3me_vv $V4, $V3, $V4]} # v4 := {w47, w46, w45, w44, w43, w42, w41, w40}
# Prepare a register with {w43, w42, w41, w40, w39, w38, w37, w36}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w39, w38, w37, w36}
@{[vslideup_vi $V2, $V4, 4]} # v2 := {w43, w42, w41, w40, w39, w38, w37, w36}
@{[vsm3c_vi $V0, $V2, 18]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w43, w42, w41, w40, w39, w38}
@{[vsm3c_vi $V0, $V2, 19]}
@{[vsm3c_vi $V0, $V4, 20]}
@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w47, w46, w45, w44, w43, w42}
@{[vsm3c_vi $V0, $V2, 21]}
@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w55, w54, w53, w52, w51, w50, w49, w48}
# Prepare a register with {w51, w50, w49, w48, w47, w46, w45, w44}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w47, w46, w45, w44}
@{[vslideup_vi $V2, $V3, 4]} # v2 := {w51, w50, w49, w48, w47, w46, w45, w44}
@{[vsm3c_vi $V0, $V2, 22]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w51, w50, w49, w48, w47, w46}
@{[vsm3c_vi $V0, $V2, 23]}
@{[vsm3c_vi $V0, $V3, 24]}
@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w55, w54, w53, w52, w51, w50}
@{[vsm3c_vi $V0, $V2, 25]}
@{[vsm3me_vv $V4, $V3, $V4]} # v4 := {w63, w62, w61, w60, w59, w58, w57, w56}
# Prepare a register with {w59, w58, w57, w56, w55, w54, w53, w52}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w55, w54, w53, w52}
@{[vslideup_vi $V2, $V4, 4]} # v2 := {w59, w58, w57, w56, w55, w54, w53, w52}
@{[vsm3c_vi $V0, $V2, 26]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w59, w58, w57, w56, w55, w54}
@{[vsm3c_vi $V0, $V2, 27]}
@{[vsm3c_vi $V0, $V4, 28]}
@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w63, w62, w61, w60, w59, w58}
@{[vsm3c_vi $V0, $V2, 29]}
@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w71, w70, w69, w68, w67, w66, w65, w64}
# Prepare a register with {w67, w66, w65, w64, w63, w62, w61, w60}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w63, w62, w61, w60}
@{[vslideup_vi $V2, $V3, 4]} # v2 := {w67, w66, w65, w64, w63, w62, w61, w60}
@{[vsm3c_vi $V0, $V2, 30]}
@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w67, w66, w65, w64, w63, w62}
@{[vsm3c_vi $V0, $V2, 31]}
# XOR in the previous state.
@{[vxor_vv $V0, $V0, $V1]}
bnez $NUM, L_sm3_loop # Check if there are any more blocks to process
L_sm3_end:
@{[vrev8_v $V0, $V0]}
@{[vse32_v $V0, $CTX]}
ret
.size ossl_hwsm3_block_data_order_zvksh,.-ossl_hwsm3_block_data_order_zvksh
___
}
print $code;
close STDOUT or die "error closing STDOUT: $!";
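
Two notes on the schedule above. vsm3me.vv implements the SM3 message expansion of GB/T 32905-2016, producing eight new message words from the sixteen words held in its two source registers, and each vsm3c.vi round index covers two SM3 compression rounds, which is why uimm runs from 0 to 31 for rounds 0-63. A scalar C model of one vsm3me.vv, sketched from the SM3 standard (illustration only, not OpenSSL code; P1(x) = x ^ (x <<< 15) ^ (x <<< 23)):

#include <stdint.h>

static uint32_t rotl32(uint32_t x, int n)
{
    return (x << n) | (x >> (32 - n));
}

/* SM3 permutation P1 from the standard. */
static uint32_t p1(uint32_t x)
{
    return x ^ rotl32(x, 15) ^ rotl32(x, 23);
}

/* Given the previous 16 message words in w[0..15], compute the next 8
 * (the work of one vsm3me.vv). A new word can depend on a word produced
 * earlier in the same batch (the i-3 term), so expand sequentially. */
static void sm3_expand8(const uint32_t w[16], uint32_t out[8])
{
    uint32_t t[24];
    for (int i = 0; i < 16; i++)
        t[i] = w[i];
    for (int i = 16; i < 24; i++)
        t[i] = p1(t[i - 16] ^ t[i - 9] ^ rotl32(t[i - 3], 15))
             ^ rotl32(t[i - 13], 7) ^ t[i - 6];
    for (int i = 0; i < 8; i++)
        out[i] = t[16 + i];
}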


@@ -5,6 +5,9 @@ IF[{- !$disabled{sm3} -}]
$SM3ASM_aarch64=sm3-armv8.S
$SM3DEF_aarch64=OPENSSL_SM3_ASM
$SM3ASM_riscv64=sm3_riscv.c sm3-riscv64-zvksh.S
$SM3DEF_riscv64=OPENSSL_SM3_ASM
# Now that we have defined all the arch specific variables, use the
# appropriate ones, and define the appropriate macros
IF[$SM3ASM_{- $target{asm_arch} -}]
@@ -18,5 +21,7 @@ IF[{- !$disabled{sm3} -}]
GENERATE[sm3-armv8.S]=asm/sm3-armv8.pl
INCLUDE[sm3-armv8.o]=..
GENERATE[sm3-riscv64-zvksh.S]=asm/sm3-riscv64-zvksh.pl
ENDIF


@@ -39,6 +39,11 @@
# define HWSM3_CAPABLE (OPENSSL_armcap_P & ARMV8_SM3)
void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num);
# endif
# if defined(__riscv) && __riscv_xlen == 64
# include "crypto/riscv_arch.h"
# define HWSM3_CAPABLE 1
void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num);
# endif
#endif
#if defined(HWSM3_CAPABLE)
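
This is the same hook the aarch64 block above uses: when HWSM3_CAPABLE is defined, the hash template is pointed at the hardware-dispatching block function instead of the portable C one. A minimal sketch of that selection (the macro name SM3_BLOCK_DATA_ORDER is hypothetical; the actual wiring inside this header is outside the hunk shown):

#if defined(HWSM3_CAPABLE)
# define SM3_BLOCK_DATA_ORDER ossl_hwsm3_block_data_order /* runtime dispatch */
#else
# define SM3_BLOCK_DATA_ORDER ossl_sm3_block_data_order   /* portable C */
#endif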

crypto/sm3/sm3_riscv.c (new file)

@ -0,0 +1,29 @@
/*
* Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <stdlib.h>
#include <string.h>
#include <openssl/opensslconf.h>
#include "internal/sm3.h"
#include "crypto/riscv_arch.h"
#include <stdio.h>
void ossl_hwsm3_block_data_order_zvksh(SM3_CTX *c, const void *p, size_t num);
void ossl_sm3_block_data_order(SM3_CTX *c, const void *p, size_t num);
void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num);
void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num)
{
if (RISCV_HAS_ZVBB_AND_ZVKSH() && riscv_vlen() >= 256) {
ossl_hwsm3_block_data_order_zvksh(c, p, num);
} else {
ossl_sm3_block_data_order(c, p, num);
}
}
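
Here num counts whole 64-byte SM3 blocks: each iteration of the assembly loop loads 2 x 32 bytes, advances the input pointer by 64 and decrements num once. A small usage sketch under that assumption (the helper name is hypothetical; SM3_CTX comes from internal/sm3.h):

/* Hypothetical helper: hash a buffer made up of complete blocks only. */
static void sm3_hash_full_blocks(SM3_CTX *c, const unsigned char *buf,
                                 size_t len)
{
    ossl_hwsm3_block_data_order(c, buf, len / 64); /* 64 = SM3 block size */
}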


@@ -40,6 +40,7 @@ RISCV_DEFINE_CAP(ZVKNED, 0, 18)
RISCV_DEFINE_CAP(ZVKNHA, 0, 19)
RISCV_DEFINE_CAP(ZVKNHB, 0, 20)
RISCV_DEFINE_CAP(ZVKSED, 0, 21)
RISCV_DEFINE_CAP(ZVKSH, 0, 22)
/*
* In the future ...
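
This file is an X-macro list: each consumer defines RISCV_DEFINE_CAP(NAME, INDEX, BIT) before including it, so the new ZVKSH entry is threaded through every consumer automatically. A generic sketch of the counting consumer behind kRISCVNumCaps in the next hunk (illustrative; the real code may differ in detail, and it assumes the .def file contains only RISCV_DEFINE_CAP entries):

#include <stddef.h>

/* Expand every capability entry to "+1" to count the entries. */
#define RISCV_DEFINE_CAP(NAME, INDEX, BIT) +1
static const size_t kRISCVNumCaps = 0
#include "riscv_arch.def"
    ;
#undef RISCV_DEFINE_CAP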


@@ -63,6 +63,7 @@ static const size_t kRISCVNumCaps =
#define RISCV_HAS_ZVBB_AND_ZVKNHA() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHA())
#define RISCV_HAS_ZVBB_AND_ZVKNHB() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHB())
#define RISCV_HAS_ZVBB_AND_ZVKSED() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSED())
#define RISCV_HAS_ZVBB_AND_ZVKSH() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSH())
/*
* Get the size of a vector register in bits (VLEN).
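
The VLEN query is what lets sm3_riscv.c enforce the VLEN >= 256 prerequisite stated in the .pl file: the kernel runs with vl = 8 at SEW = 32 (vsetivli x0, 8, e32, m1, tu, mu), so each LMUL=1 register must hold 8 x 32 = 256 bits. The same gate, spelled out as a tiny illustration:

#include <stddef.h>

/* One LMUL=1 vector register must fit 8 x 32-bit words for this kernel. */
static int sm3_zvksh_vlen_ok(size_t vlen_bits)
{
    return vlen_bits >= 8 * 32; /* i.e. VLEN >= 256 */
}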