openssl/crypto/armcap.c
XiaokangQian 9224a407f9 Apply the AES-GCM unroll8 optimization patch to Neoverse N2
The loop unrolling and the use of EOR3 can improve N2 performance
by up to 32%.

Signed-off-by: XiaokangQian <xiaokang.qian@arm.com>

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/18350)
2022-05-23 11:05:51 +10:00
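
The EOR3 instruction (from the Armv8.2 SHA3 extension) computes a three-way
XOR in a single instruction. As a minimal illustration of the idea behind the
patch (not code from this commit; the function name xor3 is hypothetical),
the ACLE intrinsic veor3q_u64 collapses two dependent EORs into one:

    #include <arm_neon.h>

    /* Requires a compiler targeting +sha3; veor3q_u64 emits one EOR3. */
    static inline uint64x2_t xor3(uint64x2_t a, uint64x2_t b, uint64x2_t c)
    {
        return veor3q_u64(a, b, c); /* a ^ b ^ c in one instruction */
    }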

/*
 * Copyright 2011-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <signal.h>
#include <openssl/crypto.h>
#ifdef __APPLE__
#include <sys/sysctl.h>
#endif
#include "internal/cryptlib.h"
#include <unistd.h>
#include "arm_arch.h"
unsigned int OPENSSL_armcap_P = 0;
unsigned int OPENSSL_arm_midr = 0;
unsigned int OPENSSL_armv8_rsa_neonized = 0;
#if __ARM_MAX_ARCH__ < 7
void OPENSSL_cpuid_setup(void)
{
}

uint32_t OPENSSL_rdtsc(void)
{
    return 0;
}
#else
static sigset_t all_masked;

static sigjmp_buf ill_jmp;
static void ill_handler(int sig)
{
    siglongjmp(ill_jmp, sig);
}
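/*
 * Probing works by executing a candidate instruction and catching the
 * SIGILL an unsupported core raises; ill_handler() siglongjmps back to
 * the sigsetjmp call site in OPENSSL_cpuid_setup() below.
 */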
/*
 * The following subroutines could have been inlined, but not all
 * ARM compilers support inline assembler...
 */
void _armv7_neon_probe(void);
void _armv8_aes_probe(void);
void _armv8_sha1_probe(void);
void _armv8_sha256_probe(void);
void _armv8_pmull_probe(void);
# ifdef __aarch64__
void _armv8_sm3_probe(void);
void _armv8_sm4_probe(void);
void _armv8_sha512_probe(void);
void _armv8_eor3_probe(void);
unsigned int _armv8_cpuid_probe(void);
void _armv8_sve_probe(void);
void _armv8_sve2_probe(void);
void _armv8_rng_probe(void);
size_t OPENSSL_rndr_asm(unsigned char *buf, size_t len);
size_t OPENSSL_rndrrs_asm(unsigned char *buf, size_t len);
size_t OPENSSL_rndr_bytes(unsigned char *buf, size_t len);
size_t OPENSSL_rndrrs_bytes(unsigned char *buf, size_t len);
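/*
 * RNDR/RNDRRS can legitimately fail to return entropy (e.g. while the
 * hardware reseeds), so retry a bounded number of times with a short
 * sleep between attempts.
 */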
static size_t OPENSSL_rndr_wrapper(size_t (*func)(unsigned char *, size_t),
                                   unsigned char *buf, size_t len)
{
    size_t buffer_size = 0;
    int i;

    for (i = 0; i < 8; i++) {
        buffer_size = func(buf, len);
        if (buffer_size == len)
            break;
        usleep(5000); /* 5000 microseconds (5 milliseconds) */
    }
    return buffer_size;
}
size_t OPENSSL_rndr_bytes(unsigned char *buf, size_t len)
{
    return OPENSSL_rndr_wrapper(OPENSSL_rndr_asm, buf, len);
}

size_t OPENSSL_rndrrs_bytes(unsigned char *buf, size_t len)
{
    return OPENSSL_rndr_wrapper(OPENSSL_rndrrs_asm, buf, len);
}
# endif
uint32_t _armv7_tick(void);

uint32_t OPENSSL_rdtsc(void)
{
    if (OPENSSL_armcap_P & ARMV7_TICK)
        return _armv7_tick();
    else
        return 0;
}
# if defined(__GNUC__) && __GNUC__>=2
void OPENSSL_cpuid_setup(void) __attribute__ ((constructor));
# endif
# if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#  if __GLIBC_PREREQ(2, 16)
#   include <sys/auxv.h>
#   define OSSL_IMPLEMENT_GETAUXVAL
#  endif
# elif defined(__ANDROID_API__)
/* see https://developer.android.google.cn/ndk/guides/cpu-features */
#  if __ANDROID_API__ >= 18
#   include <sys/auxv.h>
#   define OSSL_IMPLEMENT_GETAUXVAL
#  endif
# endif
# if defined(__FreeBSD__)
#  include <sys/param.h>
#  if __FreeBSD_version >= 1200000
#   include <sys/auxv.h>
#   define OSSL_IMPLEMENT_GETAUXVAL

static unsigned long getauxval(unsigned long key)
{
    unsigned long val = 0ul;

    if (elf_aux_info((int)key, &val, sizeof(val)) != 0)
        return 0ul;

    return val;
}
#  endif
# endif
/*
* Android: according to https://developer.android.com/ndk/guides/cpu-features,
* getauxval is supported starting with API level 18
*/
# if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 18
#  include <sys/auxv.h>
#  define OSSL_IMPLEMENT_GETAUXVAL
# endif
/*
 * 32-bit ARM puts the feature bits for the Crypto Extensions in AT_HWCAP2,
 * whereas AArch64 uses AT_HWCAP.
 */
# ifndef AT_HWCAP
#  define AT_HWCAP               16
# endif
# ifndef AT_HWCAP2
#  define AT_HWCAP2              26
# endif
# if defined(__arm__) || defined(__arm)
#  define HWCAP                  AT_HWCAP
#  define HWCAP_NEON             (1 << 12)

#  define HWCAP_CE               AT_HWCAP2
#  define HWCAP_CE_AES           (1 << 0)
#  define HWCAP_CE_PMULL         (1 << 1)
#  define HWCAP_CE_SHA1          (1 << 2)
#  define HWCAP_CE_SHA256        (1 << 3)
# elif defined(__aarch64__)
#  define HWCAP                  AT_HWCAP
#  define HWCAP_NEON             (1 << 1)

#  define HWCAP_CE               HWCAP
#  define HWCAP_CE_AES           (1 << 3)
#  define HWCAP_CE_PMULL         (1 << 4)
#  define HWCAP_CE_SHA1          (1 << 5)
#  define HWCAP_CE_SHA256        (1 << 6)
#  define HWCAP_CPUID            (1 << 11)
#  define HWCAP_SHA3             (1 << 17)
#  define HWCAP_CE_SM3           (1 << 18)
#  define HWCAP_CE_SM4           (1 << 19)
#  define HWCAP_CE_SHA512        (1 << 21)
#  define HWCAP_SVE              (1 << 22)
    /* AT_HWCAP2 */
#  define HWCAP2                 26
#  define HWCAP2_SVE2            (1 << 1)
#  define HWCAP2_RNG             (1 << 16)
# endif
void OPENSSL_cpuid_setup(void)
{
    const char *e;
    struct sigaction ill_oact, ill_act;
    sigset_t oset;
    static int trigger = 0;

    if (trigger)
        return;
    trigger = 1;

    OPENSSL_armcap_P = 0;
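    /* The OPENSSL_armcap environment variable overrides all probing. */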
    if ((e = getenv("OPENSSL_armcap"))) {
        OPENSSL_armcap_P = (unsigned int)strtoul(e, NULL, 0);
        return;
    }
# if defined(__APPLE__)
#  if !defined(__aarch64__)
    /*
     * Capability probing by catching SIGILL appears to be problematic
     * on iOS. But since the Apple universe is "monocultural", it is
     * possible to simply set a pre-defined processor capability mask.
     */
    if (1) {
        OPENSSL_armcap_P = ARMV7_NEON;
        return;
    }
    /*
     * One could do the same even for __aarch64__ iOS builds. It's not done
     * exclusively for reasons of keeping the code unified across platforms.
     * The unified code works because it never triggers SIGILL on Apple
     * devices...
     */
#  else
    {
        unsigned int feature;
        size_t len = sizeof(feature);
        char uarch[64];

        if (sysctlbyname("hw.optional.armv8_2_sha512", &feature, &len,
                         NULL, 0) == 0 && feature == 1)
            OPENSSL_armcap_P |= ARMV8_SHA512;
        feature = 0;
        if (sysctlbyname("hw.optional.armv8_2_sha3", &feature, &len,
                         NULL, 0) == 0 && feature == 1) {
            OPENSSL_armcap_P |= ARMV8_SHA3;
            len = sizeof(uarch);
            if ((sysctlbyname("machdep.cpu.brand_string", uarch, &len,
                              NULL, 0) == 0) &&
                (strncmp(uarch, "Apple M1", 8) == 0))
                OPENSSL_armcap_P |= ARMV8_UNROLL8_EOR3;
        }
    }
#  endif
# endif
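    /*
     * Where getauxval() is available (glibc >= 2.16, Android API >= 18,
     * FreeBSD >= 12), trust the capability bits the kernel reports.
     */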
# ifdef OSSL_IMPLEMENT_GETAUXVAL
    if (getauxval(HWCAP) & HWCAP_NEON) {
        unsigned long hwcap = getauxval(HWCAP_CE);

        OPENSSL_armcap_P |= ARMV7_NEON;
        if (hwcap & HWCAP_CE_AES)
            OPENSSL_armcap_P |= ARMV8_AES;
        if (hwcap & HWCAP_CE_PMULL)
            OPENSSL_armcap_P |= ARMV8_PMULL;
        if (hwcap & HWCAP_CE_SHA1)
            OPENSSL_armcap_P |= ARMV8_SHA1;
        if (hwcap & HWCAP_CE_SHA256)
            OPENSSL_armcap_P |= ARMV8_SHA256;
#  ifdef __aarch64__
        if (hwcap & HWCAP_CE_SM4)
            OPENSSL_armcap_P |= ARMV8_SM4;
        if (hwcap & HWCAP_CE_SHA512)
            OPENSSL_armcap_P |= ARMV8_SHA512;
        if (hwcap & HWCAP_CPUID)
            OPENSSL_armcap_P |= ARMV8_CPUID;
        if (hwcap & HWCAP_CE_SM3)
            OPENSSL_armcap_P |= ARMV8_SM3;
        if (hwcap & HWCAP_SHA3)
            OPENSSL_armcap_P |= ARMV8_SHA3;
#  endif
    }
#  ifdef __aarch64__
    if (getauxval(HWCAP) & HWCAP_SVE)
        OPENSSL_armcap_P |= ARMV8_SVE;
    if (getauxval(HWCAP2) & HWCAP2_SVE2)
        OPENSSL_armcap_P |= ARMV8_SVE2;
    if (getauxval(HWCAP2) & HWCAP2_RNG)
        OPENSSL_armcap_P |= ARMV8_RNG;
#  endif
# endif
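    /*
     * Block every signal except those a probe can raise, so nothing
     * else can be delivered while SIGILL is redirected to ill_handler.
     */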
    sigfillset(&all_masked);
    sigdelset(&all_masked, SIGILL);
    sigdelset(&all_masked, SIGTRAP);
    sigdelset(&all_masked, SIGFPE);
    sigdelset(&all_masked, SIGBUS);
    sigdelset(&all_masked, SIGSEGV);

    memset(&ill_act, 0, sizeof(ill_act));
    ill_act.sa_handler = ill_handler;
    ill_act.sa_mask = all_masked;

    sigprocmask(SIG_SETMASK, &ill_act.sa_mask, &oset);
    sigaction(SIGILL, &ill_act, &ill_oact);
    /* If we used getauxval, we already have all the values */
# ifndef OSSL_IMPLEMENT_GETAUXVAL
    if (sigsetjmp(ill_jmp, 1) == 0) {
        _armv7_neon_probe();
        OPENSSL_armcap_P |= ARMV7_NEON;
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_pmull_probe();
            OPENSSL_armcap_P |= ARMV8_PMULL | ARMV8_AES;
        } else if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_aes_probe();
            OPENSSL_armcap_P |= ARMV8_AES;
        }
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_sha1_probe();
            OPENSSL_armcap_P |= ARMV8_SHA1;
        }
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_sha256_probe();
            OPENSSL_armcap_P |= ARMV8_SHA256;
        }
#  if defined(__aarch64__) && !defined(__APPLE__)
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_sm4_probe();
            OPENSSL_armcap_P |= ARMV8_SM4;
        }
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_sha512_probe();
            OPENSSL_armcap_P |= ARMV8_SHA512;
        }
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_sm3_probe();
            OPENSSL_armcap_P |= ARMV8_SM3;
        }
        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv8_eor3_probe();
            OPENSSL_armcap_P |= ARMV8_SHA3;
        }
#  endif
    }
#  ifdef __aarch64__
    if (sigsetjmp(ill_jmp, 1) == 0) {
        _armv8_sve_probe();
        OPENSSL_armcap_P |= ARMV8_SVE;
    }
    if (sigsetjmp(ill_jmp, 1) == 0) {
        _armv8_sve2_probe();
        OPENSSL_armcap_P |= ARMV8_SVE2;
    }
    if (sigsetjmp(ill_jmp, 1) == 0) {
        _armv8_rng_probe();
        OPENSSL_armcap_P |= ARMV8_RNG;
    }
#  endif
# endif
    /* Things that getauxval didn't tell us */
    if (sigsetjmp(ill_jmp, 1) == 0) {
        _armv7_tick();
        OPENSSL_armcap_P |= ARMV7_TICK;
    }

    sigaction(SIGILL, &ill_oact, NULL);
    sigprocmask(SIG_SETMASK, &oset, NULL);
# ifdef __aarch64__
    if (OPENSSL_armcap_P & ARMV8_CPUID)
        OPENSSL_arm_midr = _armv8_cpuid_probe();
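    /*
     * MIDR-based tuning: Cortex-A72 and Neoverse N1 run RSA faster with
     * the NEONized code path; Neoverse V1 and N2 (like Apple M1 above)
     * benefit from the unrolled-by-8 EOR3 AES-GCM code when SHA3/EOR3
     * instructions are present.
     */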
    if ((MIDR_IS_CPU_MODEL(OPENSSL_arm_midr, ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) ||
         MIDR_IS_CPU_MODEL(OPENSSL_arm_midr, ARM_CPU_IMP_ARM, ARM_CPU_PART_N1)) &&
        (OPENSSL_armcap_P & ARMV7_NEON)) {
        OPENSSL_armv8_rsa_neonized = 1;
    }
    if ((MIDR_IS_CPU_MODEL(OPENSSL_arm_midr, ARM_CPU_IMP_ARM, ARM_CPU_PART_V1) ||
         MIDR_IS_CPU_MODEL(OPENSSL_arm_midr, ARM_CPU_IMP_ARM, ARM_CPU_PART_N2)) &&
        (OPENSSL_armcap_P & ARMV8_SHA3))
        OPENSSL_armcap_P |= ARMV8_UNROLL8_EOR3;
# endif
}
#endif