Partially Revert "Remove curve448 architecture specific files"

This reverts commit 7e492f3372ed83af074a63d5920f13de7e3455b6.

This brings back the 64-bit reference implementation for curve448.

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/14784)
Author:    Amitay Isaacs, 2021-01-19 13:50:18 +11:00
Committer: Matt Caswell
parent 0f4286c78a
commit 927e704e8c
3 changed files with 388 additions and 0 deletions
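
For context, the restored implementation represents an element of GF(p), p = 2^448 - 2^224 - 1, as eight 56-bit limbs packed into 64-bit words (8 * 56 = 448), leaving 8 bits of headroom per limb for lazy carries. A minimal sketch of the shape, assuming names along the lines of the curve448 field.h (the exact typedef, word type and alignment attributes live there, not in this commit):

#include <stdint.h>

/* Sketch only: eight little-endian radix-2^56 limbs. */
typedef struct gf_s {
    uint64_t limb[8];
} gf_s, gf[1];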

@@ -0,0 +1,31 @@
/*
* Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
* Copyright 2016 Cryptography Research, Inc.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*
* Originally written by Mike Hamburg
*/
#ifndef __ARCH_REF64_ARCH_INTRINSICS_H__
# define __ARCH_REF64_ARCH_INTRINSICS_H__

# include <stdint.h>   /* uint64_t; __uint128_t is a gcc/clang extension */

# define ARCH_WORD_BITS 64

static __inline__ __attribute((always_inline, unused))
uint64_t word_is_zero(uint64_t a)
{
/* Constant-time test: returns an all-ones mask if a == 0 and 0
 * otherwise; let's hope the compiler isn't clever enough to turn
 * the 128-bit borrow trick into a branch. */
return (((__uint128_t) a) - 1) >> 64;
}

static __inline__ __attribute((always_inline, unused))
__uint128_t widemul(uint64_t a, uint64_t b)
{
/* Full 64x64 -> 128-bit product. */
return ((__uint128_t) a) * b;
}

#endif /* __ARCH_REF64_ARCH_INTRINSICS_H__ */
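
As a quick sanity check of the two intrinsics above, a standalone program along these lines can be used (an illustration, not part of the commit; it assumes a compiler with __uint128_t, e.g. gcc or clang on a 64-bit target, and that the header is reachable as "arch_intrinsics.h"):

#include <assert.h>
#include <stdint.h>

#include "arch_intrinsics.h"

int main(void)
{
    /* word_is_zero() yields an all-ones mask for 0 and 0 otherwise. */
    assert(word_is_zero(0) == UINT64_MAX);
    assert(word_is_zero(7) == 0);

    /* widemul() is a full 64x64 -> 128-bit multiply:
     * (2^64 - 1)^2 = 2^128 - 2^65 + 1, so the high word is 2^64 - 2
     * and the low word is 1. */
    __uint128_t w = widemul(UINT64_MAX, UINT64_MAX);
    assert((uint64_t)(w >> 64) == UINT64_MAX - 1);
    assert((uint64_t)w == 1);
    return 0;
}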

@@ -0,0 +1,308 @@
/*
* Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
* Copyright 2014 Cryptography Research, Inc.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*
* Originally written by Mike Hamburg
*/
#include "field.h"
void gf_mul(gf_s * __restrict__ cs, const gf as, const gf bs)
{
const uint64_t *a = as->limb, *b = bs->limb;
uint64_t *c = cs->limb;
__uint128_t accum0 = 0, accum1 = 0, accum2;
uint64_t mask = (1ull << 56) - 1;
uint64_t aa[4], bb[4], bbb[4];
unsigned int i;
for (i = 0; i < 4; i++) {
aa[i] = a[i] + a[i + 4];
bb[i] = b[i] + b[i + 4];
bbb[i] = bb[i] + b[i + 4];
}
int I_HATE_UNROLLED_LOOPS = 0;
if (I_HATE_UNROLLED_LOOPS) {
/*
* The compiler probably won't unroll this, so it's like 80% slower.
*/
for (i = 0; i < 4; i++) {
accum2 = 0;
unsigned int j;
for (j = 0; j <= i; j++) {
accum2 += widemul(a[j], b[i - j]);
accum1 += widemul(aa[j], bb[i - j]);
accum0 += widemul(a[j + 4], b[i - j + 4]);
}
for (; j < 4; j++) {
accum2 += widemul(a[j], b[i - j + 8]);
accum1 += widemul(aa[j], bbb[i - j + 4]);
accum0 += widemul(a[j + 4], bb[i - j + 4]);
}
accum1 -= accum2;
accum0 += accum2;
c[i] = ((uint64_t)(accum0)) & mask;
c[i + 4] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
}
} else {
accum2 = widemul(a[0], b[0]);
accum1 += widemul(aa[0], bb[0]);
accum0 += widemul(a[4], b[4]);
accum2 += widemul(a[1], b[7]);
accum1 += widemul(aa[1], bbb[3]);
accum0 += widemul(a[5], bb[3]);
accum2 += widemul(a[2], b[6]);
accum1 += widemul(aa[2], bbb[2]);
accum0 += widemul(a[6], bb[2]);
accum2 += widemul(a[3], b[5]);
accum1 += widemul(aa[3], bbb[1]);
accum0 += widemul(a[7], bb[1]);
accum1 -= accum2;
accum0 += accum2;
c[0] = ((uint64_t)(accum0)) & mask;
c[4] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
accum2 = widemul(a[0], b[1]);
accum1 += widemul(aa[0], bb[1]);
accum0 += widemul(a[4], b[5]);
accum2 += widemul(a[1], b[0]);
accum1 += widemul(aa[1], bb[0]);
accum0 += widemul(a[5], b[4]);
accum2 += widemul(a[2], b[7]);
accum1 += widemul(aa[2], bbb[3]);
accum0 += widemul(a[6], bb[3]);
accum2 += widemul(a[3], b[6]);
accum1 += widemul(aa[3], bbb[2]);
accum0 += widemul(a[7], bb[2]);
accum1 -= accum2;
accum0 += accum2;
c[1] = ((uint64_t)(accum0)) & mask;
c[5] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
accum2 = widemul(a[0], b[2]);
accum1 += widemul(aa[0], bb[2]);
accum0 += widemul(a[4], b[6]);
accum2 += widemul(a[1], b[1]);
accum1 += widemul(aa[1], bb[1]);
accum0 += widemul(a[5], b[5]);
accum2 += widemul(a[2], b[0]);
accum1 += widemul(aa[2], bb[0]);
accum0 += widemul(a[6], b[4]);
accum2 += widemul(a[3], b[7]);
accum1 += widemul(aa[3], bbb[3]);
accum0 += widemul(a[7], bb[3]);
accum1 -= accum2;
accum0 += accum2;
c[2] = ((uint64_t)(accum0)) & mask;
c[6] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
accum2 = widemul(a[0], b[3]);
accum1 += widemul(aa[0], bb[3]);
accum0 += widemul(a[4], b[7]);
accum2 += widemul(a[1], b[2]);
accum1 += widemul(aa[1], bb[2]);
accum0 += widemul(a[5], b[6]);
accum2 += widemul(a[2], b[1]);
accum1 += widemul(aa[2], bb[1]);
accum0 += widemul(a[6], b[5]);
accum2 += widemul(a[3], b[0]);
accum1 += widemul(aa[3], bb[0]);
accum0 += widemul(a[7], b[4]);
accum1 -= accum2;
accum0 += accum2;
c[3] = ((uint64_t)(accum0)) & mask;
c[7] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
} /* !I_HATE_UNROLLED_LOOPS */
accum0 += accum1;
accum0 += c[4];
accum1 += c[0];
c[4] = ((uint64_t)(accum0)) & mask;
c[0] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
c[5] += ((uint64_t)(accum0));
c[1] += ((uint64_t)(accum1));
}
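
/*
 * Aside (not part of the original file): gf_mul above exploits the
 * "golden-ratio" shape of p = 2^448 - 2^224 - 1. With phi = 2^224 we
 * have p = phi^2 - phi - 1, hence phi^2 == phi + 1 (mod p) and
 *
 *   (a0 + a1*phi)(b0 + b1*phi)
 *       == a0*b0 + a1*b1 + ((a0 + a1)(b0 + b1) - a0*b0)*phi  (mod p),
 *
 * which is why the low half a[0..3], the high half a[4..7] and the
 * sums aa[i] = a[i] + a[i+4] each feed their own accumulator. A
 * toy-scale check of the identity, with phi = 2^14 so everything fits
 * in 64 bits (illustrative only, hence the #if 0):
 */
#if 0
static void golden_ratio_identity_check(void)
{
    const uint64_t phi = (uint64_t)1 << 14;
    const uint64_t p = phi * phi - phi - 1;
    uint64_t a0 = 12345, a1 = 6789, b0 = 4242, b1 = 9999;
    uint64_t lhs = ((a0 + a1 * phi) % p) * ((b0 + b1 * phi) % p) % p;
    uint64_t rhs = (a0 * b0 + a1 * b1
                    + ((a0 + a1) * (b0 + b1) - a0 * b0) % p * phi) % p;

    assert(lhs == rhs);         /* needs <assert.h> if ever enabled */
}
#endif
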
void gf_mulw_unsigned(gf_s * __restrict__ cs, const gf as, uint32_t b)
{
const uint64_t *a = as->limb;
uint64_t *c = cs->limb;
__uint128_t accum0 = 0, accum4 = 0;
uint64_t mask = (1ull << 56) - 1;
int i;
for (i = 0; i < 4; i++) {
accum0 += widemul(b, a[i]);
accum4 += widemul(b, a[i + 4]);
c[i] = accum0 & mask;
accum0 >>= 56;
c[i + 4] = accum4 & mask;
accum4 >>= 56;
}
accum0 += accum4 + c[4];
c[4] = accum0 & mask;
c[5] += accum0 >> 56;
accum4 += c[0];
c[0] = accum4 & mask;
c[1] += accum4 >> 56;
}
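
/*
 * Aside (not part of the original file): in the final fix-up above,
 * the carry out of limb 3 (accum0) moves into limb 4 as in ordinary
 * multi-precision arithmetic, while the carry out of limb 7 (accum4)
 * wraps around via 2^448 == 2^224 + 1 (mod p): it is added into both
 * the limb-4 chain and the limb-0 chain, with any residual bits
 * spilling into limbs 5 and 1.
 */
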
void gf_sqr(gf_s * __restrict__ cs, const gf as)
{
const uint64_t *a = as->limb;
uint64_t *c = cs->limb;
__uint128_t accum0 = 0, accum1 = 0, accum2;
uint64_t mask = (1ull << 56) - 1;
uint64_t aa[4];
unsigned int i;

/* For some reason clang doesn't vectorize this without prompting? */
for (i = 0; i < 4; i++) {
aa[i] = a[i] + a[i + 4];
}
accum2 = widemul(a[0], a[3]);
accum0 = widemul(aa[0], aa[3]);
accum1 = widemul(a[4], a[7]);
accum2 += widemul(a[1], a[2]);
accum0 += widemul(aa[1], aa[2]);
accum1 += widemul(a[5], a[6]);
accum0 -= accum2;
accum1 += accum2;
c[3] = ((uint64_t)(accum1)) << 1 & mask;
c[7] = ((uint64_t)(accum0)) << 1 & mask;
accum0 >>= 55;
accum1 >>= 55;
accum0 += widemul(2 * aa[1], aa[3]);
accum1 += widemul(2 * a[5], a[7]);
accum0 += widemul(aa[2], aa[2]);
accum1 += accum0;
accum0 -= widemul(2 * a[1], a[3]);
accum1 += widemul(a[6], a[6]);
accum2 = widemul(a[0], a[0]);
accum1 -= accum2;
accum0 += accum2;
accum0 -= widemul(a[2], a[2]);
accum1 += widemul(aa[0], aa[0]);
accum0 += widemul(a[4], a[4]);
c[0] = ((uint64_t)(accum0)) & mask;
c[4] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
accum2 = widemul(2 * aa[2], aa[3]);
accum0 -= widemul(2 * a[2], a[3]);
accum1 += widemul(2 * a[6], a[7]);
accum1 += accum2;
accum0 += accum2;
accum2 = widemul(2 * a[0], a[1]);
accum1 += widemul(2 * aa[0], aa[1]);
accum0 += widemul(2 * a[4], a[5]);
accum1 -= accum2;
accum0 += accum2;
c[1] = ((uint64_t)(accum0)) & mask;
c[5] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
accum2 = widemul(aa[3], aa[3]);
accum0 -= widemul(a[3], a[3]);
accum1 += widemul(a[7], a[7]);
accum1 += accum2;
accum0 += accum2;
accum2 = widemul(2 * a[0], a[2]);
accum1 += widemul(2 * aa[0], aa[2]);
accum0 += widemul(2 * a[4], a[6]);
accum2 += widemul(a[1], a[1]);
accum1 += widemul(aa[1], aa[1]);
accum0 += widemul(a[5], a[5]);
accum1 -= accum2;
accum0 += accum2;
c[2] = ((uint64_t)(accum0)) & mask;
c[6] = ((uint64_t)(accum1)) & mask;
accum0 >>= 56;
accum1 >>= 56;
accum0 += c[3];
accum1 += c[7];
c[3] = ((uint64_t)(accum0)) & mask;
c[7] = ((uint64_t)(accum1)) & mask;
/* we could almost stop here, but it wouldn't be stable, so... */
accum0 >>= 56;
accum1 >>= 56;
c[4] += ((uint64_t)(accum0)) + ((uint64_t)(accum1));
c[0] += ((uint64_t)(accum1));
}
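
A cheap consistency check between gf_sqr() and gf_mul() can be written along the following lines (a sketch, not part of the commit; it assumes gf_strong_reduce() from the curve448 field.h is available to canonicalize both results before comparison):

#include <string.h>

#include "field.h"

/* Returns 1 if squaring a agrees with multiplying a by itself. */
static int gf_sqr_matches_mul(const gf a)
{
    gf s, m;

    gf_sqr(s, a);
    gf_mul(m, a, a);

    /* The two routines may leave different (but equivalent) limb
     * representations, so canonicalize before comparing. */
    gf_strong_reduce(s);
    gf_strong_reduce(m);
    return memcmp(s, m, sizeof(gf_s)) == 0;
}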

@@ -0,0 +1,49 @@
/*
* Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
* Copyright 2014-2016 Cryptography Research, Inc.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*
* Originally written by Mike Hamburg
*/
#define GF_HEADROOM 9999 /* Everything is reduced anyway */
#define FIELD_LITERAL(a,b,c,d,e,f,g,h) {{a,b,c,d,e,f,g,h}}
#define LIMB_PLACE_VALUE(i) 56

void gf_add_RAW(gf out, const gf a, const gf b)
{
for (unsigned int i = 0; i < 8; i++)
out->limb[i] = a->limb[i] + b->limb[i];
gf_weak_reduce(out);
}

void gf_sub_RAW(gf out, const gf a, const gf b)
{
/* Bias by 2p so no limb goes negative: the limbs of p are 2^56 - 1
 * everywhere except limb 4, which is 2^56 - 2. */
uint64_t co1 = ((1ull << 56) - 1) * 2, co2 = co1 - 2;
for (unsigned int i = 0; i < 8; i++)
out->limb[i] = a->limb[i] - b->limb[i] + ((i == 4) ? co2 : co1);
gf_weak_reduce(out);
}

void gf_bias(gf a, int amt)
{
/* No-op in this representation: GF_HEADROOM is effectively unlimited
 * ("everything is reduced anyway"), so no bias is ever needed. */
(void)a;
(void)amt;
}

void gf_weak_reduce(gf a)
{
/* Partial reduction using 2^448 == 2^224 + 1 (mod p): the carry out
 * of the top limb is folded back into limbs 4 and 0. */
uint64_t mask = (1ull << 56) - 1;
uint64_t tmp = a->limb[7] >> 56;
a->limb[4] += tmp;
for (unsigned int i = 7; i > 0; i--)
a->limb[i] = (a->limb[i] & mask) + (a->limb[i - 1] >> 56);
a->limb[0] = (a->limb[0] & mask) + tmp;
}