Mirror of https://github.com/openssl/openssl.git (synced 2025-03-19 19:50:42 +08:00)
Avoid undefined behavior with unaligned accesses
Fixes: #4983

[extended tests]

Reviewed-by: Nicola Tuveri <nic.tuv@gmail.com>
(Merged from https://github.com/openssl/openssl/pull/6074)
This commit is contained in:
parent c74aaa3920
commit 77286fe3ec
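The pattern applied throughout the diff below: casting a possibly misaligned byte pointer to a wider type (size_t, u64, limb) and dereferencing it is undefined behaviour, which UBSan's alignment check reports; so each affected file gains a GCC/Clang-only typedef carrying __attribute((__aligned__(1))), and the casts go through that typedef instead, telling the compiler not to assume natural alignment. Below is a minimal standalone sketch of the idiom, not part of the patch; the READ64/read64_fallback names and the test buffer are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* With GCC/Clang, an aligned(1) typedef makes reads through a misaligned
     * pointer well-defined; elsewhere fall back to memcpy, which never
     * assumes alignment. */
    #if defined(__GNUC__)
    typedef uint64_t u64_unaligned __attribute__((__aligned__(1)));
    # define READ64(p) (*(const u64_unaligned *)(p))
    #else
    static uint64_t read64_fallback(const void *p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));   /* byte-wise copy, no alignment assumption */
        return v;
    }
    # define READ64(p) read64_fallback(p)
    #endif

    int main(void)
    {
        unsigned char buf[16] = { 0 };

        /* buf + 1 is almost certainly not 8-byte aligned; a plain
         * *(const uint64_t *)(buf + 1) would be undefined behaviour,
         * while READ64(buf + 1) is defined. */
        printf("%llu\n", (unsigned long long)READ64(buf + 1));
        return 0;
    }

On x86 the aligned(1) load typically compiles to the same single mov as before, while on strict-alignment targets the compiler emits a safe byte-wise sequence; this is presumably why the patch only applies the attribute under __GNUC__ and leaves the existing STRICT_ALIGNMENT byte-copy paths in place.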
@@ -103,10 +103,10 @@ jobs:
       env: EXTENDED_TEST="yes" CONFIG_OPTS="enable-msan disable-afalgeng -Wno-unused-command-line-argument"
     - os: linux
       compiler: clang
-      env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-ssl3 enable-ssl3-method enable-nextprotoneg no-shared enable-buildtest-c++ -fno-sanitize=alignment -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -Wno-unused-command-line-argument" CXX="clang++"
+      env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-ssl3 enable-ssl3-method enable-nextprotoneg no-shared enable-buildtest-c++ -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -Wno-unused-command-line-argument" CXX="clang++"
     - os: linux
       compiler: gcc
-      env: EXTENDED_TEST="yes" CONFIG_OPTS="--debug no-asm enable-ubsan enable-rc5 enable-md2 enable-buildtest-c++ -DPEDANTIC" OPENSSL_TEST_RAND_ORDER=0
+      env: EXTENDED_TEST="yes" CONFIG_OPTS="--debug no-asm enable-asan enable-ubsan enable-rc5 enable-md2 enable-ec_nistp_64_gcc_128 enable-buildtest-c++" OPENSSL_TEST_RAND_ORDER=0
     - os: linux
       dist: xenial
       addons:
@@ -18,11 +18,6 @@
 #include <openssl/aes.h>
 #include "aes_local.h"
 
-#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
-typedef struct {
-    unsigned long data[N_WORDS];
-} aes_block_t;
-
 /* XXX: probably some better way to do this */
 #if defined(__i386__) || defined(__x86_64__)
 # define UNALIGNED_MEMOPS_ARE_FAST 1
@@ -30,6 +25,15 @@ typedef struct {
 # define UNALIGNED_MEMOPS_ARE_FAST 0
 #endif
 
+#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
+typedef struct {
+    unsigned long data[N_WORDS];
+#if defined(__GNUC__) && UNALIGNED_MEMOPS_ARE_FAST
+} aes_block_t __attribute((__aligned__(1)));
+#else
+} aes_block_t;
+#endif
+
 #if UNALIGNED_MEMOPS_ARE_FAST
 # define load_block(d, s)       (d) = *(const aes_block_t *)(s)
 # define store_block(d, s)      *(aes_block_t *)(d) = (s)
@@ -75,6 +75,7 @@ typedef uint64_t u64;
  */
 
 typedef uint64_t limb;
+typedef uint64_t limb_aX __attribute((__aligned__(1)));
 typedef uint128_t widelimb;
 
 typedef limb felem[4];
@@ -311,10 +312,10 @@ const EC_METHOD *EC_GFp_nistp224_method(void)
  */
 static void bin28_to_felem(felem out, const u8 in[28])
 {
-    out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
-    out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff;
-    out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff;
-    out[3] = (*((const uint64_t *)(in+20))) >> 8;
+    out[0] = *((const limb *)(in)) & 0x00ffffffffffffff;
+    out[1] = (*((const limb_aX *)(in + 7))) & 0x00ffffffffffffff;
+    out[2] = (*((const limb_aX *)(in + 14))) & 0x00ffffffffffffff;
+    out[3] = (*((const limb_aX *)(in + 20))) >> 8;
 }
 
 static void felem_to_bin28(u8 out[28], const felem in)
@@ -131,6 +131,7 @@ static const felem_bytearray nistp521_curve_params[5] = {
 #define NLIMBS 9
 
 typedef uint64_t limb;
+typedef limb limb_aX __attribute((__aligned__(1)));
 typedef limb felem[NLIMBS];
 typedef uint128_t largefelem[NLIMBS];
 
@@ -144,14 +145,14 @@ static const limb bottom58bits = 0x3ffffffffffffff;
 static void bin66_to_felem(felem out, const u8 in[66])
 {
     out[0] = (*((limb *) & in[0])) & bottom58bits;
-    out[1] = (*((limb *) & in[7]) >> 2) & bottom58bits;
-    out[2] = (*((limb *) & in[14]) >> 4) & bottom58bits;
-    out[3] = (*((limb *) & in[21]) >> 6) & bottom58bits;
-    out[4] = (*((limb *) & in[29])) & bottom58bits;
-    out[5] = (*((limb *) & in[36]) >> 2) & bottom58bits;
-    out[6] = (*((limb *) & in[43]) >> 4) & bottom58bits;
-    out[7] = (*((limb *) & in[50]) >> 6) & bottom58bits;
-    out[8] = (*((limb *) & in[58])) & bottom57bits;
+    out[1] = (*((limb_aX *) & in[7]) >> 2) & bottom58bits;
+    out[2] = (*((limb_aX *) & in[14]) >> 4) & bottom58bits;
+    out[3] = (*((limb_aX *) & in[21]) >> 6) & bottom58bits;
+    out[4] = (*((limb_aX *) & in[29])) & bottom58bits;
+    out[5] = (*((limb_aX *) & in[36]) >> 2) & bottom58bits;
+    out[6] = (*((limb_aX *) & in[43]) >> 4) & bottom58bits;
+    out[7] = (*((limb_aX *) & in[50]) >> 6) & bottom58bits;
+    out[8] = (*((limb_aX *) & in[58])) & bottom57bits;
 }
 
 /*
@@ -162,14 +163,14 @@ static void felem_to_bin66(u8 out[66], const felem in)
 {
     memset(out, 0, 66);
     (*((limb *) & out[0])) = in[0];
-    (*((limb *) & out[7])) |= in[1] << 2;
-    (*((limb *) & out[14])) |= in[2] << 4;
-    (*((limb *) & out[21])) |= in[3] << 6;
-    (*((limb *) & out[29])) = in[4];
-    (*((limb *) & out[36])) |= in[5] << 2;
-    (*((limb *) & out[43])) |= in[6] << 4;
-    (*((limb *) & out[50])) |= in[7] << 6;
-    (*((limb *) & out[58])) = in[8];
+    (*((limb_aX *) & out[7])) |= in[1] << 2;
+    (*((limb_aX *) & out[14])) |= in[2] << 4;
+    (*((limb_aX *) & out[21])) |= in[3] << 6;
+    (*((limb_aX *) & out[29])) = in[4];
+    (*((limb_aX *) & out[36])) |= in[5] << 2;
+    (*((limb_aX *) & out[43])) |= in[6] << 4;
+    (*((limb_aX *) & out[50])) |= in[7] << 6;
+    (*((limb_aX *) & out[58])) = in[8];
 }
 
 /* BN_to_felem converts an OpenSSL BIGNUM into an felem */
@@ -15,6 +15,12 @@
 # define STRICT_ALIGNMENT 0
 #endif
 
+#if defined(__GNUC__) && !STRICT_ALIGNMENT
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
                            size_t len, const void *key,
                            unsigned char ivec[16], block128_f block)
@@ -40,8 +46,8 @@ void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
     } else {
         while (len >= 16) {
             for (n = 0; n < 16; n += sizeof(size_t))
-                *(size_t *)(out + n) =
-                    *(size_t *)(in + n) ^ *(size_t *)(iv + n);
+                *(size_t_aX *)(out + n) =
+                    *(size_t_aX *)(in + n) ^ *(size_t_aX *)(iv + n);
             (*block) (out, out, key);
             iv = out;
             len -= 16;
@@ -96,7 +102,8 @@ void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
         }
     } else if (16 % sizeof(size_t) == 0) { /* always true */
         while (len >= 16) {
-            size_t *out_t = (size_t *)out, *iv_t = (size_t *)iv;
+            size_t_aX *out_t = (size_t_aX *)out;
+            size_t_aX *iv_t = (size_t_aX *)iv;
 
             (*block) (in, out, key);
             for (n = 0; n < 16 / sizeof(size_t); n++)
@@ -125,8 +132,10 @@ void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
         }
     } else if (16 % sizeof(size_t) == 0) { /* always true */
         while (len >= 16) {
-            size_t c, *out_t = (size_t *)out, *ivec_t = (size_t *)ivec;
-            const size_t *in_t = (const size_t *)in;
+            size_t c;
+            size_t_aX *out_t = (size_t_aX *)out;
+            size_t_aX *ivec_t = (size_t_aX *)ivec;
+            const size_t_aX *in_t = (const size_t_aX *)in;
 
             (*block) (in, tmp.c, key);
             for (n = 0; n < 16 / sizeof(size_t); n++) {
@@ -11,6 +11,14 @@
 #include <openssl/crypto.h>
 #include "crypto/modes.h"
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u64 u64_a1 __attribute((__aligned__(1)));
+# else
+typedef u64 u64_a1;
+# endif
+#endif
+
 /*
  * First you setup M and L parameters and pass the key schedule. This is
  * called once per session setup...
@@ -170,8 +178,8 @@ int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
         ctx->cmac.u[0] ^= temp.u[0];
         ctx->cmac.u[1] ^= temp.u[1];
 #else
-        ctx->cmac.u[0] ^= ((u64 *)inp)[0];
-        ctx->cmac.u[1] ^= ((u64 *)inp)[1];
+        ctx->cmac.u[0] ^= ((u64_a1 *)inp)[0];
+        ctx->cmac.u[1] ^= ((u64_a1 *)inp)[1];
 #endif
         (*block) (ctx->cmac.c, ctx->cmac.c, key);
         (*block) (ctx->nonce.c, scratch.c, key);
@@ -181,8 +189,8 @@ int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
         temp.u[1] ^= scratch.u[1];
         memcpy(out, temp.c, 16);
 #else
-        ((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0];
-        ((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1];
+        ((u64_a1 *)out)[0] = scratch.u[0] ^ ((u64_a1 *)inp)[0];
+        ((u64_a1 *)out)[1] = scratch.u[1] ^ ((u64_a1 *)inp)[1];
 #endif
         inp += 16;
         out += 16;
@@ -254,8 +262,10 @@ int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
         ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]);
         memcpy(out, scratch.c, 16);
 #else
-        ctx->cmac.u[0] ^= (((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0]);
-        ctx->cmac.u[1] ^= (((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1]);
+        ctx->cmac.u[0] ^= (((u64_a1 *)out)[0]
+                           = scratch.u[0] ^ ((u64_a1 *)inp)[0]);
+        ctx->cmac.u[1] ^= (((u64_a1 *)out)[1]
+                           = scratch.u[1] ^ ((u64_a1 *)inp)[1]);
 #endif
         (*block) (ctx->cmac.c, ctx->cmac.c, key);
 
@@ -11,6 +11,12 @@
 #include <openssl/crypto.h>
 #include "crypto/modes.h"
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 /*
  * The input and output encrypted as though 128bit cfb mode is being used.
  * The extra state information to record how much of the 128bit block we have
@@ -43,8 +49,9 @@ void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
             while (len >= 16) {
                 (*block) (ivec, ivec, key);
                 for (; n < 16; n += sizeof(size_t)) {
-                    *(size_t *)(out + n) =
-                        *(size_t *)(ivec + n) ^= *(size_t *)(in + n);
+                    *(size_t_aX *)(out + n) =
+                        *(size_t_aX *)(ivec + n)
+                        ^= *(size_t_aX *)(in + n);
                 }
                 len -= 16;
                 out += 16;
@@ -92,9 +99,10 @@ void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
             while (len >= 16) {
                 (*block) (ivec, ivec, key);
                 for (; n < 16; n += sizeof(size_t)) {
-                    size_t t = *(size_t *)(in + n);
-                    *(size_t *)(out + n) = *(size_t *)(ivec + n) ^ t;
-                    *(size_t *)(ivec + n) = t;
+                    size_t t = *(size_t_aX *)(in + n);
+                    *(size_t_aX *)(out + n)
+                        = *(size_t_aX *)(ivec + n) ^ t;
+                    *(size_t_aX *)(ivec + n) = t;
                 }
                 len -= 16;
                 out += 16;
@@ -11,6 +11,12 @@
 #include <openssl/crypto.h>
 #include "crypto/modes.h"
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 /*
  * NOTE: the IV/counter CTR mode is big-endian. The code itself is
  * endian-neutral.
@@ -97,8 +103,9 @@ void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
         (*block) (ivec, ecount_buf, key);
         ctr128_inc_aligned(ivec);
         for (n = 0; n < 16; n += sizeof(size_t))
-            *(size_t *)(out + n) =
-                *(size_t *)(in + n) ^ *(size_t *)(ecount_buf + n);
+            *(size_t_aX *)(out + n) =
+                *(size_t_aX *)(in + n)
+                ^ *(size_t_aX *)(ecount_buf + n);
         len -= 16;
         out += 16;
         in += 16;
@@ -12,6 +12,12 @@
 #include "internal/cryptlib.h"
 #include "crypto/modes.h"
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 #if defined(BSWAP4) && defined(STRICT_ALIGNMENT)
 /* redefine, because alignment is ensured */
 # undef GETU32
@@ -1080,8 +1086,8 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
             size_t j = GHASH_CHUNK;
 
             while (j) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -1107,8 +1113,8 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
             size_t j = i;
 
             while (len >= 16) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -1318,8 +1324,8 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 
             GHASH(ctx, in, GHASH_CHUNK);
             while (j) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -1343,8 +1349,8 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
         if ((i = (len & (size_t)-16))) {
             GHASH(ctx, in, i);
             while (len >= 16) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -11,6 +11,12 @@
 #include <openssl/crypto.h>
 #include "crypto/modes.h"
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 /*
  * The input and output encrypted as though 128bit ofb mode is being used.
  * The extra state information to record how much of the 128bit block we have
@@ -41,8 +47,9 @@ void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
             while (len >= 16) {
                 (*block) (ivec, ivec, key);
                 for (; n < 16; n += sizeof(size_t))
-                    *(size_t *)(out + n) =
-                        *(size_t *)(in + n) ^ *(size_t *)(ivec + n);
+                    *(size_t_aX *)(out + n) =
+                        *(size_t_aX *)(in + n)
+                        ^ *(size_t_aX *)(ivec + n);
                 len -= 16;
                 out += 16;
                 in += 16;
@@ -11,6 +11,14 @@
 #include <openssl/crypto.h>
 #include "crypto/modes.h"
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u64 u64_a1 __attribute((__aligned__(1)));
+# else
+typedef u64 u64_a1;
+# endif
+#endif
+
 int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
                           const unsigned char iv[16],
                           const unsigned char *inp, unsigned char *out,
@@ -45,8 +53,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[0] ^= tweak.u[0];
         scratch.u[1] ^= tweak.u[1];
 #else
-        scratch.u[0] = ((u64 *)inp)[0] ^ tweak.u[0];
-        scratch.u[1] = ((u64 *)inp)[1] ^ tweak.u[1];
+        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak.u[0];
+        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak.u[1];
 #endif
         (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
 #if defined(STRICT_ALIGNMENT)
@@ -54,8 +62,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[1] ^= tweak.u[1];
         memcpy(out, scratch.c, 16);
 #else
-        ((u64 *)out)[0] = scratch.u[0] ^= tweak.u[0];
-        ((u64 *)out)[1] = scratch.u[1] ^= tweak.u[1];
+        ((u64_a1 *)out)[0] = scratch.u[0] ^= tweak.u[0];
+        ((u64_a1 *)out)[1] = scratch.u[1] ^= tweak.u[1];
 #endif
         inp += 16;
         out += 16;
@@ -128,8 +136,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[0] ^= tweak1.u[0];
         scratch.u[1] ^= tweak1.u[1];
 #else
-        scratch.u[0] = ((u64 *)inp)[0] ^ tweak1.u[0];
-        scratch.u[1] = ((u64 *)inp)[1] ^ tweak1.u[1];
+        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak1.u[0];
+        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak1.u[1];
 #endif
         (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
         scratch.u[0] ^= tweak1.u[0];
@@ -148,8 +156,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[1] ^= tweak.u[1];
         memcpy(out, scratch.c, 16);
 #else
-        ((u64 *)out)[0] = scratch.u[0] ^ tweak.u[0];
-        ((u64 *)out)[1] = scratch.u[1] ^ tweak.u[1];
+        ((u64_a1 *)out)[0] = scratch.u[0] ^ tweak.u[0];
+        ((u64_a1 *)out)[1] = scratch.u[1] ^ tweak.u[1];
 #endif
     }
 
@@ -70,6 +70,20 @@ typedef unsigned long long u64;
 # undef STRICT_ALIGNMENT
 #endif
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u64 u64_a1 __attribute((__aligned__(1)));
+# else
+typedef u64 u64_a1;
+# endif
+#endif
+
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef u64 u64_aX __attribute((__aligned__(1)));
+#else
+typedef u64 u64_aX;
+#endif
+
 #undef SMALL_REGISTER_BANK
 #if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 # define SMALL_REGISTER_BANK
@@ -197,13 +211,13 @@ typedef unsigned long long u64;
 # define LL(c0,c1,c2,c3,c4,c5,c6,c7) c0,c1,c2,c3,c4,c5,c6,c7, \
                                      c0,c1,c2,c3,c4,c5,c6,c7
 # define C0(K,i) (((u64*)(Cx.c+0))[2*K.c[(i)*8+0]])
-# define C1(K,i) (((u64*)(Cx.c+7))[2*K.c[(i)*8+1]])
-# define C2(K,i) (((u64*)(Cx.c+6))[2*K.c[(i)*8+2]])
-# define C3(K,i) (((u64*)(Cx.c+5))[2*K.c[(i)*8+3]])
-# define C4(K,i) (((u64*)(Cx.c+4))[2*K.c[(i)*8+4]])
-# define C5(K,i) (((u64*)(Cx.c+3))[2*K.c[(i)*8+5]])
-# define C6(K,i) (((u64*)(Cx.c+2))[2*K.c[(i)*8+6]])
-# define C7(K,i) (((u64*)(Cx.c+1))[2*K.c[(i)*8+7]])
+# define C1(K,i) (((u64_a1*)(Cx.c+7))[2*K.c[(i)*8+1]])
+# define C2(K,i) (((u64_a1*)(Cx.c+6))[2*K.c[(i)*8+2]])
+# define C3(K,i) (((u64_a1*)(Cx.c+5))[2*K.c[(i)*8+3]])
+# define C4(K,i) (((u64_a1*)(Cx.c+4))[2*K.c[(i)*8+4]])
+# define C5(K,i) (((u64_a1*)(Cx.c+3))[2*K.c[(i)*8+5]])
+# define C6(K,i) (((u64_a1*)(Cx.c+2))[2*K.c[(i)*8+6]])
+# define C7(K,i) (((u64_a1*)(Cx.c+1))[2*K.c[(i)*8+7]])
 #endif
 
 static const
@@ -537,7 +551,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx, const void *inp, size_t n)
         } else
 # endif
         {
-            const u64 *pa = (const u64 *)p;
+            const u64_aX *pa = (const u64_aX *)p;
             S.q[0] = (K.q[0] = H->q[0]) ^ pa[0];
             S.q[1] = (K.q[1] = H->q[1]) ^ pa[1];
             S.q[2] = (K.q[2] = H->q[2]) ^ pa[2];
@@ -775,7 +789,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx, const void *inp, size_t n)
         } else
 # endif
         {
-            const u64 *pa = (const u64 *)p;
+            const u64_aX *pa = (const u64_aX *)p;
             H->q[0] ^= S.q[0] ^ pa[0];
             H->q[1] ^= S.q[1] ^ pa[1];
             H->q[2] ^= S.q[2] ^ pa[2];