author    | marha <marha@users.sourceforge.net> | 2015-02-22 21:39:56 +0100
committer | marha <marha@users.sourceforge.net> | 2015-02-22 21:39:56 +0100
commit    | 462f18c7b25fe3e467f837647d07ab0a78aa8d2b (patch)
tree      | fc8013c0a1bac05a1945846c1697e973f4c35013 /openssl/crypto/modes/modes_lcl.h
parent    | 36f711ee12b6dd5184198abed3aa551efb585587 (diff)
download  | vcxsrv-462f18c7b25fe3e467f837647d07ab0a78aa8d2b.tar.gz
          | vcxsrv-462f18c7b25fe3e467f837647d07ab0a78aa8d2b.tar.bz2
          | vcxsrv-462f18c7b25fe3e467f837647d07ab0a78aa8d2b.zip
Merged origin/release (checked in because wanted to merge new stuff)
Diffstat (limited to 'openssl/crypto/modes/modes_lcl.h')
-rw-r--r-- | openssl/crypto/modes/modes_lcl.h | 171
1 file changed, 93 insertions(+), 78 deletions(-)
diff --git a/openssl/crypto/modes/modes_lcl.h b/openssl/crypto/modes/modes_lcl.h
index 9d83e1284..900f54ca2 100644
--- a/openssl/crypto/modes/modes_lcl.h
+++ b/openssl/crypto/modes/modes_lcl.h
@@ -7,122 +7,137 @@
 #include <openssl/modes.h>
 
-
 #if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
 typedef __int64 i64;
 typedef unsigned __int64 u64;
-#define U64(C) C##UI64
+# define U64(C) C##UI64
 #elif defined(__arch64__)
 typedef long i64;
 typedef unsigned long u64;
-#define U64(C) C##UL
+# define U64(C) C##UL
 #else
 typedef long long i64;
 typedef unsigned long long u64;
-#define U64(C) C##ULL
+# define U64(C) C##ULL
 #endif
 
 typedef unsigned int u32;
 typedef unsigned char u8;
 
 #define STRICT_ALIGNMENT 1
-#if defined(__i386) || defined(__i386__) || \
-    defined(__x86_64) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
-    defined(__s390__) || defined(__s390x__)
-# undef STRICT_ALIGNMENT
+#ifndef PEDANTIC
+# if defined(__i386) || defined(__i386__) || \
+     defined(__x86_64) || defined(__x86_64__) || \
+     defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
+     defined(__aarch64__) || \
+     defined(__s390__) || defined(__s390x__)
+#  undef STRICT_ALIGNMENT
+# endif
 #endif
 
 #if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-#if defined(__GNUC__) && __GNUC__>=2
-# if defined(__x86_64) || defined(__x86_64__)
-#  define BSWAP8(x) ({ u64 ret=(x);                   \
-                       asm ("bswapq %0"               \
-                       : "+r"(ret));   ret;           })
-#  define BSWAP4(x) ({ u32 ret=(x);                   \
-                       asm ("bswapl %0"               \
-                       : "+r"(ret));   ret;           })
-# elif (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
-#  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x);    \
-                       asm ("bswapl %0; bswapl %1"    \
-                       : "+r"(hi),"+r"(lo));          \
-                       (u64)hi<<32|lo;                })
-#  define BSWAP4(x) ({ u32 ret=(x);                   \
-                       asm ("bswapl %0"               \
-                       : "+r"(ret));   ret;           })
-# elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
-#  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x);    \
-                       asm ("rev %0,%0; rev %1,%1"    \
-                       : "+r"(hi),"+r"(lo));          \
-                       (u64)hi<<32|lo;                })
-#  define BSWAP4(x) ({ u32 ret;                       \
-                       asm ("rev %0,%1"               \
-                       : "=r"(ret) : "r"((u32)(x)));  \
-                       ret;                           })
+# if defined(__GNUC__) && __GNUC__>=2
+#  if defined(__x86_64) || defined(__x86_64__)
+#   define BSWAP8(x) ({ u64 ret=(x);                   \
+                        asm ("bswapq %0"               \
+                        : "+r"(ret));   ret;           })
+#   define BSWAP4(x) ({ u32 ret=(x);                   \
+                        asm ("bswapl %0"               \
+                        : "+r"(ret));   ret;           })
+#  elif (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
+#   define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x);    \
+                        asm ("bswapl %0; bswapl %1"    \
+                        : "+r"(hi),"+r"(lo));          \
+                        (u64)hi<<32|lo;                })
+#   define BSWAP4(x) ({ u32 ret=(x);                   \
+                        asm ("bswapl %0"               \
+                        : "+r"(ret));   ret;           })
+#  elif defined(__aarch64__)
+#   define BSWAP8(x) ({ u64 ret;                       \
+                        asm ("rev %0,%1"               \
+                        : "=r"(ret) : "r"(x)); ret;    })
+#   define BSWAP4(x) ({ u32 ret;                       \
+                        asm ("rev %w0,%w1"             \
+                        : "=r"(ret) : "r"(x)); ret;    })
+#  elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
+#   define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x);    \
+                        asm ("rev %0,%0; rev %1,%1"    \
+                        : "+r"(hi),"+r"(lo));          \
+                        (u64)hi<<32|lo;                })
+#   define BSWAP4(x) ({ u32 ret;                       \
+                        asm ("rev %0,%1"               \
+                        : "=r"(ret) : "r"((u32)(x))); \
+                        ret;                           })
+#  endif
+# elif defined(_MSC_VER)
+#  if _MSC_VER>=1300
+#   pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
+#   define BSWAP8(x) _byteswap_uint64((u64)(x))
+#   define BSWAP4(x) _byteswap_ulong((u32)(x))
+#  elif defined(_M_IX86)
+__inline u32 _bswap4(u32 val)
+{
+_asm mov eax, val _asm bswap eax}
+#   define BSWAP4(x) _bswap4(x)
+#  endif
 # endif
-#elif defined(_MSC_VER)
-# if _MSC_VER>=1300
-#  pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
-#  define BSWAP8(x) _byteswap_uint64((u64)(x))
-#  define BSWAP4(x) _byteswap_ulong((u32)(x))
-# elif defined(_M_IX86)
-   __inline u32 _bswap4(u32 val) {
-       _asm mov eax,val
-       _asm bswap eax
-   }
-#  define BSWAP4(x) _bswap4(x)
-# endif
-#endif
 #endif
-
 #if defined(BSWAP4) && !defined(STRICT_ALIGNMENT)
-#define GETU32(p)   BSWAP4(*(const u32 *)(p))
-#define PUTU32(p,v) *(u32 *)(p) = BSWAP4(v)
+# define GETU32(p)   BSWAP4(*(const u32 *)(p))
+# define PUTU32(p,v) *(u32 *)(p) = BSWAP4(v)
 #else
-#define GETU32(p)   ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
-#define PUTU32(p,v) ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
+# define GETU32(p)   ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
+# define PUTU32(p,v) ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
 #endif
 
+/*- GCM definitions */ typedef struct {
+    u64 hi, lo;
+} u128;
 
-/* GCM definitions */
-
-typedef struct { u64 hi,lo; } u128;
-
-#ifdef  TABLE_BITS
-#undef  TABLE_BITS
+#ifdef  TABLE_BITS
+# undef  TABLE_BITS
 #endif
 /*
  * Even though permitted values for TABLE_BITS are 8, 4 and 1, it should
  * never be set to 8 [or 1]. For further information see gcm128.c.
  */
-#define  TABLE_BITS 4
+#define TABLE_BITS 4
 
 struct gcm128_context {
-    /* Following 6 names follow names in GCM specification */
-    union { u64 u[2]; u32 d[4]; u8 c[16]; size_t t[16/sizeof(size_t)]; }
-      Yi,EKi,EK0,len,Xi,H;
-    /* Relative position of Xi, H and pre-computed Htable is used
-     * in some assembler modules, i.e. don't change the order! */
+    /* Following 6 names follow names in GCM specification */
+    union {
+        u64 u[2];
+        u32 d[4];
+        u8 c[16];
+        size_t t[16 / sizeof(size_t)];
+    } Yi, EKi, EK0, len, Xi, H;
+    /*
+     * Relative position of Xi, H and pre-computed Htable is used in some
+     * assembler modules, i.e. don't change the order!
+     */
 #if TABLE_BITS==8
-    u128 Htable[256];
+    u128 Htable[256];
 #else
-    u128 Htable[16];
-    void (*gmult)(u64 Xi[2],const u128 Htable[16]);
-    void (*ghash)(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+    u128 Htable[16];
+    void (*gmult) (u64 Xi[2], const u128 Htable[16]);
+    void (*ghash) (u64 Xi[2], const u128 Htable[16], const u8 *inp,
+                   size_t len);
 #endif
-    unsigned int mres, ares;
-    block128_f block;
-    void *key;
+    unsigned int mres, ares;
+    block128_f block;
+    void *key;
 };
 
 struct xts128_context {
-    void      *key1, *key2;
-    block128_f block1,block2;
+    void *key1, *key2;
+    block128_f block1, block2;
 };
 
 struct ccm128_context {
-    union { u64 u[2]; u8 c[16]; } nonce, cmac;
-    u64 blocks;
-    block128_f block;
-    void *key;
+    union {
+        u64 u[2];
+        u8 c[16];
+    } nonce, cmac;
+    u64 blocks;
+    block128_f block;
+    void *key;
 };
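A note on the GETU32/PUTU32 pair touched by this patch: both arms implement a big-endian 32-bit load and store. Where STRICT_ALIGNMENT has been lifted and a BSWAP4 exists, the header dereferences the word directly and byte-swaps it; otherwise it falls back to the byte-shift form, which is correct at any alignment and on any host endianness. Below is a minimal standalone sketch of that fallback path; the two macros are copied from the patched header, while the main() harness and its test values are ours for illustration and are not part of OpenSSL.

/*
 * Demo of the portable GETU32/PUTU32 fallback from the patched header.
 * Like the header itself, this assumes "unsigned int" is 32 bits wide.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

#define GETU32(p)   ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
#define PUTU32(p,v) ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))

int main(void)
{
    u8 buf[4];

    PUTU32(buf, 0xdeadbeefU);   /* stores the most-significant byte first */
    assert(buf[0] == 0xde && buf[1] == 0xad && buf[2] == 0xbe && buf[3] == 0xef);
    assert(GETU32(buf) == 0xdeadbeefU); /* round-trips regardless of host endianness */
    printf("GETU32(buf) = 0x%08x\n", GETU32(buf));
    return 0;
}

The BSWAP4/BSWAP8 variants added above for __aarch64__ (the rev instruction) and the MSVC _byteswap intrinsics compute the same byte reversal on a whole register; the shift-based fallback is simply the form that stays safe when unaligned word access is not guaranteed.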