Diffstat (limited to 'openssl/crypto/sha')
-rw-r--r--  openssl/crypto/sha/Makefile                  |   17
-rw-r--r--  openssl/crypto/sha/asm/sha1-586.pl           |  357
-rw-r--r--  openssl/crypto/sha/asm/sha1-armv4-large.pl   |  451
-rwxr-xr-x  openssl/crypto/sha/asm/sha1-armv8.pl         |  334
-rwxr-xr-x  openssl/crypto/sha/asm/sha1-mb-x86_64.pl     | 1574
-rw-r--r--  openssl/crypto/sha/asm/sha1-mips.pl          |  100
-rw-r--r--  openssl/crypto/sha/asm/sha1-ppc.pl           |   46
-rw-r--r--  openssl/crypto/sha/asm/sha1-sparcv9.pl       |  165
-rw-r--r--  openssl/crypto/sha/asm/sha1-x86_64.pl        | 1210
-rw-r--r--  openssl/crypto/sha/asm/sha256-586.pl         | 1266
-rw-r--r--  openssl/crypto/sha/asm/sha256-armv4.pl       |  590
-rwxr-xr-x  openssl/crypto/sha/asm/sha256-mb-x86_64.pl   | 1560
-rw-r--r--  openssl/crypto/sha/asm/sha512-586.pl         |  517
-rw-r--r--  openssl/crypto/sha/asm/sha512-armv4.pl       |   69
-rwxr-xr-x  openssl/crypto/sha/asm/sha512-armv8.pl       |  420
-rw-r--r--  openssl/crypto/sha/asm/sha512-ia64.pl        |  351
-rw-r--r--  openssl/crypto/sha/asm/sha512-mips.pl        |   81
-rw-r--r--  openssl/crypto/sha/asm/sha512-ppc.pl         |  460
-rw-r--r--  openssl/crypto/sha/asm/sha512-sparcv9.pl     |  320
-rw-r--r--  openssl/crypto/sha/asm/sha512-x86_64.pl      | 2043
-rwxr-xr-x  openssl/crypto/sha/asm/sha512p8-ppc.pl       |  424
-rw-r--r--  openssl/crypto/sha/sha.c                     |  106
-rw-r--r--  openssl/crypto/sha/sha.h                     |  208
-rw-r--r--  openssl/crypto/sha/sha1.c                    |  106
-rw-r--r--  openssl/crypto/sha/sha1_one.c                |   35
-rw-r--r--  openssl/crypto/sha/sha1dgst.c                |   23
-rw-r--r--  openssl/crypto/sha/sha1test.c                |  206
-rw-r--r--  openssl/crypto/sha/sha256.c                  |  583
-rw-r--r--  openssl/crypto/sha/sha256t.c                 |  241
-rw-r--r--  openssl/crypto/sha/sha512.c                  | 1144
-rw-r--r--  openssl/crypto/sha/sha512t.c                 |  312
-rw-r--r--  openssl/crypto/sha/sha_dgst.c                |   23
-rw-r--r--  openssl/crypto/sha/sha_locl.h                |  703
-rw-r--r--  openssl/crypto/sha/sha_one.c                 |   35
-rw-r--r--  openssl/crypto/sha/shatest.c                 |  206
35 files changed, 13488 insertions, 2798 deletions
diff --git a/openssl/crypto/sha/Makefile b/openssl/crypto/sha/Makefile
index 2eb2b7af9..a8c0cf785 100644
--- a/openssl/crypto/sha/Makefile
+++ b/openssl/crypto/sha/Makefile
@@ -60,21 +60,25 @@ sha256-armv4.S: asm/sha256-armv4.pl
$(PERL) $< $(PERLASM_SCHEME) $@
sha1-alpha.s: asm/sha1-alpha.pl
- (preproc=/tmp/$$$$.$@; trap "rm $$preproc" INT; \
+ (preproc=$$$$.$@.S; trap "rm $$preproc" INT; \
$(PERL) asm/sha1-alpha.pl > $$preproc && \
- $(CC) -E $$preproc > $@ && rm $$preproc)
+ $(CC) -E -P $$preproc > $@ && rm $$preproc)
# Solaris make has to be explicitly told
sha1-x86_64.s: asm/sha1-x86_64.pl; $(PERL) asm/sha1-x86_64.pl $(PERLASM_SCHEME) > $@
+sha1-mb-x86_64.s: asm/sha1-mb-x86_64.pl; $(PERL) asm/sha1-mb-x86_64.pl $(PERLASM_SCHEME) > $@
sha256-x86_64.s:asm/sha512-x86_64.pl; $(PERL) asm/sha512-x86_64.pl $(PERLASM_SCHEME) $@
+sha256-mb-x86_64.s: asm/sha256-mb-x86_64.pl; $(PERL) asm/sha256-mb-x86_64.pl $(PERLASM_SCHEME) > $@
sha512-x86_64.s:asm/sha512-x86_64.pl; $(PERL) asm/sha512-x86_64.pl $(PERLASM_SCHEME) $@
-sha1-sparcv9.s: asm/sha1-sparcv9.pl; $(PERL) asm/sha1-sparcv9.pl $@ $(CFLAGS)
-sha256-sparcv9.s:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAGS)
-sha512-sparcv9.s:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAGS)
+sha1-sparcv9.S: asm/sha1-sparcv9.pl; $(PERL) asm/sha1-sparcv9.pl $@ $(CFLAGS)
+sha256-sparcv9.S:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAGS)
+sha512-sparcv9.S:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAGS)
sha1-ppc.s: asm/sha1-ppc.pl; $(PERL) asm/sha1-ppc.pl $(PERLASM_SCHEME) $@
sha256-ppc.s: asm/sha512-ppc.pl; $(PERL) asm/sha512-ppc.pl $(PERLASM_SCHEME) $@
sha512-ppc.s: asm/sha512-ppc.pl; $(PERL) asm/sha512-ppc.pl $(PERLASM_SCHEME) $@
+sha256p8-ppc.s: asm/sha512p8-ppc.pl; $(PERL) asm/sha512p8-ppc.pl $(PERLASM_SCHEME) $@
+sha512p8-ppc.s: asm/sha512p8-ppc.pl; $(PERL) asm/sha512p8-ppc.pl $(PERLASM_SCHEME) $@
sha1-parisc.s: asm/sha1-parisc.pl; $(PERL) asm/sha1-parisc.pl $(PERLASM_SCHEME) $@
sha256-parisc.s:asm/sha512-parisc.pl; $(PERL) asm/sha512-parisc.pl $(PERLASM_SCHEME) $@
@@ -92,6 +96,9 @@ sha512-%.S: asm/sha512-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@
sha1-armv4-large.o: sha1-armv4-large.S
sha256-armv4.o: sha256-armv4.S
sha512-armv4.o: sha512-armv4.S
+sha1-armv8.o: sha1-armv8.S
+sha256-armv8.o: sha256-armv8.S
+sha512-armv8.o: sha512-armv8.S
files:
$(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
diff --git a/openssl/crypto/sha/asm/sha1-586.pl b/openssl/crypto/sha/asm/sha1-586.pl
index 1084d227f..8377299b1 100644
--- a/openssl/crypto/sha/asm/sha1-586.pl
+++ b/openssl/crypto/sha/asm/sha1-586.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -79,6 +79,10 @@
# strongly, it's probably more appropriate to discuss possibility of
# using vector rotate XOP on AMD...
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is better).
@@ -88,13 +92,20 @@
# PIII 11.5 -
# P4 10.6 -
# AMD K8 7.1 -
-# Core2 7.3 6.1/+20% -
-# Atom 12.5 9.5(*)/+32% -
-# Westmere 7.3 5.6/+30% -
-# Sandy Bridge 8.8 6.2/+40% 5.1(**)/+70%
+# Core2 7.3 6.0/+22% -
+# Westmere 7.3 5.5/+33% -
+# Sandy Bridge 8.8 6.2/+40% 5.1(**)/+73%
+# Ivy Bridge 7.2 4.8/+51% 4.7(**)/+53%
+# Haswell 6.5 4.3/+51% 4.1(**)/+58%
+# Bulldozer 11.6 6.0/+92%
+# VIA Nano 10.6 7.5/+41%
+# Atom 12.5 9.3(*)/+35%
+# Silvermont 14.5 9.9(*)/+46%
#
# (*) Loop is 1056 instructions long and expected result is ~8.25.
-# It remains mystery [to me] why ILP is limited to 1.7.
+# The discrepancy is due to front-end limitations, the so-called
+# MS-ROM penalties, and on Silvermont also the rotate instruction's
+# limited parallelism.
#
# (**) As per above comment, the result is for AVX *plus* sh[rl]d.
@@ -116,6 +127,15 @@ $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
$1>=2.03); # first version supporting AVX
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
+ `ml 2>&1` =~ /Version ([0-9]+)\./ &&
+ $1>=10); # first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/ &&
+ $2>=3.0); # first version supporting AVX
+
+$shaext=$xmm; ### set to zero if compiling for 1.0.1
+
&external_label("OPENSSL_ia32cap_P") if ($xmm);
@@ -295,6 +315,7 @@ if ($alt) {
&function_begin("sha1_block_data_order");
if ($xmm) {
+ &static_label("shaext_shortcut") if ($shaext);
&static_label("ssse3_shortcut");
&static_label("avx_shortcut") if ($ymm);
&static_label("K_XX_XX");
@@ -309,8 +330,13 @@ if ($xmm) {
&mov ($D,&DWP(4,$T));
&test ($D,1<<9); # check SSSE3 bit
&jz (&label("x86"));
+ &mov ($C,&DWP(8,$T));
&test ($A,1<<24); # check FXSR bit
&jz (&label("x86"));
+ if ($shaext) {
+ &test ($C,1<<29); # check SHA bit
+ &jnz (&label("shaext_shortcut"));
+ }
if ($ymm) {
&and ($D,1<<28); # mask AVX bit
&and ($A,1<<30); # mask "Intel CPU" bit
@@ -389,6 +415,117 @@ if ($xmm) {
&function_end("sha1_block_data_order");
if ($xmm) {
+if ($shaext) {
+######################################################################
+# Intel SHA Extensions implementation of SHA1 update function.
+#
+my ($ctx,$inp,$num)=("edi","esi","ecx");
+my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
+my @MSG=map("xmm$_",(4..7));
+
+sub sha1rnds4 {
+ my ($dst,$src,$imm)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm); }
+}
+sub sha1op38 {
+ my ($opcodelet,$dst,$src)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
+}
+sub sha1nexte { sha1op38(0xc8,@_); }
+sub sha1msg1 { sha1op38(0xc9,@_); }
+sub sha1msg2 { sha1op38(0xca,@_); }
+
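The helpers above hand-assemble the instruction bytes because assemblers of the day lacked the SHA mnemonics. A standalone sketch (illustrative only, not part of the commit) of the same encoding arithmetic, for sha1rnds4 xmm2,xmm1,3:

    # reg-reg ModR/M is 0xc0|(dst<<3)|src, so dst=xmm2, src=xmm1 gives 0xd1
    my ($dst,$src,$imm) = (2,1,3);
    printf "0f 3a cc %02x %02x\n", 0xc0|($dst<<3)|$src, $imm;
    # prints "0f 3a cc d1 03", the byte sequence data_byte() would emit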
+&function_begin("_sha1_block_data_order_shaext");
+ &call (&label("pic_point")); # make it PIC!
+ &set_label("pic_point");
+ &blindpop($tmp1);
+ &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("shaext_shortcut");
+ &mov ($ctx,&wparam(0));
+ &mov ("ebx","esp");
+ &mov ($inp,&wparam(1));
+ &mov ($num,&wparam(2));
+ &sub ("esp",32);
+
+ &movdqu ($ABCD,&QWP(0,$ctx));
+ &movd ($E,&QWP(16,$ctx));
+ &and ("esp",-32);
+ &movdqa ($BSWAP,&QWP(0x50,$tmp1)); # byte-n-word swap
+
+ &movdqu (@MSG[0],&QWP(0,$inp));
+ &pshufd ($ABCD,$ABCD,0b00011011); # flip word order
+ &movdqu (@MSG[1],&QWP(0x10,$inp));
+ &pshufd ($E,$E,0b00011011); # flip word order
+ &movdqu (@MSG[2],&QWP(0x20,$inp));
+ &pshufb (@MSG[0],$BSWAP);
+ &movdqu (@MSG[3],&QWP(0x30,$inp));
+ &pshufb (@MSG[1],$BSWAP);
+ &pshufb (@MSG[2],$BSWAP);
+ &pshufb (@MSG[3],$BSWAP);
+ &jmp (&label("loop_shaext"));
+
+&set_label("loop_shaext",16);
+ &dec ($num);
+ &lea ("eax",&DWP(0x40,$inp));
+ &movdqa (&QWP(0,"esp"),$E); # offload $E
+ &paddd ($E,@MSG[0]);
+ &cmovne ($inp,"eax");
+ &movdqa (&QWP(16,"esp"),$ABCD); # offload $ABCD
+
+for($i=0;$i<20-4;$i+=2) {
+ &sha1msg1 (@MSG[0],@MSG[1]);
+ &movdqa ($E_,$ABCD);
+ &sha1rnds4 ($ABCD,$E,int($i/5)); # 0-3...
+ &sha1nexte ($E_,@MSG[1]);
+ &pxor (@MSG[0],@MSG[2]);
+ &sha1msg1 (@MSG[1],@MSG[2]);
+ &sha1msg2 (@MSG[0],@MSG[3]);
+
+ &movdqa ($E,$ABCD);
+ &sha1rnds4 ($ABCD,$E_,int(($i+1)/5));
+ &sha1nexte ($E,@MSG[2]);
+ &pxor (@MSG[1],@MSG[3]);
+ &sha1msg2 (@MSG[1],@MSG[0]);
+
+ push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG));
+}
+ &movdqu (@MSG[0],&QWP(0,$inp));
+ &movdqa ($E_,$ABCD);
+ &sha1rnds4 ($ABCD,$E,3); # 64-67
+ &sha1nexte ($E_,@MSG[1]);
+ &movdqu (@MSG[1],&QWP(0x10,$inp));
+ &pshufb (@MSG[0],$BSWAP);
+
+ &movdqa ($E,$ABCD);
+ &sha1rnds4 ($ABCD,$E_,3); # 68-71
+ &sha1nexte ($E,@MSG[2]);
+ &movdqu (@MSG[2],&QWP(0x20,$inp));
+ &pshufb (@MSG[1],$BSWAP);
+
+ &movdqa ($E_,$ABCD);
+ &sha1rnds4 ($ABCD,$E,3); # 72-75
+ &sha1nexte ($E_,@MSG[3]);
+ &movdqu (@MSG[3],&QWP(0x30,$inp));
+ &pshufb (@MSG[2],$BSWAP);
+
+ &movdqa ($E,$ABCD);
+ &sha1rnds4 ($ABCD,$E_,3); # 76-79
+ &movdqa ($E_,&QWP(0,"esp"));
+ &pshufb (@MSG[3],$BSWAP);
+ &sha1nexte ($E,$E_);
+ &paddd ($ABCD,&QWP(16,"esp"));
+
+ &jnz (&label("loop_shaext"));
+
+ &pshufd ($ABCD,$ABCD,0b00011011);
+ &pshufd ($E,$E,0b00011011);
+ &movdqu (&QWP(0,$ctx),$ABCD);
+ &movd (&DWP(16,$ctx),$E);
+ &mov ("esp","ebx");
+&function_end("_sha1_block_data_order_shaext");
+}
######################################################################
# The SSSE3 implementation.
#
@@ -416,6 +553,7 @@ my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0; # hash round
+my $rx=0;
my @T=($T,$tmp1);
my $inp;
@@ -501,8 +639,11 @@ my $_ror=sub { &ror(@_) };
&movdqa (&QWP(0+16,"esp"),@X[-3&7]);
&psubd (@X[-3&7],@X[3]);
&movdqa (&QWP(0+32,"esp"),@X[-2&7]);
+ &mov (@T[1],$C);
&psubd (@X[-2&7],@X[3]);
- &movdqa (@X[0],@X[-3&7]);
+ &xor (@T[1],$D);
+ &pshufd (@X[0],@X[-4&7],0xee); # was &movdqa (@X[0],@X[-3&7]);
+ &and (@T[0],@T[1]);
&jmp (&label("loop"));
######################################################################
@@ -528,76 +669,77 @@ sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
my ($a,$b,$c,$d,$e);
+ eval(shift(@insns)); # ror
eval(shift(@insns));
eval(shift(@insns));
- &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
+ &punpcklqdq(@X[0],@X[-3&7]); # compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
&movdqa (@X[2],@X[-1&7]);
eval(shift(@insns));
eval(shift(@insns));
&paddd (@X[3],@X[-1&7]);
&movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
- eval(shift(@insns));
+ eval(shift(@insns)); # rol
eval(shift(@insns));
&psrldq (@X[2],4); # "X[-3]", 3 dwords
eval(shift(@insns));
eval(shift(@insns));
&pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
eval(shift(@insns));
- eval(shift(@insns));
+ eval(shift(@insns)); # ror
&pxor (@X[2],@X[-2&7]); # "X[-3]"^"X[-8]"
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
- eval(shift(@insns));
&pxor (@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]"
eval(shift(@insns));
- eval(shift(@insns));
+ eval(shift(@insns)); # rol
&movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
eval(shift(@insns));
eval(shift(@insns));
&movdqa (@X[4],@X[0]);
- &movdqa (@X[2],@X[0]);
- eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ &movdqa (@X[2],@X[0]);
eval(shift(@insns));
&pslldq (@X[4],12); # "X[0]"<<96, extract one dword
&paddd (@X[0],@X[0]);
eval(shift(@insns));
eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
&psrld (@X[2],31);
eval(shift(@insns));
- eval(shift(@insns));
+ eval(shift(@insns)); # rol
&movdqa (@X[3],@X[4]);
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
&psrld (@X[4],30);
- &por (@X[0],@X[2]); # "X[0]"<<<=1
eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ &por (@X[0],@X[2]); # "X[0]"<<<=1
eval(shift(@insns));
&movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer
eval(shift(@insns));
eval(shift(@insns));
&pslld (@X[3],2);
- &pxor (@X[0],@X[4]);
- eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ &pxor (@X[0],@X[4]);
&movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX
eval(shift(@insns));
eval(shift(@insns));
&pxor (@X[0],@X[3]); # "X[0]"^=("X[0]"<<96)<<<2
- &movdqa (@X[1],@X[-2&7]) if ($Xi<7);
+ &pshufd (@X[1],@X[-3&7],0xee) if ($Xi<7); # was &movdqa (@X[1],@X[-2&7])
+ &pshufd (@X[3],@X[-1&7],0xee) if ($Xi==7);
eval(shift(@insns));
eval(shift(@insns));
@@ -609,13 +751,12 @@ sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
sub Xupdate_ssse3_32_79()
{ use integer;
my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
+ my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions
my ($a,$b,$c,$d,$e);
- &movdqa (@X[2],@X[-1&7]) if ($Xi==8);
eval(shift(@insns)); # body_20_39
&pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
- &palignr(@X[2],@X[-2&7],8); # compose "X[-6]"
+ &punpcklqdq(@X[2],@X[-1&7]); # compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); # rol
@@ -624,13 +765,14 @@ sub Xupdate_ssse3_32_79()
&movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns)) if (@insns[0] =~ /_rol/);
if ($Xi%5) {
&movdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX...
} else { # ... or load next one
&movdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp"));
}
- &paddd (@X[3],@X[-1&7]);
eval(shift(@insns)); # ror
+ &paddd (@X[3],@X[-1&7]);
eval(shift(@insns));
&pxor (@X[0],@X[2]); # "X[0]"^="X[-6]"
@@ -645,6 +787,7 @@ sub Xupdate_ssse3_32_79()
eval(shift(@insns));
eval(shift(@insns)); # ror
eval(shift(@insns));
+ eval(shift(@insns)) if (@insns[0] =~ /_rol/);
&pslld (@X[0],2);
eval(shift(@insns)); # body_20_39
@@ -656,6 +799,8 @@ sub Xupdate_ssse3_32_79()
eval(shift(@insns));
eval(shift(@insns)); # ror
eval(shift(@insns));
+ eval(shift(@insns)) if (@insns[1] =~ /_rol/);
+ eval(shift(@insns)) if (@insns[0] =~ /_rol/);
&por (@X[0],@X[2]); # "X[0]"<<<=2
eval(shift(@insns)); # body_20_39
@@ -666,7 +811,7 @@ sub Xupdate_ssse3_32_79()
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); # ror
- &movdqa (@X[3],@X[0]) if ($Xi<19);
+ &pshufd (@X[3],@X[-1],0xee) if ($Xi<19); # was &movdqa (@X[3],@X[0])
eval(shift(@insns));
foreach (@insns) { eval; } # remaining instructions
@@ -681,6 +826,12 @@ sub Xuplast_ssse3_80()
my ($a,$b,$c,$d,$e);
eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
&paddd (@X[3],@X[-1&7]);
eval(shift(@insns));
eval(shift(@insns));
@@ -717,9 +868,16 @@ sub Xloop_ssse3()
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
&pshufb (@X[($Xi-3)&7],@X[2]);
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
&paddd (@X[($Xi-4)&7],@X[3]);
eval(shift(@insns));
eval(shift(@insns));
@@ -728,6 +886,8 @@ sub Xloop_ssse3()
&movdqa (&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]); # X[]+K xfer to IALU
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
&psubd (@X[($Xi-4)&7],@X[3]);
foreach (@insns) { eval; }
@@ -743,51 +903,124 @@ sub Xtail_ssse3()
foreach (@insns) { eval; }
}
-sub body_00_19 () {
+sub body_00_19 () { # ((c^d)&b)^d
+ # on start @T[0]=(c^d)&b
+ return &body_20_39() if ($rx==19); $rx++;
(
'($a,$b,$c,$d,$e)=@V;'.
- '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer
- '&xor ($c,$d);',
+ '&$_ror ($b,$j?7:2);', # $b>>>2
+ '&xor (@T[0],$d);',
'&mov (@T[1],$a);', # $b in next round
+
+ '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer
+ '&xor ($b,$c);', # $c^$d for next round
+
'&$_rol ($a,5);',
- '&and (@T[0],$c);', # ($b&($c^$d))
- '&xor ($c,$d);', # restore $c
- '&xor (@T[0],$d);',
- '&add ($e,$a);',
- '&$_ror ($b,$j?7:2);', # $b>>>2
- '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ '&add ($e,@T[0]);',
+ '&and (@T[1],$b);', # ($b&($c^$d)) for next round
+
+ '&xor ($b,$c);', # restore $b
+ '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
);
}
-sub body_20_39 () {
+sub body_20_39 () { # b^d^c
+ # on entry @T[0]=b^d
+ return &body_40_59() if ($rx==39); $rx++;
(
'($a,$b,$c,$d,$e)=@V;'.
- '&add ($e,&DWP(4*($j++&15),"esp"));', # X[]+K xfer
- '&xor (@T[0],$d);', # ($b^$d)
+ '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer
+ '&xor (@T[0],$d) if($j==19);'.
+ '&xor (@T[0],$c) if($j> 19);', # ($b^$d^$c)
'&mov (@T[1],$a);', # $b in next round
+
'&$_rol ($a,5);',
- '&xor (@T[0],$c);', # ($b^$d^$c)
- '&add ($e,$a);',
+ '&add ($e,@T[0]);',
+ '&xor (@T[1],$c) if ($j< 79);', # $b^$d for next round
+
'&$_ror ($b,7);', # $b>>>2
- '&add ($e,@T[0]);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
);
}
-sub body_40_59 () {
+sub body_40_59 () { # ((b^c)&(c^d))^c
+ # on entry @T[0]=(b^c), (c^=d)
+ $rx++;
(
'($a,$b,$c,$d,$e)=@V;'.
- '&mov (@T[1],$c);',
- '&xor ($c,$d);',
- '&add ($e,&DWP(4*($j++&15),"esp"));', # X[]+K xfer
- '&and (@T[1],$d);',
- '&and (@T[0],$c);', # ($b&($c^$d))
+ '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer
+ '&and (@T[0],$c) if ($j>=40);', # (b^c)&(c^d)
+ '&xor ($c,$d) if ($j>=40);', # restore $c
+
'&$_ror ($b,7);', # $b>>>2
- '&add ($e,@T[1]);',
- '&mov (@T[1],$a);', # $b in next round
+ '&mov (@T[1],$a);', # $b for next round
+ '&xor (@T[0],$c);',
+
'&$_rol ($a,5);',
'&add ($e,@T[0]);',
- '&xor ($c,$d);', # restore $c
- '&add ($e,$a);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ '&xor (@T[1],$c) if ($j==59);'.
+ '&xor (@T[1],$b) if ($j< 59);', # b^c for next round
+
+ '&xor ($b,$c) if ($j< 59);', # c^d for next round
+ '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ );
+}
+######
+sub bodyx_00_19 () { # ((c^d)&b)^d
+ # on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
+ return &bodyx_20_39() if ($rx==19); $rx++;
+ (
+ '($a,$b,$c,$d,$e)=@V;'.
+
+ '&rorx ($b,$b,2) if ($j==0);'. # $b>>>2
+ '&rorx ($b,@T[1],7) if ($j!=0);', # $b>>>2
+ '&lea ($e,&DWP(0,$e,@T[0]));',
+ '&rorx (@T[0],$a,5);',
+
+ '&andn (@T[1],$a,$c);',
+ '&and ($a,$b)',
+ '&add ($d,&DWP(4*(($j+1)&15),"esp"));', # X[]+K xfer
+
+ '&xor (@T[1],$a)',
+ '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ );
+}
+
+sub bodyx_20_39 () { # b^d^c
+ # on start $b=b^c^d
+ return &bodyx_40_59() if ($rx==39); $rx++;
+ (
+ '($a,$b,$c,$d,$e)=@V;'.
+
+ '&add ($e,($j==19?@T[0]:$b))',
+ '&rorx ($b,@T[1],7);', # $b>>>2
+ '&rorx (@T[0],$a,5);',
+
+ '&xor ($a,$b) if ($j<79);',
+ '&add ($d,&DWP(4*(($j+1)&15),"esp")) if ($j<79);', # X[]+K xfer
+ '&xor ($a,$c) if ($j<79);',
+ '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ );
+}
+
+sub bodyx_40_59 () { # ((b^c)&(c^d))^c
+ # on start $b=((b^c)&(c^d))^c
+ return &bodyx_20_39() if ($rx==59); $rx++;
+ (
+ '($a,$b,$c,$d,$e)=@V;'.
+
+ '&rorx (@T[0],$a,5)',
+ '&lea ($e,&DWP(0,$e,$b))',
+ '&rorx ($b,@T[1],7)', # $b>>>2
+ '&add ($d,&DWP(4*(($j+1)&15),"esp"))', # X[]+K xfer
+
+ '&mov (@T[1],$c)',
+ '&xor ($a,$b)', # b^c for next round
+ '&xor (@T[1],$b)', # c^d for next round
+
+ '&and ($a,@T[1])',
+ '&add ($e,@T[0])',
+ '&xor ($a,$b)' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
);
}
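The rewritten bodies lean on the boolean forms noted in the comments above, ((c^d)&b)^d for rounds 0-19 and ((b^c)&(c^d))^c for rounds 40-59, which let the xor/and work be split across adjacent rounds. A quick standalone check (illustrative only) that they equal the textbook SHA-1 Ch and Maj functions:

    for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
        my $ch  = (($c^$d)&$b)^$d;            # form used by body_00_19
        my $maj = (($b^$c)&($c^$d))^$c;       # form used by body_40_59
        die "Ch mismatch"  unless $ch  == (($b&$c)|((1-$b)&$d));
        die "Maj mismatch" unless $maj == (($b&$c)|($b&$d)|($c&$d));
    }}}
    print "boolean identities hold\n";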
@@ -825,10 +1058,14 @@ sub body_40_59 () {
&mov (&DWP(4,@T[1]),@T[0]);
&add ($E,&DWP(16,@T[1]));
&mov (&DWP(8,@T[1]),$C);
- &mov ($B,@T[0]);
+ &mov ($B,$C);
&mov (&DWP(12,@T[1]),$D);
+ &xor ($B,$D);
&mov (&DWP(16,@T[1]),$E);
- &movdqa (@X[0],@X[-3&7]);
+ &mov (@T[1],@T[0]);
+ &pshufd (@X[0],@X[-4&7],0xee); # was &movdqa (@X[0],@X[-3&7]);
+ &and (@T[0],$B);
+ &mov ($B,$T[1]);
&jmp (&label("loop"));
@@ -853,6 +1090,8 @@ sub body_40_59 () {
&function_end("_sha1_block_data_order_ssse3");
+$rx=0; # reset
+
if ($ymm) {
my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4
@@ -940,8 +1179,11 @@ my $_ror=sub { &shrd(@_[0],@_) };
&vpaddd (@X[1],@X[-3&7],@X[3]);
&vpaddd (@X[2],@X[-2&7],@X[3]);
&vmovdqa(&QWP(0,"esp"),@X[0]); # X[]+K xfer to IALU
+ &mov (@T[1],$C);
&vmovdqa(&QWP(0+16,"esp"),@X[1]);
+ &xor (@T[1],$D);
&vmovdqa(&QWP(0+32,"esp"),@X[2]);
+ &and (@T[0],@T[1]);
&jmp (&label("loop"));
sub Xupdate_avx_16_31() # recall that $Xi starts with 4
@@ -1025,7 +1267,7 @@ sub Xupdate_avx_16_31() # recall that $Xi starts wtih 4
sub Xupdate_avx_32_79()
{ use integer;
my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
+ my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions
my ($a,$b,$c,$d,$e);
&vpalignr(@X[2],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
@@ -1188,10 +1430,14 @@ sub Xtail_avx()
&add ($D,&DWP(12,@T[1]));
&mov (&DWP(4,@T[1]),@T[0]);
&add ($E,&DWP(16,@T[1]));
+ &mov ($B,$C);
&mov (&DWP(8,@T[1]),$C);
- &mov ($B,@T[0]);
+ &xor ($B,$D);
&mov (&DWP(12,@T[1]),$D);
&mov (&DWP(16,@T[1]),$E);
+ &mov (@T[1],@T[0]);
+ &and (@T[0],$B);
+ &mov ($B,@T[1]);
&jmp (&label("loop"));
@@ -1223,6 +1469,7 @@ sub Xtail_avx()
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc); # K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6); # K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # pbswap mask
+&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
}
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
diff --git a/openssl/crypto/sha/asm/sha1-armv4-large.pl b/openssl/crypto/sha/asm/sha1-armv4-large.pl
index 33da3e0e3..b2c30322c 100644
--- a/openssl/crypto/sha/asm/sha1-armv4-large.pl
+++ b/openssl/crypto/sha/asm/sha1-armv4-large.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -52,6 +52,20 @@
# Profiler-assisted and platform-specific optimization resulted in 10%
# improvement on Cortex A8 core and 12.2 cycles per byte.
+# September 2013.
+#
+# Add NEON implementation (see sha1-586.pl for background info). On
+# Cortex A8 it was measured to process one byte in 6.7 cycles or >80%
+# faster than integer-only code. Because the [fully unrolled] NEON code
+# is ~2.5x larger and some redundant instructions are executed when
+# processing the last block, the improvement is not as big for the
+# smallest blocks, only ~30%. Snapdragon S4 is a tad faster, 6.4 cycles
+# per byte, which is also >80% faster than integer-only code.
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.35 cpb on Apple A7.
+
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
@@ -153,12 +167,22 @@ $code=<<___;
#include "arm_arch.h"
.text
+.code 32
.global sha1_block_data_order
.type sha1_block_data_order,%function
-.align 2
+.align 5
sha1_block_data_order:
+#if __ARM_MAX_ARCH__>=7
+ sub r3,pc,#8 @ sha1_block_data_order
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA1
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
stmdb sp!,{r4-r12,lr}
add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp
ldmia $ctx,{$a,$b,$c,$d,$e}
@@ -233,16 +257,427 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.align 2
+.size sha1_block_data_order,.-sha1_block_data_order
+
+.align 5
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
-.size sha1_block_data_order,.-sha1_block_data_order
-.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
-.align 2
+#if __ARM_MAX_ARCH__>=7
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha1_block_data_order
+#endif
+.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 5
+___
+#####################################################################
+# NEON stuff
+#
+{{{
+my @V=($a,$b,$c,$d,$e);
+my ($K_XX_XX,$Ki,$t0,$t1,$Xfer,$saved_sp)=map("r$_",(8..12,14));
+my $Xi=4;
+my @X=map("q$_",(8..11,0..3));
+my @Tx=("q12","q13");
+my ($K,$zero)=("q14","q15");
+my $j=0;
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
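The AUTOLOAD thunk above turns any undefined sub call into an emitted instruction: '_' becomes '.', and a trailing bare number gets a '#' immediate prefix. A miniature standalone version showing the expansion (illustrative only):

    our $code;
    sub AUTOLOAD {
        my $opcode = our $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
        my $arg = pop;
        $arg = "#$arg" if ($arg*1 eq $arg);    # bare number -> immediate
        $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
    }
    vadd_i32("q12","q8","q14");    # appends "\tvadd.i32\tq12,q8,q14\n"
    vshr_u32("q12","q13",30);      # appends "\tvshr.u32\tq12,q13,#30\n"
    print $code;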
+
+sub body_00_19 () {
+ (
+ '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'.
+ '&bic ($t0,$d,$b)',
+ '&add ($e,$e,$Ki)', # e+=X[i]+K
+ '&and ($t1,$c,$b)',
+ '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
+ '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27)
+ '&eor ($t1,$t1,$t0)', # F_00_19
+ '&mov ($b,$b,"ror#2")', # b=ROR(b,2)
+ '&add ($e,$e,$t1);'. # e+=F_00_19
+ '$j++; unshift(@V,pop(@V));'
+ )
+}
+sub body_20_39 () {
+ (
+ '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'.
+ '&eor ($t0,$b,$d)',
+ '&add ($e,$e,$Ki)', # e+=X[i]+K
+ '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15)) if ($j<79)',
+ '&eor ($t1,$t0,$c)', # F_20_39
+ '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27)
+ '&mov ($b,$b,"ror#2")', # b=ROR(b,2)
+ '&add ($e,$e,$t1);'. # e+=F_20_39
+ '$j++; unshift(@V,pop(@V));'
+ )
+}
+sub body_40_59 () {
+ (
+ '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'.
+ '&add ($e,$e,$Ki)', # e+=X[i]+K
+ '&and ($t0,$c,$d)',
+ '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
+ '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27)
+ '&eor ($t1,$c,$d)',
+ '&add ($e,$e,$t0)',
+ '&and ($t1,$t1,$b)',
+ '&mov ($b,$b,"ror#2")', # b=ROR(b,2)
+ '&add ($e,$e,$t1);'. # e+=F_40_59
+ '$j++; unshift(@V,pop(@V));'
+ )
+}
+
+sub Xupdate_16_31 ()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vext_8 (@X[0],@X[-4&7],@X[-3&7],8); # compose "X[-14]" in "X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@Tx[1],@X[-1&7],$K);
+ eval(shift(@insns));
+ &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!") if ($Xi%5==0);
+ eval(shift(@insns));
+ &vext_8 (@Tx[0],@X[-1&7],$zero,4); # "X[-3]", 3 words
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@Tx[0],@Tx[0],@X[0]); # "X[0]"^="X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); # X[]+K xfer
+ &sub ($Xfer,$Xfer,64) if ($Xi%4==0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vext_8 (@Tx[1],$zero,@Tx[0],4); # "X[0]"<<96, extract one dword
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@Tx[0],@Tx[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsri_32 (@X[0],@Tx[0],31); # "X[0]"<<<=1
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 (@Tx[0],@Tx[1],30);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshl_u32 (@Tx[1],@Tx[1],2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@Tx[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xupdate_32_79 ()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vext_8 (@Tx[0],@X[-2&7],@X[-1&7],8); # compose "X[-6]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@Tx[1],@X[-1&7],$K);
+ eval(shift(@insns));
+ &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!") if ($Xi%5==0);
+ eval(shift(@insns));
+ &veor (@Tx[0],@Tx[0],@X[0]); # "X[-6]"^="X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 (@X[0],@Tx[0],30);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); # X[]+K xfer
+ &sub ($Xfer,$Xfer,64) if ($Xi%4==0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 (@X[0],@Tx[0],2); # "X[0]"="X[-6]"<<<2
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xuplast_80 ()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vadd_i32 (@Tx[1],@X[-1&7],$K);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!");
+ &sub ($Xfer,$Xfer,64);
+
+ &teq ($inp,$len);
+ &sub ($K_XX_XX,$K_XX_XX,16); # rewind $K_XX_XX
+ &subeq ($inp,$inp,64); # reload last block to avoid SEGV
+ &vld1_8 ("{@X[-4&7]-@X[-3&7]}","[$inp]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_8 ("{@X[-2&7]-@X[-1&7]}","[$inp]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!"); # load K_00_19
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vrev32_8 (@X[-4&7],@X[-4&7]);
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ $Xi=0;
+}
+
+sub Xloop()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vrev32_8 (@X[($Xi-3)&7],@X[($Xi-3)&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[$Xi&7],@X[($Xi-4)&7],$K);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@X[$Xi&7]}","[$Xfer,:128]!");# X[]+K xfer to IALU
+
+ foreach (@insns) { eval; }
+
+ $Xi++;
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.type sha1_block_data_order_neon,%function
+.align 4
+sha1_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+ add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp
+ @ dmb @ errata #451034 on early Cortex A8
+ @ vstmdb sp!,{d8-d15} @ ABI specification says so
+ mov $saved_sp,sp
+ sub sp,sp,#64 @ alloca
+ adr $K_XX_XX,.LK_00_19
+ bic sp,sp,#15 @ align for 128-bit stores
+
+ ldmia $ctx,{$a,$b,$c,$d,$e} @ load context
+ mov $Xfer,sp
+
+ vld1.8 {@X[-4&7]-@X[-3&7]},[$inp]! @ handles unaligned
+ veor $zero,$zero,$zero
+ vld1.8 {@X[-2&7]-@X[-1&7]},[$inp]!
+ vld1.32 {${K}\[]},[$K_XX_XX,:32]! @ load K_00_19
+ vrev32.8 @X[-4&7],@X[-4&7] @ yes, even on
+ vrev32.8 @X[-3&7],@X[-3&7] @ big-endian...
+ vrev32.8 @X[-2&7],@X[-2&7]
+ vadd.i32 @X[0],@X[-4&7],$K
+ vrev32.8 @X[-1&7],@X[-1&7]
+ vadd.i32 @X[1],@X[-3&7],$K
+ vst1.32 {@X[0]},[$Xfer,:128]!
+ vadd.i32 @X[2],@X[-2&7],$K
+ vst1.32 {@X[1]},[$Xfer,:128]!
+ vst1.32 {@X[2]},[$Xfer,:128]!
+ ldr $Ki,[sp] @ big RAW stall
+
+.Loop_neon:
+___
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_32_79(\&body_00_19);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_20_39);
+ &Xuplast_80(\&body_20_39);
+ &Xloop(\&body_20_39);
+ &Xloop(\&body_20_39);
+ &Xloop(\&body_20_39);
+$code.=<<___;
+ ldmia $ctx,{$Ki,$t0,$t1,$Xfer} @ accumulate context
+ add $a,$a,$Ki
+ ldr $Ki,[$ctx,#16]
+ add $b,$b,$t0
+ add $c,$c,$t1
+ add $d,$d,$Xfer
+ moveq sp,$saved_sp
+ add $e,$e,$Ki
+ ldrne $Ki,[sp]
+ stmia $ctx,{$a,$b,$c,$d,$e}
+ addne $Xfer,sp,#3*16
+ bne .Loop_neon
+
+ @ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r12,pc}
+.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
+#endif
+___
+}}}
+#####################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$E,$E0,$E1)=map("q$_",(0..3));
+my @MSG=map("q$_",(4..7));
+my @Kxx=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14));
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.type sha1_block_data_order_armv8,%function
+.align 5
+sha1_block_data_order_armv8:
+.LARMv8:
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ veor $E,$E,$E
+ adr r3,.LK_00_19
+ vld1.32 {$ABCD},[$ctx]!
+ vld1.32 {$E\[0]},[$ctx]
+ sub $ctx,$ctx,#16
+ vld1.32 {@Kxx[0]\[]},[r3,:32]!
+ vld1.32 {@Kxx[1]\[]},[r3,:32]!
+ vld1.32 {@Kxx[2]\[]},[r3,:32]!
+ vld1.32 {@Kxx[3]\[]},[r3,:32]
+
+.Loop_v8:
+ vld1.8 {@MSG[0]-@MSG[1]},[$inp]!
+ vld1.8 {@MSG[2]-@MSG[3]},[$inp]!
+ vrev32.8 @MSG[0],@MSG[0]
+ vrev32.8 @MSG[1],@MSG[1]
+
+ vadd.i32 $W0,@Kxx[0],@MSG[0]
+ vrev32.8 @MSG[2],@MSG[2]
+ vmov $ABCD_SAVE,$ABCD @ offload
+ subs $len,$len,#1
+
+ vadd.i32 $W1,@Kxx[0],@MSG[1]
+ vrev32.8 @MSG[3],@MSG[3]
+ sha1h $E1,$ABCD @ 0
+ sha1c $ABCD,$E,$W0
+ vadd.i32 $W0,@Kxx[$j],@MSG[2]
+ sha1su0 @MSG[0],@MSG[1],@MSG[2]
+___
+for ($j=0,$i=1;$i<20-3;$i++) {
+my $f=("c","p","m","p")[$i/5];
+$code.=<<___;
+ sha1h $E0,$ABCD @ $i
+ sha1$f $ABCD,$E1,$W1
+ vadd.i32 $W1,@Kxx[$j],@MSG[3]
+ sha1su1 @MSG[0],@MSG[3]
+___
+$code.=<<___ if ($i<20-4);
+ sha1su0 @MSG[1],@MSG[2],@MSG[3]
___
+ ($E0,$E1)=($E1,$E0); ($W0,$W1)=($W1,$W0);
+ push(@MSG,shift(@MSG)); $j++ if ((($i+3)%5)==0);
+}
+$code.=<<___;
+ sha1h $E0,$ABCD @ $i
+ sha1p $ABCD,$E1,$W1
+ vadd.i32 $W1,@Kxx[$j],@MSG[3]
+
+ sha1h $E1,$ABCD @ 18
+ sha1p $ABCD,$E0,$W0
+
+ sha1h $E0,$ABCD @ 19
+ sha1p $ABCD,$E1,$W1
+
+ vadd.i32 $E,$E,$E0
+ vadd.i32 $ABCD,$ABCD,$ABCD_SAVE
+ bne .Loop_v8
+
+ vst1.32 {$ABCD},[$ctx]!
+ vst1.32 {$E\[0]},[$ctx]
+
+ vldmia sp!,{d8-d15}
+ ret @ bx lr
+.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+{ my %opcode = (
+ "sha1c" => 0xf2000c40, "sha1p" => 0xf2100c40,
+ "sha1m" => 0xf2200c40, "sha1su0" => 0xf2300c40,
+ "sha1h" => 0xf3b902c0, "sha1su1" => 0xf3ba0380 );
+
+ sub unsha1 {
+ my ($mnemonic,$arg)=@_;
+
+ if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+ my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+ |(($2&7)<<17)|(($2&8)<<4)
+ |(($3&7)<<1) |(($3&8)<<2);
+ # since ARMv7 instructions are always encoded little-endian,
+ # the word is emitted here byte by byte. The correct solution
+ # is to use the .inst directive, but older assemblers don't
+ # implement it:-(
+ sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+ $word&0xff,($word>>8)&0xff,
+ ($word>>16)&0xff,($word>>24)&0xff,
+ $mnemonic,$arg;
+ }
+ }
+}
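Worked through by hand (illustrative only): the loop's sha1c $ABCD,$E,$W0, i.e. sha1c q0,q1,q12, encodes as

    my ($d,$n,$m) = (0,1,12);                  # q0,q1,q12
    my $word = 0xf2000c40|(($d&7)<<13)|(($d&8)<<19)
                         |(($n&7)<<17)|(($n&8)<<4)
                         |(($m&7)<<1) |(($m&8)<<2);
    printf ".byte 0x%02x,0x%02x,0x%02x,0x%02x\n",
           $word&0xff,($word>>8)&0xff,($word>>16)&0xff,($word>>24)&0xff;
    # -> .byte 0x68,0x0c,0x02,0xf2 (the word 0xf2020c68, little-endian)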
+
+foreach (split($/,$code)) {
+ s/{q([0-9]+)\[\]}/sprintf "{d%d[],d%d[]}",2*$1,2*$1+1/eo or
+ s/{q([0-9]+)\[0\]}/sprintf "{d%d[0]}",2*$1/eo;
+
+ s/\b(sha1\w+)\s+(q.*)/unsha1($1,$2)/geo;
+
+ s/\bret\b/bx lr/o or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/o; # make it possible to compile with -march=armv4
+
+ print $_,$/;
+}
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
-print $code;
close STDOUT; # enforce flush
diff --git a/openssl/crypto/sha/asm/sha1-armv8.pl b/openssl/crypto/sha/asm/sha1-armv8.pl
new file mode 100755
index 000000000..deb1238d3
--- /dev/null
+++ b/openssl/crypto/sha/asm/sha1-armv8.pl
@@ -0,0 +1,334 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA1 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+# hardware-assisted software(*)
+# Apple A7 2.31 4.13 (+14%)
+# Cortex-A53 2.19 8.73 (+108%)
+# Cortex-A57 2.35 7.88 (+74%)
+#
+# (*) Software results are presented mostly for reference purposes.
+
+$flavour = shift;
+open STDOUT,">".shift;
+
+($ctx,$inp,$num)=("x0","x1","x2");
+@Xw=map("w$_",(3..17,19));
+@Xx=map("x$_",(3..17,19));
+@V=($A,$B,$C,$D,$E)=map("w$_",(20..24));
+($t0,$t1,$t2,$K)=map("w$_",(25..28));
+
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i<15 && !($i&1));
+ lsr @Xx[$i+1],@Xx[$i],#32
+___
+$code.=<<___ if ($i<14 && !($i&1));
+ ldr @Xx[$i+2],[$inp,#`($i+2)*4-64`]
+___
+$code.=<<___ if ($i<14 && ($i&1));
+#ifdef __ARMEB__
+ ror @Xx[$i+1],@Xx[$i+1],#32
+#else
+ rev32 @Xx[$i+1],@Xx[$i+1]
+#endif
+___
+$code.=<<___ if ($i<14);
+ bic $t0,$d,$b
+ and $t1,$c,$b
+ ror $t2,$a,#27
+ add $d,$d,$K // future e+=K
+ orr $t0,$t0,$t1
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+___
+$code.=<<___ if ($i==19);
+ movz $K,#0xeba1
+ movk $K,#0x6ed9,lsl#16
+___
+$code.=<<___ if ($i>=14);
+ eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+ bic $t0,$d,$b
+ and $t1,$c,$b
+ ror $t2,$a,#27
+ eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+ add $d,$d,$K // future e+=K
+ orr $t0,$t0,$t1
+ add $e,$e,$t2 // e+=rot(a,5)
+ eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+ ror $b,$b,#2
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+ ror @Xw[$j],@Xw[$j],#31
+___
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i==59);
+ movz $K,#0xc1d6
+ movk $K,#0xca62,lsl#16
+___
+$code.=<<___;
+ orr $t0,$b,$c
+ and $t1,$b,$c
+ eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+ ror $t2,$a,#27
+ and $t0,$t0,$d
+ add $d,$d,$K // future e+=K
+ eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+ add $e,$e,$t2 // e+=rot(a,5)
+ orr $t0,$t0,$t1
+ ror $b,$b,#2
+ eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+ ror @Xw[$j],@Xw[$j],#31
+___
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i==39);
+ movz $K,#0xbcdc
+ movk $K,#0x8f1b,lsl#16
+___
+$code.=<<___ if ($i<78);
+ eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+ eor $t0,$d,$b
+ ror $t2,$a,#27
+ add $d,$d,$K // future e+=K
+ eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+ eor $t0,$t0,$c
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+ ror @Xw[$j],@Xw[$j],#31
+___
+$code.=<<___ if ($i==78);
+ ldp @Xw[1],@Xw[2],[$ctx]
+ eor $t0,$d,$b
+ ror $t2,$a,#27
+ add $d,$d,$K // future e+=K
+ eor $t0,$t0,$c
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+___
+$code.=<<___ if ($i==79);
+ ldp @Xw[3],@Xw[4],[$ctx,#8]
+ eor $t0,$d,$b
+ ror $t2,$a,#27
+ eor $t0,$t0,$c
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ ldr @Xw[5],[$ctx,#16]
+ add $e,$e,$t0 // e+=F(b,c,d)
+___
+}
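The "future e+=K" comments in the bodies above mark software pipelining: since the round additions commute mod 2^32, K and X[i+1] are folded into $d a round early, and after the register rotation that $d becomes round i+1's $e. A scalar check (illustrative only) that the reassociated schedule matches the direct formula:

    sub rol { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n))) & 0xffffffff }
    my ($K,$Xi)   = (0x5a827999, 0x64616263);
    my ($a,$f,$e) = (0x67452301, 0x98badcfe, 0xc3d2e1f0);
    my $direct    = ($e + rol($a,5) + $f + $K + $Xi) & 0xffffffff;
    my $pipelined = ((($e + $K + $Xi) & 0xffffffff)  # added a round early
                     + rol($a,5) + $f) & 0xffffffff;
    die "mismatch" unless $direct == $pipelined;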
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+.globl sha1_block_data_order
+.type sha1_block_data_order,%function
+.align 6
+sha1_block_data_order:
+ ldr x16,.LOPENSSL_armcap_P
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA1
+ b.ne .Lv8_entry
+
+ stp x29,x30,[sp,#-96]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+
+ ldp $A,$B,[$ctx]
+ ldp $C,$D,[$ctx,#8]
+ ldr $E,[$ctx,#16]
+
+.Loop:
+ ldr @Xx[0],[$inp],#64
+ movz $K,#0x7999
+ sub $num,$num,#1
+ movk $K,#0x5a82,lsl#16
+#ifdef __ARMEB__
+ ror $Xx[0],@Xx[0],#32
+#else
+ rev32 @Xx[0],@Xx[0]
+#endif
+ add $E,$E,$K // warm it up
+ add $E,$E,@Xw[0]
+___
+for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ add $B,$B,@Xw[2]
+ add $C,$C,@Xw[3]
+ add $A,$A,@Xw[1]
+ add $D,$D,@Xw[4]
+ add $E,$E,@Xw[5]
+ stp $A,$B,[$ctx]
+ stp $C,$D,[$ctx,#8]
+ str $E,[$ctx,#16]
+ cbnz $num,.Loop
+
+ ldp x19,x20,[sp,#16]
+ ldp x21,x22,[sp,#32]
+ ldp x23,x24,[sp,#48]
+ ldp x25,x26,[sp,#64]
+ ldp x27,x28,[sp,#80]
+ ldr x29,[sp],#96
+ ret
+.size sha1_block_data_order,.-sha1_block_data_order
+___
+{{{
+my ($ABCD,$E,$E0,$E1)=map("v$_.16b",(0..3));
+my @MSG=map("v$_.16b",(4..7));
+my @Kxx=map("v$_.4s",(16..19));
+my ($W0,$W1)=("v20.4s","v21.4s");
+my $ABCD_SAVE="v22.16b";
+
+$code.=<<___;
+.type sha1_block_armv8,%function
+.align 6
+sha1_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ adr x4,.Lconst
+ eor $E,$E,$E
+ ld1.32 {$ABCD},[$ctx],#16
+ ld1.32 {$E}[0],[$ctx]
+ sub $ctx,$ctx,#16
+ ld1.32 {@Kxx[0]-@Kxx[3]},[x4]
+
+.Loop_hw:
+ ld1 {@MSG[0]-@MSG[3]},[$inp],#64
+ sub $num,$num,#1
+ rev32 @MSG[0],@MSG[0]
+ rev32 @MSG[1],@MSG[1]
+
+ add.i32 $W0,@Kxx[0],@MSG[0]
+ rev32 @MSG[2],@MSG[2]
+ orr $ABCD_SAVE,$ABCD,$ABCD // offload
+
+ add.i32 $W1,@Kxx[0],@MSG[1]
+ rev32 @MSG[3],@MSG[3]
+ sha1h $E1,$ABCD
+ sha1c $ABCD,$E,$W0 // 0
+ add.i32 $W0,@Kxx[$j],@MSG[2]
+ sha1su0 @MSG[0],@MSG[1],@MSG[2]
+___
+for ($j=0,$i=1;$i<20-3;$i++) {
+my $f=("c","p","m","p")[$i/5];
+$code.=<<___;
+ sha1h $E0,$ABCD // $i
+ sha1$f $ABCD,$E1,$W1
+ add.i32 $W1,@Kxx[$j],@MSG[3]
+ sha1su1 @MSG[0],@MSG[3]
+___
+$code.=<<___ if ($i<20-4);
+ sha1su0 @MSG[1],@MSG[2],@MSG[3]
+___
+ ($E0,$E1)=($E1,$E0); ($W0,$W1)=($W1,$W0);
+ push(@MSG,shift(@MSG)); $j++ if ((($i+3)%5)==0);
+}
+$code.=<<___;
+ sha1h $E0,$ABCD // $i
+ sha1p $ABCD,$E1,$W1
+ add.i32 $W1,@Kxx[$j],@MSG[3]
+
+ sha1h $E1,$ABCD // 18
+ sha1p $ABCD,$E0,$W0
+
+ sha1h $E0,$ABCD // 19
+ sha1p $ABCD,$E1,$W1
+
+ add.i32 $E,$E,$E0
+ add.i32 $ABCD,$ABCD,$ABCD_SAVE
+
+ cbnz $num,.Loop_hw
+
+ st1.32 {$ABCD},[$ctx],#16
+ st1.32 {$E}[0],[$ctx]
+
+ ldr x29,[sp],#16
+ ret
+.size sha1_block_armv8,.-sha1_block_armv8
+.align 6
+.Lconst:
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79
+.LOPENSSL_armcap_P:
+.quad OPENSSL_armcap_P-.
+.asciz "SHA1 block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
+___
+}}}
+
+{ my %opcode = (
+ "sha1c" => 0x5e000000, "sha1p" => 0x5e001000,
+ "sha1m" => 0x5e002000, "sha1su0" => 0x5e003000,
+ "sha1h" => 0x5e280800, "sha1su1" => 0x5e281800 );
+
+ sub unsha1 {
+ my ($mnemonic,$arg)=@_;
+
+ $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+ &&
+ sprintf ".inst\t0x%08x\t//%s %s",
+ $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+ $mnemonic,$arg;
+ }
+}
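Unlike the ARMv4 path, the AArch64 encoder can emit the .inst directive directly. For example (illustrative only), sha1h $E1,$ABCD, i.e. Rd=3 and Rn=0, assembles as:

    my %opcode = ("sha1h" => 0x5e280800);      # from the table above
    my ($rd,$rn) = (3,0);                      # sha1h v3,v0
    printf ".inst\t0x%08x\n",
           $opcode{"sha1h"}|$rd|($rn<<5);      # -> .inst 0x5e280803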
+
+foreach(split("\n",$code)) {
+
+ s/\`([^\`]*)\`/eval($1)/geo;
+
+ s/\b(sha1\w+)\s+([qv].*)/unsha1($1,$2)/geo;
+
+ s/\.\w?32\b//o and s/\.16b/\.4s/go;
+ m/(ld|st)1[^\[]+\[0\]/o and s/\.4s/\.s/go;
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha1-mb-x86_64.pl b/openssl/crypto/sha/asm/sha1-mb-x86_64.pl
new file mode 100755
index 000000000..a8ee075ea
--- /dev/null
+++ b/openssl/crypto/sha/asm/sha1-mb-x86_64.pl
@@ -0,0 +1,1574 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# The multi-buffer SHA1 procedure processes n buffers in parallel by
+# placing each buffer's data in a designated lane of a SIMD register
+# (sketched after the table below). n is naturally limited to 4 on
+# pre-AVX2 processors and to 8 on AVX2-capable processors such as
+# Haswell.
+#
+# this +aesni(i) sha1 aesni-sha1 gain(iv)
+# -------------------------------------------------------------------
+# Westmere(ii) 10.7/n +1.28=3.96(n=4) 5.30 6.66 +68%
+# Atom(ii) 18.1/n +3.93=8.46(n=4) 9.37 12.8 +51%
+# Sandy Bridge (8.16 +5.15=13.3)/n 4.99 5.98 +80%
+# Ivy Bridge (8.08 +5.14=13.2)/n 4.60 5.54 +68%
+# Haswell(iii) (8.96 +5.00=14.0)/n 3.57 4.55 +160%
+# Bulldozer (9.76 +5.76=15.5)/n 5.95 6.37 +64%
+#
+# (i) multi-block CBC encrypt with 128-bit key;
+# (ii) (HASH+AES)/n does not apply to Westmere for n>3 and Atom,
+# because of lower AES-NI instruction throughput;
+# (iii) "this" is for n=8, when we gather twice as much data, result
+# for n=4 is 8.00+4.44=12.4;
+# (iv) presented improvement coefficients are asymptotic limits and
+# in real-life application are somewhat lower, e.g. for 2KB
+# fragments they range from 30% to 100% (on Haswell);
+
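A scalar sketch of the lane layout described above (illustrative only; the real code gathers lanes with movd/punpckldq below):

    # word i of each of the n=4 streams lands in lane 0..3 of one
    # 128-bit register, giving one transposed X[i] per round
    my @stream = ([0x61626364, 0x65666768], [0x11121314, 0x15161718],
                  [0x21222324, 0x25262728], [0x31323334, 0x35363738]);
    for my $i (0..1) {
        my @lane = map { $stream[$_][$i] } 0..3;
        printf "X[%d] = %s\n", $i, join(",", map { sprintf "%08x", $_ } @lane);
    }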
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+$avx=0;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+# void sha1_multi_block (
+# struct { unsigned int A[8];
+# unsigned int B[8];
+# unsigned int C[8];
+# unsigned int D[8];
+# unsigned int E[8]; } *ctx,
+# struct { void *ptr; int blocks; } inp[8],
+# int num); /* 1 or 2 */
+#
+$ctx="%rdi"; # 1st arg
+$inp="%rsi"; # 2nd arg
+$num="%edx";
+@ptr=map("%r$_",(8..11));
+$Tbl="%rbp";
+
+@V=($A,$B,$C,$D,$E)=map("%xmm$_",(0..4));
+($t0,$t1,$t2,$t3,$tx)=map("%xmm$_",(5..9));
+@Xi=map("%xmm$_",(10..14));
+$K="%xmm15";
+
+if (1) {
+ # Atom-specific optimization aiming to eliminate pshufb with high
+ # registers [and thus get rid of a 48-cycle accumulated penalty]
+ @Xi=map("%xmm$_",(0..4));
+ ($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9));
+ @V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14));
+}
+
+$REG_SZ=16;
+
+sub Xi_off {
+my $off = shift;
+
+ $off %= 16; $off *= $REG_SZ;
+ $off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
+}
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+my $k=$i+2;
+
+# Loads are performed 2+3/4 iterations in advance. The 3/4 means that
+# of the 4 words you would expect to be loaded in a given iteration,
+# one is spilled to the next iteration. In other words, word indices in
+# the four input streams are distributed as follows:
+#
+# $i==0: 0,0,0,0,1,1,1,1,2,2,2,
+# $i==1: 2,3,3,3,
+# $i==2: 3,4,4,4,
+# ...
+# $i==13: 14,15,15,15,
+# $i==14: 15
+#
+# Then at $i==15 Xupdate is applied one iteration in advance...
+$code.=<<___ if ($i==0);
+ movd (@ptr[0]),@Xi[0]
+ lea `16*4`(@ptr[0]),@ptr[0]
+ movd (@ptr[1]),@Xi[2] # borrow @Xi[2]
+ lea `16*4`(@ptr[1]),@ptr[1]
+ movd (@ptr[2]),@Xi[3] # borrow @Xi[3]
+ lea `16*4`(@ptr[2]),@ptr[2]
+ movd (@ptr[3]),@Xi[4] # borrow @Xi[4]
+ lea `16*4`(@ptr[3]),@ptr[3]
+ punpckldq @Xi[3],@Xi[0]
+ movd `4*$j-16*4`(@ptr[0]),@Xi[1]
+ punpckldq @Xi[4],@Xi[2]
+ movd `4*$j-16*4`(@ptr[1]),$t3
+ punpckldq @Xi[2],@Xi[0]
+ movd `4*$j-16*4`(@ptr[2]),$t2
+ pshufb $tx,@Xi[0]
+___
+$code.=<<___ if ($i<14); # just load input
+ movd `4*$j-16*4`(@ptr[3]),$t1
+ punpckldq $t2,@Xi[1]
+ movdqa $a,$t2
+ paddd $K,$e # e+=K_00_19
+ punpckldq $t1,$t3
+ movdqa $b,$t1
+ movdqa $b,$t0
+ pslld \$5,$t2
+ pandn $d,$t1
+ pand $c,$t0
+ punpckldq $t3,@Xi[1]
+ movdqa $a,$t3
+
+ movdqa @Xi[0],`&Xi_off($i)`
+ paddd @Xi[0],$e # e+=X[i]
+ movd `4*$k-16*4`(@ptr[0]),@Xi[2]
+ psrld \$27,$t3
+ pxor $t1,$t0 # Ch(b,c,d)
+ movdqa $b,$t1
+
+ por $t3,$t2 # rol(a,5)
+ movd `4*$k-16*4`(@ptr[1]),$t3
+ pslld \$30,$t1
+ paddd $t0,$e # e+=Ch(b,c,d)
+
+ psrld \$2,$b
+ paddd $t2,$e # e+=rol(a,5)
+ pshufb $tx,@Xi[1]
+ movd `4*$k-16*4`(@ptr[2]),$t2
+ por $t1,$b # b=rol(b,30)
+___
+$code.=<<___ if ($i==14); # just load input
+ movd `4*$j-16*4`(@ptr[3]),$t1
+ punpckldq $t2,@Xi[1]
+ movdqa $a,$t2
+ paddd $K,$e # e+=K_00_19
+ punpckldq $t1,$t3
+ movdqa $b,$t1
+ movdqa $b,$t0
+ pslld \$5,$t2
+ prefetcht0 63(@ptr[0])
+ pandn $d,$t1
+ pand $c,$t0
+ punpckldq $t3,@Xi[1]
+ movdqa $a,$t3
+
+ movdqa @Xi[0],`&Xi_off($i)`
+ paddd @Xi[0],$e # e+=X[i]
+ psrld \$27,$t3
+ pxor $t1,$t0 # Ch(b,c,d)
+ movdqa $b,$t1
+ prefetcht0 63(@ptr[1])
+
+ por $t3,$t2 # rol(a,5)
+ pslld \$30,$t1
+ paddd $t0,$e # e+=Ch(b,c,d)
+ prefetcht0 63(@ptr[2])
+
+ psrld \$2,$b
+ paddd $t2,$e # e+=rol(a,5)
+ pshufb $tx,@Xi[1]
+ prefetcht0 63(@ptr[3])
+ por $t1,$b # b=rol(b,30)
+___
+$code.=<<___ if ($i>=13 && $i<15);
+ movdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
+___
+$code.=<<___ if ($i>=15); # apply Xupdate
+ pxor @Xi[-2],@Xi[1] # "X[13]"
+ movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
+
+ movdqa $a,$t2
+ pxor `&Xi_off($j+8)`,@Xi[1]
+ paddd $K,$e # e+=K_00_19
+ movdqa $b,$t1
+ pslld \$5,$t2
+ pxor @Xi[3],@Xi[1]
+ movdqa $b,$t0
+ pandn $d,$t1
+ movdqa @Xi[1],$tx
+ pand $c,$t0
+ movdqa $a,$t3
+ psrld \$31,$tx
+ paddd @Xi[1],@Xi[1]
+
+ movdqa @Xi[0],`&Xi_off($i)`
+ paddd @Xi[0],$e # e+=X[i]
+ psrld \$27,$t3
+ pxor $t1,$t0 # Ch(b,c,d)
+
+ movdqa $b,$t1
+ por $t3,$t2 # rol(a,5)
+ pslld \$30,$t1
+ paddd $t0,$e # e+=Ch(b,c,d)
+
+ psrld \$2,$b
+ paddd $t2,$e # e+=rol(a,5)
+ por $tx,@Xi[1] # rol \$1,@Xi[1]
+ por $t1,$b # b=rol(b,30)
+___
+push(@Xi,shift(@Xi));
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+
+$code.=<<___ if ($i<79);
+ pxor @Xi[-2],@Xi[1] # "X[13]"
+ movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
+
+ movdqa $a,$t2
+ movdqa $d,$t0
+ pxor `&Xi_off($j+8)`,@Xi[1]
+ paddd $K,$e # e+=K_20_39
+ pslld \$5,$t2
+ pxor $b,$t0
+
+ movdqa $a,$t3
+___
+$code.=<<___ if ($i<72);
+ movdqa @Xi[0],`&Xi_off($i)`
+___
+$code.=<<___ if ($i<79);
+ paddd @Xi[0],$e # e+=X[i]
+ pxor @Xi[3],@Xi[1]
+ psrld \$27,$t3
+ pxor $c,$t0 # Parity(b,c,d)
+ movdqa $b,$t1
+
+ pslld \$30,$t1
+ movdqa @Xi[1],$tx
+ por $t3,$t2 # rol(a,5)
+ psrld \$31,$tx
+ paddd $t0,$e # e+=Parity(b,c,d)
+ paddd @Xi[1],@Xi[1]
+
+ psrld \$2,$b
+ paddd $t2,$e # e+=rol(a,5)
+ por $tx,@Xi[1] # rol(@Xi[1],1)
+ por $t1,$b # b=rol(b,30)
+___
+$code.=<<___ if ($i==79);
+ movdqa $a,$t2
+ paddd $K,$e # e+=K_20_39
+ movdqa $d,$t0
+ pslld \$5,$t2
+ pxor $b,$t0
+
+ movdqa $a,$t3
+ paddd @Xi[0],$e # e+=X[i]
+ psrld \$27,$t3
+ movdqa $b,$t1
+ pxor $c,$t0 # Parity(b,c,d)
+
+ pslld \$30,$t1
+ por $t3,$t2 # rol(a,5)
+ paddd $t0,$e # e+=Parity(b,c,d)
+
+ psrld \$2,$b
+ paddd $t2,$e # e+=rol(a,5)
+ por $t1,$b # b=rol(b,30)
+___
+push(@Xi,shift(@Xi));
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+
+$code.=<<___;
+ pxor @Xi[-2],@Xi[1] # "X[13]"
+ movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
+
+ movdqa $a,$t2
+ movdqa $d,$t1
+ pxor `&Xi_off($j+8)`,@Xi[1]
+ pxor @Xi[3],@Xi[1]
+ paddd $K,$e # e+=K_40_59
+ pslld \$5,$t2
+ movdqa $a,$t3
+ pand $c,$t1
+
+ movdqa $d,$t0
+ movdqa @Xi[1],$tx
+ psrld \$27,$t3
+ paddd $t1,$e
+ pxor $c,$t0
+
+ movdqa @Xi[0],`&Xi_off($i)`
+ paddd @Xi[0],$e # e+=X[i]
+ por $t3,$t2 # rol(a,5)
+ psrld \$31,$tx
+ pand $b,$t0
+ movdqa $b,$t1
+
+ pslld \$30,$t1
+ paddd @Xi[1],@Xi[1]
+ paddd $t0,$e # e+=Maj(b,d,c)
+
+ psrld \$2,$b
+ paddd $t2,$e # e+=rol(a,5)
+ por $tx,@Xi[1] # rol(@X[1],1)
+ por $t1,$b # b=rol(b,30)
+___
+push(@Xi,shift(@Xi));
+}
+
+$code.=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.globl sha1_multi_block
+.type sha1_multi_block,\@function,3
+.align 32
+sha1_multi_block:
+ mov OPENSSL_ia32cap_P+4(%rip),%rcx
+ bt \$61,%rcx # check SHA bit
+ jc _shaext_shortcut
+___
+$code.=<<___ if ($avx);
+ test \$`1<<28`,%ecx
+ jnz _avx_shortcut
+___
+$code.=<<___;
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,-0x78(%rax)
+ movaps %xmm11,-0x68(%rax)
+ movaps %xmm12,-0x58(%rax)
+ movaps %xmm13,-0x48(%rax)
+ movaps %xmm14,-0x38(%rax)
+ movaps %xmm15,-0x28(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`,%rsp
+ and \$-256,%rsp
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody:
+ lea K_XX_XX(%rip),$Tbl
+ lea `$REG_SZ*16`(%rsp),%rbx
+
+.Loop_grande:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+___
+for($i=0;$i<4;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ test $num,$num
+ jz .Ldone
+
+ movdqu 0x00($ctx),$A # load context
+ lea 128(%rsp),%rax
+ movdqu 0x20($ctx),$B
+ movdqu 0x40($ctx),$C
+ movdqu 0x60($ctx),$D
+ movdqu 0x80($ctx),$E
+ movdqa 0x60($Tbl),$tx # pbswap_mask
+ movdqa -0x20($Tbl),$K # K_00_19
+ jmp .Loop
+
+.align 32
+.Loop:
+___
+for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+$code.=" movdqa 0x00($Tbl),$K\n"; # K_20_39
+for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=" movdqa 0x20($Tbl),$K\n"; # K_40_59
+for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+$code.=" movdqa 0x40($Tbl),$K\n"; # K_60_79
+for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ movdqa (%rbx),@Xi[0] # pull counters
+ mov \$1,%ecx
+ cmp 4*0(%rbx),%ecx # examine counters
+ pxor $t2,$t2
+ cmovge $Tbl,@ptr[0] # cancel input
+ cmp 4*1(%rbx),%ecx
+ movdqa @Xi[0],@Xi[1]
+ cmovge $Tbl,@ptr[1]
+ cmp 4*2(%rbx),%ecx
+ pcmpgtd $t2,@Xi[1] # mask value
+ cmovge $Tbl,@ptr[2]
+ cmp 4*3(%rbx),%ecx
+ paddd @Xi[1],@Xi[0] # counters--
+ cmovge $Tbl,@ptr[3]
+
+ movdqu 0x00($ctx),$t0
+ pand @Xi[1],$A
+ movdqu 0x20($ctx),$t1
+ pand @Xi[1],$B
+ paddd $t0,$A
+ movdqu 0x40($ctx),$t2
+ pand @Xi[1],$C
+ paddd $t1,$B
+ movdqu 0x60($ctx),$t3
+ pand @Xi[1],$D
+ paddd $t2,$C
+ movdqu 0x80($ctx),$tx
+ pand @Xi[1],$E
+ movdqu $A,0x00($ctx)
+ paddd $t3,$D
+ movdqu $B,0x20($ctx)
+ paddd $tx,$E
+ movdqu $C,0x40($ctx)
+ movdqu $D,0x60($ctx)
+ movdqu $E,0x80($ctx)
+
+ movdqa @Xi[0],(%rbx) # save counters
+ movdqa 0x60($Tbl),$tx # pbswap_mask
+ movdqa -0x20($Tbl),$K # K_00_19
+ dec $num
+ jnz .Loop
+
+ mov `$REG_SZ*17+8`(%rsp),$num
+ lea $REG_SZ($ctx),$ctx
+ lea `16*$REG_SZ/4`($inp),$inp
+ dec $num
+ jnz .Loop_grande
+
+.Ldone:
+ mov `$REG_SZ*17`(%rsp),%rax # original %rsp
+___
+$code.=<<___ if ($win64);
+ movaps -0xb8(%rax),%xmm6
+ movaps -0xa8(%rax),%xmm7
+ movaps -0x98(%rax),%xmm8
+ movaps -0x88(%rax),%xmm9
+ movaps -0x78(%rax),%xmm10
+ movaps -0x68(%rax),%xmm11
+ movaps -0x58(%rax),%xmm12
+ movaps -0x48(%rax),%xmm13
+ movaps -0x38(%rax),%xmm14
+ movaps -0x28(%rax),%xmm15
+___
+$code.=<<___;
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue:
+ ret
+.size sha1_multi_block,.-sha1_multi_block
+___
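The tail of .Loop retires finished streams branch-free: pcmpgtd turns still-positive counters into all-ones lane masks, paddd of that mask decrements them, and pand zeroes the freshly computed state of exhausted lanes so that adding the saved context leaves them unchanged. The same trick in scalar perl (illustrative only):

    my @counter = (2, 0, 1, 0);                          # blocks left per lane
    my @mask = map { $_ > 0 ? 0xffffffff : 0 } @counter; # pcmpgtd vs zero
    @counter = map { $counter[$_] - ($mask[$_] ? 1 : 0) } 0..3;  # paddd of -1
    my @A_new = (0x11111111, 0x22222222, 0x33333333, 0x44444444);
    my @A_ctx = (0xa0a0a0a0, 0xb0b0b0b0, 0xc0c0c0c0, 0xd0d0d0d0);
    my @A = map { (($A_new[$_] & $mask[$_]) + $A_ctx[$_]) & 0xffffffff } 0..3;
    # exhausted lanes (mask 0) come out holding their old context value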
+ {{{
+my ($ABCD0,$E0,$E0_,$BSWAP,$ABCD1,$E1,$E1_)=map("%xmm$_",(0..3,8..10));
+my @MSG0=map("%xmm$_",(4..7));
+my @MSG1=map("%xmm$_",(11..14));
+
+$code.=<<___;
+.type sha1_multi_block_shaext,\@function,3
+.align 32
+sha1_multi_block_shaext:
+_shaext_shortcut:
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,-0x78(%rax)
+ movaps %xmm11,-0x68(%rax)
+ movaps %xmm12,-0x58(%rax)
+ movaps %xmm13,-0x48(%rax)
+ movaps %xmm14,-0x38(%rax)
+ movaps %xmm15,-0x28(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`,%rsp
+ shl \$1,$num # we process a pair at a time
+ and \$-256,%rsp
+ lea 0x40($ctx),$ctx # size optimization
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody_shaext:
+ lea `$REG_SZ*16`(%rsp),%rbx
+ movdqa K_XX_XX+0x80(%rip),$BSWAP # byte-n-word swap
+
+.Loop_grande_shaext:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+___
+for($i=0;$i<2;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle %rsp,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ test $num,$num
+ jz .Ldone_shaext
+
+ movq 0x00-0x40($ctx),$ABCD0 # a1.a0
+ movq 0x20-0x40($ctx),@MSG0[0] # b1.b0
+ movq 0x40-0x40($ctx),@MSG0[1] # c1.c0
+ movq 0x60-0x40($ctx),@MSG0[2] # d1.d0
+ movq 0x80-0x40($ctx),@MSG0[3] # e1.e0
+
+ punpckldq @MSG0[0],$ABCD0 # b1.a1.b0.a0
+ punpckldq @MSG0[2],@MSG0[1] # d1.c1.d0.c0
+
+ movdqa $ABCD0,$ABCD1
+ punpcklqdq @MSG0[1],$ABCD0 # d0.c0.b0.a0
+ punpckhqdq @MSG0[1],$ABCD1 # d1.c1.b1.a1
+
+ pshufd \$0b00111111,@MSG0[3],$E0
+ pshufd \$0b01111111,@MSG0[3],$E1
+ pshufd \$0b00011011,$ABCD0,$ABCD0
+ pshufd \$0b00011011,$ABCD1,$ABCD1
+ jmp .Loop_shaext
+
+.align 32
+.Loop_shaext:
+ movdqu 0x00(@ptr[0]),@MSG0[0]
+ movdqu 0x00(@ptr[1]),@MSG1[0]
+ movdqu 0x10(@ptr[0]),@MSG0[1]
+ movdqu 0x10(@ptr[1]),@MSG1[1]
+ movdqu 0x20(@ptr[0]),@MSG0[2]
+ pshufb $BSWAP,@MSG0[0]
+ movdqu 0x20(@ptr[1]),@MSG1[2]
+ pshufb $BSWAP,@MSG1[0]
+ movdqu 0x30(@ptr[0]),@MSG0[3]
+ lea 0x40(@ptr[0]),@ptr[0]
+ pshufb $BSWAP,@MSG0[1]
+ movdqu 0x30(@ptr[1]),@MSG1[3]
+ lea 0x40(@ptr[1]),@ptr[1]
+ pshufb $BSWAP,@MSG1[1]
+
+ movdqa $E0,0x50(%rsp) # offload
+ paddd @MSG0[0],$E0
+ movdqa $E1,0x70(%rsp)
+ paddd @MSG1[0],$E1
+ movdqa $ABCD0,0x40(%rsp) # offload
+ movdqa $ABCD0,$E0_
+ movdqa $ABCD1,0x60(%rsp)
+ movdqa $ABCD1,$E1_
+ sha1rnds4 \$0,$E0,$ABCD0 # 0-3
+ sha1nexte @MSG0[1],$E0_
+ sha1rnds4 \$0,$E1,$ABCD1 # 0-3
+ sha1nexte @MSG1[1],$E1_
+ pshufb $BSWAP,@MSG0[2]
+ prefetcht0 127(@ptr[0])
+ sha1msg1 @MSG0[1],@MSG0[0]
+ pshufb $BSWAP,@MSG1[2]
+ prefetcht0 127(@ptr[1])
+ sha1msg1 @MSG1[1],@MSG1[0]
+
+ pshufb $BSWAP,@MSG0[3]
+ movdqa $ABCD0,$E0
+ pshufb $BSWAP,@MSG1[3]
+ movdqa $ABCD1,$E1
+ sha1rnds4 \$0,$E0_,$ABCD0 # 4-7
+ sha1nexte @MSG0[2],$E0
+ sha1rnds4 \$0,$E1_,$ABCD1 # 4-7
+ sha1nexte @MSG1[2],$E1
+ pxor @MSG0[2],@MSG0[0]
+ sha1msg1 @MSG0[2],@MSG0[1]
+ pxor @MSG1[2],@MSG1[0]
+ sha1msg1 @MSG1[2],@MSG1[1]
+___
+for($i=2;$i<20-4;$i++) {
+$code.=<<___;
+ movdqa $ABCD0,$E0_
+ movdqa $ABCD1,$E1_
+ sha1rnds4 \$`int($i/5)`,$E0,$ABCD0 # 8-11
+ sha1nexte @MSG0[3],$E0_
+ sha1rnds4 \$`int($i/5)`,$E1,$ABCD1 # 8-11
+ sha1nexte @MSG1[3],$E1_
+ sha1msg2 @MSG0[3],@MSG0[0]
+ sha1msg2 @MSG1[3],@MSG1[0]
+ pxor @MSG0[3],@MSG0[1]
+ sha1msg1 @MSG0[3],@MSG0[2]
+ pxor @MSG1[3],@MSG1[1]
+ sha1msg1 @MSG1[3],@MSG1[2]
+___
+ ($E0,$E0_)=($E0_,$E0); ($E1,$E1_)=($E1_,$E1);
+ push(@MSG0,shift(@MSG0)); push(@MSG1,shift(@MSG1));
+}
+$code.=<<___;
+ movdqa $ABCD0,$E0_
+ movdqa $ABCD1,$E1_
+ sha1rnds4 \$3,$E0,$ABCD0 # 64-67
+ sha1nexte @MSG0[3],$E0_
+ sha1rnds4 \$3,$E1,$ABCD1 # 64-67
+ sha1nexte @MSG1[3],$E1_
+ sha1msg2 @MSG0[3],@MSG0[0]
+ sha1msg2 @MSG1[3],@MSG1[0]
+ pxor @MSG0[3],@MSG0[1]
+ pxor @MSG1[3],@MSG1[1]
+
+ mov \$1,%ecx
+ pxor @MSG0[2],@MSG0[2] # zero
+ cmp 4*0(%rbx),%ecx # examine counters
+ cmovge %rsp,@ptr[0] # cancel input
+
+ movdqa $ABCD0,$E0
+ movdqa $ABCD1,$E1
+ sha1rnds4 \$3,$E0_,$ABCD0 # 68-71
+ sha1nexte @MSG0[0],$E0
+ sha1rnds4 \$3,$E1_,$ABCD1 # 68-71
+ sha1nexte @MSG1[0],$E1
+ sha1msg2 @MSG0[0],@MSG0[1]
+ sha1msg2 @MSG1[0],@MSG1[1]
+
+ cmp 4*1(%rbx),%ecx
+ cmovge %rsp,@ptr[1]
+ movq (%rbx),@MSG0[0] # pull counters
+
+ movdqa $ABCD0,$E0_
+ movdqa $ABCD1,$E1_
+ sha1rnds4 \$3,$E0,$ABCD0 # 72-75
+ sha1nexte @MSG0[1],$E0_
+ sha1rnds4 \$3,$E1,$ABCD1 # 72-75
+ sha1nexte @MSG1[1],$E1_
+
+ pshufd \$0x00,@MSG0[0],@MSG1[2]
+ pshufd \$0x55,@MSG0[0],@MSG1[3]
+ movdqa @MSG0[0],@MSG0[1]
+ pcmpgtd @MSG0[2],@MSG1[2]
+ pcmpgtd @MSG0[2],@MSG1[3]
+
+ movdqa $ABCD0,$E0
+ movdqa $ABCD1,$E1
+ sha1rnds4 \$3,$E0_,$ABCD0 # 76-79
+ sha1nexte $MSG0[2],$E0
+ sha1rnds4 \$3,$E1_,$ABCD1 # 76-79
+ sha1nexte $MSG0[2],$E1
+
+ pcmpgtd @MSG0[2],@MSG0[1] # counter mask
+ pand @MSG1[2],$ABCD0
+ pand @MSG1[2],$E0
+ pand @MSG1[3],$ABCD1
+ pand @MSG1[3],$E1
+ paddd @MSG0[1],@MSG0[0] # counters--
+
+ paddd 0x40(%rsp),$ABCD0
+ paddd 0x50(%rsp),$E0
+ paddd 0x60(%rsp),$ABCD1
+ paddd 0x70(%rsp),$E1
+
+ movq @MSG0[0],(%rbx) # save counters
+ dec $num
+ jnz .Loop_shaext
+
+ mov `$REG_SZ*17+8`(%rsp),$num
+
+ pshufd \$0b00011011,$ABCD0,$ABCD0
+ pshufd \$0b00011011,$ABCD1,$ABCD1
+
+ movdqa $ABCD0,@MSG0[0]
+ punpckldq $ABCD1,$ABCD0 # b1.b0.a1.a0
+ punpckhdq $ABCD1,@MSG0[0] # d1.d0.c1.c0
+ punpckhdq $E1,$E0 # e1.e0.xx.xx
+ movq $ABCD0,0x00-0x40($ctx) # a1.a0
+ psrldq \$8,$ABCD0
+ movq @MSG0[0],0x40-0x40($ctx) # c1.c0
+ psrldq \$8,@MSG0[0]
+ movq $ABCD0,0x20-0x40($ctx) # b1.b0
+ psrldq \$8,$E0
+ movq @MSG0[0],0x60-0x40($ctx) # d1.d0
+ movq $E0,0x80-0x40($ctx) # e1.e0
+
+ lea `$REG_SZ/2`($ctx),$ctx
+ lea `16*2`($inp),$inp
+ dec $num
+ jnz .Loop_grande_shaext
+
+.Ldone_shaext:
+ #mov `$REG_SZ*17`(%rsp),%rax # original %rsp
+___
+$code.=<<___ if ($win64);
+ movaps -0xb8(%rax),%xmm6
+ movaps -0xa8(%rax),%xmm7
+ movaps -0x98(%rax),%xmm8
+ movaps -0x88(%rax),%xmm9
+ movaps -0x78(%rax),%xmm10
+ movaps -0x68(%rax),%xmm11
+ movaps -0x58(%rax),%xmm12
+ movaps -0x48(%rax),%xmm13
+ movaps -0x38(%rax),%xmm14
+ movaps -0x28(%rax),%xmm15
+___
+$code.=<<___;
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue_shaext:
+ ret
+.size sha1_multi_block_shaext,.-sha1_multi_block_shaext
+___
+ }}}
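
Editorial sketch (not part of the patch): the SHA-NI path above interleaves
two independent streams, so each lane's sha1rnds4 executes while the other
lane's result is still in flight. Every sha1rnds4 retires four rounds and its
immediate selects the 20-round group, hence the int($i/5) in the loop -- five
four-round instructions per K constant:

    for my $i (0 .. 19) {
        printf "sha1rnds4 \$%d  # rounds %2d-%2d\n",
               int($i/5), 4*$i, 4*$i + 3;
    }
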
+
+ if ($avx) {{{
+sub BODY_00_19_avx {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+my $k=$i+2;
+my $vpack = $REG_SZ==16 ? "vpunpckldq" : "vinserti128";
+my $ptr_n = $REG_SZ==16 ? @ptr[1] : @ptr[4];
+
+$code.=<<___ if ($i==0 && $REG_SZ==16);
+ vmovd (@ptr[0]),@Xi[0]
+ lea `16*4`(@ptr[0]),@ptr[0]
+ vmovd (@ptr[1]),@Xi[2] # borrow Xi[2]
+ lea `16*4`(@ptr[1]),@ptr[1]
+ vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
+ lea `16*4`(@ptr[2]),@ptr[2]
+ vpinsrd \$1,(@ptr[3]),@Xi[2],@Xi[2]
+ lea `16*4`(@ptr[3]),@ptr[3]
+ vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
+ vpunpckldq @Xi[2],@Xi[0],@Xi[0]
+ vmovd `4*$j-16*4`($ptr_n),$t3
+ vpshufb $tx,@Xi[0],@Xi[0]
+___
+$code.=<<___ if ($i<15 && $REG_SZ==16); # just load input
+ vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
+ vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t3,$t3
+___
+$code.=<<___ if ($i==0 && $REG_SZ==32);
+ vmovd (@ptr[0]),@Xi[0]
+ lea `16*4`(@ptr[0]),@ptr[0]
+ vmovd (@ptr[4]),@Xi[2] # borrow Xi[2]
+ lea `16*4`(@ptr[4]),@ptr[4]
+ vmovd (@ptr[1]),$t2
+ lea `16*4`(@ptr[1]),@ptr[1]
+ vmovd (@ptr[5]),$t1
+ lea `16*4`(@ptr[5]),@ptr[5]
+ vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
+ lea `16*4`(@ptr[2]),@ptr[2]
+ vpinsrd \$1,(@ptr[6]),@Xi[2],@Xi[2]
+ lea `16*4`(@ptr[6]),@ptr[6]
+ vpinsrd \$1,(@ptr[3]),$t2,$t2
+ lea `16*4`(@ptr[3]),@ptr[3]
+ vpunpckldq $t2,@Xi[0],@Xi[0]
+ vpinsrd \$1,(@ptr[7]),$t1,$t1
+ lea `16*4`(@ptr[7]),@ptr[7]
+ vpunpckldq $t1,@Xi[2],@Xi[2]
+ vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
+ vinserti128 @Xi[2],@Xi[0],@Xi[0]
+ vmovd `4*$j-16*4`($ptr_n),$t3
+ vpshufb $tx,@Xi[0],@Xi[0]
+___
+$code.=<<___ if ($i<15 && $REG_SZ==32); # just load input
+ vmovd `4*$j-16*4`(@ptr[1]),$t2
+ vmovd `4*$j-16*4`(@ptr[5]),$t1
+ vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
+ vpinsrd \$1,`4*$j-16*4`(@ptr[6]),$t3,$t3
+ vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t2,$t2
+ vpunpckldq $t2,@Xi[1],@Xi[1]
+ vpinsrd \$1,`4*$j-16*4`(@ptr[7]),$t1,$t1
+ vpunpckldq $t1,$t3,$t3
+___
+$code.=<<___ if ($i<14);
+ vpaddd $K,$e,$e # e+=K_00_19
+ vpslld \$5,$a,$t2
+ vpandn $d,$b,$t1
+ vpand $c,$b,$t0
+
+ vmovdqa @Xi[0],`&Xi_off($i)`
+ vpaddd @Xi[0],$e,$e # e+=X[i]
+ $vpack $t3,@Xi[1],@Xi[1]
+ vpsrld \$27,$a,$t3
+ vpxor $t1,$t0,$t0 # Ch(b,c,d)
+ vmovd `4*$k-16*4`(@ptr[0]),@Xi[2]
+
+ vpslld \$30,$b,$t1
+ vpor $t3,$t2,$t2 # rol(a,5)
+ vmovd `4*$k-16*4`($ptr_n),$t3
+ vpaddd $t0,$e,$e # e+=Ch(b,c,d)
+
+ vpsrld \$2,$b,$b
+ vpaddd $t2,$e,$e # e+=rol(a,5)
+ vpshufb $tx,@Xi[1],@Xi[1]
+ vpor $t1,$b,$b # b=rol(b,30)
+___
+$code.=<<___ if ($i==14);
+ vpaddd $K,$e,$e # e+=K_00_19
+ prefetcht0 63(@ptr[0])
+ vpslld \$5,$a,$t2
+ vpandn $d,$b,$t1
+ vpand $c,$b,$t0
+
+ vmovdqa @Xi[0],`&Xi_off($i)`
+ vpaddd @Xi[0],$e,$e # e+=X[i]
+ $vpack $t3,@Xi[1],@Xi[1]
+ vpsrld \$27,$a,$t3
+ prefetcht0 63(@ptr[1])
+ vpxor $t1,$t0,$t0 # Ch(b,c,d)
+
+ vpslld \$30,$b,$t1
+ vpor $t3,$t2,$t2 # rol(a,5)
+ prefetcht0 63(@ptr[2])
+ vpaddd $t0,$e,$e # e+=Ch(b,c,d)
+
+ vpsrld \$2,$b,$b
+ vpaddd $t2,$e,$e # e+=rol(a,5)
+ prefetcht0 63(@ptr[3])
+ vpshufb $tx,@Xi[1],@Xi[1]
+ vpor $t1,$b,$b # b=rol(b,30)
+___
+$code.=<<___ if ($i>=13 && $i<15);
+ vmovdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
+___
+$code.=<<___ if ($i>=15); # apply Xupdate
+ vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
+ vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
+
+ vpaddd $K,$e,$e # e+=K_00_19
+ vpslld \$5,$a,$t2
+ vpandn $d,$b,$t1
+ `"prefetcht0 63(@ptr[4])" if ($i==15 && $REG_SZ==32)`
+ vpand $c,$b,$t0
+
+ vmovdqa @Xi[0],`&Xi_off($i)`
+ vpaddd @Xi[0],$e,$e # e+=X[i]
+ vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
+ vpsrld \$27,$a,$t3
+ vpxor $t1,$t0,$t0 # Ch(b,c,d)
+ vpxor @Xi[3],@Xi[1],@Xi[1]
+ `"prefetcht0 63(@ptr[5])" if ($i==15 && $REG_SZ==32)`
+
+ vpslld \$30,$b,$t1
+ vpor $t3,$t2,$t2 # rol(a,5)
+ vpaddd $t0,$e,$e # e+=Ch(b,c,d)
+ `"prefetcht0 63(@ptr[6])" if ($i==15 && $REG_SZ==32)`
+ vpsrld \$31,@Xi[1],$tx
+ vpaddd @Xi[1],@Xi[1],@Xi[1]
+
+ vpsrld \$2,$b,$b
+ `"prefetcht0 63(@ptr[7])" if ($i==15 && $REG_SZ==32)`
+ vpaddd $t2,$e,$e # e+=rol(a,5)
+ vpor $tx,@Xi[1],@Xi[1] # rol \$1,@Xi[1]
+ vpor $t1,$b,$b # b=rol(b,30)
+___
+push(@Xi,shift(@Xi));
+}
+
+sub BODY_20_39_avx {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+
+$code.=<<___ if ($i<79);
+ vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
+ vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
+
+ vpslld \$5,$a,$t2
+ vpaddd $K,$e,$e # e+=K_20_39
+ vpxor $b,$d,$t0
+___
+$code.=<<___ if ($i<72);
+ vmovdqa @Xi[0],`&Xi_off($i)`
+___
+$code.=<<___ if ($i<79);
+ vpaddd @Xi[0],$e,$e # e+=X[i]
+ vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
+ vpsrld \$27,$a,$t3
+ vpxor $c,$t0,$t0 # Parity(b,c,d)
+ vpxor @Xi[3],@Xi[1],@Xi[1]
+
+ vpslld \$30,$b,$t1
+ vpor $t3,$t2,$t2 # rol(a,5)
+ vpaddd $t0,$e,$e # e+=Parity(b,c,d)
+ vpsrld \$31,@Xi[1],$tx
+ vpaddd @Xi[1],@Xi[1],@Xi[1]
+
+ vpsrld \$2,$b,$b
+ vpaddd $t2,$e,$e # e+=rol(a,5)
+ vpor $tx,@Xi[1],@Xi[1] # rol(@Xi[1],1)
+ vpor $t1,$b,$b # b=rol(b,30)
+___
+$code.=<<___ if ($i==79);
+ vpslld \$5,$a,$t2
+ vpaddd $K,$e,$e # e+=K_20_39
+ vpxor $b,$d,$t0
+
+ vpsrld \$27,$a,$t3
+ vpaddd @Xi[0],$e,$e # e+=X[i]
+ vpxor $c,$t0,$t0 # Parity(b,c,d)
+
+ vpslld \$30,$b,$t1
+ vpor $t3,$t2,$t2 # rol(a,5)
+ vpaddd $t0,$e,$e # e+=Parity(b,c,d)
+
+ vpsrld \$2,$b,$b
+ vpaddd $t2,$e,$e # e+=rol(a,5)
+ vpor $t1,$b,$b # b=rol(b,30)
+___
+push(@Xi,shift(@Xi));
+}
+
+sub BODY_40_59_avx {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+
+$code.=<<___;
+ vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
+ vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
+
+ vpaddd $K,$e,$e # e+=K_40_59
+ vpslld \$5,$a,$t2
+ vpand $c,$d,$t1
+ vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
+
+ vpaddd $t1,$e,$e
+ vpsrld \$27,$a,$t3
+ vpxor $c,$d,$t0
+ vpxor @Xi[3],@Xi[1],@Xi[1]
+
+ vmovdqu @Xi[0],`&Xi_off($i)`
+ vpaddd @Xi[0],$e,$e # e+=X[i]
+ vpor $t3,$t2,$t2 # rol(a,5)
+ vpsrld \$31,@Xi[1],$tx
+ vpand $b,$t0,$t0
+ vpaddd @Xi[1],@Xi[1],@Xi[1]
+
+ vpslld \$30,$b,$t1
+ vpaddd $t0,$e,$e # e+=Maj(b,d,c)
+
+ vpsrld \$2,$b,$b
+ vpaddd $t2,$e,$e # e+=rol(a,5)
+ vpor $tx,@Xi[1],@Xi[1] # rol(@X[1],1)
+ vpor $t1,$b,$b # b=rol(b,30)
+___
+push(@Xi,shift(@Xi));
+}
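
Editorial self-check (not part of the patch): BODY_40_59_avx adds
Maj(b,c,d) in two pieces, (c&d) and (b&(c^d)). The two terms never share a
set bit, so the separate vpaddd instructions are equivalent to the canonical
(b&c)|(b&d)|(c&d); on a 64-bit perl this can be confirmed directly:

    use strict; use warnings;
    for (1 .. 1000) {
        my ($b, $c, $d) = map { int(rand(2**32)) } 1 .. 3;
        my $maj   = ($b & $c) | ($b & $d) | ($c & $d);
        my $split = (($c & $d) + ($b & ($c ^ $d))) & 0xffffffff;
        die "mismatch" unless $maj == $split;
    }
    print "ok\n";
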
+
+$code.=<<___;
+.type sha1_multi_block_avx,\@function,3
+.align 32
+sha1_multi_block_avx:
+_avx_shortcut:
+___
+$code.=<<___ if ($avx>1);
+ shr \$32,%rcx
+ cmp \$2,$num
+ jb .Lavx
+ test \$`1<<5`,%ecx
+ jnz _avx2_shortcut
+ jmp .Lavx
+.align 32
+.Lavx:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,-0x78(%rax)
+ movaps %xmm11,-0x68(%rax)
+ movaps %xmm12,-0x58(%rax)
+ movaps %xmm13,-0x48(%rax)
+ movaps %xmm14,-0x38(%rax)
+ movaps %xmm15,-0x28(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`, %rsp
+ and \$-256,%rsp
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody_avx:
+ lea K_XX_XX(%rip),$Tbl
+ lea `$REG_SZ*16`(%rsp),%rbx
+
+ vzeroupper
+.Loop_grande_avx:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+___
+for($i=0;$i<4;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ test $num,$num
+ jz .Ldone_avx
+
+ vmovdqu 0x00($ctx),$A # load context
+ lea 128(%rsp),%rax
+ vmovdqu 0x20($ctx),$B
+ vmovdqu 0x40($ctx),$C
+ vmovdqu 0x60($ctx),$D
+ vmovdqu 0x80($ctx),$E
+ vmovdqu 0x60($Tbl),$tx # pbswap_mask
+ jmp .Loop_avx
+
+.align 32
+.Loop_avx:
+___
+$code.=" vmovdqa -0x20($Tbl),$K\n"; # K_00_19
+for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=" vmovdqa 0x00($Tbl),$K\n"; # K_20_39
+for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=" vmovdqa 0x20($Tbl),$K\n"; # K_40_59
+for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=" vmovdqa 0x40($Tbl),$K\n"; # K_60_79
+for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ mov \$1,%ecx
+___
+for($i=0;$i<4;$i++) {
+ $code.=<<___;
+ cmp `4*$i`(%rbx),%ecx # examine counters
+ cmovge $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ vmovdqu (%rbx),$t0 # pull counters
+ vpxor $t2,$t2,$t2
+ vmovdqa $t0,$t1
+ vpcmpgtd $t2,$t1,$t1 # mask value
+ vpaddd $t1,$t0,$t0 # counters--
+
+ vpand $t1,$A,$A
+ vpand $t1,$B,$B
+ vpaddd 0x00($ctx),$A,$A
+ vpand $t1,$C,$C
+ vpaddd 0x20($ctx),$B,$B
+ vpand $t1,$D,$D
+ vpaddd 0x40($ctx),$C,$C
+ vpand $t1,$E,$E
+ vpaddd 0x60($ctx),$D,$D
+ vpaddd 0x80($ctx),$E,$E
+ vmovdqu $A,0x00($ctx)
+ vmovdqu $B,0x20($ctx)
+ vmovdqu $C,0x40($ctx)
+ vmovdqu $D,0x60($ctx)
+ vmovdqu $E,0x80($ctx)
+
+ vmovdqu $t0,(%rbx) # save counters
+ vmovdqu 0x60($Tbl),$tx # pbswap_mask
+ dec $num
+ jnz .Loop_avx
+
+ mov `$REG_SZ*17+8`(%rsp),$num
+ lea $REG_SZ($ctx),$ctx
+ lea `16*$REG_SZ/4`($inp),$inp
+ dec $num
+ jnz .Loop_grande_avx
+
+.Ldone_avx:
+ mov `$REG_SZ*17`(%rsp),%rax # original %rsp
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps -0xb8(%rax),%xmm6
+ movaps -0xa8(%rax),%xmm7
+ movaps -0x98(%rax),%xmm8
+ movaps -0x88(%rax),%xmm9
+ movaps -0x78(%rax),%xmm10
+ movaps -0x68(%rax),%xmm11
+ movaps -0x58(%rax),%xmm12
+ movaps -0x48(%rax),%xmm13
+ movaps -0x38(%rax),%xmm14
+ movaps -0x28(%rax),%xmm15
+___
+$code.=<<___;
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue_avx:
+ ret
+.size sha1_multi_block_avx,.-sha1_multi_block_avx
+___
+
+ if ($avx>1) {
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+$REG_SZ=32;
+
+@ptr=map("%r$_",(12..15,8..11));
+
+@V=($A,$B,$C,$D,$E)=map("%ymm$_",(0..4));
+($t0,$t1,$t2,$t3,$tx)=map("%ymm$_",(5..9));
+@Xi=map("%ymm$_",(10..14));
+$K="%ymm15";
+
+$code.=<<___;
+.type sha1_multi_block_avx2,\@function,3
+.align 32
+sha1_multi_block_avx2:
+_avx2_shortcut:
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,0x40(%rsp)
+ movaps %xmm11,0x50(%rsp)
+ movaps %xmm12,-0x78(%rax)
+ movaps %xmm13,-0x68(%rax)
+ movaps %xmm14,-0x58(%rax)
+ movaps %xmm15,-0x48(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`, %rsp
+ and \$-256,%rsp
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody_avx2:
+ lea K_XX_XX(%rip),$Tbl
+ shr \$1,$num
+
+ vzeroupper
+.Loop_grande_avx2:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+ lea `$REG_SZ*16`(%rsp),%rbx
+___
+for($i=0;$i<8;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ vmovdqu 0x00($ctx),$A # load context
+ lea 128(%rsp),%rax
+ vmovdqu 0x20($ctx),$B
+ lea 256+128(%rsp),%rbx
+ vmovdqu 0x40($ctx),$C
+ vmovdqu 0x60($ctx),$D
+ vmovdqu 0x80($ctx),$E
+ vmovdqu 0x60($Tbl),$tx # pbswap_mask
+ jmp .Loop_avx2
+
+.align 32
+.Loop_avx2:
+___
+$code.=" vmovdqa -0x20($Tbl),$K\n"; # K_00_19
+for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=" vmovdqa 0x00($Tbl),$K\n"; # K_20_39
+for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=" vmovdqa 0x20($Tbl),$K\n"; # K_40_59
+for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=" vmovdqa 0x40($Tbl),$K\n"; # K_60_79
+for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ mov \$1,%ecx
+ lea `$REG_SZ*16`(%rsp),%rbx
+___
+for($i=0;$i<8;$i++) {
+ $code.=<<___;
+ cmp `4*$i`(%rbx),%ecx # examine counters
+ cmovge $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ vmovdqu (%rbx),$t0 # pull counters
+ vpxor $t2,$t2,$t2
+ vmovdqa $t0,$t1
+ vpcmpgtd $t2,$t1,$t1 # mask value
+ vpaddd $t1,$t0,$t0 # counters--
+
+ vpand $t1,$A,$A
+ vpand $t1,$B,$B
+ vpaddd 0x00($ctx),$A,$A
+ vpand $t1,$C,$C
+ vpaddd 0x20($ctx),$B,$B
+ vpand $t1,$D,$D
+ vpaddd 0x40($ctx),$C,$C
+ vpand $t1,$E,$E
+ vpaddd 0x60($ctx),$D,$D
+ vpaddd 0x80($ctx),$E,$E
+ vmovdqu $A,0x00($ctx)
+ vmovdqu $B,0x20($ctx)
+ vmovdqu $C,0x40($ctx)
+ vmovdqu $D,0x60($ctx)
+ vmovdqu $E,0x80($ctx)
+
+ vmovdqu $t0,(%rbx) # save counters
+ lea 256+128(%rsp),%rbx
+ vmovdqu 0x60($Tbl),$tx # pbswap_mask
+ dec $num
+ jnz .Loop_avx2
+
+ #mov `$REG_SZ*17+8`(%rsp),$num
+ #lea $REG_SZ($ctx),$ctx
+ #lea `16*$REG_SZ/4`($inp),$inp
+ #dec $num
+ #jnz .Loop_grande_avx2
+
+.Ldone_avx2:
+ mov `$REG_SZ*17`(%rsp),%rax # original %rsp
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps -0xd8(%rax),%xmm6
+ movaps -0xc8(%rax),%xmm7
+ movaps -0xb8(%rax),%xmm8
+ movaps -0xa8(%rax),%xmm9
+ movaps -0x98(%rax),%xmm10
+ movaps -0x88(%rax),%xmm11
+ movaps -0x78(%rax),%xmm12
+ movaps -0x68(%rax),%xmm13
+ movaps -0x58(%rax),%xmm14
+ movaps -0x48(%rax),%xmm15
+___
+$code.=<<___;
+ mov -48(%rax),%r15
+ mov -40(%rax),%r14
+ mov -32(%rax),%r13
+ mov -24(%rax),%r12
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue_avx2:
+ ret
+.size sha1_multi_block_avx2,.-sha1_multi_block_avx2
+___
+ } }}}
+$code.=<<___;
+
+.align 256
+ .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
+ .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
+K_XX_XX:
+ .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
+ .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
+ .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
+ .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
+ .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
+ .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
+ .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
+ .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
+ .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
+ .asciz "SHA1 multi-block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<.Lbody
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lin_prologue
+
+ mov `16*17`(%rax),%rax # pull saved stack pointer
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+
+ lea -24-10*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+___
+$code.=<<___ if ($avx>1);
+.type avx2_handler,\@abi-omnipotent
+.align 16
+avx2_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<body label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ mov `32*17`(%rax),%rax # pull saved stack pointer
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+ lea -56-10*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+ jmp .Lin_prologue
+.size avx2_handler,.-avx2_handler
+___
+$code.=<<___;
+.section .pdata
+.align 4
+ .rva .LSEH_begin_sha1_multi_block
+ .rva .LSEH_end_sha1_multi_block
+ .rva .LSEH_info_sha1_multi_block
+ .rva .LSEH_begin_sha1_multi_block_shaext
+ .rva .LSEH_end_sha1_multi_block_shaext
+ .rva .LSEH_info_sha1_multi_block_shaext
+___
+$code.=<<___ if ($avx);
+ .rva .LSEH_begin_sha1_multi_block_avx
+ .rva .LSEH_end_sha1_multi_block_avx
+ .rva .LSEH_info_sha1_multi_block_avx
+___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_sha1_multi_block_avx2
+ .rva .LSEH_end_sha1_multi_block_avx2
+ .rva .LSEH_info_sha1_multi_block_avx2
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_sha1_multi_block:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbody,.Lepilogue # HandlerData[]
+.LSEH_info_sha1_multi_block_shaext:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbody_shaext,.Lepilogue_shaext # HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_sha1_multi_block_avx:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbody_avx,.Lepilogue_avx # HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_sha1_multi_block_avx2:
+ .byte 9,0,0,0
+ .rva avx2_handler
+ .rva .Lbody_avx2,.Lepilogue_avx2 # HandlerData[]
+___
+}
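
Editorial sketch (not part of the patch): both handlers follow the usual
CRYPTOGAMS SEH pattern -- HandlerData[] carries two image-relative labels
bracketing the function body, and saved registers are only fixed up when the
faulting RIP lies between them, i.e. after the prologue has completed and
before the epilogue has begun. Schematically (needs_unwind is a hypothetical
name):

    sub needs_unwind {
        my ($rip, $base, $body_rva, $epilogue_rva) = @_;
        return $rip >= $base + $body_rva       # past the prologue...
            && $rip <  $base + $epilogue_rva;  # ...not yet in the epilogue
    }
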
+####################################################################
+
+sub rex {
+ local *opcode=shift;
+ my ($dst,$src)=@_;
+ my $rex=0;
+
+ $rex|=0x04 if ($dst>=8);
+ $rex|=0x01 if ($src>=8);
+ unshift @opcode,$rex|0x40 if ($rex);
+}
+
+sub sha1rnds4 {
+ if ($_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ my @opcode=(0x0f,0x3a,0xcc);
+ rex(\@opcode,$3,$2);
+ push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
+ my $c=$1;
+ push @opcode,$c=~/^0/?oct($c):$c;
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return "sha1rnds4\t".$_[0];
+ }
+}
+
+sub sha1op38 {
+ my $instr = shift;
+ my %opcodelet = (
+ "sha1nexte" => 0xc8,
+ "sha1msg1" => 0xc9,
+ "sha1msg2" => 0xca );
+
+ if (defined($opcodelet{$instr}) && $_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ my @opcode=(0x0f,0x38);
+ rex(\@opcode,$2,$1);
+ push @opcode,$opcodelet{$instr};
+ push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return $instr."\t".$_[0];
+ }
+}
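
Editorial note (not part of the patch): rex(), sha1rnds4() and sha1op38()
exist because contemporary assemblers may not recognize the SHA extension
mnemonics, so the filter at the bottom of the script rewrites them into raw
byte sequences. With the helpers above, for example:

    # would print ".byte 15,56,200,209", i.e. 0F 38 C8 with ModR/M 0xd1,
    # the encoding of "sha1nexte %xmm1,%xmm2"
    print sha1op38("sha1nexte", "%xmm1,%xmm2"), "\n";
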
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval($1)/ge;
+
+ s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo or
+ s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo or
+
+ s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go or
+ s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go or
+ s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha1-mips.pl b/openssl/crypto/sha/asm/sha1-mips.pl
index f1a702f38..340849389 100644
--- a/openssl/crypto/sha/asm/sha1-mips.pl
+++ b/openssl/crypto/sha/asm/sha1-mips.pl
@@ -15,6 +15,10 @@
# compatible subroutine. There is room for minor optimization on
# little-endian platforms...
+# September 2012.
+#
+# Add MIPS32r2 code (>25% fewer instructions).
+
######################################################################
# There is a number of MIPS ABI in use, O32 and N32/64 are most
# widely used. Then there is a new contender: NUBI. It appears that if
@@ -42,7 +46,7 @@
# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
#
-$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+$flavour = shift || "o32"; # supported flavours are o32,n32,64,nubi32,nubi64
if ($flavour =~ /64|n32/i) {
$PTR_ADD="dadd"; # incidentally works even on n32
@@ -64,7 +68,7 @@ if ($flavour =~ /64|n32/i) {
#
######################################################################
-$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0;
+$big_endian=(`echo MIPSEL | $ENV{CC} -E -`=~/MIPSEL/)?1:0 if ($ENV{CC});
for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); }
open STDOUT,">$output";
@@ -95,6 +99,10 @@ sub BODY_00_14 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if (!$big_endian);
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ wsbh @X[$i],@X[$i] # byte swap($i)
+ rotr @X[$i],@X[$i],16
+#else
srl $t0,@X[$i],24 # byte swap($i)
srl $t1,@X[$i],8
andi $t2,@X[$i],0xFF00
@@ -104,8 +112,22 @@ $code.=<<___ if (!$big_endian);
or @X[$i],$t0
or $t1,$t2
or @X[$i],$t1
+#endif
___
$code.=<<___;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ addu $e,$K # $i
+ xor $t0,$c,$d
+ rotr $t1,$a,27
+ lwl @X[$j],$j*4+$MSB($inp)
+ and $t0,$b
+ addu $e,$t1
+ lwr @X[$j],$j*4+$LSB($inp)
+ xor $t0,$d
+ addu $e,@X[$i]
+ rotr $b,$b,2
+ addu $e,$t0
+#else
lwl @X[$j],$j*4+$MSB($inp)
sll $t0,$a,5 # $i
addu $e,$K
@@ -121,6 +143,7 @@ $code.=<<___;
addu $e,@X[$i]
or $b,$t2
addu $e,$t0
+#endif
___
}
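
Editorial sketch (not part of the patch): the MIPS32r2 path replaces the
eight-instruction shift/and/or byte swap with wsbh (swap the bytes within
each halfword) followed by rotr 16 (exchange the halfwords). A Perl model of
the pair:

    sub wsbh { my $x = shift;
        (($x & 0x00ff00ff) << 8) | (($x >> 8) & 0x00ff00ff);
    }
    sub rotr { my ($x, $n) = @_;
        (($x >> $n) | ($x << (32 - $n))) & 0xffffffff;
    }
    printf "%08x\n", rotr(wsbh(0x11223344), 16);    # prints 44332211
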
@@ -129,6 +152,10 @@ my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if (!$big_endian && $i==15);
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ wsbh @X[$i],@X[$i] # byte swap($i)
+ rotr @X[$i],@X[$i],16
+#else
srl $t0,@X[$i],24 # byte swap($i)
srl $t1,@X[$i],8
andi $t2,@X[$i],0xFF00
@@ -138,8 +165,24 @@ $code.=<<___ if (!$big_endian && $i==15);
or @X[$i],$t0
or @X[$i],$t1
or @X[$i],$t2
+#endif
___
$code.=<<___;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ addu $e,$K # $i
+ xor @X[$j%16],@X[($j+2)%16]
+ xor $t0,$c,$d
+ rotr $t1,$a,27
+ xor @X[$j%16],@X[($j+8)%16]
+ and $t0,$b
+ addu $e,$t1
+ xor @X[$j%16],@X[($j+13)%16]
+ xor $t0,$d
+ addu $e,@X[$i%16]
+ rotr @X[$j%16],@X[$j%16],31
+ rotr $b,$b,2
+ addu $e,$t0
+#else
xor @X[$j%16],@X[($j+2)%16]
sll $t0,$a,5 # $i
addu $e,$K
@@ -159,6 +202,7 @@ $code.=<<___;
addu $e,@X[$i%16]
or $b,$t2
addu $e,$t0
+#endif
___
}
@@ -166,6 +210,20 @@ sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if ($i<79);
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ xor @X[$j%16],@X[($j+2)%16]
+ addu $e,$K # $i
+ rotr $t1,$a,27
+ xor @X[$j%16],@X[($j+8)%16]
+ xor $t0,$c,$d
+ addu $e,$t1
+ xor @X[$j%16],@X[($j+13)%16]
+ xor $t0,$b
+ addu $e,@X[$i%16]
+ rotr @X[$j%16],@X[$j%16],31
+ rotr $b,$b,2
+ addu $e,$t0
+#else
xor @X[$j%16],@X[($j+2)%16]
sll $t0,$a,5 # $i
addu $e,$K
@@ -184,8 +242,24 @@ $code.=<<___ if ($i<79);
or @X[$j%16],$t1
or $b,$t2
addu $e,$t0
+#endif
___
$code.=<<___ if ($i==79);
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ lw @X[0],0($ctx)
+ addu $e,$K # $i
+ lw @X[1],4($ctx)
+ rotr $t1,$a,27
+ lw @X[2],8($ctx)
+ xor $t0,$c,$d
+ addu $e,$t1
+ lw @X[3],12($ctx)
+ xor $t0,$b
+ addu $e,@X[$i%16]
+ lw @X[4],16($ctx)
+ rotr $b,$b,2
+ addu $e,$t0
+#else
lw @X[0],0($ctx)
sll $t0,$a,5 # $i
addu $e,$K
@@ -203,6 +277,7 @@ $code.=<<___ if ($i==79);
addu $e,@X[$i%16]
or $b,$t2
addu $e,$t0
+#endif
___
}
@@ -210,6 +285,22 @@ sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if ($i<79);
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ addu $e,$K # $i
+ and $t0,$c,$d
+ xor @X[$j%16],@X[($j+2)%16]
+ rotr $t1,$a,27
+ addu $e,$t0
+ xor @X[$j%16],@X[($j+8)%16]
+ xor $t0,$c,$d
+ addu $e,$t1
+ xor @X[$j%16],@X[($j+13)%16]
+ and $t0,$b
+ addu $e,@X[$i%16]
+ rotr @X[$j%16],@X[$j%16],31
+ rotr $b,$b,2
+ addu $e,$t0
+#else
xor @X[$j%16],@X[($j+2)%16]
sll $t0,$a,5 # $i
addu $e,$K
@@ -230,6 +321,7 @@ $code.=<<___ if ($i<79);
addu $e,@X[$i%16]
or $b,$t2
addu $e,$t0
+#endif
___
}
@@ -241,6 +333,10 @@ $code=<<___;
# include <openssl/fipssyms.h>
#endif
+#if defined(__mips_smartmips) && !defined(_MIPS_ARCH_MIPS32R2)
+#define _MIPS_ARCH_MIPS32R2
+#endif
+
.text
.set noat
diff --git a/openssl/crypto/sha/asm/sha1-ppc.pl b/openssl/crypto/sha/asm/sha1-ppc.pl
index 2140dd2f8..df5989610 100644
--- a/openssl/crypto/sha/asm/sha1-ppc.pl
+++ b/openssl/crypto/sha/asm/sha1-ppc.pl
@@ -9,8 +9,7 @@
# I let hardware handle unaligned input(*), except on page boundaries
# (see below for details). Otherwise straightforward implementation
-# with X vector in register bank. The module is big-endian [which is
-# not big deal as there're no little-endian targets left around].
+# with X vector in register bank.
#
# (*) this means that this module is inappropriate for PPC403? Does
# anybody know if pre-POWER3 can sustain unaligned load?
@@ -38,6 +37,10 @@ if ($flavour =~ /64/) {
$PUSH ="stw";
} else { die "nonsense $flavour"; }
+# Define endianness based on flavour
+# i.e.: linux64le
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
@@ -68,14 +71,28 @@ $T ="r12";
@X=("r16","r17","r18","r19","r20","r21","r22","r23",
"r24","r25","r26","r27","r28","r29","r30","r31");
+sub loadbe {
+my ($dst, $src, $temp_reg) = @_;
+$code.=<<___ if (!$LITTLE_ENDIAN);
+ lwz $dst,$src
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+ lwz $temp_reg,$src
+ rotlwi $dst,$temp_reg,8
+ rlwimi $dst,$temp_reg,24,0,7
+ rlwimi $dst,$temp_reg,24,16,23
+___
+}
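
Editorial sketch (not part of the patch): on little-endian flavours loadbe()
reverses the four bytes with one rotate plus two rotate-and-insert
instructions: rotlwi yields b1.b2.b3.b0, and the rlwimi pair patches in b3
(bits 0-7) and b1 (bits 16-23) from rotl(t,24) = b3.b0.b1.b2. The same dance
in plain Perl (PPC bit 0 is the MSB):

    sub rotl { my ($x, $n) = @_;
        (($x << $n) | ($x >> (32 - $n))) & 0xffffffff;
    }
    my $t   = 0x00010203;
    my $dst = rotl($t, 8);                             # 01.02.03.00
    my $r24 = rotl($t, 24);                            # 03.00.01.02
    $dst = ($dst & 0x00ffffff) | ($r24 & 0xff000000);  # rlwimi ...,24,0,7
    $dst = ($dst & 0xffff00ff) | ($r24 & 0x0000ff00);  # rlwimi ...,24,16,23
    printf "%08x\n", $dst;                             # prints 03020100
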
+
sub BODY_00_19 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
-$code.=<<___ if ($i==0);
- lwz @X[$i],`$i*4`($inp)
-___
+
+ # Since the last value of $f is discarded, we can use
+ # it as a temp reg to swap byte-order when needed.
+ loadbe("@X[$i]","`$i*4`($inp)",$f) if ($i==0);
+ loadbe("@X[$j]","`$j*4`($inp)",$f) if ($i<15);
$code.=<<___ if ($i<15);
- lwz @X[$j],`$j*4`($inp)
add $f,$K,$e
rotlwi $e,$a,5
add $f,$f,@X[$i]
@@ -108,31 +125,31 @@ my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
$code.=<<___ if ($i<79);
add $f,$K,$e
+ xor $t0,$b,$d
rotlwi $e,$a,5
xor @X[$j%16],@X[$j%16],@X[($j+2)%16]
add $f,$f,@X[$i%16]
- xor $t0,$b,$c
+ xor $t0,$t0,$c
xor @X[$j%16],@X[$j%16],@X[($j+8)%16]
- add $f,$f,$e
+ add $f,$f,$t0
rotlwi $b,$b,30
- xor $t0,$t0,$d
xor @X[$j%16],@X[$j%16],@X[($j+13)%16]
- add $f,$f,$t0
+ add $f,$f,$e
rotlwi @X[$j%16],@X[$j%16],1
___
$code.=<<___ if ($i==79);
add $f,$K,$e
+ xor $t0,$b,$d
rotlwi $e,$a,5
lwz r16,0($ctx)
add $f,$f,@X[$i%16]
- xor $t0,$b,$c
+ xor $t0,$t0,$c
lwz r17,4($ctx)
- add $f,$f,$e
+ add $f,$f,$t0
rotlwi $b,$b,30
lwz r18,8($ctx)
- xor $t0,$t0,$d
lwz r19,12($ctx)
- add $f,$f,$t0
+ add $f,$f,$e
lwz r20,16($ctx)
___
}
@@ -316,6 +333,7 @@ $code.=<<___;
blr
.long 0
.byte 0,12,0x14,0,0,0,0,0
+.size .sha1_block_data_order,.-.sha1_block_data_order
___
$code.=<<___;
.asciz "SHA1 block transform for PPC, CRYPTOGAMS by <appro\@fy.chalmers.se>"
diff --git a/openssl/crypto/sha/asm/sha1-sparcv9.pl b/openssl/crypto/sha/asm/sha1-sparcv9.pl
index 5c161cecd..b5efcde5c 100644
--- a/openssl/crypto/sha/asm/sha1-sparcv9.pl
+++ b/openssl/crypto/sha/asm/sha1-sparcv9.pl
@@ -5,6 +5,8 @@
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Hardware SPARC T4 support by David S. Miller <davem@davemloft.net>.
# ====================================================================
# Performance improvement is not really impressive on pre-T1 CPU: +8%
@@ -18,10 +20,10 @@
# ensure scalability on UltraSPARC T1, or rather to avoid decay when
# amount of active threads exceeds the number of physical cores.
-$bits=32;
-for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-if ($bits==64) { $bias=2047; $frame=192; }
-else { $bias=0; $frame=112; }
+# SPARC T4 SHA1 hardware achieves 3.72 cycles per byte, which is 3.1x
+# faster than software. Multi-process benchmark saturates at 11x
+# single-process result on 8-core processor, or ~9GBps per 2.85GHz
+# socket.
$output=shift;
open STDOUT,">$output";
@@ -178,17 +180,102 @@ $code.=<<___;
___
}
-$code.=<<___ if ($bits==64);
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef __arch64__
.register %g2,#scratch
.register %g3,#scratch
-___
-$code.=<<___;
+#endif
+
.section ".text",#alloc,#execinstr
+#ifdef __PIC__
+SPARC_PIC_THUNK(%g1)
+#endif
+
.align 32
.globl sha1_block_data_order
sha1_block_data_order:
- save %sp,-$frame,%sp
+ SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+ ld [%g1+4],%g1 ! OPENSSL_sparcv9cap_P[1]
+
+ andcc %g1, CFR_SHA1, %g0
+ be .Lsoftware
+ nop
+
+ ld [%o0 + 0x00], %f0 ! load context
+ ld [%o0 + 0x04], %f1
+ ld [%o0 + 0x08], %f2
+ andcc %o1, 0x7, %g0
+ ld [%o0 + 0x0c], %f3
+ bne,pn %icc, .Lhwunaligned
+ ld [%o0 + 0x10], %f4
+
+.Lhw_loop:
+ ldd [%o1 + 0x00], %f8
+ ldd [%o1 + 0x08], %f10
+ ldd [%o1 + 0x10], %f12
+ ldd [%o1 + 0x18], %f14
+ ldd [%o1 + 0x20], %f16
+ ldd [%o1 + 0x28], %f18
+ ldd [%o1 + 0x30], %f20
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x38], %f22
+ add %o1, 0x40, %o1
+ prefetch [%o1 + 63], 20
+
+ .word 0x81b02820 ! SHA1
+
+ bne,pt SIZE_T_CC, .Lhw_loop
+ nop
+
+.Lhwfinish:
+ st %f0, [%o0 + 0x00] ! store context
+ st %f1, [%o0 + 0x04]
+ st %f2, [%o0 + 0x08]
+ st %f3, [%o0 + 0x0c]
+ retl
+ st %f4, [%o0 + 0x10]
+
+.align 8
+.Lhwunaligned:
+ alignaddr %o1, %g0, %o1
+
+ ldd [%o1 + 0x00], %f10
+.Lhwunaligned_loop:
+ ldd [%o1 + 0x08], %f12
+ ldd [%o1 + 0x10], %f14
+ ldd [%o1 + 0x18], %f16
+ ldd [%o1 + 0x20], %f18
+ ldd [%o1 + 0x28], %f20
+ ldd [%o1 + 0x30], %f22
+ ldd [%o1 + 0x38], %f24
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x40], %f26
+ add %o1, 0x40, %o1
+ prefetch [%o1 + 63], 20
+
+ faligndata %f10, %f12, %f8
+ faligndata %f12, %f14, %f10
+ faligndata %f14, %f16, %f12
+ faligndata %f16, %f18, %f14
+ faligndata %f18, %f20, %f16
+ faligndata %f20, %f22, %f18
+ faligndata %f22, %f24, %f20
+ faligndata %f24, %f26, %f22
+
+ .word 0x81b02820 ! SHA1
+
+ bne,pt SIZE_T_CC, .Lhwunaligned_loop
+ for %f26, %f26, %f10 ! %f10=%f26
+
+ ba .Lhwfinish
+ nop
+
+.align 16
+.Lsoftware:
+ save %sp,-STACK_FRAME,%sp
sllx $len,6,$len
add $inp,$len,$len
@@ -268,7 +355,7 @@ $code.=<<___;
add $E,@X[4],$E
st $E,[$ctx+16]
- bne `$bits==64?"%xcc":"%icc"`,.Lloop
+ bne SIZE_T_CC,.Lloop
andn $inp,7,$tmp0
ret
@@ -279,6 +366,62 @@ $code.=<<___;
.align 4
___
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
+# The purpose of these subroutines is to explicitly encode VIS
+# instructions, so that the module can be compiled without specifying
+# VIS extensions on the compiler command line, e.g. -xarch=v9 vs.
+# -xarch=v9a. The idea is to preserve the option of producing a
+# "universal" binary and let the program detect at run-time whether
+# the current CPU is VIS-capable.
+sub unvis {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my ($ref,$opf);
+my %visopf = ( "faligndata" => 0x048,
+ "for" => 0x07c );
+
+ $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+ if ($opf=$visopf{$mnemonic}) {
+ foreach ($rs1,$rs2,$rd) {
+ return $ref if (!/%f([0-9]{1,2})/);
+ $_=$1;
+ if ($1>=32) {
+ return $ref if ($1&1);
+ # re-encode for upper double register addressing
+ $_=($1|$1>>5)&31;
+ }
+ }
+
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
+ $ref;
+ } else {
+ return $ref;
+ }
+}
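
Editorial note (not part of the patch): with the encoder above, a VIS
mnemonic in the generated text is rewritten into a raw .word by the
substitution loop below, so the module assembles without -xarch=v9a. For
instance:

    # would print ".word 0x91b2890c !faligndata %f10,%f12,%f8"
    # (op3=0x36, opf=0x048, rd=%f8, rs1=%f10, rs2=%f12)
    print unvis("faligndata", "%f10", "%f12", "%f8"), "\n";
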
+sub unalignaddr {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
+my $ref="$mnemonic\t$rs1,$rs2,$rd";
+
+ foreach ($rs1,$rs2,$rd) {
+ if (/%([goli])([0-7])/) { $_=$bias{$1}+$2; }
+ else { return $ref; }
+ }
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00300|$rd<<25|$rs1<<14|$rs2,
+ $ref;
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ s/\b(f[^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
+ &unvis($1,$2,$3,$4)
+ /ge;
+ s/\b(alignaddr)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
+ &unalignaddr($1,$2,$3,$4)
+ /ge;
+
+ print $_,"\n";
+}
+
close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha1-x86_64.pl b/openssl/crypto/sha/asm/sha1-x86_64.pl
index f15c7ec39..9bb6b4981 100644
--- a/openssl/crypto/sha/asm/sha1-x86_64.pl
+++ b/openssl/crypto/sha/asm/sha1-x86_64.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -49,17 +49,37 @@
#
# Add AVX code path. See sha1-586.pl for further information.
+# May 2013.
+#
+# Add AVX2+BMI code path. Initial attempt (utilizing BMI instructions
+# and loading pair of consecutive blocks to 256-bit %ymm registers)
+# did not provide impressive performance improvement till a crucial
+# hint regarding the number of Xupdate iterations to pre-compute in
+# advance was provided by Ilya Albrekht of Intel Corp.
+
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
######################################################################
# Current performance is summarized in following table. Numbers are
# CPU clock cycles spent to process single byte (less is better).
#
-# x86_64 SSSE3 AVX
-# P4 9.8 -
-# Opteron 6.6 -
-# Core2 6.7 6.1/+10% -
-# Atom 11.0 9.7/+13% -
-# Westmere 7.1 5.6/+27% -
-# Sandy Bridge 7.9 6.3/+25% 5.2/+51%
+# x86_64 SSSE3 AVX[2]
+# P4 9.05 -
+# Opteron 6.26 -
+# Core2 6.55 6.05/+8% -
+# Westmere 6.73 5.30/+27% -
+# Sandy Bridge 7.70 6.10/+26% 4.99/+54%
+# Ivy Bridge 6.06 4.67/+30% 4.60/+32%
+# Haswell 5.45 4.15/+31% 3.57/+53%
+# Bulldozer 9.11 5.95/+53%
+# VIA Nano 9.32 7.15/+30%
+# Atom 10.3 9.17/+12%
+# Silvermont 13.1(*) 9.37/+40%
+#
+# (*) obviously suboptimal result, nothing was done about it,
+# because SSSE3 code is compiled unconditionally;
$flavour = shift;
$output = shift;
@@ -72,15 +92,27 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
-$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
- =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
- $1>=2.19);
-$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
- `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
- $1>=2.09);
-$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
- `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
- $1>=10);
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([2-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+$shaext=1; ### set to zero if compiling for 1.0.1
+$avx=1 if (!$shaext && $avx);
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
@@ -97,7 +129,7 @@ $num="%r10";
$t0="%eax";
$t1="%ebx";
$t2="%ecx";
-@xi=("%edx","%ebp");
+@xi=("%edx","%ebp","%r14d");
$A="%esi";
$B="%edi";
$C="%r11d";
@@ -112,42 +144,40 @@ my $j=$i+1;
$code.=<<___ if ($i==0);
mov `4*$i`($inp),$xi[0]
bswap $xi[0]
- mov $xi[0],`4*$i`(%rsp)
___
$code.=<<___ if ($i<15);
- mov $c,$t0
mov `4*$j`($inp),$xi[1]
+ mov $d,$t0
+ mov $xi[0],`4*$i`(%rsp)
mov $a,$t2
- xor $d,$t0
bswap $xi[1]
+ xor $c,$t0
rol \$5,$t2
- lea 0x5a827999($xi[0],$e),$e
and $b,$t0
- mov $xi[1],`4*$j`(%rsp)
+ lea 0x5a827999($xi[0],$e),$e
add $t2,$e
xor $d,$t0
rol \$30,$b
add $t0,$e
___
$code.=<<___ if ($i>=15);
- mov `4*($j%16)`(%rsp),$xi[1]
- mov $c,$t0
+ xor `4*($j%16)`(%rsp),$xi[1]
+ mov $d,$t0
+ mov $xi[0],`4*($i%16)`(%rsp)
mov $a,$t2
xor `4*(($j+2)%16)`(%rsp),$xi[1]
- xor $d,$t0
+ xor $c,$t0
rol \$5,$t2
xor `4*(($j+8)%16)`(%rsp),$xi[1]
and $b,$t0
lea 0x5a827999($xi[0],$e),$e
- xor `4*(($j+13)%16)`(%rsp),$xi[1]
+ rol \$30,$b
xor $d,$t0
- rol \$1,$xi[1]
add $t2,$e
- rol \$30,$b
- mov $xi[1],`4*($j%16)`(%rsp)
+ rol \$1,$xi[1]
add $t0,$e
___
-unshift(@xi,pop(@xi));
+push(@xi,shift(@xi));
}
sub BODY_20_39 {
@@ -155,62 +185,58 @@ my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
my $K=($i<40)?0x6ed9eba1:0xca62c1d6;
$code.=<<___ if ($i<79);
- mov `4*($j%16)`(%rsp),$xi[1]
- mov $c,$t0
+ xor `4*($j%16)`(%rsp),$xi[1]
+ mov $b,$t0
+ `"mov $xi[0],".4*($i%16)."(%rsp)" if ($i<72)`
mov $a,$t2
xor `4*(($j+2)%16)`(%rsp),$xi[1]
- xor $b,$t0
+ xor $d,$t0
rol \$5,$t2
- lea $K($xi[0],$e),$e
xor `4*(($j+8)%16)`(%rsp),$xi[1]
- xor $d,$t0
+ lea $K($xi[0],$e),$e
+ xor $c,$t0
add $t2,$e
- xor `4*(($j+13)%16)`(%rsp),$xi[1]
rol \$30,$b
add $t0,$e
rol \$1,$xi[1]
___
-$code.=<<___ if ($i<76);
- mov $xi[1],`4*($j%16)`(%rsp)
-___
$code.=<<___ if ($i==79);
- mov $c,$t0
+ mov $b,$t0
mov $a,$t2
- xor $b,$t0
+ xor $d,$t0
lea $K($xi[0],$e),$e
rol \$5,$t2
- xor $d,$t0
+ xor $c,$t0
add $t2,$e
rol \$30,$b
add $t0,$e
___
-unshift(@xi,pop(@xi));
+push(@xi,shift(@xi));
}
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___;
- mov `4*($j%16)`(%rsp),$xi[1]
- mov $c,$t0
- mov $c,$t1
+ xor `4*($j%16)`(%rsp),$xi[1]
+ mov $d,$t0
+ mov $xi[0],`4*($i%16)`(%rsp)
+ mov $d,$t1
xor `4*(($j+2)%16)`(%rsp),$xi[1]
- and $d,$t0
+ and $c,$t0
mov $a,$t2
xor `4*(($j+8)%16)`(%rsp),$xi[1]
- xor $d,$t1
lea 0x8f1bbcdc($xi[0],$e),$e
+ xor $c,$t1
rol \$5,$t2
- xor `4*(($j+13)%16)`(%rsp),$xi[1]
add $t0,$e
- and $b,$t1
rol \$1,$xi[1]
- add $t1,$e
- rol \$30,$b
- mov $xi[1],`4*($j%16)`(%rsp)
+ and $b,$t1
add $t2,$e
+ rol \$30,$b
+ add $t1,$e
___
-unshift(@xi,pop(@xi));
+push(@xi,shift(@xi));
}
$code.=<<___;
@@ -223,9 +249,19 @@ $code.=<<___;
sha1_block_data_order:
mov OPENSSL_ia32cap_P+0(%rip),%r9d
mov OPENSSL_ia32cap_P+4(%rip),%r8d
+ mov OPENSSL_ia32cap_P+8(%rip),%r10d
test \$`1<<9`,%r8d # check SSSE3 bit
jz .Lialu
___
+$code.=<<___ if ($shaext);
+ test \$`1<<29`,%r10d # check SHA bit
+ jnz _shaext_shortcut
+___
+$code.=<<___ if ($avx>1);
+ and \$`1<<3|1<<5|1<<8`,%r10d # check AVX2+BMI1+BMI2
+ cmp \$`1<<3|1<<5|1<<8`,%r10d
+ je _avx2_shortcut
+___
$code.=<<___ if ($avx);
and \$`1<<28`,%r8d # mask AVX bit
and \$`1<<30`,%r9d # mask "Intel CPU" bit
@@ -238,17 +274,18 @@ $code.=<<___;
.align 16
.Lialu:
+ mov %rsp,%rax
push %rbx
push %rbp
push %r12
push %r13
- mov %rsp,%r11
+ push %r14
mov %rdi,$ctx # reassigned argument
sub \$`8+16*4`,%rsp
mov %rsi,$inp # reassigned argument
and \$-64,%rsp
mov %rdx,$num # reassigned argument
- mov %r11,`16*4`(%rsp)
+ mov %rax,`16*4`(%rsp)
.Lprologue:
mov 0($ctx),$A
@@ -282,53 +319,187 @@ $code.=<<___;
jnz .Lloop
mov `16*4`(%rsp),%rsi
- mov (%rsi),%r13
- mov 8(%rsi),%r12
- mov 16(%rsi),%rbp
- mov 24(%rsi),%rbx
- lea 32(%rsi),%rsp
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
.Lepilogue:
ret
.size sha1_block_data_order,.-sha1_block_data_order
___
+if ($shaext) {{{
+######################################################################
+# Intel SHA Extensions implementation of SHA1 update function.
+#
+my ($ctx,$inp,$num)=("%rdi","%rsi","%rdx");
+my ($ABCD,$E,$E_,$BSWAP,$ABCD_SAVE,$E_SAVE)=map("%xmm$_",(0..3,8,9));
+my @MSG=map("%xmm$_",(4..7));
+
+$code.=<<___;
+.type sha1_block_data_order_shaext,\@function,3
+.align 32
+sha1_block_data_order_shaext:
+_shaext_shortcut:
+___
+$code.=<<___ if ($win64);
+ lea `-8-4*16`(%rsp),%rsp
+ movaps %xmm6,-8-4*16(%rax)
+ movaps %xmm7,-8-3*16(%rax)
+ movaps %xmm8,-8-2*16(%rax)
+ movaps %xmm9,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+ movdqu ($ctx),$ABCD
+ movd 16($ctx),$E
+ movdqa K_XX_XX+0xa0(%rip),$BSWAP # byte-n-word swap
+
+ movdqu ($inp),@MSG[0]
+ pshufd \$0b00011011,$ABCD,$ABCD # flip word order
+ movdqu 0x10($inp),@MSG[1]
+ pshufd \$0b00011011,$E,$E # flip word order
+ movdqu 0x20($inp),@MSG[2]
+ pshufb $BSWAP,@MSG[0]
+ movdqu 0x30($inp),@MSG[3]
+ pshufb $BSWAP,@MSG[1]
+ pshufb $BSWAP,@MSG[2]
+ movdqa $E,$E_SAVE # offload $E
+ pshufb $BSWAP,@MSG[3]
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ dec $num
+ lea 0x40($inp),%rax # next input block
+ paddd @MSG[0],$E
+ cmovne %rax,$inp
+ movdqa $ABCD,$ABCD_SAVE # offload $ABCD
+___
+for($i=0;$i<20-4;$i+=2) {
+$code.=<<___;
+ sha1msg1 @MSG[1],@MSG[0]
+ movdqa $ABCD,$E_
+ sha1rnds4 \$`int($i/5)`,$E,$ABCD # 0-3...
+ sha1nexte @MSG[1],$E_
+ pxor @MSG[2],@MSG[0]
+ sha1msg1 @MSG[2],@MSG[1]
+ sha1msg2 @MSG[3],@MSG[0]
+
+ movdqa $ABCD,$E
+ sha1rnds4 \$`int(($i+1)/5)`,$E_,$ABCD
+ sha1nexte @MSG[2],$E
+ pxor @MSG[3],@MSG[1]
+ sha1msg2 @MSG[0],@MSG[1]
+___
+ push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ movdqu ($inp),@MSG[0]
+ movdqa $ABCD,$E_
+ sha1rnds4 \$3,$E,$ABCD # 64-67
+ sha1nexte @MSG[1],$E_
+ movdqu 0x10($inp),@MSG[1]
+ pshufb $BSWAP,@MSG[0]
+
+ movdqa $ABCD,$E
+ sha1rnds4 \$3,$E_,$ABCD # 68-71
+ sha1nexte @MSG[2],$E
+ movdqu 0x20($inp),@MSG[2]
+ pshufb $BSWAP,@MSG[1]
+
+ movdqa $ABCD,$E_
+ sha1rnds4 \$3,$E,$ABCD # 72-75
+ sha1nexte @MSG[3],$E_
+ movdqu 0x30($inp),@MSG[3]
+ pshufb $BSWAP,@MSG[2]
+
+ movdqa $ABCD,$E
+ sha1rnds4 \$3,$E_,$ABCD # 76-79
+ sha1nexte $E_SAVE,$E
+ pshufb $BSWAP,@MSG[3]
+
+ paddd $ABCD_SAVE,$ABCD
+ movdqa $E,$E_SAVE # offload $E
+
+ jnz .Loop_shaext
+
+ pshufd \$0b00011011,$ABCD,$ABCD
+ pshufd \$0b00011011,$E,$E
+ movdqu $ABCD,($ctx)
+ movd $E,16($ctx)
+___
+$code.=<<___ if ($win64);
+ movaps -8-4*16(%rax),%xmm6
+ movaps -8-3*16(%rax),%xmm7
+ movaps -8-2*16(%rax),%xmm8
+ movaps -8-1*16(%rax),%xmm9
+ mov %rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+ ret
+.size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
+___
+}}}
{{{
my $Xi=4;
my @X=map("%xmm$_",(4..7,0..3));
my @Tx=map("%xmm$_",(8..10));
+my $Kx="%xmm11";
my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
my @T=("%esi","%edi");
my $j=0;
+my $rx=0;
my $K_XX_XX="%r11";
my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };
+{ my $sn;
+sub align32() {
+ ++$sn;
+$code.=<<___;
+ jmp .Lalign32_$sn # see "Decoded ICache" in manual
+.align 32
+.Lalign32_$sn:
+___
+}
+}
+
$code.=<<___;
.type sha1_block_data_order_ssse3,\@function,3
.align 16
sha1_block_data_order_ssse3:
_ssse3_shortcut:
+ mov %rsp,%rax
push %rbx
push %rbp
push %r12
- lea `-64-($win64?5*16:0)`(%rsp),%rsp
+ push %r13 # redundant, done to share Win64 SE handler
+ push %r14
+ lea `-64-($win64?6*16:0)`(%rsp),%rsp
___
$code.=<<___ if ($win64);
- movaps %xmm6,64+0(%rsp)
- movaps %xmm7,64+16(%rsp)
- movaps %xmm8,64+32(%rsp)
- movaps %xmm9,64+48(%rsp)
- movaps %xmm10,64+64(%rsp)
+ movaps %xmm6,-40-6*16(%rax)
+ movaps %xmm7,-40-5*16(%rax)
+ movaps %xmm8,-40-4*16(%rax)
+ movaps %xmm9,-40-3*16(%rax)
+ movaps %xmm10,-40-2*16(%rax)
+ movaps %xmm11,-40-1*16(%rax)
.Lprologue_ssse3:
___
$code.=<<___;
+ mov %rax,%r14 # original %rsp
+ and \$-64,%rsp
mov %rdi,$ctx # reassigned argument
mov %rsi,$inp # reassigned argument
mov %rdx,$num # reassigned argument
shl \$6,$num
add $inp,$num
- lea K_XX_XX(%rip),$K_XX_XX
+ lea K_XX_XX+64(%rip),$K_XX_XX
mov 0($ctx),$A # load context
mov 4($ctx),$B
@@ -336,19 +507,22 @@ $code.=<<___;
mov 12($ctx),$D
mov $B,@T[0] # magic seed
mov 16($ctx),$E
+ mov $C,@T[1]
+ xor $D,@T[1]
+ and @T[1],@T[0]
movdqa 64($K_XX_XX),@X[2] # pbswap mask
- movdqa 0($K_XX_XX),@Tx[1] # K_00_19
+ movdqa -64($K_XX_XX),@Tx[1] # K_00_19
movdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
movdqu 16($inp),@X[-3&7]
movdqu 32($inp),@X[-2&7]
movdqu 48($inp),@X[-1&7]
pshufb @X[2],@X[-4&7] # byte swap
- add \$64,$inp
pshufb @X[2],@X[-3&7]
pshufb @X[2],@X[-2&7]
- pshufb @X[2],@X[-1&7]
+ add \$64,$inp
paddd @Tx[1],@X[-4&7] # add K_00_19
+ pshufb @X[2],@X[-1&7]
paddd @Tx[1],@X[-3&7]
paddd @Tx[1],@X[-2&7]
movdqa @X[-4&7],0(%rsp) # X[]+K xfer to IALU
@@ -373,61 +547,61 @@ sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
my ($a,$b,$c,$d,$e);
- &movdqa (@X[0],@X[-3&7]);
- eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ &pshufd (@X[0],@X[-4&7],0xee); # was &movdqa (@X[0],@X[-3&7]);
eval(shift(@insns));
&movdqa (@Tx[0],@X[-1&7]);
- &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
+ &paddd (@Tx[1],@X[-1&7]);
eval(shift(@insns));
eval(shift(@insns));
- &paddd (@Tx[1],@X[-1&7]);
+ &punpcklqdq(@X[0],@X[-3&7]); # compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
eval(shift(@insns));
+ eval(shift(@insns)); # rol
eval(shift(@insns));
&psrldq (@Tx[0],4); # "X[-3]", 3 dwords
eval(shift(@insns));
eval(shift(@insns));
+
&pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
eval(shift(@insns));
- eval(shift(@insns));
-
+ eval(shift(@insns)); # ror
&pxor (@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
- eval(shift(@insns));
&pxor (@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
eval(shift(@insns));
- eval(shift(@insns));
+ eval(shift(@insns)); # rol
&movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
eval(shift(@insns));
eval(shift(@insns));
&movdqa (@Tx[2],@X[0]);
- &movdqa (@Tx[0],@X[0]);
- eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ &movdqa (@Tx[0],@X[0]);
eval(shift(@insns));
&pslldq (@Tx[2],12); # "X[0]"<<96, extract one dword
&paddd (@X[0],@X[0]);
eval(shift(@insns));
eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
&psrld (@Tx[0],31);
eval(shift(@insns));
+ eval(shift(@insns)); # rol
eval(shift(@insns));
&movdqa (@Tx[1],@Tx[2]);
eval(shift(@insns));
eval(shift(@insns));
&psrld (@Tx[2],30);
- &por (@X[0],@Tx[0]); # "X[0]"<<<=1
eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ &por (@X[0],@Tx[0]); # "X[0]"<<<=1
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
@@ -435,12 +609,13 @@ sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
&pslld (@Tx[1],2);
&pxor (@X[0],@Tx[2]);
eval(shift(@insns));
- eval(shift(@insns));
- &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
+ &movdqa (@Tx[2],eval(2*16*(($Xi)/5)-64)."($K_XX_XX)"); # K_XX_XX
+ eval(shift(@insns)); # rol
eval(shift(@insns));
eval(shift(@insns));
&pxor (@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
+ &pshufd (@Tx[1],@X[-1&7],0xee) if ($Xi==7); # was &movdqa (@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79
foreach (@insns) { eval; } # remaining instructions [if any]
@@ -451,27 +626,30 @@ sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
sub Xupdate_ssse3_32_79()
{ use integer;
my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
+ my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions
my ($a,$b,$c,$d,$e);
- &movdqa (@Tx[0],@X[-1&7]) if ($Xi==8);
- eval(shift(@insns)); # body_20_39
+ eval(shift(@insns)) if ($Xi==8);
&pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
- &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"
+ eval(shift(@insns)) if ($Xi==8);
+ eval(shift(@insns)); # body_20_39
eval(shift(@insns));
+ eval(shift(@insns)) if (@insns[1] =~ /_ror/);
+ eval(shift(@insns)) if (@insns[0] =~ /_ror/);
+ &punpcklqdq(@Tx[0],@X[-1&7]); # compose "X[-6]", was &palignr(@Tx[0],@X[-2&7],8);
eval(shift(@insns));
eval(shift(@insns)); # rol
&pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
eval(shift(@insns));
- eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
+ eval(shift(@insns));
if ($Xi%5) {
&movdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
} else { # ... or load next one
- &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
+ &movdqa (@Tx[2],eval(2*16*($Xi/5)-64)."($K_XX_XX)");
}
- &paddd (@Tx[1],@X[-1&7]);
eval(shift(@insns)); # ror
+ &paddd (@Tx[1],@X[-1&7]);
eval(shift(@insns));
&pxor (@X[0],@Tx[0]); # "X[0]"^="X[-6]"
@@ -479,29 +657,31 @@ sub Xupdate_ssse3_32_79()
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); # rol
+ eval(shift(@insns)) if (@insns[0] =~ /_ror/);
&movdqa (@Tx[0],@X[0]);
- &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
eval(shift(@insns));
eval(shift(@insns));
+ &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
eval(shift(@insns)); # ror
eval(shift(@insns));
+ eval(shift(@insns)); # body_20_39
&pslld (@X[0],2);
- eval(shift(@insns)); # body_20_39
eval(shift(@insns));
- &psrld (@Tx[0],30);
eval(shift(@insns));
- eval(shift(@insns)); # rol
+ &psrld (@Tx[0],30);
+ eval(shift(@insns)) if (@insns[0] =~ /_rol/);# rol
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); # ror
- eval(shift(@insns));
&por (@X[0],@Tx[0]); # "X[0]"<<<=2
- eval(shift(@insns)); # body_20_39
eval(shift(@insns));
- &movdqa (@Tx[1],@X[0]) if ($Xi<19);
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns)) if (@insns[1] =~ /_rol/);
+ eval(shift(@insns)) if (@insns[0] =~ /_rol/);
+ &pshufd(@Tx[1],@X[-1&7],0xee) if ($Xi<19); # was &movdqa (@Tx[1],@X[0])
eval(shift(@insns));
eval(shift(@insns)); # rol
eval(shift(@insns));
@@ -522,10 +702,11 @@ sub Xuplast_ssse3_80()
my ($a,$b,$c,$d,$e);
eval(shift(@insns));
- &paddd (@Tx[1],@X[-1&7]);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
+ &paddd (@Tx[1],@X[-1&7]);
+ eval(shift(@insns));
eval(shift(@insns));
&movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
@@ -538,7 +719,7 @@ sub Xuplast_ssse3_80()
unshift(@Tx,pop(@Tx));
&movdqa (@X[2],"64($K_XX_XX)"); # pbswap mask
- &movdqa (@Tx[1],"0($K_XX_XX)"); # K_00_19
+ &movdqa (@Tx[1],"-64($K_XX_XX)"); # K_00_19
&movdqu (@X[-4&7],"0($inp)"); # load input
&movdqu (@X[-3&7],"16($inp)");
&movdqu (@X[-2&7],"32($inp)");
@@ -557,9 +738,12 @@ sub Xloop_ssse3()
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
&pshufb (@X[($Xi-3)&7],@X[2]);
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
&paddd (@X[($Xi-4)&7],@Tx[1]);
eval(shift(@insns));
eval(shift(@insns));
@@ -568,6 +752,8 @@ sub Xloop_ssse3()
&movdqa (eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]); # X[]+K xfer to IALU
eval(shift(@insns));
eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
&psubd (@X[($Xi-4)&7],@Tx[1]);
foreach (@insns) { eval; }
@@ -583,51 +769,66 @@ sub Xtail_ssse3()
foreach (@insns) { eval; }
}
-sub body_00_19 () {
+sub body_00_19 () { # ((c^d)&b)^d
+ # on start @T[0]=(c^d)&b
+ return &body_20_39() if ($rx==19); $rx++;
(
'($a,$b,$c,$d,$e)=@V;'.
- '&add ($e,eval(4*($j&15))."(%rsp)");', # X[]+K xfer
- '&xor ($c,$d);',
- '&mov (@T[1],$a);', # $b in next round
- '&$_rol ($a,5);',
- '&and (@T[0],$c);', # ($b&($c^$d))
- '&xor ($c,$d);', # restore $c
- '&xor (@T[0],$d);',
- '&add ($e,$a);',
- '&$_ror ($b,$j?7:2);', # $b>>>2
- '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ '&$_ror ($b,$j?7:2)', # $b>>>2
+ '&xor (@T[0],$d)',
+ '&mov (@T[1],$a)', # $b for next round
+
+ '&add ($e,eval(4*($j&15))."(%rsp)")', # X[]+K xfer
+ '&xor ($b,$c)', # $c^$d for next round
+
+ '&$_rol ($a,5)',
+ '&add ($e,@T[0])',
+ '&and (@T[1],$b)', # ($b&($c^$d)) for next round
+
+ '&xor ($b,$c)', # restore $b
+ '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
);
}
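+# Rounds 0..19 use the "choose" function in the form ((c^d)&b)^d, which
+# equals the textbook (b&c)|(~b&d) but needs one register less. An
+# illustrative standalone check of the identity (not emitted as code):
+#
+#   for my $v (0..7) { my ($b,$c,$d)=map { ($v>>$_)&1 } 2,1,0;
+#     die if (((($c^$d)&$b)^$d) != (($b&$c)|((~$b&1)&$d))); }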
-sub body_20_39 () {
+sub body_20_39 () { # b^d^c
+ # on entry @T[0]=b^d
+ return &body_40_59() if ($rx==39); $rx++;
(
'($a,$b,$c,$d,$e)=@V;'.
- '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer
- '&xor (@T[0],$d);', # ($b^$d)
- '&mov (@T[1],$a);', # $b in next round
- '&$_rol ($a,5);',
- '&xor (@T[0],$c);', # ($b^$d^$c)
- '&add ($e,$a);',
- '&$_ror ($b,7);', # $b>>>2
- '&add ($e,@T[0]);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ '&add ($e,eval(4*($j&15))."(%rsp)")', # X[]+K xfer
+ '&xor (@T[0],$d) if($j==19);'.
+ '&xor (@T[0],$c) if($j> 19)', # ($b^$d^$c)
+ '&mov (@T[1],$a)', # $b for next round
+
+ '&$_rol ($a,5)',
+ '&add ($e,@T[0])',
+ '&xor (@T[1],$c) if ($j< 79)', # $b^$d for next round
+
+ '&$_ror ($b,7)', # $b>>>2
+ '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
);
}
-sub body_40_59 () {
+sub body_40_59 () { # ((b^c)&(c^d))^c
+ # on entry @T[0]=(b^c), (c^=d)
+ $rx++;
(
'($a,$b,$c,$d,$e)=@V;'.
- '&mov (@T[1],$c);',
- '&xor ($c,$d);',
- '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer
- '&and (@T[1],$d);',
- '&and (@T[0],$c);', # ($b&($c^$d))
- '&$_ror ($b,7);', # $b>>>2
- '&add ($e,@T[1]);',
- '&mov (@T[1],$a);', # $b in next round
- '&$_rol ($a,5);',
- '&add ($e,@T[0]);',
- '&xor ($c,$d);', # restore $c
- '&add ($e,$a);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ '&add ($e,eval(4*($j&15))."(%rsp)")', # X[]+K xfer
+ '&and (@T[0],$c) if ($j>=40)', # (b^c)&(c^d)
+ '&xor ($c,$d) if ($j>=40)', # restore $c
+
+ '&$_ror ($b,7)', # $b>>>2
+ '&mov (@T[1],$a)', # $b for next round
+ '&xor (@T[0],$c)',
+
+ '&$_rol ($a,5)',
+ '&add ($e,@T[0])',
+ '&xor (@T[1],$c) if ($j==59);'.
+ '&xor (@T[1],$b) if ($j< 59)', # b^c for next round
+
+ '&xor ($b,$c) if ($j< 59)', # c^d for next round
+ '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
);
}
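+# Rounds 40..59 compute Maj(b,c,d)=(b&c)|(b&d)|(c&d) as ((b^c)&(c^d))^c:
+# when b==c it collapses to c(==b), otherwise to d, i.e. the majority
+# vote either way. Illustrative standalone check:
+#
+#   for my $v (0..7) { my ($b,$c,$d)=map { ($v>>$_)&1 } 2,1,0;
+#     die if (((($b^$c)&($c^$d))^$c) != (($b&$c)|($b&$d)|($c&$d))); }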
$code.=<<___;
@@ -668,8 +869,11 @@ $code.=<<___;
mov @T[0],4($ctx)
mov @T[0],$B # magic seed
mov $C,8($ctx)
+ mov $C,@T[1]
mov $D,12($ctx)
+ xor $D,@T[1]
mov $E,16($ctx)
+ and @T[1],@T[0]
jmp .Loop_ssse3
.align 16
@@ -694,31 +898,34 @@ $code.=<<___;
mov $E,16($ctx)
___
$code.=<<___ if ($win64);
- movaps 64+0(%rsp),%xmm6
- movaps 64+16(%rsp),%xmm7
- movaps 64+32(%rsp),%xmm8
- movaps 64+48(%rsp),%xmm9
- movaps 64+64(%rsp),%xmm10
+ movaps -40-6*16(%r14),%xmm6
+ movaps -40-5*16(%r14),%xmm7
+ movaps -40-4*16(%r14),%xmm8
+ movaps -40-3*16(%r14),%xmm9
+ movaps -40-2*16(%r14),%xmm10
+ movaps -40-1*16(%r14),%xmm11
___
$code.=<<___;
- lea `64+($win64?5*16:0)`(%rsp),%rsi
- mov 0(%rsi),%r12
- mov 8(%rsi),%rbp
- mov 16(%rsi),%rbx
- lea 24(%rsi),%rsp
+ lea (%r14),%rsi
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
.Lepilogue_ssse3:
ret
.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3
___
if ($avx) {
-my $Xi=4;
-my @X=map("%xmm$_",(4..7,0..3));
-my @Tx=map("%xmm$_",(8..10));
-my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
-my @T=("%esi","%edi");
-my $j=0;
-my $K_XX_XX="%r11";
+$Xi=4; # reset variables
+@X=map("%xmm$_",(4..7,0..3));
+@Tx=map("%xmm$_",(8..10));
+$j=0;
+$rx=0;
+
+my $done_avx_label=".Ldone_avx";
my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };
@@ -728,28 +935,34 @@ $code.=<<___;
.align 16
sha1_block_data_order_avx:
_avx_shortcut:
+ mov %rsp,%rax
push %rbx
push %rbp
push %r12
- lea `-64-($win64?5*16:0)`(%rsp),%rsp
+ push %r13 # redundant, done to share Win64 SE handler
+ push %r14
+ lea `-64-($win64?6*16:0)`(%rsp),%rsp
+ vzeroupper
___
$code.=<<___ if ($win64);
- movaps %xmm6,64+0(%rsp)
- movaps %xmm7,64+16(%rsp)
- movaps %xmm8,64+32(%rsp)
- movaps %xmm9,64+48(%rsp)
- movaps %xmm10,64+64(%rsp)
+ vmovaps %xmm6,-40-6*16(%rax)
+ vmovaps %xmm7,-40-5*16(%rax)
+ vmovaps %xmm8,-40-4*16(%rax)
+ vmovaps %xmm9,-40-3*16(%rax)
+ vmovaps %xmm10,-40-2*16(%rax)
+ vmovaps %xmm11,-40-1*16(%rax)
.Lprologue_avx:
___
$code.=<<___;
+ mov %rax,%r14 # original %rsp
+ and \$-64,%rsp
mov %rdi,$ctx # reassigned argument
mov %rsi,$inp # reassigned argument
mov %rdx,$num # reassigned argument
- vzeroupper
shl \$6,$num
add $inp,$num
- lea K_XX_XX(%rip),$K_XX_XX
+ lea K_XX_XX+64(%rip),$K_XX_XX
mov 0($ctx),$A # load context
mov 4($ctx),$B
@@ -757,9 +970,12 @@ $code.=<<___;
mov 12($ctx),$D
mov $B,@T[0] # magic seed
mov 16($ctx),$E
+ mov $C,@T[1]
+ xor $D,@T[1]
+ and @T[1],@T[0]
vmovdqa 64($K_XX_XX),@X[2] # pbswap mask
- vmovdqa 0($K_XX_XX),@Tx[1] # K_00_19
+ vmovdqa -64($K_XX_XX),$Kx # K_00_19
vmovdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
vmovdqu 16($inp),@X[-3&7]
vmovdqu 32($inp),@X[-2&7]
@@ -769,9 +985,9 @@ $code.=<<___;
vpshufb @X[2],@X[-3&7],@X[-3&7]
vpshufb @X[2],@X[-2&7],@X[-2&7]
vpshufb @X[2],@X[-1&7],@X[-1&7]
- vpaddd @Tx[1],@X[-4&7],@X[0] # add K_00_19
- vpaddd @Tx[1],@X[-3&7],@X[1]
- vpaddd @Tx[1],@X[-2&7],@X[2]
+ vpaddd $Kx,@X[-4&7],@X[0] # add K_00_19
+ vpaddd $Kx,@X[-3&7],@X[1]
+ vpaddd $Kx,@X[-2&7],@X[2]
vmovdqa @X[0],0(%rsp) # X[]+K xfer to IALU
vmovdqa @X[1],16(%rsp)
vmovdqa @X[2],32(%rsp)
@@ -790,10 +1006,10 @@ sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
eval(shift(@insns));
eval(shift(@insns));
- &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
+ &vpaddd (@Tx[1],$Kx,@X[-1&7]);
eval(shift(@insns));
eval(shift(@insns));
- &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
+ &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
eval(shift(@insns));
eval(shift(@insns));
&vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
@@ -843,7 +1059,7 @@ sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
&vpxor (@X[0],@X[0],@Tx[2]); # "X[0]"^=("X[0]">>96)<<<2
eval(shift(@insns));
eval(shift(@insns));
- &vmovdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
+ &vmovdqa ($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)") if ($Xi%5==0); # K_XX_XX
eval(shift(@insns));
eval(shift(@insns));
@@ -851,13 +1067,12 @@ sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
foreach (@insns) { eval; } # remaining instructions [if any]
$Xi++; push(@X,shift(@X)); # "rotate" X[]
- push(@Tx,shift(@Tx));
}
sub Xupdate_avx_32_79()
{ use integer;
my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
+ my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions
my ($a,$b,$c,$d,$e);
&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
@@ -870,12 +1085,8 @@ sub Xupdate_avx_32_79()
&vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
eval(shift(@insns));
eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
- if ($Xi%5) {
- &vmovdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
- } else { # ... or load next one
- &vmovdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
- }
- &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
+ &vpaddd (@Tx[1],$Kx,@X[-1&7]);
+ &vmovdqa ($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)") if ($Xi%5==0);
eval(shift(@insns)); # ror
eval(shift(@insns));
@@ -905,7 +1116,6 @@ sub Xupdate_avx_32_79()
&vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=2
eval(shift(@insns)); # body_20_39
eval(shift(@insns));
- &vmovdqa (@Tx[1],@X[0]) if ($Xi<19);
eval(shift(@insns));
eval(shift(@insns)); # rol
eval(shift(@insns));
@@ -916,7 +1126,6 @@ sub Xupdate_avx_32_79()
foreach (@insns) { eval; } # remaining instructions
$Xi++; push(@X,shift(@X)); # "rotate" X[]
- push(@Tx,shift(@Tx));
}
sub Xuplast_avx_80()
@@ -926,23 +1135,21 @@ sub Xuplast_avx_80()
my ($a,$b,$c,$d,$e);
eval(shift(@insns));
- &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
+ &vpaddd (@Tx[1],$Kx,@X[-1&7]);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
- &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
+ &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
foreach (@insns) { eval; } # remaining instructions
&cmp ($inp,$num);
- &je (".Ldone_avx");
-
- unshift(@Tx,pop(@Tx));
+ &je ($done_avx_label);
&vmovdqa(@X[2],"64($K_XX_XX)"); # pbswap mask
- &vmovdqa(@Tx[1],"0($K_XX_XX)"); # K_00_19
+ &vmovdqa($Kx,"-64($K_XX_XX)"); # K_00_19
&vmovdqu(@X[-4&7],"0($inp)"); # load input
&vmovdqu(@X[-3&7],"16($inp)");
&vmovdqu(@X[-2&7],"32($inp)");
@@ -964,7 +1171,7 @@ sub Xloop_avx()
&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
eval(shift(@insns));
eval(shift(@insns));
- &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]);
+ &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],$Kx);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
@@ -1024,12 +1231,15 @@ $code.=<<___;
mov @T[0],4($ctx)
mov @T[0],$B # magic seed
mov $C,8($ctx)
+ mov $C,@T[1]
mov $D,12($ctx)
+ xor $D,@T[1]
mov $E,16($ctx)
+ and @T[1],@T[0]
jmp .Loop_avx
.align 16
-.Ldone_avx:
+$done_avx_label:
___
$j=$saved_j; @V=@saved_V;
@@ -1052,31 +1262,520 @@ $code.=<<___;
mov $E,16($ctx)
___
$code.=<<___ if ($win64);
- movaps 64+0(%rsp),%xmm6
- movaps 64+16(%rsp),%xmm7
- movaps 64+32(%rsp),%xmm8
- movaps 64+48(%rsp),%xmm9
- movaps 64+64(%rsp),%xmm10
+ movaps -40-6*16(%r14),%xmm6
+ movaps -40-5*16(%r14),%xmm7
+ movaps -40-4*16(%r14),%xmm8
+ movaps -40-3*16(%r14),%xmm9
+ movaps -40-2*16(%r14),%xmm10
+ movaps -40-1*16(%r14),%xmm11
___
$code.=<<___;
- lea `64+($win64?5*16:0)`(%rsp),%rsi
- mov 0(%rsi),%r12
- mov 8(%rsi),%rbp
- mov 16(%rsi),%rbx
- lea 24(%rsi),%rsp
+ lea (%r14),%rsi
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
.Lepilogue_avx:
ret
.size sha1_block_data_order_avx,.-sha1_block_data_order_avx
___
+
+if ($avx>1) {
+use integer;
+$Xi=4; # reset variables
+@X=map("%ymm$_",(4..7,0..3));
+@Tx=map("%ymm$_",(8..10));
+$Kx="%ymm11";
+$j=0;
+
+my @ROTX=("%eax","%ebp","%ebx","%ecx","%edx","%esi");
+my ($a5,$t0)=("%r12d","%edi");
+
+my ($A,$F,$B,$C,$D,$E)=@ROTX;
+my $rx=0;
+my $frame="%r13";
+
+$code.=<<___;
+.type sha1_block_data_order_avx2,\@function,3
+.align 16
+sha1_block_data_order_avx2:
+_avx2_shortcut:
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ lea -6*16(%rsp),%rsp
+ vmovaps %xmm6,-40-6*16(%rax)
+ vmovaps %xmm7,-40-5*16(%rax)
+ vmovaps %xmm8,-40-4*16(%rax)
+ vmovaps %xmm9,-40-3*16(%rax)
+ vmovaps %xmm10,-40-2*16(%rax)
+ vmovaps %xmm11,-40-1*16(%rax)
+.Lprologue_avx2:
+___
+$code.=<<___;
+ mov %rax,%r14 # original %rsp
+ mov %rdi,$ctx # reassigned argument
+ mov %rsi,$inp # reassigned argument
+ mov %rdx,$num # reassigned argument
+
+ lea -640(%rsp),%rsp
+ shl \$6,$num
+ lea 64($inp),$frame
+ and \$-128,%rsp
+ add $inp,$num
+ lea K_XX_XX+64(%rip),$K_XX_XX
+
+ mov 0($ctx),$A # load context
+ cmp $num,$frame
+ cmovae $inp,$frame # next or same block
+ mov 4($ctx),$F
+ mov 8($ctx),$C
+ mov 12($ctx),$D
+ mov 16($ctx),$E
+ vmovdqu 64($K_XX_XX),@X[2] # pbswap mask
+
+ vmovdqu ($inp),%xmm0
+ vmovdqu 16($inp),%xmm1
+ vmovdqu 32($inp),%xmm2
+ vmovdqu 48($inp),%xmm3
+ lea 64($inp),$inp
+ vinserti128 \$1,($frame),@X[-4&7],@X[-4&7]
+ vinserti128 \$1,16($frame),@X[-3&7],@X[-3&7]
+ vpshufb @X[2],@X[-4&7],@X[-4&7]
+ vinserti128 \$1,32($frame),@X[-2&7],@X[-2&7]
+ vpshufb @X[2],@X[-3&7],@X[-3&7]
+ vinserti128 \$1,48($frame),@X[-1&7],@X[-1&7]
+ vpshufb @X[2],@X[-2&7],@X[-2&7]
+ vmovdqu -64($K_XX_XX),$Kx # K_00_19
+ vpshufb @X[2],@X[-1&7],@X[-1&7]
+
+ vpaddd $Kx,@X[-4&7],@X[0] # add K_00_19
+ vpaddd $Kx,@X[-3&7],@X[1]
+ vmovdqu @X[0],0(%rsp) # X[]+K xfer to IALU
+ vpaddd $Kx,@X[-2&7],@X[2]
+ vmovdqu @X[1],32(%rsp)
+ vpaddd $Kx,@X[-1&7],@X[3]
+ vmovdqu @X[2],64(%rsp)
+ vmovdqu @X[3],96(%rsp)
+___
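+# vinserti128 above splices the following block (or, via cmovae, the
+# current one again at the end of the input) into the high 128-bit lane
+# of each ymm, so every vector op below computes the message schedule
+# for two adjacent blocks at once; the low 16 bytes of each 32-byte
+# stack slot belong to the first block, the high 16 to the second.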
+for (;$Xi<8;$Xi++) { # Xupdate_avx2_16_31
+ use integer;
+
+ &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]"
+ &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
+ &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
+ &vpxor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
+ &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
+ &vpsrld (@Tx[0],@X[0],31);
+ &vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)") if ($Xi%5==0); # K_XX_XX
+ &vpslldq(@Tx[2],@X[0],12); # "X[0]"<<96, extract one dword
+ &vpaddd (@X[0],@X[0],@X[0]);
+ &vpsrld (@Tx[1],@Tx[2],30);
+ &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=1
+ &vpslld (@Tx[2],@Tx[2],2);
+ &vpxor (@X[0],@X[0],@Tx[1]);
+ &vpxor (@X[0],@X[0],@Tx[2]); # "X[0]"^=("X[0]">>96)<<<2
+ &vpaddd (@Tx[1],@X[0],$Kx);
+ &vmovdqu("32*$Xi(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+$code.=<<___;
+ lea 128(%rsp),$frame
+ jmp .Loop_avx2
+.align 32
+.Loop_avx2:
+ rorx \$2,$F,$B
+ andn $D,$F,$t0
+ and $C,$F
+ xor $t0,$F
+___
+sub bodyx_00_19 () { # 8 instructions, 3 cycles critical path
+ # at start $f=(b&c)^(~b&d), $b>>>=2
+ return &bodyx_20_39() if ($rx==19); $rx++;
+ (
+ '($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+ '&add ($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'. # e+=X[i]+K
+ '&lea ($frame,"256($frame)") if ($j%32==31);',
+ '&andn ($t0,$a,$c)', # ~b&d for next round
+
+ '&add ($e,$f)', # e+=(b&c)^(~b&d)
+ '&rorx ($a5,$a,27)', # a<<<5
+ '&rorx ($f,$a,2)', # b>>>2 for next round
+ '&and ($a,$b)', # b&c for next round
+
+ '&add ($e,$a5)', # e+=a<<<5
+ '&xor ($a,$t0);'. # f=(b&c)^(~b&d) for next round
+
+ 'unshift(@ROTX,pop(@ROTX)); $j++;'
+ )
+}
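+# rorx leaves the flags alone and nothing here consumes the flags andn
+# sets, so the only true dependency chain is the three adds into $e;
+# that is the 3-cycle critical path claimed above. F() for the *next*
+# round is precomputed in parallel from $a, which becomes that round's b.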
+
+sub bodyx_20_39 () { # 7 instructions, 2 cycles critical path
+ # on entry $f=b^c^d, $b>>>=2
+ return &bodyx_40_59() if ($rx==39); $rx++;
+ (
+ '($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+ '&add ($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'. # e+=X[i]+K
+ '&lea ($frame,"256($frame)") if ($j%32==31);',
+
+ '&lea ($e,"($e,$f)")', # e+=b^c^d
+ '&rorx ($a5,$a,27)', # a<<<5
+ '&rorx ($f,$a,2) if ($j<79)', # b>>>2 in next round
+ '&xor ($a,$b) if ($j<79)', # b^c for next round
+
+ '&add ($e,$a5)', # e+=a<<<5
+ '&xor ($a,$c) if ($j<79);'. # f=b^c^d for next round
+
+ 'unshift(@ROTX,pop(@ROTX)); $j++;'
+ )
+}
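+# Accumulating $e with lea instead of add presumably helps because lea
+# does not touch the flags, letting the rorx/xor recurrence for the
+# next round's $f interleave freely; that recurrence is the 2-cycle
+# critical path noted above.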
+
+sub bodyx_40_59 () { # 10 instructions, 3 cycles critical path
+ # on entry $f=((b^c)&(c^d)), $b>>>=2
+ $rx++;
+ (
+ '($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+ '&add ($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'. # e+=X[i]+K
+ '&lea ($frame,"256($frame)") if ($j%32==31);',
+ '&xor ($f,$c) if ($j>39)', # (b^c)&(c^d)^c
+ '&mov ($t0,$b) if ($j<59)', # count on zero latency
+ '&xor ($t0,$c) if ($j<59)', # c^d for next round
+
+ '&lea ($e,"($e,$f)")', # e+=(b^c)&(c^d)^c
+ '&rorx ($a5,$a,27)', # a<<<5
+ '&rorx ($f,$a,2)', # b>>>2 in next round
+ '&xor ($a,$b)', # b^c for next round
+
+ '&add ($e,$a5)', # e+=a<<<5
+ '&and ($a,$t0) if ($j< 59);'. # f=(b^c)&(c^d) for next round
+ '&xor ($a,$c) if ($j==59);'. # f=b^c^d for next round
+
+ 'unshift(@ROTX,pop(@ROTX)); $j++;'
+ )
+}
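+# Each 32-byte stack slot holds X[i..i+3]+K for both lanes: the current
+# block's words in the low 16 bytes, the next block's in the high 16,
+# hence the stride of 32 rather than 16. With $frame biased by +128 and
+# advanced 256 bytes every 32 rounds, round $j==5, for instance, reads
+# (32*1+4*1)%256-128 = -92($frame).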
+
+sub Xupdate_avx2_16_31()		# recall that $Xi starts with 4
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body,&$body); # 35 instructions
+ my ($a,$b,$c,$d,$e);
+
+ &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
+ &vpxor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpsrld (@Tx[0],@X[0],31);
+ &vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)") if ($Xi%5==0); # K_XX_XX
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpslldq(@Tx[2],@X[0],12); # "X[0]"<<96, extract one dword
+ &vpaddd (@X[0],@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpsrld (@Tx[1],@Tx[2],30);
+ &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=1
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpslld (@Tx[2],@Tx[2],2);
+ &vpxor (@X[0],@X[0],@Tx[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@Tx[2]); # "X[0]"^=("X[0]">>96)<<<2
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpaddd (@Tx[1],@X[0],$Kx);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vmovdqu(eval(32*($Xi))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++;
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xupdate_avx2_32_79()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body,&$body); # 35 to 50 instructions
+ my ($a,$b,$c,$d,$e);
+
+ &vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
+ &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
+ &vmovdqu($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)") if ($Xi%5==0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-6]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpsrld (@Tx[0],@X[0],30);
+ &vpslld (@X[0],@X[0],2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ #&vpslld (@X[0],@X[0],2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=2
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpaddd (@Tx[1],@X[0],$Kx);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vmovdqu("32*$Xi(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ $Xi++;
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xloop_avx2()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ foreach (@insns) { eval; }
+}
+
+ &align32();
+ &Xupdate_avx2_32_79(\&bodyx_00_19);
+ &Xupdate_avx2_32_79(\&bodyx_00_19);
+ &Xupdate_avx2_32_79(\&bodyx_00_19);
+ &Xupdate_avx2_32_79(\&bodyx_00_19);
+
+ &Xupdate_avx2_32_79(\&bodyx_20_39);
+ &Xupdate_avx2_32_79(\&bodyx_20_39);
+ &Xupdate_avx2_32_79(\&bodyx_20_39);
+ &Xupdate_avx2_32_79(\&bodyx_20_39);
+
+ &align32();
+ &Xupdate_avx2_32_79(\&bodyx_40_59);
+ &Xupdate_avx2_32_79(\&bodyx_40_59);
+ &Xupdate_avx2_32_79(\&bodyx_40_59);
+ &Xupdate_avx2_32_79(\&bodyx_40_59);
+
+ &Xloop_avx2(\&bodyx_20_39);
+ &Xloop_avx2(\&bodyx_20_39);
+ &Xloop_avx2(\&bodyx_20_39);
+ &Xloop_avx2(\&bodyx_20_39);
+
+$code.=<<___;
+ lea 128($inp),$frame
+ lea 128($inp),%rdi # borrow $t0
+ cmp $num,$frame
+ cmovae $inp,$frame # next or previous block
+
+ # output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
+ add 0($ctx),@ROTX[0] # update context
+ add 4($ctx),@ROTX[1]
+ add 8($ctx),@ROTX[3]
+ mov @ROTX[0],0($ctx)
+ add 12($ctx),@ROTX[4]
+ mov @ROTX[1],4($ctx)
+ mov @ROTX[0],$A # A=d
+ add 16($ctx),@ROTX[5]
+ mov @ROTX[3],$a5
+ mov @ROTX[3],8($ctx)
+ mov @ROTX[4],$D # D=b
+ #xchg @ROTX[5],$F # F=c, C=f
+ mov @ROTX[4],12($ctx)
+ mov @ROTX[1],$F # F=e
+ mov @ROTX[5],16($ctx)
+ #mov $F,16($ctx)
+ mov @ROTX[5],$E # E=c
+ mov $a5,$C # C=f
+ #xchg $F,$E # E=c, F=e
+
+ cmp $num,$inp
+ je .Ldone_avx2
+___
+
+$Xi=4; # reset variables
+@X=map("%ymm$_",(4..7,0..3));
+
+$code.=<<___;
+ vmovdqu 64($K_XX_XX),@X[2] # pbswap mask
+ cmp $num,%rdi # borrowed $t0
+ ja .Last_avx2
+
+ vmovdqu -64(%rdi),%xmm0 # low part of @X[-4&7]
+ vmovdqu -48(%rdi),%xmm1
+ vmovdqu -32(%rdi),%xmm2
+ vmovdqu -16(%rdi),%xmm3
+ vinserti128 \$1,0($frame),@X[-4&7],@X[-4&7]
+ vinserti128 \$1,16($frame),@X[-3&7],@X[-3&7]
+ vinserti128 \$1,32($frame),@X[-2&7],@X[-2&7]
+ vinserti128 \$1,48($frame),@X[-1&7],@X[-1&7]
+ jmp .Last_avx2
+
+.align 32
+.Last_avx2:
+ lea 128+16(%rsp),$frame
+ rorx \$2,$F,$B
+ andn $D,$F,$t0
+ and $C,$F
+ xor $t0,$F
+ sub \$-128,$inp
+___
+ $rx=$j=0; @ROTX=($A,$F,$B,$C,$D,$E);
+
+ &Xloop_avx2 (\&bodyx_00_19);
+ &Xloop_avx2 (\&bodyx_00_19);
+ &Xloop_avx2 (\&bodyx_00_19);
+ &Xloop_avx2 (\&bodyx_00_19);
+
+ &Xloop_avx2 (\&bodyx_20_39);
+ &vmovdqu ($Kx,"-64($K_XX_XX)"); # K_00_19
+ &vpshufb (@X[-4&7],@X[-4&7],@X[2]); # byte swap
+ &Xloop_avx2 (\&bodyx_20_39);
+ &vpshufb (@X[-3&7],@X[-3&7],@X[2]);
+ &vpaddd (@Tx[0],@X[-4&7],$Kx); # add K_00_19
+ &Xloop_avx2 (\&bodyx_20_39);
+ &vmovdqu ("0(%rsp)",@Tx[0]);
+ &vpshufb (@X[-2&7],@X[-2&7],@X[2]);
+ &vpaddd (@Tx[1],@X[-3&7],$Kx);
+ &Xloop_avx2 (\&bodyx_20_39);
+ &vmovdqu ("32(%rsp)",@Tx[1]);
+ &vpshufb (@X[-1&7],@X[-1&7],@X[2]);
+ &vpaddd (@X[2],@X[-2&7],$Kx);
+
+ &Xloop_avx2 (\&bodyx_40_59);
+ &align32 ();
+ &vmovdqu ("64(%rsp)",@X[2]);
+ &vpaddd (@X[3],@X[-1&7],$Kx);
+ &Xloop_avx2 (\&bodyx_40_59);
+ &vmovdqu ("96(%rsp)",@X[3]);
+ &Xloop_avx2 (\&bodyx_40_59);
+ &Xupdate_avx2_16_31(\&bodyx_40_59);
+
+ &Xupdate_avx2_16_31(\&bodyx_20_39);
+ &Xupdate_avx2_16_31(\&bodyx_20_39);
+ &Xupdate_avx2_16_31(\&bodyx_20_39);
+ &Xloop_avx2 (\&bodyx_20_39);
+
+$code.=<<___;
+ lea 128(%rsp),$frame
+
+ # output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
+ add 0($ctx),@ROTX[0] # update context
+ add 4($ctx),@ROTX[1]
+ add 8($ctx),@ROTX[3]
+ mov @ROTX[0],0($ctx)
+ add 12($ctx),@ROTX[4]
+ mov @ROTX[1],4($ctx)
+ mov @ROTX[0],$A # A=d
+ add 16($ctx),@ROTX[5]
+ mov @ROTX[3],$a5
+ mov @ROTX[3],8($ctx)
+ mov @ROTX[4],$D # D=b
+ #xchg @ROTX[5],$F # F=c, C=f
+ mov @ROTX[4],12($ctx)
+ mov @ROTX[1],$F # F=e
+ mov @ROTX[5],16($ctx)
+ #mov $F,16($ctx)
+ mov @ROTX[5],$E # E=c
+ mov $a5,$C # C=f
+ #xchg $F,$E # E=c, F=e
+
+ cmp $num,$inp
+ jbe .Loop_avx2
+
+.Ldone_avx2:
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps -40-6*16(%r14),%xmm6
+ movaps -40-5*16(%r14),%xmm7
+ movaps -40-4*16(%r14),%xmm8
+ movaps -40-3*16(%r14),%xmm9
+ movaps -40-2*16(%r14),%xmm10
+ movaps -40-1*16(%r14),%xmm11
+___
+$code.=<<___;
+ lea (%r14),%rsi
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lepilogue_avx2:
+ ret
+.size sha1_block_data_order_avx2,.-sha1_block_data_order_avx2
+___
+}
}
$code.=<<___;
.align 64
K_XX_XX:
.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
+.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
___
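+# Each K row above is emitted twice so that one 32-byte ymm load sees
+# eight identical dwords on the AVX2 path; the xmm paths keep loading
+# 16 bytes from the same table. $K_XX_XX points at K_XX_XX+64, the
+# middle of the K rows, which is why the loads use offsets
+# 2*16*($Xi/5)-64 and the pbswap mask sits at 64($K_XX_XX). The final
+# .byte row is a reversing mask, presumably for the SHA-extension path.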
}}}
$code.=<<___;
@@ -1122,20 +1821,58 @@ se_handler:
jae .Lcommon_seh_tail
mov `16*4`(%rax),%rax # pull saved stack pointer
- lea 32(%rax),%rax
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
+ mov -40(%rax),%r14
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
jmp .Lcommon_seh_tail
.size se_handler,.-se_handler
+___
+
+$code.=<<___ if ($shaext);
+.type shaext_handler,\@abi-omnipotent
+.align 16
+shaext_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lprologue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lcommon_seh_tail
+
+ lea .Lepilogue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lcommon_seh_tail
+
+ lea -8-4*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$8,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+ jmp .Lcommon_seh_tail
+.size shaext_handler,.-shaext_handler
+___
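+# With %ecx=8 the "rep movsq" copies 8 quadwords (64 bytes), i.e. the
+# four xmm slots the shaext prologue spills (lea -8-4*16(%rax) above),
+# into &context.Xmm6 so the unwinder sees the saved SIMD state. The
+# opcode is spelled as a .long, presumably for assemblers that predate
+# the movsq mnemonic.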
+
+$code.=<<___;
.type ssse3_handler,\@abi-omnipotent
.align 16
ssse3_handler:
@@ -1168,18 +1905,23 @@ ssse3_handler:
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lcommon_seh_tail
- lea 64(%rax),%rsi
+ mov 232($context),%rax # pull context->R14
+
+ lea -40-6*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
- mov \$10,%ecx
+ mov \$12,%ecx
.long 0xa548f3fc # cld; rep movsq
- lea `24+64+5*16`(%rax),%rax # adjust stack pointer
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
.Lcommon_seh_tail:
mov 8(%rax),%rdi
@@ -1226,6 +1968,13 @@ ssse3_handler:
.rva .LSEH_begin_sha1_block_data_order
.rva .LSEH_end_sha1_block_data_order
.rva .LSEH_info_sha1_block_data_order
+___
+$code.=<<___ if ($shaext);
+ .rva .LSEH_begin_sha1_block_data_order_shaext
+ .rva .LSEH_end_sha1_block_data_order_shaext
+ .rva .LSEH_info_sha1_block_data_order_shaext
+___
+$code.=<<___;
.rva .LSEH_begin_sha1_block_data_order_ssse3
.rva .LSEH_end_sha1_block_data_order_ssse3
.rva .LSEH_info_sha1_block_data_order_ssse3
@@ -1235,12 +1984,24 @@ $code.=<<___ if ($avx);
.rva .LSEH_end_sha1_block_data_order_avx
.rva .LSEH_info_sha1_block_data_order_avx
___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_sha1_block_data_order_avx2
+ .rva .LSEH_end_sha1_block_data_order_avx2
+ .rva .LSEH_info_sha1_block_data_order_avx2
+___
$code.=<<___;
.section .xdata
.align 8
.LSEH_info_sha1_block_data_order:
.byte 9,0,0,0
.rva se_handler
+___
+$code.=<<___ if ($shaext);
+.LSEH_info_sha1_block_data_order_shaext:
+ .byte 9,0,0,0
+ .rva shaext_handler
+___
+$code.=<<___;
.LSEH_info_sha1_block_data_order_ssse3:
.byte 9,0,0,0
.rva ssse3_handler
@@ -1252,10 +2013,55 @@ $code.=<<___ if ($avx);
.rva ssse3_handler
.rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
___
+$code.=<<___ if ($avx>1);
+.LSEH_info_sha1_block_data_order_avx2:
+ .byte 9,0,0,0
+ .rva ssse3_handler
+ .rva .Lprologue_avx2,.Lepilogue_avx2 # HandlerData[]
+___
}
####################################################################
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
+sub sha1rnds4 {
+ if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-7]),\s*%xmm([0-7])/) {
+ my @opcode=(0x0f,0x3a,0xcc);
+ push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
+ my $c=$1;
+ push @opcode,$c=~/^0/?oct($c):$c;
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return "sha1rnds4\t".@_[0];
+ }
+}
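+# E.g. "sha1rnds4 \$3,%xmm4,%xmm0" comes out as
+# ".byte 0x0f,0x3a,0xcc,0xc4,3" (ModR/M 0xc4 = 0xc0|rm 4|reg 0<<3),
+# so the module assembles even with toolchains that predate the SHA
+# extension mnemonics; only %xmm0-7 work since no REX is emitted.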
+
+sub sha1op38 {
+ my $instr = shift;
+ my %opcodelet = (
+ "sha1nexte" => 0xc8,
+ "sha1msg1" => 0xc9,
+ "sha1msg2" => 0xca );
+
+ if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ my @opcode=(0x0f,0x38);
+ my $rex=0;
+ $rex|=0x04 if ($2>=8);
+ $rex|=0x01 if ($1>=8);
+ unshift @opcode,0x40|$rex if ($rex);
+ push @opcode,$opcodelet{$instr};
+ push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return $instr."\t".@_[0];
+ }
+}
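+# Same idea with a synthesized REX prefix for %xmm8-15: e.g.
+# "sha1nexte %xmm8,%xmm0" becomes ".byte 0x41,0x0f,0x38,0xc8,0xc0"
+# (REX.B set for the r/m register 8, ModR/M 0xc0 = reg 0, rm 8&7).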
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo or
+ s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo;
+
+ print $_,"\n";
+}
close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha256-586.pl b/openssl/crypto/sha/asm/sha256-586.pl
index 928ec5312..6462e45ba 100644
--- a/openssl/crypto/sha/asm/sha256-586.pl
+++ b/openssl/crypto/sha/asm/sha256-586.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -9,20 +9,55 @@
#
# SHA256 block transform for x86. September 2007.
#
-# Performance in clock cycles per processed byte (less is better):
+# Performance improvement over compiler generated code varies from
+# 10% to 40% [see below]. Not very impressive on some µ-archs, but
+# it's 5 times smaller and optimizes the number of writes.
#
-# Pentium PIII P4 AMD K8 Core2
-# gcc 46 36 41 27 26
-# icc 57 33 38 25 23
-# x86 asm 40 30 33 20 18
-# x86_64 asm(*) - - 21 16 16
+# May 2012.
#
-# (*) x86_64 assembler performance is presented for reference
-# purposes.
+# Optimization including two of Pavel Semjanov's ideas, alternative
+# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
+# ~7% on Pentium, ~40% on Atom. As fully unrolled loop body is almost
+# 15x larger, 8KB vs. 560B, it's fired only for longer inputs. But not
+# on P4, where it kills performance, nor Sandy Bridge, where folded
+# loop is approximately as fast...
#
-# Performance improvement over compiler generated code varies from
-# 10% to 40% [see above]. Not very impressive on some µ-archs, but
-# it's 5 times smaller and optimizies amount of writes.
+# June 2012.
+#
+# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
+# May version, >60% over original. Add AVX+shrd code path, >25%
+# improvement on Sandy Bridge over May version, 60% over original.
+#
+# May 2013.
+#
+# Replace AMD XOP code path with SSSE3 to cover more processors.
+# (Biggest improvement coefficient is on upcoming Atom Silvermont,
+# not shown.) Add AVX+BMI code path.
+#
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+#
+# Performance in clock cycles per processed byte (less is better):
+#
+# gcc icc x86 asm(*) SIMD x86_64 asm(**)
+# Pentium 46 57 40/38 - -
+# PIII 36 33 27/24 - -
+# P4 41 38 28 - 17.3
+# AMD K8 27 25 19/15.5 - 14.9
+# Core2 26 23 18/15.6 14.3 13.8
+# Westmere 27 - 19/15.7 13.4 12.3
+# Sandy Bridge 25 - 15.9 12.4 11.6
+# Ivy Bridge 24 - 15.0 11.4 10.3
+# Haswell 22 - 13.9 9.46 7.80
+# Bulldozer 36 - 27/22 17.0 13.6
+# VIA Nano 36 - 25/22 16.8 16.5
+# Atom 50 - 30/25 21.9 18.9
+# Silvermont 40 - 34/31 22.9 20.6
+#
+# (*) numbers after slash are for unrolled loop, where applicable;
+# (**) x86_64 assembly performance is presented for reference
+# purposes, results are best-available;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
@@ -30,72 +65,122 @@ require "x86asm.pl";
&asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386");
+$xmm=$avx=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.03) + ($1>=2.10);
+}
+
+if ($xmm && !$avx && $ARGV[0] eq "win32" &&
+ `ml 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+}
+
+if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+$shaext=$xmm; ### set to zero if compiling for 1.0.1
+
+$unroll_after = 64*4; # If pre-evicted from L1P cache first spin of
+ # fully unrolled loop was measured to run about
+ # 3-4x slower. If slowdown coefficient is N and
+ # unrolled loop is m times faster, then you break
+ # even at (N-1)/(m-1) blocks. Then it needs to be
+ # adjusted for probability of code being evicted,
+ # code size/cache size=1/4. Typical m is 1.15...
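+				# Plugging in the measured N~3.5 and
+				# m~1.15: (3.5-1)/(1.15-1) is ~17
+				# blocks, and scaled by the ~1/4
+				# eviction probability that is ~4
+				# blocks, hence 64*4 bytes.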
+
$A="eax";
$E="edx";
$T="ebx";
-$Aoff=&DWP(0,"esp");
-$Boff=&DWP(4,"esp");
-$Coff=&DWP(8,"esp");
-$Doff=&DWP(12,"esp");
-$Eoff=&DWP(16,"esp");
-$Foff=&DWP(20,"esp");
-$Goff=&DWP(24,"esp");
-$Hoff=&DWP(28,"esp");
-$Xoff=&DWP(32,"esp");
+$Aoff=&DWP(4,"esp");
+$Boff=&DWP(8,"esp");
+$Coff=&DWP(12,"esp");
+$Doff=&DWP(16,"esp");
+$Eoff=&DWP(20,"esp");
+$Foff=&DWP(24,"esp");
+$Goff=&DWP(28,"esp");
+$Hoff=&DWP(32,"esp");
+$Xoff=&DWP(36,"esp");
$K256="ebp";
+sub BODY_16_63() {
+ &mov ($T,"ecx"); # "ecx" is preloaded
+ &mov ("esi",&DWP(4*(9+15+16-14),"esp"));
+ &ror ("ecx",18-7);
+ &mov ("edi","esi");
+ &ror ("esi",19-17);
+ &xor ("ecx",$T);
+ &shr ($T,3);
+ &ror ("ecx",7);
+ &xor ("esi","edi");
+ &xor ($T,"ecx"); # T = sigma0(X[-15])
+ &ror ("esi",17);
+ &add ($T,&DWP(4*(9+15+16),"esp")); # T += X[-16]
+ &shr ("edi",10);
+ &add ($T,&DWP(4*(9+15+16-9),"esp")); # T += X[-7]
+ #&xor ("edi","esi") # sigma1(X[-2])
+ # &add ($T,"edi"); # T += sigma1(X[-2])
+ # &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
+
+ &BODY_00_15(1);
+}
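+# The schedule is W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16],
+# with sigma0(x) = (x>>>7)^(x>>>18)^(x>>3) and
+# sigma1(x) = (x>>>17)^(x>>>19)^(x>>10). The rotates are staged
+# (ror 18-7 then ror 7, etc.) so the intermediate xors slot in between,
+# and the tail of sigma1 plus the final adds are deferred into the
+# BODY_00_15(1) call, where they fill otherwise idle slots.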
sub BODY_00_15() {
my $in_16_63=shift;
&mov ("ecx",$E);
- &add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2])
- &ror ("ecx",25-11);
+ &xor ("edi","esi") if ($in_16_63); # sigma1(X[-2])
&mov ("esi",$Foff);
+ &ror ("ecx",25-11);
+ &add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2])
+ &mov ("edi",$Goff);
&xor ("ecx",$E);
+ &xor ("esi","edi");
+ &mov ($T,&DWP(4*(9+15),"esp")) if (!$in_16_63);
+ &mov (&DWP(4*(9+15),"esp"),$T) if ($in_16_63); # save X[0]
&ror ("ecx",11-6);
- &mov (&DWP(4*(8+15),"esp"),$T) if ($in_16_63); # save X[0]
- &xor ("ecx",$E);
- &ror ("ecx",6); # Sigma1(e)
- &mov ("edi",$Goff);
- &add ($T,"ecx"); # T += Sigma1(e)
-
- &xor ("esi","edi");
- &mov ($Eoff,$E); # modulo-scheduled
+ &and ("esi",$E);
+ &mov ($Eoff,$E); # modulo-scheduled
+ &xor ($E,"ecx");
+ &add ($T,$Hoff); # T += h
+ &xor ("esi","edi"); # Ch(e,f,g)
+ &ror ($E,6); # Sigma1(e)
&mov ("ecx",$A);
- &and ("esi",$E);
- &mov ($E,$Doff); # e becomes d, which is e in next iteration
- &xor ("esi","edi"); # Ch(e,f,g)
- &mov ("edi",$A);
- &add ($T,"esi"); # T += Ch(e,f,g)
+ &add ($T,"esi"); # T += Ch(e,f,g)
&ror ("ecx",22-13);
- &add ($T,$Hoff); # T += h
+ &add ($T,$E); # T += Sigma1(e)
+ &mov ("edi",$Boff);
&xor ("ecx",$A);
+ &mov ($Aoff,$A); # modulo-scheduled
+ &lea ("esp",&DWP(-4,"esp"));
&ror ("ecx",13-2);
- &mov ("esi",$Boff);
- &xor ("ecx",$A);
- &ror ("ecx",2); # Sigma0(a)
- &add ($E,$T); # d += T
- &mov ("edi",$Coff);
-
- &add ($T,"ecx"); # T += Sigma0(a)
- &mov ($Aoff,$A); # modulo-scheduled
-
- &mov ("ecx",$A);
- &sub ("esp",4);
- &or ($A,"esi"); # a becomes h, which is a in next iteration
- &and ("ecx","esi");
- &and ($A,"edi");
&mov ("esi",&DWP(0,$K256));
- &or ($A,"ecx"); # h=Maj(a,b,c)
+ &xor ("ecx",$A);
+ &mov ($E,$Eoff); # e in next iteration, d in this one
+ &xor ($A,"edi"); # a ^= b
+ &ror ("ecx",2); # Sigma0(a)
+ &add ($T,"esi"); # T+= K[i]
+ &mov (&DWP(0,"esp"),$A); # (b^c) in next round
+ &add ($E,$T); # d += T
+ &and ($A,&DWP(4,"esp")); # a &= (b^c)
+ &add ($T,"ecx"); # T += Sigma0(a)
+ &xor ($A,"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
+ &mov ("ecx",&DWP(4*(9+15+16-1),"esp")) if ($in_16_63); # preload T
&add ($K256,4);
- &add ($A,$T); # h += T
- &mov ($T,&DWP(4*(8+15+16-1),"esp")) if ($in_16_63); # preload T
- &add ($E,"esi"); # d += K256[i]
- &add ($A,"esi"); # h += K256[i]
+ &add ($A,$T); # h += T
}
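+# The Maj(a,b,c) = Ch(a^b,c,b) form above only needs the running (b^c)
+# kept in a rotating stack slot: per bit, if a==b then ((a^b)&(b^c))^b
+# = b = a, otherwise it is (b^c)^b = c, which is the majority either
+# way, at the cost of one and plus two xors per round.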
+&external_label("OPENSSL_ia32cap_P") if (!$i386);
+
&function_begin("sha256_block_data_order");
&mov ("esi",wparam(0)); # ctx
&mov ("edi",wparam(1)); # inp
@@ -116,26 +201,59 @@ sub BODY_00_15() {
&mov (&DWP(4,"esp"),"edi"); # inp
&mov (&DWP(8,"esp"),"eax"); # inp+num*128
&mov (&DWP(12,"esp"),"ebx"); # saved sp
+ if (!$i386 && $xmm) {
+ &picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
+ &mov ("ecx",&DWP(0,"edx"));
+ &mov ("ebx",&DWP(4,"edx"));
+ &test ("ecx",1<<20); # check for P4
+ &jnz (&label("loop"));
+ &mov ("edx",&DWP(8,"edx")) if ($xmm);
+ &test ("ecx",1<<24); # check for FXSR
+ &jz ($unroll_after?&label("no_xmm"):&label("loop"));
+ &and ("ecx",1<<30); # mask "Intel CPU" bit
+ &and ("ebx",1<<28|1<<9); # mask AVX and SSSE3 bits
+ &test ("edx",1<<29) if ($shaext); # check for SHA
+ &jnz (&label("shaext")) if ($shaext);
+ &or ("ecx","ebx");
+ &and ("ecx",1<<28|1<<30);
+ &cmp ("ecx",1<<28|1<<30);
+ if ($xmm) {
+ &je (&label("AVX")) if ($avx);
+ &test ("ebx",1<<9); # check for SSSE3
+ &jnz (&label("SSSE3"));
+ } else {
+ &je (&label("loop_shrd"));
+ }
+ if ($unroll_after) {
+&set_label("no_xmm");
+ &sub ("eax","edi");
+ &cmp ("eax",$unroll_after);
+ &jae (&label("unrolled"));
+ } }
+ &jmp (&label("loop"));
+
+sub COMPACT_LOOP() {
+my $suffix=shift;
-&set_label("loop",16);
+&set_label("loop$suffix",$suffix?32:16);
# copy input block to stack reversing byte and dword order
for($i=0;$i<4;$i++) {
&mov ("eax",&DWP($i*16+0,"edi"));
&mov ("ebx",&DWP($i*16+4,"edi"));
&mov ("ecx",&DWP($i*16+8,"edi"));
- &mov ("edx",&DWP($i*16+12,"edi"));
&bswap ("eax");
+ &mov ("edx",&DWP($i*16+12,"edi"));
&bswap ("ebx");
- &bswap ("ecx");
- &bswap ("edx");
&push ("eax");
+ &bswap ("ecx");
&push ("ebx");
+ &bswap ("edx");
&push ("ecx");
&push ("edx");
}
&add ("edi",64);
- &sub ("esp",4*8); # place for A,B,C,D,E,F,G,H
- &mov (&DWP(4*(8+16)+4,"esp"),"edi");
+ &lea ("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
+ &mov (&DWP(4*(9+16)+4,"esp"),"edi");
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($A,&DWP(0,"esi"));
@@ -144,8 +262,10 @@ sub BODY_00_15() {
&mov ("edi",&DWP(12,"esi"));
# &mov ($Aoff,$A);
&mov ($Boff,"ebx");
+ &xor ("ebx","ecx");
&mov ($Coff,"ecx");
&mov ($Doff,"edi");
+ &mov (&DWP(0,"esp"),"ebx"); # magic
&mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
@@ -155,59 +275,41 @@ sub BODY_00_15() {
&mov ($Goff,"ecx");
&mov ($Hoff,"edi");
-&set_label("00_15",16);
- &mov ($T,&DWP(4*(8+15),"esp"));
+&set_label("00_15$suffix",16);
&BODY_00_15();
&cmp ("esi",0xc19bf174);
- &jne (&label("00_15"));
-
- &mov ($T,&DWP(4*(8+15+16-1),"esp")); # preloaded in BODY_00_15(1)
-&set_label("16_63",16);
- &mov ("esi",$T);
- &mov ("ecx",&DWP(4*(8+15+16-14),"esp"));
- &ror ("esi",18-7);
- &mov ("edi","ecx");
- &xor ("esi",$T);
- &ror ("esi",7);
- &shr ($T,3);
-
- &ror ("edi",19-17);
- &xor ($T,"esi"); # T = sigma0(X[-15])
- &xor ("edi","ecx");
- &ror ("edi",17);
- &shr ("ecx",10);
- &add ($T,&DWP(4*(8+15+16),"esp")); # T += X[-16]
- &xor ("edi","ecx"); # sigma1(X[-2])
-
- &add ($T,&DWP(4*(8+15+16-9),"esp")); # T += X[-7]
- # &add ($T,"edi"); # T += sigma1(X[-2])
- # &mov (&DWP(4*(8+15),"esp"),$T); # save X[0]
+ &jne (&label("00_15$suffix"));
- &BODY_00_15(1);
+ &mov ("ecx",&DWP(4*(9+15+16-1),"esp")); # preloaded in BODY_00_15(1)
+ &jmp (&label("16_63$suffix"));
+
+&set_label("16_63$suffix",16);
+
+ &BODY_16_63();
&cmp ("esi",0xc67178f2);
- &jne (&label("16_63"));
+ &jne (&label("16_63$suffix"));
- &mov ("esi",&DWP(4*(8+16+64)+0,"esp"));#ctx
+ &mov ("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
# &mov ($A,$Aoff);
&mov ("ebx",$Boff);
- &mov ("ecx",$Coff);
- &mov ("edi",$Doff);
+ # &mov ("edi",$Coff);
+ &mov ("ecx",$Doff);
&add ($A,&DWP(0,"esi"));
&add ("ebx",&DWP(4,"esi"));
- &add ("ecx",&DWP(8,"esi"));
- &add ("edi",&DWP(12,"esi"));
+ &add ("edi",&DWP(8,"esi"));
+ &add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$A);
&mov (&DWP(4,"esi"),"ebx");
- &mov (&DWP(8,"esi"),"ecx");
- &mov (&DWP(12,"esi"),"edi");
+ &mov (&DWP(8,"esi"),"edi");
+ &mov (&DWP(12,"esi"),"ecx");
# &mov ($E,$Eoff);
&mov ("eax",$Foff);
&mov ("ebx",$Goff);
&mov ("ecx",$Hoff);
- &mov ("edi",&DWP(4*(8+16+64)+4,"esp"));#inp
+ &mov ("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
&add ($E,&DWP(16,"esi"));
&add ("eax",&DWP(20,"esi"));
&add ("ebx",&DWP(24,"esi"));
@@ -217,33 +319,963 @@ sub BODY_00_15() {
&mov (&DWP(24,"esi"),"ebx");
&mov (&DWP(28,"esi"),"ecx");
- &add ("esp",4*(8+16+64)); # destroy frame
+ &lea ("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
&sub ($K256,4*64); # rewind K
&cmp ("edi",&DWP(8,"esp")); # are we done yet?
- &jb (&label("loop"));
-
+ &jb (&label("loop$suffix"));
+}
+ &COMPACT_LOOP();
+ &mov ("esp",&DWP(12,"esp")); # restore sp
+&function_end_A();
+ if (!$i386 && !$xmm) {
+ # ~20% improvement on Sandy Bridge
+ local *ror = sub { &shrd(@_[0],@_) };
+ &COMPACT_LOOP("_shrd");
&mov ("esp",&DWP(12,"esp")); # restore sp
&function_end_A();
+ }
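+# The "_shrd" variant is the same compact loop with ror replaced by a
+# two-operand shrd of a register into itself, which is still a rotate
+# but runs measurably faster on Sandy Bridge (the ~20% noted above).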
&set_label("K256",64); # Yes! I keep it in the code segment!
- &data_word(0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5);
- &data_word(0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5);
- &data_word(0xd807aa98,0x12835b01,0x243185be,0x550c7dc3);
- &data_word(0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174);
- &data_word(0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc);
- &data_word(0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da);
- &data_word(0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7);
- &data_word(0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967);
- &data_word(0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13);
- &data_word(0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85);
- &data_word(0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3);
- &data_word(0xd192e819,0xd6990624,0xf40e3585,0x106aa070);
- &data_word(0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5);
- &data_word(0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3);
- &data_word(0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208);
- &data_word(0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2);
-&function_end_B("sha256_block_data_order");
+@K256=( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
+ 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
+ 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
+ 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
+ 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
+ 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
+ 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
+ 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
+ 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
+ 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
+ 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
+ 0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
+ 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
+ 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
+ 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
+ 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
+&data_word(@K256);
+&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+($a,$b,$c,$d,$e,$f,$g,$h)=(0..7); # offsets
+sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
+
+if (!$i386 && $unroll_after) {
+my @AH=($A,$K256);
+
+&set_label("unrolled",16);
+ &lea ("esp",&DWP(-96,"esp"));
+ # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+ &mov ($AH[0],&DWP(0,"esi"));
+ &mov ($AH[1],&DWP(4,"esi"));
+ &mov ("ecx",&DWP(8,"esi"));
+ &mov ("ebx",&DWP(12,"esi"));
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"ecx"); # magic
+ &mov (&DWP(8,"esp"),"ecx");
+ &mov (&DWP(12,"esp"),"ebx");
+ &mov ($E,&DWP(16,"esi"));
+ &mov ("ebx",&DWP(20,"esi"));
+ &mov ("ecx",&DWP(24,"esi"));
+ &mov ("esi",&DWP(28,"esi"));
+ #&mov (&DWP(16,"esp"),$E);
+ &mov (&DWP(20,"esp"),"ebx");
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esp"),"esi");
+ &jmp (&label("grand_loop"));
+
+&set_label("grand_loop",16);
+ # copy input block to stack reversing byte order
+ for($i=0;$i<5;$i++) {
+ &mov ("ebx",&DWP(12*$i+0,"edi"));
+ &mov ("ecx",&DWP(12*$i+4,"edi"));
+ &bswap ("ebx");
+ &mov ("esi",&DWP(12*$i+8,"edi"));
+ &bswap ("ecx");
+ &mov (&DWP(32+12*$i+0,"esp"),"ebx");
+ &bswap ("esi");
+ &mov (&DWP(32+12*$i+4,"esp"),"ecx");
+ &mov (&DWP(32+12*$i+8,"esp"),"esi");
+ }
+ &mov ("ebx",&DWP($i*12,"edi"));
+ &add ("edi",64);
+ &bswap ("ebx");
+ &mov (&DWP(96+4,"esp"),"edi");
+ &mov (&DWP(32+12*$i,"esp"),"ebx");
+
+ my ($t1,$t2) = ("ecx","esi");
+
+ for ($i=0;$i<64;$i++) {
+
+ if ($i>=16) {
+ &mov ($T,$t1); # $t1 is preloaded
+ # &mov ($t2,&DWP(32+4*(($i+14)&15),"esp"));
+ &ror ($t1,18-7);
+ &mov ("edi",$t2);
+ &ror ($t2,19-17);
+ &xor ($t1,$T);
+ &shr ($T,3);
+ &ror ($t1,7);
+ &xor ($t2,"edi");
+ &xor ($T,$t1); # T = sigma0(X[-15])
+ &ror ($t2,17);
+ &add ($T,&DWP(32+4*($i&15),"esp")); # T += X[-16]
+ &shr ("edi",10);
+ &add ($T,&DWP(32+4*(($i+9)&15),"esp")); # T += X[-7]
+ #&xor ("edi",$t2) # sigma1(X[-2])
+ # &add ($T,"edi"); # T += sigma1(X[-2])
+ # &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
+ }
+ &mov ($t1,$E);
+ &xor ("edi",$t2) if ($i>=16); # sigma1(X[-2])
+ &mov ($t2,&off($f));
+ &ror ($E,25-11);
+ &add ($T,"edi") if ($i>=16); # T += sigma1(X[-2])
+ &mov ("edi",&off($g));
+ &xor ($E,$t1);
+ &mov ($T,&DWP(32+4*($i&15),"esp")) if ($i<16); # X[i]
+ &mov (&DWP(32+4*($i&15),"esp"),$T) if ($i>=16 && $i<62); # save X[0]
+ &xor ($t2,"edi");
+ &ror ($E,11-6);
+ &and ($t2,$t1);
+ &mov (&off($e),$t1); # save $E, modulo-scheduled
+ &xor ($E,$t1);
+ &add ($T,&off($h)); # T += h
+ &xor ("edi",$t2); # Ch(e,f,g)
+ &ror ($E,6); # Sigma1(e)
+ &mov ($t1,$AH[0]);
+ &add ($T,"edi"); # T += Ch(e,f,g)
+
+ &ror ($t1,22-13);
+ &mov ($t2,$AH[0]);
+ &mov ("edi",&off($b));
+ &xor ($t1,$AH[0]);
+ &mov (&off($a),$AH[0]); # save $A, modulo-scheduled
+ &xor ($AH[0],"edi"); # a ^= b, (b^c) in next round
+ &ror ($t1,13-2);
+ &and ($AH[1],$AH[0]); # (b^c) &= (a^b)
+	&lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
+ &xor ($t1,$t2);
+ &xor ($AH[1],"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
+ &mov ($t2,&DWP(32+4*(($i+2)&15),"esp")) if ($i>=15 && $i<63);
+ &ror ($t1,2); # Sigma0(a)
+
+ &add ($AH[1],$E); # h += T
+ &add ($E,&off($d)); # d += T
+ &add ($AH[1],$t1); # h += Sigma0(a)
+ &mov ($t1,&DWP(32+4*(($i+15)&15),"esp")) if ($i>=15 && $i<63);
+
+ @AH = reverse(@AH); # rotate(a,h)
+ ($t1,$t2) = ($t2,$t1); # rotate(t1,t2)
+ }
+ &mov ("esi",&DWP(96,"esp")); #ctx
+ #&mov ($AH[0],&DWP(0,"esp"));
+ &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
+ #&mov ("edi", &DWP(8,"esp"));
+ &mov ("ecx",&DWP(12,"esp"));
+ &add ($AH[0],&DWP(0,"esi"));
+ &add ($AH[1],&DWP(4,"esi"));
+ &add ("edi",&DWP(8,"esi"));
+ &add ("ecx",&DWP(12,"esi"));
+ &mov (&DWP(0,"esi"),$AH[0]);
+ &mov (&DWP(4,"esi"),$AH[1]);
+ &mov (&DWP(8,"esi"),"edi");
+ &mov (&DWP(12,"esi"),"ecx");
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"edi"); # magic
+ &mov (&DWP(8,"esp"),"edi");
+ &mov (&DWP(12,"esp"),"ecx");
+ #&mov ($E,&DWP(16,"esp"));
+ &mov ("edi",&DWP(20,"esp"));
+ &mov ("ebx",&DWP(24,"esp"));
+ &mov ("ecx",&DWP(28,"esp"));
+ &add ($E,&DWP(16,"esi"));
+ &add ("edi",&DWP(20,"esi"));
+ &add ("ebx",&DWP(24,"esi"));
+ &add ("ecx",&DWP(28,"esi"));
+ &mov (&DWP(16,"esi"),$E);
+ &mov (&DWP(20,"esi"),"edi");
+ &mov (&DWP(24,"esi"),"ebx");
+ &mov (&DWP(28,"esi"),"ecx");
+ #&mov (&DWP(16,"esp"),$E);
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+ &mov (&DWP(24,"esp"),"ebx");
+ &mov (&DWP(28,"esp"),"ecx");
+
+ &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
+ &jb (&label("grand_loop"));
+
+ &mov ("esp",&DWP(96+12,"esp")); # restore sp
+&function_end_A();
+}
+ if (!$i386 && $xmm) {{{
+if ($shaext) {
+######################################################################
+# Intel SHA Extensions implementation of SHA256 update function.
+#
+my ($ctx,$inp,$end)=("esi","edi","eax");
+my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
+my @MSG=map("xmm$_",(3..6));
+
+sub sha256op38 {
+ my ($opcodelet,$dst,$src)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
+}
+sub sha256rnds2 { sha256op38(0xcb,@_); }
+sub sha256msg1 { sha256op38(0xcc,@_); }
+sub sha256msg2 { sha256op38(0xcd,@_); }
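+# sha256rnds2 advances the state by two rounds and implicitly reads the
+# two W+K dwords from the low qword of %xmm0, which is why $Wi is pinned
+# to xmm0 and pshufd(...,0x0e) flips its halves between calls. E.g.
+# &sha256rnds2($CDGH,$ABEF) (xmm2,xmm1) packs to ".byte 0x0f,0x38,0xcb,0xd1".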
+
+&set_label("shaext",32);
+ &sub ("esp",32);
+
+ &movdqu ($ABEF,&QWP(0,$ctx)); # DCBA
+ &lea ($K256,&DWP(0x80,$K256));
+ &movdqu ($CDGH,&QWP(16,$ctx)); # HGFE
+ &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
+
+ &pshufd ($Wi,$ABEF,0x1b); # ABCD
+ &pshufd ($ABEF,$ABEF,0xb1); # CDAB
+ &pshufd ($CDGH,$CDGH,0x1b); # EFGH
+ &palignr ($ABEF,$CDGH,8); # ABEF
+ &punpcklqdq ($CDGH,$Wi); # CDGH
+ &jmp (&label("loop_shaext"));
+
+&set_label("loop_shaext",16);
+ &movdqu (@MSG[0],&QWP(0,$inp));
+ &movdqu (@MSG[1],&QWP(0x10,$inp));
+ &movdqu (@MSG[2],&QWP(0x20,$inp));
+ &pshufb (@MSG[0],$TMP);
+ &movdqu (@MSG[3],&QWP(0x30,$inp));
+ &movdqa (&QWP(16,"esp"),$CDGH); # offload
+
+ &movdqa ($Wi,&QWP(0*16-0x80,$K256));
+ &paddd ($Wi,@MSG[0]);
+ &pshufb (@MSG[1],$TMP);
+ &sha256rnds2 ($CDGH,$ABEF); # 0-3
+ &pshufd ($Wi,$Wi,0x0e);
+ &nop ();
+ &movdqa (&QWP(0,"esp"),$ABEF); # offload
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(1*16-0x80,$K256));
+ &paddd ($Wi,@MSG[1]);
+ &pshufb (@MSG[2],$TMP);
+ &sha256rnds2 ($CDGH,$ABEF); # 4-7
+ &pshufd ($Wi,$Wi,0x0e);
+ &lea ($inp,&DWP(0x40,$inp));
+ &sha256msg1 (@MSG[0],@MSG[1]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(2*16-0x80,$K256));
+ &paddd ($Wi,@MSG[2]);
+ &pshufb (@MSG[3],$TMP);
+ &sha256rnds2 ($CDGH,$ABEF); # 8-11
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[3]);
+ &palignr ($TMP,@MSG[2],4);
+ &nop ();
+ &paddd (@MSG[0],$TMP);
+ &sha256msg1 (@MSG[1],@MSG[2]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(3*16-0x80,$K256));
+ &paddd ($Wi,@MSG[3]);
+ &sha256msg2 (@MSG[0],@MSG[3]);
+ &sha256rnds2 ($CDGH,$ABEF); # 12-15
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[0]);
+ &palignr ($TMP,@MSG[3],4);
+ &nop ();
+ &paddd (@MSG[1],$TMP);
+ &sha256msg1 (@MSG[2],@MSG[3]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+for($i=4;$i<16-3;$i++) {
+ &movdqa ($Wi,&QWP($i*16-0x80,$K256));
+ &paddd ($Wi,@MSG[0]);
+ &sha256msg2 (@MSG[1],@MSG[0]);
+ &sha256rnds2 ($CDGH,$ABEF); # 16-19...
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[1]);
+ &palignr ($TMP,@MSG[0],4);
+ &nop ();
+ &paddd (@MSG[2],$TMP);
+ &sha256msg1 (@MSG[3],@MSG[0]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ push(@MSG,shift(@MSG));
+}
+ &movdqa ($Wi,&QWP(13*16-0x80,$K256));
+ &paddd ($Wi,@MSG[0]);
+ &sha256msg2 (@MSG[1],@MSG[0]);
+ &sha256rnds2 ($CDGH,$ABEF); # 52-55
+ &pshufd ($Wi,$Wi,0x0e);
+	&movdqa		($TMP,@MSG[1]);
+ &palignr ($TMP,@MSG[0],4);
+ &sha256rnds2 ($ABEF,$CDGH);
+ &paddd (@MSG[2],$TMP);
+
+ &movdqa ($Wi,&QWP(14*16-0x80,$K256));
+ &paddd ($Wi,@MSG[1]);
+ &sha256rnds2 ($CDGH,$ABEF); # 56-59
+ &pshufd ($Wi,$Wi,0x0e);
+ &sha256msg2 (@MSG[2],@MSG[1]);
+ &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(15*16-0x80,$K256));
+ &paddd ($Wi,@MSG[2]);
+ &nop ();
+ &sha256rnds2 ($CDGH,$ABEF); # 60-63
+ &pshufd ($Wi,$Wi,0x0e);
+ &cmp ($end,$inp);
+ &nop ();
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &paddd ($CDGH,&QWP(16,"esp"));
+ &paddd ($ABEF,&QWP(0,"esp"));
+ &jnz (&label("loop_shaext"));
+
+ &pshufd ($CDGH,$CDGH,0xb1); # DCHG
+ &pshufd ($TMP,$ABEF,0x1b); # FEBA
+ &pshufd ($ABEF,$ABEF,0xb1); # BAFE
+ &punpckhqdq ($ABEF,$CDGH); # DCBA
+ &palignr ($CDGH,$TMP,8); # HGFE
+
+ &mov ("esp",&DWP(32+12,"esp"));
+ &movdqu (&QWP(0,$ctx),$ABEF);
+ &movdqu (&QWP(16,$ctx),$CDGH);
+&function_end_A();
+}
+
+my @X = map("xmm$_",(0..3));
+my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
+my @AH = ($A,$T);
+
+&set_label("SSSE3",32);
+ &lea ("esp",&DWP(-96,"esp"));
+ # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+ &mov ($AH[0],&DWP(0,"esi"));
+ &mov ($AH[1],&DWP(4,"esi"));
+ &mov ("ecx",&DWP(8,"esi"));
+ &mov ("edi",&DWP(12,"esi"));
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"ecx"); # magic
+ &mov (&DWP(8,"esp"),"ecx");
+ &mov (&DWP(12,"esp"),"edi");
+ &mov ($E,&DWP(16,"esi"));
+ &mov ("edi",&DWP(20,"esi"));
+ &mov ("ecx",&DWP(24,"esi"));
+ &mov ("esi",&DWP(28,"esi"));
+ #&mov (&DWP(16,"esp"),$E);
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esp"),"esi");
+ &movdqa ($t3,&QWP(256,$K256));
+ &jmp (&label("grand_ssse3"));
+
+&set_label("grand_ssse3",16);
+ # load input, reverse byte order, add K256[0..15], save to stack
+ &movdqu (@X[0],&QWP(0,"edi"));
+ &movdqu (@X[1],&QWP(16,"edi"));
+ &movdqu (@X[2],&QWP(32,"edi"));
+ &movdqu (@X[3],&QWP(48,"edi"));
+ &add ("edi",64);
+ &pshufb (@X[0],$t3);
+ &mov (&DWP(96+4,"esp"),"edi");
+ &pshufb (@X[1],$t3);
+ &movdqa ($t0,&QWP(0,$K256));
+ &pshufb (@X[2],$t3);
+ &movdqa ($t1,&QWP(16,$K256));
+ &paddd ($t0,@X[0]);
+ &pshufb (@X[3],$t3);
+ &movdqa ($t2,&QWP(32,$K256));
+ &paddd ($t1,@X[1]);
+ &movdqa ($t3,&QWP(48,$K256));
+ &movdqa (&QWP(32+0,"esp"),$t0);
+ &paddd ($t2,@X[2]);
+ &movdqa (&QWP(32+16,"esp"),$t1);
+ &paddd ($t3,@X[3]);
+ &movdqa (&QWP(32+32,"esp"),$t2);
+ &movdqa (&QWP(32+48,"esp"),$t3);
+ &jmp (&label("ssse3_00_47"));
+
+&set_label("ssse3_00_47",16);
+ &add ($K256,64);
+
+sub SSSE3_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
+
+ eval(shift(@insns));
+ &movdqa ($t0,@X[1]);
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ &movdqa ($t3,@X[3]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &palignr ($t0,@X[0],4); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ &palignr ($t3,@X[2],4); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa ($t1,$t0);
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ &movdqa ($t2,$t0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrld ($t0,3);
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &paddd (@X[0],$t3); # X[0..3] += X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrld ($t2,7);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ &pshufd ($t3,@X[3],0b11111010); # X[14..15]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pslld ($t1,32-18);
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t0,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrld ($t2,18-7);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t0,$t1);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pslld ($t1,18-7);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t0,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa ($t2,$t3);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t0,$t1); # sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrld ($t3,10);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrlq ($t2,17);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrlq ($t2,19-17);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pshufd ($t3,$t3,0b10000000);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ &psrldq ($t3,8);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ &pshufd ($t3,@X[0],0b01010000); # X[16..17]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa ($t2,$t3);
+ eval(shift(@insns)); # @
+ &psrld ($t3,10);
+ eval(shift(@insns));
+ &psrlq ($t2,17);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrlq ($t2,19-17);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &pxor ($t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pshufd ($t3,$t3,0b00001000);
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &movdqa ($t2,&QWP(16*$j,$K256));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pslldq ($t3,8);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # @
+ &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddd ($t2,@X[0]);
+ eval(shift(@insns)); # @
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ &movdqa (&QWP(32+16*$j,"esp"),$t2);
+}
+
+sub body_00_15 () {
+ (
+ '&mov ("ecx",$E);',
+ '&ror ($E,25-11);',
+ '&mov ("esi",&off($f));',
+ '&xor ($E,"ecx");',
+ '&mov ("edi",&off($g));',
+ '&xor ("esi","edi");',
+ '&ror ($E,11-6);',
+ '&and ("esi","ecx");',
+ '&mov (&off($e),"ecx");', # save $E, modulo-scheduled
+ '&xor ($E,"ecx");',
+ '&xor ("edi","esi");', # Ch(e,f,g)
+ '&ror ($E,6);', # T = Sigma1(e)
+ '&mov ("ecx",$AH[0]);',
+ '&add ($E,"edi");', # T += Ch(e,f,g)
+ '&mov ("edi",&off($b));',
+ '&mov ("esi",$AH[0]);',
+
+ '&ror ("ecx",22-13);',
+ '&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
+ '&xor ("ecx",$AH[0]);',
+ '&xor ($AH[0],"edi");', # a ^= b, (b^c) in next round
+ '&add ($E,&off($h));', # T += h
+ '&ror ("ecx",13-2);',
+ '&and ($AH[1],$AH[0]);', # (b^c) &= (a^b)
+ '&xor ("ecx","esi");',
+ '&add ($E,&DWP(32+4*($i&15),"esp"));', # T += K[i]+X[i]
+ '&xor ($AH[1],"edi");', # h = Maj(a,b,c) = Ch(a^b,c,b)
+ '&ror ("ecx",2);', # Sigma0(a)
+
+ '&add ($AH[1],$E);', # h += T
+ '&add ($E,&off($d));', # d += T
+ '&add ($AH[1],"ecx");'. # h += Sigma0(a)
+
+ '@AH = reverse(@AH); $i++;' # rotate(a,h)
+ );
+}
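+
+# Note: Maj(a,b,c) is computed as Ch(a^b,c,b) = ((a^b)&(b^c))^b, and
+# the a^b produced in one round serves as b^c of the next (the "magic"
+# xor below seeds the chain), saving one logical operation per round.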
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &SSSE3_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmp (&DWP(16*$j,$K256),0x00010203);
+ &jne (&label("ssse3_00_47"));
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+
+ &mov ("esi",&DWP(96,"esp")); #ctx
+ #&mov ($AH[0],&DWP(0,"esp"));
+ &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
+ #&mov ("edi", &DWP(8,"esp"));
+ &mov ("ecx",&DWP(12,"esp"));
+ &add ($AH[0],&DWP(0,"esi"));
+ &add ($AH[1],&DWP(4,"esi"));
+ &add ("edi",&DWP(8,"esi"));
+ &add ("ecx",&DWP(12,"esi"));
+ &mov (&DWP(0,"esi"),$AH[0]);
+ &mov (&DWP(4,"esi"),$AH[1]);
+ &mov (&DWP(8,"esi"),"edi");
+ &mov (&DWP(12,"esi"),"ecx");
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"edi"); # magic
+ &mov (&DWP(8,"esp"),"edi");
+ &mov (&DWP(12,"esp"),"ecx");
+ #&mov ($E,&DWP(16,"esp"));
+ &mov ("edi",&DWP(20,"esp"));
+ &mov ("ecx",&DWP(24,"esp"));
+ &add ($E,&DWP(16,"esi"));
+ &add ("edi",&DWP(20,"esi"));
+ &add ("ecx",&DWP(24,"esi"));
+ &mov (&DWP(16,"esi"),$E);
+ &mov (&DWP(20,"esi"),"edi");
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(28,"esp"));
+ &mov (&DWP(24,"esi"),"ecx");
+ #&mov (&DWP(16,"esp"),$E);
+ &add ("edi",&DWP(28,"esi"));
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esi"),"edi");
+ &mov (&DWP(28,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+
+ &movdqa ($t3,&QWP(64,$K256));
+ &sub ($K256,3*64); # rewind K
+ &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
+ &jb (&label("grand_ssse3"));
+
+ &mov ("esp",&DWP(96+12,"esp")); # restore sp
+&function_end_A();
+ if ($avx) {
+&set_label("AVX",32);
+ if ($avx>1) {
+ &and ("edx",1<<8|1<<3); # check for BMI2+BMI1
+ &cmp ("edx",1<<8|1<<3);
+ &je (&label("AVX_BMI"));
+ }
+ &lea ("esp",&DWP(-96,"esp"));
+ &vzeroall ();
+ # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+ &mov ($AH[0],&DWP(0,"esi"));
+ &mov ($AH[1],&DWP(4,"esi"));
+ &mov ("ecx",&DWP(8,"esi"));
+ &mov ("edi",&DWP(12,"esi"));
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"ecx"); # magic
+ &mov (&DWP(8,"esp"),"ecx");
+ &mov (&DWP(12,"esp"),"edi");
+ &mov ($E,&DWP(16,"esi"));
+ &mov ("edi",&DWP(20,"esi"));
+ &mov ("ecx",&DWP(24,"esi"));
+ &mov ("esi",&DWP(28,"esi"));
+ #&mov (&DWP(16,"esp"),$E);
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esp"),"esi");
+ &vmovdqa ($t3,&QWP(256,$K256));
+ &jmp (&label("grand_avx"));
+
+&set_label("grand_avx",32);
+ # load input, reverse byte order, add K256[0..15], save to stack
+ &vmovdqu (@X[0],&QWP(0,"edi"));
+ &vmovdqu (@X[1],&QWP(16,"edi"));
+ &vmovdqu (@X[2],&QWP(32,"edi"));
+ &vmovdqu (@X[3],&QWP(48,"edi"));
+ &add ("edi",64);
+ &vpshufb (@X[0],@X[0],$t3);
+ &mov (&DWP(96+4,"esp"),"edi");
+ &vpshufb (@X[1],@X[1],$t3);
+ &vpshufb (@X[2],@X[2],$t3);
+ &vpaddd ($t0,@X[0],&QWP(0,$K256));
+ &vpshufb (@X[3],@X[3],$t3);
+ &vpaddd ($t1,@X[1],&QWP(16,$K256));
+ &vpaddd ($t2,@X[2],&QWP(32,$K256));
+ &vpaddd ($t3,@X[3],&QWP(48,$K256));
+ &vmovdqa (&QWP(32+0,"esp"),$t0);
+ &vmovdqa (&QWP(32+16,"esp"),$t1);
+ &vmovdqa (&QWP(32+32,"esp"),$t2);
+ &vmovdqa (&QWP(32+48,"esp"),$t3);
+ &jmp (&label("avx_00_47"));
+
+&set_label("avx_00_47",16);
+ &add ($K256,64);
+
+sub Xupdate_AVX () {
+ (
+ '&vpalignr ($t0,@X[1],@X[0],4);', # X[1..4]
+ '&vpalignr ($t3,@X[3],@X[2],4);', # X[9..12]
+ '&vpsrld ($t2,$t0,7);',
+ '&vpaddd (@X[0],@X[0],$t3);', # X[0..3] += X[9..16]
+ '&vpsrld ($t3,$t0,3);',
+ '&vpslld ($t1,$t0,14);',
+ '&vpxor ($t0,$t3,$t2);',
+ '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
+ '&vpsrld ($t2,$t2,18-7);',
+ '&vpxor ($t0,$t0,$t1);',
+ '&vpslld ($t1,$t1,25-14);',
+ '&vpxor ($t0,$t0,$t2);',
+ '&vpsrld ($t2,$t3,10);',
+ '&vpxor ($t0,$t0,$t1);', # sigma0(X[1..4])
+ '&vpsrlq ($t1,$t3,17);',
+ '&vpaddd (@X[0],@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
+ '&vpxor ($t2,$t2,$t1);',
+ '&vpsrlq ($t3,$t3,19);',
+	'&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
+ '&vpshufd ($t3,$t2,0b10000100);',
+ '&vpsrldq ($t3,$t3,8);',
+ '&vpaddd (@X[0],@X[0],$t3);', # X[0..1] += sigma1(X[14..15])
+ '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
+ '&vpsrld ($t2,$t3,10);',
+ '&vpsrlq ($t1,$t3,17);',
+ '&vpxor ($t2,$t2,$t1);',
+ '&vpsrlq ($t3,$t3,19);',
+	'&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
+ '&vpshufd ($t3,$t2,0b11101000);',
+ '&vpslldq ($t3,$t3,8);',
+ '&vpaddd (@X[0],@X[0],$t3);' # X[2..3] += sigma1(X[16..17])
+ );
+}
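+
+# The sequence above is the SHA256 message schedule, four words per
+# call: W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16].
+# sigma1's rotations by 17 and 19 are emulated with 64-bit vpsrlq
+# shifts, the preceding vpshufd having spread the source dwords into
+# qword lanes for that purpose.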
+
+local *ror = sub { &shrd(@_[0],@_) };
+sub AVX_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
+my $insn;
+
+ foreach (Xupdate_AVX()) { # 31 instructions
+ eval;
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval($insn = shift(@insns));
+ eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
+ }
+ &vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa (&QWP(32+16*$j,"esp"),$t2);
+}
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &AVX_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmp (&DWP(16*$j,$K256),0x00010203);
+ &jne (&label("avx_00_47"));
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+
+ &mov ("esi",&DWP(96,"esp")); #ctx
+ #&mov ($AH[0],&DWP(0,"esp"));
+ &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
+ #&mov ("edi", &DWP(8,"esp"));
+ &mov ("ecx",&DWP(12,"esp"));
+ &add ($AH[0],&DWP(0,"esi"));
+ &add ($AH[1],&DWP(4,"esi"));
+ &add ("edi",&DWP(8,"esi"));
+ &add ("ecx",&DWP(12,"esi"));
+ &mov (&DWP(0,"esi"),$AH[0]);
+ &mov (&DWP(4,"esi"),$AH[1]);
+ &mov (&DWP(8,"esi"),"edi");
+ &mov (&DWP(12,"esi"),"ecx");
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"edi"); # magic
+ &mov (&DWP(8,"esp"),"edi");
+ &mov (&DWP(12,"esp"),"ecx");
+ #&mov ($E,&DWP(16,"esp"));
+ &mov ("edi",&DWP(20,"esp"));
+ &mov ("ecx",&DWP(24,"esp"));
+ &add ($E,&DWP(16,"esi"));
+ &add ("edi",&DWP(20,"esi"));
+ &add ("ecx",&DWP(24,"esi"));
+ &mov (&DWP(16,"esi"),$E);
+ &mov (&DWP(20,"esi"),"edi");
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(28,"esp"));
+ &mov (&DWP(24,"esi"),"ecx");
+ #&mov (&DWP(16,"esp"),$E);
+ &add ("edi",&DWP(28,"esi"));
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esi"),"edi");
+ &mov (&DWP(28,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+
+ &vmovdqa ($t3,&QWP(64,$K256));
+ &sub ($K256,3*64); # rewind K
+ &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
+ &jb (&label("grand_avx"));
+
+ &mov ("esp",&DWP(96+12,"esp")); # restore sp
+ &vzeroall ();
+&function_end_A();
+ if ($avx>1) {
+sub bodyx_00_15 () { # +10%
+ (
+ '&rorx ("ecx",$E,6)',
+ '&rorx ("esi",$E,11)',
+ '&mov (&off($e),$E)', # save $E, modulo-scheduled
+ '&rorx ("edi",$E,25)',
+ '&xor ("ecx","esi")',
+ '&andn ("esi",$E,&off($g))',
+ '&xor ("ecx","edi")', # Sigma1(e)
+ '&and ($E,&off($f))',
+ '&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
+ '&or ($E,"esi")', # T = Ch(e,f,g)
+
+ '&rorx ("edi",$AH[0],2)',
+ '&rorx ("esi",$AH[0],13)',
+ '&lea ($E,&DWP(0,$E,"ecx"))', # T += Sigma1(e)
+ '&rorx ("ecx",$AH[0],22)',
+ '&xor ("esi","edi")',
+ '&mov ("edi",&off($b))',
+ '&xor ("ecx","esi")', # Sigma0(a)
+
+ '&xor ($AH[0],"edi")', # a ^= b, (b^c) in next round
+ '&add ($E,&off($h))', # T += h
+ '&and ($AH[1],$AH[0])', # (b^c) &= (a^b)
+ '&add ($E,&DWP(32+4*($i&15),"esp"))', # T += K[i]+X[i]
+ '&xor ($AH[1],"edi")', # h = Maj(a,b,c) = Ch(a^b,c,b)
+
+ '&add ("ecx",$E)', # h += T
+ '&add ($E,&off($d))', # d += T
+ '&lea ($AH[1],&DWP(0,$AH[1],"ecx"));'. # h += Sigma0(a)
+
+ '@AH = reverse(@AH); $i++;' # rotate(a,h)
+ );
+}
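+
+# Note: the BMI path computes Ch(e,f,g) as (e&f)|(~e&g) -- andn yields
+# ~e&g directly, and since the two terms are masked by e and ~e they
+# share no set bits, so OR equals the XOR of the textbook definition.
+# rorx leaves the flags untouched, which removes the flag dependencies
+# that plain ror would create between rounds.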
+
+&set_label("AVX_BMI",32);
+ &lea ("esp",&DWP(-96,"esp"));
+ &vzeroall ();
+ # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+ &mov ($AH[0],&DWP(0,"esi"));
+ &mov ($AH[1],&DWP(4,"esi"));
+ &mov ("ecx",&DWP(8,"esi"));
+ &mov ("edi",&DWP(12,"esi"));
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"ecx"); # magic
+ &mov (&DWP(8,"esp"),"ecx");
+ &mov (&DWP(12,"esp"),"edi");
+ &mov ($E,&DWP(16,"esi"));
+ &mov ("edi",&DWP(20,"esi"));
+ &mov ("ecx",&DWP(24,"esi"));
+ &mov ("esi",&DWP(28,"esi"));
+ #&mov (&DWP(16,"esp"),$E);
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esp"),"esi");
+ &vmovdqa ($t3,&QWP(256,$K256));
+ &jmp (&label("grand_avx_bmi"));
+
+&set_label("grand_avx_bmi",32);
+ # load input, reverse byte order, add K256[0..15], save to stack
+ &vmovdqu (@X[0],&QWP(0,"edi"));
+ &vmovdqu (@X[1],&QWP(16,"edi"));
+ &vmovdqu (@X[2],&QWP(32,"edi"));
+ &vmovdqu (@X[3],&QWP(48,"edi"));
+ &add ("edi",64);
+ &vpshufb (@X[0],@X[0],$t3);
+ &mov (&DWP(96+4,"esp"),"edi");
+ &vpshufb (@X[1],@X[1],$t3);
+ &vpshufb (@X[2],@X[2],$t3);
+ &vpaddd ($t0,@X[0],&QWP(0,$K256));
+ &vpshufb (@X[3],@X[3],$t3);
+ &vpaddd ($t1,@X[1],&QWP(16,$K256));
+ &vpaddd ($t2,@X[2],&QWP(32,$K256));
+ &vpaddd ($t3,@X[3],&QWP(48,$K256));
+ &vmovdqa (&QWP(32+0,"esp"),$t0);
+ &vmovdqa (&QWP(32+16,"esp"),$t1);
+ &vmovdqa (&QWP(32+32,"esp"),$t2);
+ &vmovdqa (&QWP(32+48,"esp"),$t3);
+ &jmp (&label("avx_bmi_00_47"));
+
+&set_label("avx_bmi_00_47",16);
+ &add ($K256,64);
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &AVX_00_47($j,\&bodyx_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmp (&DWP(16*$j,$K256),0x00010203);
+ &jne (&label("avx_bmi_00_47"));
+
+ for ($i=0; $i<16; ) {
+ foreach(bodyx_00_15()) { eval; }
+ }
+
+ &mov ("esi",&DWP(96,"esp")); #ctx
+ #&mov ($AH[0],&DWP(0,"esp"));
+ &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
+ #&mov ("edi", &DWP(8,"esp"));
+ &mov ("ecx",&DWP(12,"esp"));
+ &add ($AH[0],&DWP(0,"esi"));
+ &add ($AH[1],&DWP(4,"esi"));
+ &add ("edi",&DWP(8,"esi"));
+ &add ("ecx",&DWP(12,"esi"));
+ &mov (&DWP(0,"esi"),$AH[0]);
+ &mov (&DWP(4,"esi"),$AH[1]);
+ &mov (&DWP(8,"esi"),"edi");
+ &mov (&DWP(12,"esi"),"ecx");
+ #&mov (&DWP(0,"esp"),$AH[0]);
+ &mov (&DWP(4,"esp"),$AH[1]);
+ &xor ($AH[1],"edi"); # magic
+ &mov (&DWP(8,"esp"),"edi");
+ &mov (&DWP(12,"esp"),"ecx");
+ #&mov ($E,&DWP(16,"esp"));
+ &mov ("edi",&DWP(20,"esp"));
+ &mov ("ecx",&DWP(24,"esp"));
+ &add ($E,&DWP(16,"esi"));
+ &add ("edi",&DWP(20,"esi"));
+ &add ("ecx",&DWP(24,"esi"));
+ &mov (&DWP(16,"esi"),$E);
+ &mov (&DWP(20,"esi"),"edi");
+ &mov (&DWP(20,"esp"),"edi");
+ &mov ("edi",&DWP(28,"esp"));
+ &mov (&DWP(24,"esi"),"ecx");
+ #&mov (&DWP(16,"esp"),$E);
+ &add ("edi",&DWP(28,"esi"));
+ &mov (&DWP(24,"esp"),"ecx");
+ &mov (&DWP(28,"esi"),"edi");
+ &mov (&DWP(28,"esp"),"edi");
+ &mov ("edi",&DWP(96+4,"esp")); # inp
+
+ &vmovdqa ($t3,&QWP(64,$K256));
+ &sub ($K256,3*64); # rewind K
+ &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
+ &jb (&label("grand_avx_bmi"));
+
+ &mov ("esp",&DWP(96+12,"esp")); # restore sp
+ &vzeroall ();
+&function_end_A();
+ }
+ }
+ }}}
+&function_end_B("sha256_block_data_order");
+
&asm_finish();
diff --git a/openssl/crypto/sha/asm/sha256-armv4.pl b/openssl/crypto/sha/asm/sha256-armv4.pl
index 9c84e8d93..f14c9c3cb 100644
--- a/openssl/crypto/sha/asm/sha256-armv4.pl
+++ b/openssl/crypto/sha/asm/sha256-armv4.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -21,15 +21,27 @@
# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~17 cycles per processed byte.
+# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+# September 2013.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process one
+# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+# code (meaning that the latter performs sub-optimally; nothing was done
+# about it).
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$ctx="r0"; $t0="r0";
-$inp="r1"; $t3="r1";
+$inp="r1"; $t4="r1";
$len="r2"; $t1="r2";
-$T1="r3";
+$T1="r3"; $t3="r3";
$A="r4";
$B="r5";
$C="r6";
@@ -52,71 +64,88 @@ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___ if ($i<16);
#if __ARM_ARCH__>=7
- ldr $T1,[$inp],#4
+ @ ldr $t1,[$inp],#4 @ $i
+# if $i==15
+ str $inp,[sp,#17*4] @ make room for $t4
+# endif
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+ rev $t1,$t1
#else
- ldrb $T1,[$inp,#3] @ $i
+ @ ldrb $t1,[$inp,#3] @ $i
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
ldrb $t2,[$inp,#2]
- ldrb $t1,[$inp,#1]
- ldrb $t0,[$inp],#4
- orr $T1,$T1,$t2,lsl#8
- orr $T1,$T1,$t1,lsl#16
- orr $T1,$T1,$t0,lsl#24
+ ldrb $t0,[$inp,#1]
+ orr $t1,$t1,$t2,lsl#8
+ ldrb $t2,[$inp],#4
+ orr $t1,$t1,$t0,lsl#16
+# if $i==15
+ str $inp,[sp,#17*4] @ make room for $t4
+# endif
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+ orr $t1,$t1,$t2,lsl#24
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
#endif
___
$code.=<<___;
- mov $t0,$e,ror#$Sigma1[0]
ldr $t2,[$Ktbl],#4 @ *K256++
- eor $t0,$t0,$e,ror#$Sigma1[1]
+ add $h,$h,$t1 @ h+=X[i]
+ str $t1,[sp,#`$i%16`*4]
eor $t1,$f,$g
-#if $i>=16
- add $T1,$T1,$t3 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev $T1,$T1
-#endif
-#if $i==15
- str $inp,[sp,#17*4] @ leave room for $t3
-#endif
- eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
+ add $h,$h,$t0,ror#$Sigma1[0] @ h+=Sigma1(e)
and $t1,$t1,$e
- str $T1,[sp,#`$i%16`*4]
- add $T1,$T1,$t0
+ add $h,$h,$t2 @ h+=K256[i]
eor $t1,$t1,$g @ Ch(e,f,g)
- add $T1,$T1,$h
- mov $h,$a,ror#$Sigma0[0]
- add $T1,$T1,$t1
- eor $h,$h,$a,ror#$Sigma0[1]
- add $T1,$T1,$t2
- eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a)
-#if $i>=15
- ldr $t3,[sp,#`($i+2)%16`*4] @ from BODY_16_xx
+ eor $t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
+ add $h,$h,$t1 @ h+=Ch(e,f,g)
+#if $i==31
+ and $t2,$t2,#0xff
+ cmp $t2,#0xf2 @ done?
#endif
- orr $t0,$a,$b
- and $t1,$a,$b
- and $t0,$t0,$c
- add $h,$h,$T1
- orr $t0,$t0,$t1 @ Maj(a,b,c)
- add $d,$d,$T1
- add $h,$h,$t0
+#if $i<15
+# if __ARM_ARCH__>=7
+ ldr $t1,[$inp],#4 @ prefetch
+# else
+ ldrb $t1,[$inp,#3]
+# endif
+ eor $t2,$a,$b @ a^b, b^c in next round
+#else
+ ldr $t1,[sp,#`($i+2)%16`*4] @ from future BODY_16_xx
+ eor $t2,$a,$b @ a^b, b^c in next round
+ ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
+#endif
+ eor $t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]` @ Sigma0(a)
+ and $t3,$t3,$t2 @ (b^c)&=(a^b)
+ add $d,$d,$h @ d+=h
+ eor $t3,$t3,$b @ Maj(a,b,c)
+ add $h,$h,$t0,ror#$Sigma0[0] @ h+=Sigma0(a)
+ @ add $h,$h,$t3 @ h+=Maj(a,b,c)
___
+ ($t2,$t3)=($t3,$t2);
}
sub BODY_16_XX {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___;
- @ ldr $t3,[sp,#`($i+1)%16`*4] @ $i
- ldr $t2,[sp,#`($i+14)%16`*4]
- mov $t0,$t3,ror#$sigma0[0]
- ldr $T1,[sp,#`($i+0)%16`*4]
- eor $t0,$t0,$t3,ror#$sigma0[1]
- ldr $t1,[sp,#`($i+9)%16`*4]
- eor $t0,$t0,$t3,lsr#$sigma0[2] @ sigma0(X[i+1])
- mov $t3,$t2,ror#$sigma1[0]
- add $T1,$T1,$t0
- eor $t3,$t3,$t2,ror#$sigma1[1]
- add $T1,$T1,$t1
- eor $t3,$t3,$t2,lsr#$sigma1[2] @ sigma1(X[i+14])
- @ add $T1,$T1,$t3
+ @ ldr $t1,[sp,#`($i+1)%16`*4] @ $i
+ @ ldr $t4,[sp,#`($i+14)%16`*4]
+ mov $t0,$t1,ror#$sigma0[0]
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ mov $t2,$t4,ror#$sigma1[0]
+ eor $t0,$t0,$t1,ror#$sigma0[1]
+ eor $t2,$t2,$t4,ror#$sigma1[1]
+ eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
+ ldr $t1,[sp,#`($i+0)%16`*4]
+ eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
+ ldr $t4,[sp,#`($i+9)%16`*4]
+
+ add $t2,$t2,$t0
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` @ from BODY_00_15
+ add $t1,$t1,$t2
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+ add $t1,$t1,$t4 @ X[i]
___
&BODY_00_15(@_);
}
@@ -147,46 +176,66 @@ K256:
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size K256,.-K256
+.word 0 @ terminator
+#if __ARM_MAX_ARCH__>=7
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha256_block_data_order
+#endif
+.align 5
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
sub r3,pc,#8 @ sha256_block_data_order
add $len,$inp,$len,lsl#6 @ len to point at the end of inp
+#if __ARM_MAX_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA256
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
stmdb sp!,{$ctx,$inp,$len,r4-r11,lr}
ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
- sub $Ktbl,r3,#256 @ K256
+ sub $Ktbl,r3,#256+32 @ K256
sub sp,sp,#16*4 @ alloca(X[16])
.Loop:
+# if __ARM_ARCH__>=7
+ ldr $t1,[$inp],#4
+# else
+ ldrb $t1,[$inp,#3]
+# endif
+ eor $t3,$B,$C @ magic
+ eor $t2,$t2,$t2
___
for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=".Lrounds_16_xx:\n";
for (;$i<32;$i++) { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
- and $t2,$t2,#0xff
- cmp $t2,#0xf2
+ ldreq $t3,[sp,#16*4] @ pull ctx
bne .Lrounds_16_xx
- ldr $T1,[sp,#16*4] @ pull ctx
- ldr $t0,[$T1,#0]
- ldr $t1,[$T1,#4]
- ldr $t2,[$T1,#8]
+ add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
+ ldr $t0,[$t3,#0]
+ ldr $t1,[$t3,#4]
+ ldr $t2,[$t3,#8]
add $A,$A,$t0
- ldr $t0,[$T1,#12]
+ ldr $t0,[$t3,#12]
add $B,$B,$t1
- ldr $t1,[$T1,#16]
+ ldr $t1,[$t3,#16]
add $C,$C,$t2
- ldr $t2,[$T1,#20]
+ ldr $t2,[$t3,#20]
add $D,$D,$t0
- ldr $t0,[$T1,#24]
+ ldr $t0,[$t3,#24]
add $E,$E,$t1
- ldr $t1,[$T1,#28]
+ ldr $t1,[$t3,#28]
add $F,$F,$t2
ldr $inp,[sp,#17*4] @ pull inp
ldr $t2,[sp,#18*4] @ pull inp+len
add $G,$G,$t0
add $H,$H,$t1
- stmia $T1,{$A,$B,$C,$D,$E,$F,$G,$H}
+ stmia $t3,{$A,$B,$C,$D,$E,$F,$G,$H}
cmp $inp,$t2
sub $Ktbl,$Ktbl,#256 @ rewind Ktbl
bne .Loop
@@ -200,12 +249,413 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.size sha256_block_data_order,.-sha256_block_data_order
-.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.size sha256_block_data_order,.-sha256_block_data_order
+___
+######################################################################
+# NEON stuff
+#
+{{{
+my @X=map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
+my $Xfer=$t4;
+my $j=0;
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub Xupdate()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ &vext_8 ($T0,@X[0],@X[1],4); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vext_8 ($T1,@X[2],@X[3],4); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T2,$T0,$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T1,$T0,$sigma0[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T2,$T0,32-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T3,$T0,$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T1,$T1,$T2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T3,$T0,32-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T1,$T1,$T3); # sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T5,&Dhi(@X[3]),$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4); # sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T5,&Dlo(@X[0]),$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$T0}","[$Ktbl,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4); # sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 ($T0,$T0,@X[0]);
+ while($#insns>=2) { eval(shift(@insns)); }
+ &vst1_32 ("{$T0}","[$Xfer,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
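+
+# The rotations in sigma0/sigma1 are emulated above by vshr.u32/vsli.32
+# pairs: ROR(x,n) is x>>n with x<<(32-n) inserted into the vacated
+# bits, since NEON has no 32-bit vector rotate instruction.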
+
+sub Xpreload()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$T0}","[$Ktbl,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vrev32_8 (@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 ($T0,$T0,@X[0]);
+ foreach (@insns) { eval; } # remaining instructions
+ &vst1_32 ("{$T0}","[$Xfer,:128]!");
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub body_00_15 () {
+ (
+ '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+ '&add ($h,$h,$t1)', # h+=X[i]+K[i]
+ '&eor ($t1,$f,$g)',
+ '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+ '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past
+ '&and ($t1,$t1,$e)',
+ '&eor ($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e)
+ '&eor ($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+ '&eor ($t1,$t1,$g)', # Ch(e,f,g)
+ '&add ($h,$h,$t2,"ror#$Sigma1[0]")', # h+=Sigma1(e)
+ '&eor ($t2,$a,$b)', # a^b, b^c in next round
+ '&eor ($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a)
+ '&add ($h,$h,$t1)', # h+=Ch(e,f,g)
+ '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'.
+ '&ldr ($t1,"[$Ktbl]") if ($j==15);'.
+ '&ldr ($t1,"[sp,#64]") if ($j==31)',
+ '&and ($t3,$t3,$t2)', # (b^c)&=(a^b)
+ '&add ($d,$d,$h)', # d+=h
+ '&add ($h,$h,$t0,"ror#$Sigma0[0]");'. # h+=Sigma0(a)
+ '&eor ($t3,$t3,$b)', # Maj(a,b,c)
+ '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+ )
+}
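+
+# Note: the final h+=Maj(a,b,c) of each round is deferred to the next
+# one (the "h+=Maj(a,b,c) from the past" additions above), shortening
+# the critical path; $t2 and $t3 swap roles every round to carry the
+# Maj operands across iterations.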
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.type sha256_block_data_order_neon,%function
+.align 4
+sha256_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+
+ mov $t2,sp
+ sub sp,sp,#16*4+16 @ alloca
+ sub $Ktbl,r3,#256+32 @ K256
+ bic sp,sp,#15 @ align for 128-bit stores
+
+ vld1.8 {@X[0]},[$inp]!
+ vld1.8 {@X[1]},[$inp]!
+ vld1.8 {@X[2]},[$inp]!
+ vld1.8 {@X[3]},[$inp]!
+ vld1.32 {$T0},[$Ktbl,:128]!
+ vld1.32 {$T1},[$Ktbl,:128]!
+ vld1.32 {$T2},[$Ktbl,:128]!
+ vld1.32 {$T3},[$Ktbl,:128]!
+ vrev32.8 @X[0],@X[0] @ yes, even on
+ str $ctx,[sp,#64]
+ vrev32.8 @X[1],@X[1] @ big-endian
+ str $inp,[sp,#68]
+ mov $Xfer,sp
+ vrev32.8 @X[2],@X[2]
+ str $len,[sp,#72]
+ vrev32.8 @X[3],@X[3]
+ str $t2,[sp,#76] @ save original sp
+ vadd.i32 $T0,$T0,@X[0]
+ vadd.i32 $T1,$T1,@X[1]
+ vst1.32 {$T0},[$Xfer,:128]!
+ vadd.i32 $T2,$T2,@X[2]
+ vst1.32 {$T1},[$Xfer,:128]!
+ vadd.i32 $T3,$T3,@X[3]
+ vst1.32 {$T2},[$Xfer,:128]!
+ vst1.32 {$T3},[$Xfer,:128]!
+
+ ldmia $ctx,{$A-$H}
+ sub $Xfer,$Xfer,#64
+ ldr $t1,[sp,#0]
+ eor $t2,$t2,$t2
+ eor $t3,$B,$C
+ b .L_00_48
+
+.align 4
+.L_00_48:
+___
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+$code.=<<___;
+ teq $t1,#0 @ check for K256 terminator
+ ldr $t1,[sp,#0]
+ sub $Xfer,$Xfer,#64
+ bne .L_00_48
+
+ ldr $inp,[sp,#68]
+ ldr $t0,[sp,#72]
+ sub $Ktbl,$Ktbl,#256 @ rewind $Ktbl
+ teq $inp,$t0
+ subeq $inp,$inp,#64 @ avoid SEGV
+ vld1.8 {@X[0]},[$inp]! @ load next input block
+ vld1.8 {@X[1]},[$inp]!
+ vld1.8 {@X[2]},[$inp]!
+ vld1.8 {@X[3]},[$inp]!
+ strne $inp,[sp,#68]
+ mov $Xfer,sp
+___
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+$code.=<<___;
+ ldr $t0,[$t1,#0]
+ add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
+ ldr $t2,[$t1,#4]
+ ldr $t3,[$t1,#8]
+ ldr $t4,[$t1,#12]
+ add $A,$A,$t0 @ accumulate
+ ldr $t0,[$t1,#16]
+ add $B,$B,$t2
+ ldr $t2,[$t1,#20]
+ add $C,$C,$t3
+ ldr $t3,[$t1,#24]
+ add $D,$D,$t4
+ ldr $t4,[$t1,#28]
+ add $E,$E,$t0
+ str $A,[$t1],#4
+ add $F,$F,$t2
+ str $B,[$t1],#4
+ add $G,$G,$t3
+ str $C,[$t1],#4
+ add $H,$H,$t4
+ str $D,[$t1],#4
+ stmia $t1,{$E-$H}
+
+ movne $Xfer,sp
+ ldrne $t1,[sp,#0]
+ eorne $t2,$t2,$t2
+ ldreq sp,[sp,#76] @ restore original sp
+ eorne $t3,$B,$C
+ bne .L_00_48
+
+ ldmia sp!,{r4-r12,pc}
+.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+___
+}}}
+######################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
+my @MSG=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
+my $Ktbl="r3";
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.type sha256_block_data_order_armv8,%function
+.align 5
+sha256_block_data_order_armv8:
+.LARMv8:
+ vld1.32 {$ABCD,$EFGH},[$ctx]
+ sub $Ktbl,r3,#sha256_block_data_order-K256
+
+.Loop_v8:
+ vld1.8 {@MSG[0]-@MSG[1]},[$inp]!
+ vld1.8 {@MSG[2]-@MSG[3]},[$inp]!
+ vld1.32 {$W0},[$Ktbl]!
+ vrev32.8 @MSG[0],@MSG[0]
+ vrev32.8 @MSG[1],@MSG[1]
+ vrev32.8 @MSG[2],@MSG[2]
+ vrev32.8 @MSG[3],@MSG[3]
+ vmov $ABCD_SAVE,$ABCD @ offload
+ vmov $EFGH_SAVE,$EFGH
+ teq $inp,$len
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+ vld1.32 {$W1},[$Ktbl]!
+ vadd.i32 $W0,$W0,@MSG[0]
+ sha256su0 @MSG[0],@MSG[1]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+ sha256su1 @MSG[0],@MSG[2],@MSG[3]
+___
+ ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ vld1.32 {$W1},[$Ktbl]!
+ vadd.i32 $W0,$W0,@MSG[0]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ vld1.32 {$W0},[$Ktbl]!
+ vadd.i32 $W1,$W1,@MSG[1]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ vld1.32 {$W1},[$Ktbl]
+ vadd.i32 $W0,$W0,@MSG[2]
+ sub $Ktbl,$Ktbl,#256-16 @ rewind
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ vadd.i32 $W1,$W1,@MSG[3]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ vadd.i32 $ABCD,$ABCD,$ABCD_SAVE
+ vadd.i32 $EFGH,$EFGH,$EFGH_SAVE
+ bne .Loop_v8
+
+ vst1.32 {$ABCD,$EFGH},[$ctx]
+
+ ret @ bx lr
+.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
+#if __ARM_MAX_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
___
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
-print $code;
+{ my %opcode = (
+ "sha256h" => 0xf3000c40, "sha256h2" => 0xf3100c40,
+ "sha256su0" => 0xf3ba03c0, "sha256su1" => 0xf3200c40 );
+
+ sub unsha256 {
+ my ($mnemonic,$arg)=@_;
+
+ if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+ my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+ |(($2&7)<<17)|(($2&8)<<4)
+ |(($3&7)<<1) |(($3&8)<<2);
+	    # ARMv7 instructions are always encoded little-endian, so the
+	    # words are emitted as raw bytes. The correct solution is to
+	    # use the .inst directive, but older assemblers don't
+	    # implement it:-(
+ sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+ $word&0xff,($word>>8)&0xff,
+ ($word>>16)&0xff,($word>>24)&0xff,
+ $mnemonic,$arg;
+ }
+ }
+}
+
+foreach (split($/,$code)) {
+
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
+
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+
close STDOUT; # enforce flush
diff --git a/openssl/crypto/sha/asm/sha256-mb-x86_64.pl b/openssl/crypto/sha/asm/sha256-mb-x86_64.pl
new file mode 100755
index 000000000..adf2ddccd
--- /dev/null
+++ b/openssl/crypto/sha/asm/sha256-mb-x86_64.pl
@@ -0,0 +1,1560 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# Multi-buffer SHA256 procedure processes n buffers in parallel by
+# placing each buffer's data in a designated lane of a SIMD register.
+# n is naturally limited to 4 on pre-AVX2 processors and to 8 on
+# AVX2-capable processors such as Haswell.
+#
+# this +aesni(i) sha256 aesni-sha256 gain(iv)
+# -------------------------------------------------------------------
+# Westmere(ii) 23.3/n +1.28=7.11(n=4) 12.3 +3.75=16.1 +126%
+# Atom(ii) 38.7/n +3.93=13.6(n=4) 20.8 +5.69=26.5 +95%
+# Sandy Bridge (20.5 +5.15=25.7)/n 11.6 13.0 +103%
+# Ivy Bridge (20.4 +5.14=25.5)/n 10.3 11.6 +82%
+# Haswell(iii) (21.0 +5.00=26.0)/n 7.80 8.79 +170%
+# Bulldozer (21.6 +5.76=27.4)/n 13.6 13.7 +100%
+#
+# (i) multi-block CBC encrypt with 128-bit key;
+# (ii) (HASH+AES)/n does not apply to Westmere for n>3 and Atom,
+# because of lower AES-NI instruction throughput, nor is there
+# AES-NI-SHA256 stitch for these processors;
+# (iii) "this" is for n=8, when we gather twice as much data, result
+# for n=4 is 20.3+4.44=24.7;
+# (iv) presented improvement coefficients are asymptotic limits and
+# in real-life application are somewhat lower, e.g. for 2KB
+# fragments they range from 75% to 130% (on Haswell);
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+$avx=0;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+# void sha256_multi_block (
+# struct { unsigned int A[8];
+# unsigned int B[8];
+# unsigned int C[8];
+# unsigned int D[8];
+# unsigned int E[8];
+# unsigned int F[8];
+# unsigned int G[8];
+# unsigned int H[8]; } *ctx,
+# struct { void *ptr; int blocks; } inp[8],
+# int num); /* 1 or 2 */
+#
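+# A hypothetical caller (a sketch under the layout above, not part of
+# this module) would transpose its state and describe its buffers as:
+#
+#	struct { unsigned int A[8],B[8],C[8],D[8],E[8],F[8],G[8],H[8]; } ctx;
+#	struct { void *ptr; int blocks; } inp[8];	/* 64-byte blocks */
+#	/* lane i: ctx.A[i]..ctx.H[i] hold buffer i's H0..H7 */
+#	sha256_multi_block(&ctx, inp, num);	/* num: 1 or 2, see above */
+#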
+$ctx="%rdi"; # 1st arg
+$inp="%rsi"; # 2nd arg
+$num="%edx"; # 3rd arg
+@ptr=map("%r$_",(8..11));
+$Tbl="%rbp";
+
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("%xmm$_",(8..15));
+($t1,$t2,$t3,$axb,$bxc,$Xi,$Xn,$sigma)=map("%xmm$_",(0..7));
+
+$REG_SZ=16;
+
+sub Xi_off {
+my $off = shift;
+
+ $off %= 16; $off *= $REG_SZ;
+ $off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
+}
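+
+# Xi_off maps schedule index i (mod 16) to its slot in the ring buffer
+# of expanded message words, $REG_SZ bytes apart; %rax and %rbx point
+# 128 bytes into each half so that the slots stay within signed 8-bit
+# displacement range (same trick as the "size optimization" on $ctx).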
+
+sub ROUND_00_15 {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+
+$code.=<<___ if ($i<15);
+ movd `4*$i`(@ptr[0]),$Xi
+ movd `4*$i`(@ptr[1]),$t1
+ movd `4*$i`(@ptr[2]),$t2
+ movd `4*$i`(@ptr[3]),$t3
+ punpckldq $t2,$Xi
+ punpckldq $t3,$t1
+ punpckldq $t1,$Xi
+___
+$code.=<<___ if ($i==15);
+ movd `4*$i`(@ptr[0]),$Xi
+ lea `16*4`(@ptr[0]),@ptr[0]
+ movd `4*$i`(@ptr[1]),$t1
+ lea `16*4`(@ptr[1]),@ptr[1]
+ movd `4*$i`(@ptr[2]),$t2
+ lea `16*4`(@ptr[2]),@ptr[2]
+ movd `4*$i`(@ptr[3]),$t3
+ lea `16*4`(@ptr[3]),@ptr[3]
+ punpckldq $t2,$Xi
+ punpckldq $t3,$t1
+ punpckldq $t1,$Xi
+___
+$code.=<<___;
+ movdqa $e,$sigma
+ `"pshufb $Xn,$Xi" if ($i<=15 && ($i&1)==0)`
+ movdqa $e,$t3
+ `"pshufb $Xn,$Xi" if ($i<=15 && ($i&1)==1)`
+ psrld \$6,$sigma
+ movdqa $e,$t2
+ pslld \$7,$t3
+ movdqa $Xi,`&Xi_off($i)`
+ paddd $h,$Xi # Xi+=h
+
+ psrld \$11,$t2
+ pxor $t3,$sigma
+ pslld \$21-7,$t3
+ paddd `32*($i%8)-128`($Tbl),$Xi # Xi+=K[round]
+ pxor $t2,$sigma
+
+ psrld \$25-11,$t2
+ movdqa $e,$t1
+ `"prefetcht0 63(@ptr[0])" if ($i==15)`
+ pxor $t3,$sigma
+ movdqa $e,$axb # borrow $axb
+ pslld \$26-21,$t3
+ pandn $g,$t1
+ pand $f,$axb
+ pxor $t2,$sigma
+
+ `"prefetcht0 63(@ptr[1])" if ($i==15)`
+ movdqa $a,$t2
+ pxor $t3,$sigma # Sigma1(e)
+ movdqa $a,$t3
+ psrld \$2,$t2
+ paddd $sigma,$Xi # Xi+=Sigma1(e)
+ pxor $axb,$t1 # Ch(e,f,g)
+ movdqa $b,$axb
+ movdqa $a,$sigma
+ pslld \$10,$t3
+ pxor $a,$axb # a^b, b^c in next round
+
+ `"prefetcht0 63(@ptr[2])" if ($i==15)`
+ psrld \$13,$sigma
+ pxor $t3,$t2
+ paddd $t1,$Xi # Xi+=Ch(e,f,g)
+ pslld \$19-10,$t3
+ pand $axb,$bxc
+ pxor $sigma,$t2
+
+ `"prefetcht0 63(@ptr[3])" if ($i==15)`
+ psrld \$22-13,$sigma
+ pxor $t3,$t2
+ movdqa $b,$h
+ pslld \$30-19,$t3
+ pxor $t2,$sigma
+ pxor $bxc,$h # h=Maj(a,b,c)=Ch(a^b,c,b)
+ paddd $Xi,$d # d+=Xi
+ pxor $t3,$sigma # Sigma0(a)
+
+ paddd $Xi,$h # h+=Xi
+ paddd $sigma,$h # h+=Sigma0(a)
+___
+$code.=<<___ if (($i%8)==7);
+ lea `32*8`($Tbl),$Tbl
+___
+ ($axb,$bxc)=($bxc,$axb);
+}
+
+sub ROUND_16_XX {
+my $i=shift;
+
+$code.=<<___;
+ movdqa `&Xi_off($i+1)`,$Xn
+ paddd `&Xi_off($i+9)`,$Xi # Xi+=X[i+9]
+
+ movdqa $Xn,$sigma
+ movdqa $Xn,$t2
+ psrld \$3,$sigma
+ movdqa $Xn,$t3
+
+ psrld \$7,$t2
+ movdqa `&Xi_off($i+14)`,$t1
+ pslld \$14,$t3
+ pxor $t2,$sigma
+ psrld \$18-7,$t2
+ movdqa $t1,$axb # borrow $axb
+ pxor $t3,$sigma
+ pslld \$25-14,$t3
+ pxor $t2,$sigma
+ psrld \$10,$t1
+ movdqa $axb,$t2
+
+ psrld \$17,$axb
+ pxor $t3,$sigma # sigma0(X[i+1])
+ pslld \$13,$t2
+	paddd	$sigma,$Xi			# Xi+=sigma0(X[i+1])
+ pxor $axb,$t1
+ psrld \$19-17,$axb
+ pxor $t2,$t1
+ pslld \$15-13,$t2
+ pxor $axb,$t1
+	pxor	$t2,$t1				# sigma1(X[i+14])
+ paddd $t1,$Xi # Xi+=sigma1(X[i+14])
+___
+ &ROUND_00_15($i,@_);
+ ($Xi,$Xn)=($Xn,$Xi);
+}
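+
+# Each SIMD lane carries an independent 16-entry W[] ring, so the
+# scalar recurrence above runs element-wise across all lanes at once;
+# the ($Xi,$Xn) swap keeps the freshly loaded X[i+1] in a register as
+# the next round's Xi.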
+
+$code.=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.globl sha256_multi_block
+.type sha256_multi_block,\@function,3
+.align 32
+sha256_multi_block:
+ mov OPENSSL_ia32cap_P+4(%rip),%rcx
+ bt \$61,%rcx # check SHA bit
+ jc _shaext_shortcut
+___
+$code.=<<___ if ($avx);
+ test \$`1<<28`,%ecx
+ jnz _avx_shortcut
+___
+$code.=<<___;
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,-0x78(%rax)
+ movaps %xmm11,-0x68(%rax)
+ movaps %xmm12,-0x58(%rax)
+ movaps %xmm13,-0x48(%rax)
+ movaps %xmm14,-0x38(%rax)
+ movaps %xmm15,-0x28(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`, %rsp
+ and \$-256,%rsp
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody:
+ lea K256+128(%rip),$Tbl
+ lea `$REG_SZ*16`(%rsp),%rbx
+ lea 0x80($ctx),$ctx # size optimization
+
+.Loop_grande:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+___
+for($i=0;$i<4;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ test $num,$num
+ jz .Ldone
+
+ movdqu 0x00-0x80($ctx),$A # load context
+ lea 128(%rsp),%rax
+ movdqu 0x20-0x80($ctx),$B
+ movdqu 0x40-0x80($ctx),$C
+ movdqu 0x60-0x80($ctx),$D
+ movdqu 0x80-0x80($ctx),$E
+ movdqu 0xa0-0x80($ctx),$F
+ movdqu 0xc0-0x80($ctx),$G
+ movdqu 0xe0-0x80($ctx),$H
+ movdqu .Lpbswap(%rip),$Xn
+ jmp .Loop
+
+.align 32
+.Loop:
+ movdqa $C,$bxc
+ pxor $B,$bxc # magic seed
+___
+for($i=0;$i<16;$i++) { &ROUND_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ movdqu `&Xi_off($i)`,$Xi
+ mov \$3,%ecx
+ jmp .Loop_16_xx
+.align 32
+.Loop_16_xx:
+___
+for(;$i<32;$i++) { &ROUND_16_XX($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ dec %ecx
+ jnz .Loop_16_xx
+
+ mov \$1,%ecx
+ lea K256+128(%rip),$Tbl
+
+ movdqa (%rbx),$sigma # pull counters
+ cmp 4*0(%rbx),%ecx # examine counters
+ pxor $t1,$t1
+ cmovge $Tbl,@ptr[0] # cancel input
+ cmp 4*1(%rbx),%ecx
+ movdqa $sigma,$Xn
+ cmovge $Tbl,@ptr[1]
+ cmp 4*2(%rbx),%ecx
+ pcmpgtd $t1,$Xn # mask value
+ cmovge $Tbl,@ptr[2]
+ cmp 4*3(%rbx),%ecx
+ paddd $Xn,$sigma # counters--
+ cmovge $Tbl,@ptr[3]
+
+ movdqu 0x00-0x80($ctx),$t1
+ pand $Xn,$A
+ movdqu 0x20-0x80($ctx),$t2
+ pand $Xn,$B
+ movdqu 0x40-0x80($ctx),$t3
+ pand $Xn,$C
+ movdqu 0x60-0x80($ctx),$Xi
+ pand $Xn,$D
+ paddd $t1,$A
+ movdqu 0x80-0x80($ctx),$t1
+ pand $Xn,$E
+ paddd $t2,$B
+ movdqu 0xa0-0x80($ctx),$t2
+ pand $Xn,$F
+ paddd $t3,$C
+ movdqu 0xc0-0x80($ctx),$t3
+ pand $Xn,$G
+ paddd $Xi,$D
+ movdqu 0xe0-0x80($ctx),$Xi
+ pand $Xn,$H
+ paddd $t1,$E
+ paddd $t2,$F
+ movdqu $A,0x00-0x80($ctx)
+ paddd $t3,$G
+ movdqu $B,0x20-0x80($ctx)
+ paddd $Xi,$H
+ movdqu $C,0x40-0x80($ctx)
+ movdqu $D,0x60-0x80($ctx)
+ movdqu $E,0x80-0x80($ctx)
+ movdqu $F,0xa0-0x80($ctx)
+ movdqu $G,0xc0-0x80($ctx)
+ movdqu $H,0xe0-0x80($ctx)
+
+ movdqa $sigma,(%rbx) # save counters
+ movdqa .Lpbswap(%rip),$Xn
+ dec $num
+ jnz .Loop
+
+ mov `$REG_SZ*17+8`(%rsp),$num
+ lea $REG_SZ($ctx),$ctx
+ lea `16*$REG_SZ/4`($inp),$inp
+ dec $num
+ jnz .Loop_grande
+
+.Ldone:
+	mov	`$REG_SZ*17`(%rsp),%rax		# original %rsp
+___
+$code.=<<___ if ($win64);
+ movaps -0xb8(%rax),%xmm6
+ movaps -0xa8(%rax),%xmm7
+ movaps -0x98(%rax),%xmm8
+ movaps -0x88(%rax),%xmm9
+ movaps -0x78(%rax),%xmm10
+ movaps -0x68(%rax),%xmm11
+ movaps -0x58(%rax),%xmm12
+ movaps -0x48(%rax),%xmm13
+ movaps -0x38(%rax),%xmm14
+ movaps -0x28(%rax),%xmm15
+___
+$code.=<<___;
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue:
+ ret
+.size sha256_multi_block,.-sha256_multi_block
+___
+ {{{
+my ($Wi,$TMP0,$TMP1,$TMPx,$ABEF0,$CDGH0,$ABEF1,$CDGH1)=map("%xmm$_",(0..3,12..15));
+my @MSG0=map("%xmm$_",(4..7));
+my @MSG1=map("%xmm$_",(8..11));
+
+$code.=<<___;
+.type sha256_multi_block_shaext,\@function,3
+.align 32
+sha256_multi_block_shaext:
+_shaext_shortcut:
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,-0x78(%rax)
+ movaps %xmm11,-0x68(%rax)
+ movaps %xmm12,-0x58(%rax)
+ movaps %xmm13,-0x48(%rax)
+ movaps %xmm14,-0x38(%rax)
+ movaps %xmm15,-0x28(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`,%rsp
+	shl	\$1,$num			# we process a pair at a time
+ and \$-256,%rsp
+ lea 0x80($ctx),$ctx # size optimization
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody_shaext:
+ lea `$REG_SZ*16`(%rsp),%rbx
+ lea K256_shaext+0x80(%rip),$Tbl
+
+.Loop_grande_shaext:
+	mov	$num,`$REG_SZ*17+8`(%rsp)	# original $num
+ xor $num,$num
+___
+for($i=0;$i<2;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle %rsp,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ test $num,$num
+ jz .Ldone_shaext
+
+ movq 0x00-0x80($ctx),$ABEF0 # A1.A0
+ movq 0x20-0x80($ctx),@MSG0[0] # B1.B0
+ movq 0x40-0x80($ctx),$CDGH0 # C1.C0
+ movq 0x60-0x80($ctx),@MSG0[1] # D1.D0
+ movq 0x80-0x80($ctx),@MSG1[0] # E1.E0
+ movq 0xa0-0x80($ctx),@MSG1[1] # F1.F0
+ movq 0xc0-0x80($ctx),@MSG1[2] # G1.G0
+ movq 0xe0-0x80($ctx),@MSG1[3] # H1.H0
+
+ punpckldq @MSG0[0],$ABEF0 # B1.A1.B0.A0
+ punpckldq @MSG0[1],$CDGH0 # D1.C1.D0.C0
+ punpckldq @MSG1[1],@MSG1[0] # F1.E1.F0.E0
+ punpckldq @MSG1[3],@MSG1[2] # H1.G1.H0.G0
+ movdqa K256_shaext-0x10(%rip),$TMPx # byte swap
+
+ movdqa $ABEF0,$ABEF1
+ movdqa $CDGH0,$CDGH1
+ punpcklqdq @MSG1[0],$ABEF0 # F0.E0.B0.A0
+ punpcklqdq @MSG1[2],$CDGH0 # H0.G0.D0.C0
+ punpckhqdq @MSG1[0],$ABEF1 # F1.E1.B1.A1
+ punpckhqdq @MSG1[2],$CDGH1 # H1.G1.D1.C1
+
+ pshufd \$0b00011011,$ABEF0,$ABEF0
+ pshufd \$0b00011011,$CDGH0,$CDGH0
+ pshufd \$0b00011011,$ABEF1,$ABEF1
+ pshufd \$0b00011011,$CDGH1,$CDGH1
+ jmp .Loop_shaext
+
+.align 32
+.Loop_shaext:
+ movdqu 0x00(@ptr[0]),@MSG0[0]
+ movdqu 0x00(@ptr[1]),@MSG1[0]
+ movdqu 0x10(@ptr[0]),@MSG0[1]
+ movdqu 0x10(@ptr[1]),@MSG1[1]
+ movdqu 0x20(@ptr[0]),@MSG0[2]
+ pshufb $TMPx,@MSG0[0]
+ movdqu 0x20(@ptr[1]),@MSG1[2]
+ pshufb $TMPx,@MSG1[0]
+ movdqu 0x30(@ptr[0]),@MSG0[3]
+ lea 0x40(@ptr[0]),@ptr[0]
+ movdqu 0x30(@ptr[1]),@MSG1[3]
+ lea 0x40(@ptr[1]),@ptr[1]
+
+ movdqa 0*16-0x80($Tbl),$Wi
+ pshufb $TMPx,@MSG0[1]
+ paddd @MSG0[0],$Wi
+ pxor $ABEF0,@MSG0[0] # black magic
+ movdqa $Wi,$TMP0
+ movdqa 0*16-0x80($Tbl),$TMP1
+ pshufb $TMPx,@MSG1[1]
+ paddd @MSG1[0],$TMP1
+ movdqa $CDGH0,0x50(%rsp) # offload
+ sha256rnds2 $ABEF0,$CDGH0 # 0-3
+ pxor $ABEF1,@MSG1[0] # black magic
+ movdqa $TMP1,$Wi
+ movdqa $CDGH1,0x70(%rsp)
+ sha256rnds2 $ABEF1,$CDGH1 # 0-3
+ pshufd \$0x0e,$TMP0,$Wi
+ pxor $ABEF0,@MSG0[0] # black magic
+ movdqa $ABEF0,0x40(%rsp) # offload
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ pxor $ABEF1,@MSG1[0] # black magic
+ movdqa $ABEF1,0x60(%rsp)
+ movdqa 1*16-0x80($Tbl),$TMP0
+ paddd @MSG0[1],$TMP0
+ pshufb $TMPx,@MSG0[2]
+ sha256rnds2 $CDGH1,$ABEF1
+
+ movdqa $TMP0,$Wi
+ movdqa 1*16-0x80($Tbl),$TMP1
+ paddd @MSG1[1],$TMP1
+ sha256rnds2 $ABEF0,$CDGH0 # 4-7
+ movdqa $TMP1,$Wi
+ prefetcht0 127(@ptr[0])
+ pshufb $TMPx,@MSG0[3]
+ pshufb $TMPx,@MSG1[2]
+ prefetcht0 127(@ptr[1])
+ sha256rnds2 $ABEF1,$CDGH1 # 4-7
+ pshufd \$0x0e,$TMP0,$Wi
+ pshufb $TMPx,@MSG1[3]
+ sha256msg1 @MSG0[1],@MSG0[0]
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ movdqa 2*16-0x80($Tbl),$TMP0
+ paddd @MSG0[2],$TMP0
+ sha256rnds2 $CDGH1,$ABEF1
+
+ movdqa $TMP0,$Wi
+ movdqa 2*16-0x80($Tbl),$TMP1
+ paddd @MSG1[2],$TMP1
+ sha256rnds2 $ABEF0,$CDGH0 # 8-11
+ sha256msg1 @MSG1[1],@MSG1[0]
+ movdqa $TMP1,$Wi
+ movdqa @MSG0[3],$TMPx
+ sha256rnds2 $ABEF1,$CDGH1 # 8-11
+ pshufd \$0x0e,$TMP0,$Wi
+ palignr \$4,@MSG0[2],$TMPx
+ paddd $TMPx,@MSG0[0]
+ movdqa @MSG1[3],$TMPx
+ palignr \$4,@MSG1[2],$TMPx
+ sha256msg1 @MSG0[2],@MSG0[1]
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ movdqa 3*16-0x80($Tbl),$TMP0
+ paddd @MSG0[3],$TMP0
+ sha256rnds2 $CDGH1,$ABEF1
+ sha256msg1 @MSG1[2],@MSG1[1]
+
+ movdqa $TMP0,$Wi
+ movdqa 3*16-0x80($Tbl),$TMP1
+ paddd $TMPx,@MSG1[0]
+ paddd @MSG1[3],$TMP1
+ sha256msg2 @MSG0[3],@MSG0[0]
+ sha256rnds2 $ABEF0,$CDGH0 # 12-15
+ movdqa $TMP1,$Wi
+ movdqa @MSG0[0],$TMPx
+ palignr \$4,@MSG0[3],$TMPx
+ sha256rnds2 $ABEF1,$CDGH1 # 12-15
+ sha256msg2 @MSG1[3],@MSG1[0]
+ pshufd \$0x0e,$TMP0,$Wi
+ paddd $TMPx,@MSG0[1]
+ movdqa @MSG1[0],$TMPx
+ palignr \$4,@MSG1[3],$TMPx
+ sha256msg1 @MSG0[3],@MSG0[2]
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ movdqa 4*16-0x80($Tbl),$TMP0
+ paddd @MSG0[0],$TMP0
+ sha256rnds2 $CDGH1,$ABEF1
+ sha256msg1 @MSG1[3],@MSG1[2]
+___
+for($i=4;$i<16-3;$i++) {
+$code.=<<___;
+ movdqa $TMP0,$Wi
+ movdqa $i*16-0x80($Tbl),$TMP1
+ paddd $TMPx,@MSG1[1]
+ paddd @MSG1[0],$TMP1
+ sha256msg2 @MSG0[0],@MSG0[1]
+ sha256rnds2 $ABEF0,$CDGH0 # 16-19...
+ movdqa $TMP1,$Wi
+ movdqa @MSG0[1],$TMPx
+ palignr \$4,@MSG0[0],$TMPx
+ sha256rnds2 $ABEF1,$CDGH1 # 16-19...
+ sha256msg2 @MSG1[0],@MSG1[1]
+ pshufd \$0x0e,$TMP0,$Wi
+ paddd $TMPx,@MSG0[2]
+ movdqa @MSG1[1],$TMPx
+ palignr \$4,@MSG1[0],$TMPx
+ sha256msg1 @MSG0[0],@MSG0[3]
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ movdqa `($i+1)*16`-0x80($Tbl),$TMP0
+ paddd @MSG0[1],$TMP0
+ sha256rnds2 $CDGH1,$ABEF1
+ sha256msg1 @MSG1[0],@MSG1[3]
+___
+ push(@MSG0,shift(@MSG0)); push(@MSG1,shift(@MSG1));
+}
+$code.=<<___;
+ movdqa $TMP0,$Wi
+ movdqa 13*16-0x80($Tbl),$TMP1
+ paddd $TMPx,@MSG1[1]
+ paddd @MSG1[0],$TMP1
+ sha256msg2 @MSG0[0],@MSG0[1]
+ sha256rnds2 $ABEF0,$CDGH0 # 52-55
+ movdqa $TMP1,$Wi
+ movdqa @MSG0[1],$TMPx
+ palignr \$4,@MSG0[0],$TMPx
+ sha256rnds2 $ABEF1,$CDGH1 # 52-55
+ sha256msg2 @MSG1[0],@MSG1[1]
+ pshufd \$0x0e,$TMP0,$Wi
+ paddd $TMPx,@MSG0[2]
+ movdqa @MSG1[1],$TMPx
+ palignr \$4,@MSG1[0],$TMPx
+ nop
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ movdqa 14*16-0x80($Tbl),$TMP0
+ paddd @MSG0[1],$TMP0
+ sha256rnds2 $CDGH1,$ABEF1
+
+ movdqa $TMP0,$Wi
+ movdqa 14*16-0x80($Tbl),$TMP1
+ paddd $TMPx,@MSG1[2]
+ paddd @MSG1[1],$TMP1
+ sha256msg2 @MSG0[1],@MSG0[2]
+ nop
+ sha256rnds2 $ABEF0,$CDGH0 # 56-59
+ movdqa $TMP1,$Wi
+ mov \$1,%ecx
+ pxor @MSG0[1],@MSG0[1] # zero
+ sha256rnds2 $ABEF1,$CDGH1 # 56-59
+ sha256msg2 @MSG1[1],@MSG1[2]
+ pshufd \$0x0e,$TMP0,$Wi
+ movdqa 15*16-0x80($Tbl),$TMP0
+ paddd @MSG0[2],$TMP0
+ movq (%rbx),@MSG0[2] # pull counters
+ nop
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ movdqa 15*16-0x80($Tbl),$TMP1
+ paddd @MSG1[2],$TMP1
+ sha256rnds2 $CDGH1,$ABEF1
+
+ movdqa $TMP0,$Wi
+ cmp 4*0(%rbx),%ecx # examine counters
+ cmovge %rsp,@ptr[0] # cancel input
+ cmp 4*1(%rbx),%ecx
+ cmovge %rsp,@ptr[1]
+ pshufd \$0x00,@MSG0[2],@MSG1[0]
+ sha256rnds2 $ABEF0,$CDGH0 # 60-63
+ movdqa $TMP1,$Wi
+ pshufd \$0x55,@MSG0[2],@MSG1[1]
+ movdqa @MSG0[2],@MSG1[2]
+ sha256rnds2 $ABEF1,$CDGH1 # 60-63
+ pshufd \$0x0e,$TMP0,$Wi
+ pcmpgtd @MSG0[1],@MSG1[0]
+ pcmpgtd @MSG0[1],@MSG1[1]
+ sha256rnds2 $CDGH0,$ABEF0
+ pshufd \$0x0e,$TMP1,$Wi
+ pcmpgtd @MSG0[1],@MSG1[2] # counter mask
+ movdqa K256_shaext-0x10(%rip),$TMPx
+ sha256rnds2 $CDGH1,$ABEF1
+
+ pand @MSG1[0],$CDGH0
+ pand @MSG1[1],$CDGH1
+ pand @MSG1[0],$ABEF0
+ pand @MSG1[1],$ABEF1
+ paddd @MSG0[2],@MSG1[2] # counters--
+
+ paddd 0x50(%rsp),$CDGH0
+ paddd 0x70(%rsp),$CDGH1
+ paddd 0x40(%rsp),$ABEF0
+ paddd 0x60(%rsp),$ABEF1
+
+ movq @MSG1[2],(%rbx) # save counters
+ dec $num
+ jnz .Loop_shaext
+
+ mov `$REG_SZ*17+8`(%rsp),$num
+
+ pshufd \$0b00011011,$ABEF0,$ABEF0
+ pshufd \$0b00011011,$CDGH0,$CDGH0
+ pshufd \$0b00011011,$ABEF1,$ABEF1
+ pshufd \$0b00011011,$CDGH1,$CDGH1
+
+ movdqa $ABEF0,@MSG0[0]
+ movdqa $CDGH0,@MSG0[1]
+ punpckldq $ABEF1,$ABEF0 # B1.B0.A1.A0
+ punpckhdq $ABEF1,@MSG0[0] # F1.F0.E1.E0
+ punpckldq $CDGH1,$CDGH0 # D1.D0.C1.C0
+ punpckhdq $CDGH1,@MSG0[1] # H1.H0.G1.G0
+
+ movq $ABEF0,0x00-0x80($ctx) # A1.A0
+ psrldq \$8,$ABEF0
+ movq @MSG0[0],0x80-0x80($ctx) # E1.E0
+ psrldq \$8,@MSG0[0]
+ movq $ABEF0,0x20-0x80($ctx) # B1.B0
+ movq @MSG0[0],0xa0-0x80($ctx) # F1.F0
+
+ movq $CDGH0,0x40-0x80($ctx) # C1.C0
+ psrldq \$8,$CDGH0
+ movq @MSG0[1],0xc0-0x80($ctx) # G1.G0
+ psrldq \$8,@MSG0[1]
+ movq $CDGH0,0x60-0x80($ctx) # D1.D0
+ movq @MSG0[1],0xe0-0x80($ctx) # H1.H0
+
+ lea `$REG_SZ/2`($ctx),$ctx
+ lea `16*2`($inp),$inp
+ dec $num
+ jnz .Loop_grande_shaext
+
+.Ldone_shaext:
+ #mov `$REG_SZ*17`(%rsp),%rax # original %rsp
+___
+$code.=<<___ if ($win64);
+ movaps -0xb8(%rax),%xmm6
+ movaps -0xa8(%rax),%xmm7
+ movaps -0x98(%rax),%xmm8
+ movaps -0x88(%rax),%xmm9
+ movaps -0x78(%rax),%xmm10
+ movaps -0x68(%rax),%xmm11
+ movaps -0x58(%rax),%xmm12
+ movaps -0x48(%rax),%xmm13
+ movaps -0x38(%rax),%xmm14
+ movaps -0x28(%rax),%xmm15
+___
+$code.=<<___;
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue_shaext:
+ ret
+.size sha256_multi_block_shaext,.-sha256_multi_block_shaext
+___
+ }}}
+ if ($avx) {{{
+sub ROUND_00_15_avx {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+
+$code.=<<___ if ($i<15 && $REG_SZ==16);
+ vmovd `4*$i`(@ptr[0]),$Xi
+ vmovd `4*$i`(@ptr[1]),$t1
+ vpinsrd \$1,`4*$i`(@ptr[2]),$Xi,$Xi
+ vpinsrd \$1,`4*$i`(@ptr[3]),$t1,$t1
+ vpunpckldq $t1,$Xi,$Xi
+ vpshufb $Xn,$Xi,$Xi
+___
+$code.=<<___ if ($i==15 && $REG_SZ==16);
+ vmovd `4*$i`(@ptr[0]),$Xi
+ lea `16*4`(@ptr[0]),@ptr[0]
+ vmovd `4*$i`(@ptr[1]),$t1
+ lea `16*4`(@ptr[1]),@ptr[1]
+ vpinsrd \$1,`4*$i`(@ptr[2]),$Xi,$Xi
+ lea `16*4`(@ptr[2]),@ptr[2]
+ vpinsrd \$1,`4*$i`(@ptr[3]),$t1,$t1
+ lea `16*4`(@ptr[3]),@ptr[3]
+ vpunpckldq $t1,$Xi,$Xi
+ vpshufb $Xn,$Xi,$Xi
+___
+$code.=<<___ if ($i<15 && $REG_SZ==32);
+ vmovd `4*$i`(@ptr[0]),$Xi
+ vmovd `4*$i`(@ptr[4]),$t1
+ vmovd `4*$i`(@ptr[1]),$t2
+ vmovd `4*$i`(@ptr[5]),$t3
+ vpinsrd \$1,`4*$i`(@ptr[2]),$Xi,$Xi
+ vpinsrd \$1,`4*$i`(@ptr[6]),$t1,$t1
+ vpinsrd \$1,`4*$i`(@ptr[3]),$t2,$t2
+ vpunpckldq $t2,$Xi,$Xi
+ vpinsrd \$1,`4*$i`(@ptr[7]),$t3,$t3
+ vpunpckldq $t3,$t1,$t1
+ vinserti128 $t1,$Xi,$Xi
+ vpshufb $Xn,$Xi,$Xi
+___
+$code.=<<___ if ($i==15 && $REG_SZ==32);
+ vmovd `4*$i`(@ptr[0]),$Xi
+ lea `16*4`(@ptr[0]),@ptr[0]
+ vmovd `4*$i`(@ptr[4]),$t1
+ lea `16*4`(@ptr[4]),@ptr[4]
+ vmovd `4*$i`(@ptr[1]),$t2
+ lea `16*4`(@ptr[1]),@ptr[1]
+ vmovd `4*$i`(@ptr[5]),$t3
+ lea `16*4`(@ptr[5]),@ptr[5]
+ vpinsrd \$1,`4*$i`(@ptr[2]),$Xi,$Xi
+ lea `16*4`(@ptr[2]),@ptr[2]
+ vpinsrd \$1,`4*$i`(@ptr[6]),$t1,$t1
+ lea `16*4`(@ptr[6]),@ptr[6]
+ vpinsrd \$1,`4*$i`(@ptr[3]),$t2,$t2
+ lea `16*4`(@ptr[3]),@ptr[3]
+ vpunpckldq $t2,$Xi,$Xi
+ vpinsrd \$1,`4*$i`(@ptr[7]),$t3,$t3
+ lea `16*4`(@ptr[7]),@ptr[7]
+ vpunpckldq $t3,$t1,$t1
+ vinserti128 $t1,$Xi,$Xi
+ vpshufb $Xn,$Xi,$Xi
+___
+$code.=<<___;
+ vpsrld \$6,$e,$sigma
+ vpslld \$26,$e,$t3
+ vmovdqu $Xi,`&Xi_off($i)`
+ vpaddd $h,$Xi,$Xi # Xi+=h
+
+ vpsrld \$11,$e,$t2
+ vpxor $t3,$sigma,$sigma
+ vpslld \$21,$e,$t3
+ vpaddd `32*($i%8)-128`($Tbl),$Xi,$Xi # Xi+=K[round]
+ vpxor $t2,$sigma,$sigma
+
+ vpsrld \$25,$e,$t2
+ vpxor $t3,$sigma,$sigma
+ `"prefetcht0 63(@ptr[0])" if ($i==15)`
+ vpslld \$7,$e,$t3
+ vpandn $g,$e,$t1
+ vpand $f,$e,$axb # borrow $axb
+ `"prefetcht0 63(@ptr[1])" if ($i==15)`
+ vpxor $t2,$sigma,$sigma
+
+ vpsrld \$2,$a,$h # borrow $h
+ vpxor $t3,$sigma,$sigma # Sigma1(e)
+ `"prefetcht0 63(@ptr[2])" if ($i==15)`
+ vpslld \$30,$a,$t2
+ vpxor $axb,$t1,$t1 # Ch(e,f,g)
+ vpxor $a,$b,$axb # a^b, b^c in next round
+ `"prefetcht0 63(@ptr[3])" if ($i==15)`
+ vpxor $t2,$h,$h
+ vpaddd $sigma,$Xi,$Xi # Xi+=Sigma1(e)
+
+ vpsrld \$13,$a,$t2
+ `"prefetcht0 63(@ptr[4])" if ($i==15 && $REG_SZ==32)`
+ vpslld \$19,$a,$t3
+ vpaddd $t1,$Xi,$Xi # Xi+=Ch(e,f,g)
+ vpand $axb,$bxc,$bxc
+ `"prefetcht0 63(@ptr[5])" if ($i==15 && $REG_SZ==32)`
+ vpxor $t2,$h,$sigma
+
+ vpsrld \$22,$a,$t2
+ vpxor $t3,$sigma,$sigma
+ `"prefetcht0 63(@ptr[6])" if ($i==15 && $REG_SZ==32)`
+ vpslld \$10,$a,$t3
+ vpxor $bxc,$b,$h # h=Maj(a,b,c)=Ch(a^b,c,b)
+ vpaddd $Xi,$d,$d # d+=Xi
+ `"prefetcht0 63(@ptr[7])" if ($i==15 && $REG_SZ==32)`
+ vpxor $t2,$sigma,$sigma
+ vpxor $t3,$sigma,$sigma # Sigma0(a)
+
+ vpaddd $Xi,$h,$h # h+=Xi
+ vpaddd $sigma,$h,$h # h+=Sigma0(a)
+___
+$code.=<<___ if (($i%8)==7);
+ add \$`32*8`,$Tbl
+___
+ ($axb,$bxc)=($bxc,$axb);
+}
+
+sub ROUND_16_XX_avx {
+my $i=shift;
+
+$code.=<<___;
+ vmovdqu `&Xi_off($i+1)`,$Xn
+ vpaddd `&Xi_off($i+9)`,$Xi,$Xi # Xi+=X[i+9]
+
+ vpsrld \$3,$Xn,$sigma
+ vpsrld \$7,$Xn,$t2
+ vpslld \$25,$Xn,$t3
+ vpxor $t2,$sigma,$sigma
+ vpsrld \$18,$Xn,$t2
+ vpxor $t3,$sigma,$sigma
+ vpslld \$14,$Xn,$t3
+ vmovdqu `&Xi_off($i+14)`,$t1
+ vpsrld \$10,$t1,$axb # borrow $axb
+
+ vpxor $t2,$sigma,$sigma
+ vpsrld \$17,$t1,$t2
+ vpxor $t3,$sigma,$sigma # sigma0(X[i+1])
+ vpslld \$15,$t1,$t3
+	vpaddd	$sigma,$Xi,$Xi			# Xi+=sigma0(X[i+1])
+ vpxor $t2,$axb,$sigma
+ vpsrld \$19,$t1,$t2
+ vpxor $t3,$sigma,$sigma
+ vpslld \$13,$t1,$t3
+ vpxor $t2,$sigma,$sigma
+	vpxor	$t3,$sigma,$sigma		# sigma1(X[i+14])
+ vpaddd $sigma,$Xi,$Xi # Xi+=sigma1(X[i+14])
+___
+ &ROUND_00_15_avx($i,@_);
+ ($Xi,$Xn)=($Xn,$Xi);
+}
+
+$code.=<<___;
+.type sha256_multi_block_avx,\@function,3
+.align 32
+sha256_multi_block_avx:
+_avx_shortcut:
+___
+$code.=<<___ if ($avx>1);
+ shr \$32,%rcx
+ cmp \$2,$num
+ jb .Lavx
+ test \$`1<<5`,%ecx
+ jnz _avx2_shortcut
+ jmp .Lavx
+.align 32
+.Lavx:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,-0x78(%rax)
+ movaps %xmm11,-0x68(%rax)
+ movaps %xmm12,-0x58(%rax)
+ movaps %xmm13,-0x48(%rax)
+ movaps %xmm14,-0x38(%rax)
+ movaps %xmm15,-0x28(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`, %rsp
+ and \$-256,%rsp
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody_avx:
+ lea K256+128(%rip),$Tbl
+ lea `$REG_SZ*16`(%rsp),%rbx
+ lea 0x80($ctx),$ctx # size optimization
+
+.Loop_grande_avx:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+___
+for($i=0;$i<4;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ test $num,$num
+ jz .Ldone_avx
+
+ vmovdqu 0x00-0x80($ctx),$A # load context
+ lea 128(%rsp),%rax
+ vmovdqu 0x20-0x80($ctx),$B
+ vmovdqu 0x40-0x80($ctx),$C
+ vmovdqu 0x60-0x80($ctx),$D
+ vmovdqu 0x80-0x80($ctx),$E
+ vmovdqu 0xa0-0x80($ctx),$F
+ vmovdqu 0xc0-0x80($ctx),$G
+ vmovdqu 0xe0-0x80($ctx),$H
+ vmovdqu .Lpbswap(%rip),$Xn
+ jmp .Loop_avx
+
+.align 32
+.Loop_avx:
+ vpxor $B,$C,$bxc # magic seed
+___
+for($i=0;$i<16;$i++) { &ROUND_00_15_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ vmovdqu `&Xi_off($i)`,$Xi
+ mov \$3,%ecx
+ jmp .Loop_16_xx_avx
+.align 32
+.Loop_16_xx_avx:
+___
+for(;$i<32;$i++) { &ROUND_16_XX_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ dec %ecx
+ jnz .Loop_16_xx_avx
+
+ mov \$1,%ecx
+ lea K256+128(%rip),$Tbl
+___
+for($i=0;$i<4;$i++) {
+ $code.=<<___;
+ cmp `4*$i`(%rbx),%ecx # examine counters
+ cmovge $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ vmovdqa (%rbx),$sigma # pull counters
+ vpxor $t1,$t1,$t1
+ vmovdqa $sigma,$Xn
+ vpcmpgtd $t1,$Xn,$Xn # mask value
+ vpaddd $Xn,$sigma,$sigma # counters--
+
+ vmovdqu 0x00-0x80($ctx),$t1
+ vpand $Xn,$A,$A
+ vmovdqu 0x20-0x80($ctx),$t2
+ vpand $Xn,$B,$B
+ vmovdqu 0x40-0x80($ctx),$t3
+ vpand $Xn,$C,$C
+ vmovdqu 0x60-0x80($ctx),$Xi
+ vpand $Xn,$D,$D
+ vpaddd $t1,$A,$A
+ vmovdqu 0x80-0x80($ctx),$t1
+ vpand $Xn,$E,$E
+ vpaddd $t2,$B,$B
+ vmovdqu 0xa0-0x80($ctx),$t2
+ vpand $Xn,$F,$F
+ vpaddd $t3,$C,$C
+ vmovdqu 0xc0-0x80($ctx),$t3
+ vpand $Xn,$G,$G
+ vpaddd $Xi,$D,$D
+ vmovdqu 0xe0-0x80($ctx),$Xi
+ vpand $Xn,$H,$H
+ vpaddd $t1,$E,$E
+ vpaddd $t2,$F,$F
+ vmovdqu $A,0x00-0x80($ctx)
+ vpaddd $t3,$G,$G
+ vmovdqu $B,0x20-0x80($ctx)
+ vpaddd $Xi,$H,$H
+ vmovdqu $C,0x40-0x80($ctx)
+ vmovdqu $D,0x60-0x80($ctx)
+ vmovdqu $E,0x80-0x80($ctx)
+ vmovdqu $F,0xa0-0x80($ctx)
+ vmovdqu $G,0xc0-0x80($ctx)
+ vmovdqu $H,0xe0-0x80($ctx)
+
+ vmovdqu $sigma,(%rbx) # save counters
+ vmovdqu .Lpbswap(%rip),$Xn
+ dec $num
+ jnz .Loop_avx
+
+ mov `$REG_SZ*17+8`(%rsp),$num
+ lea $REG_SZ($ctx),$ctx
+ lea `16*$REG_SZ/4`($inp),$inp
+ dec $num
+ jnz .Loop_grande_avx
+
+.Ldone_avx:
+	mov	`$REG_SZ*17`(%rsp),%rax		# original %rsp
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps -0xb8(%rax),%xmm6
+ movaps -0xa8(%rax),%xmm7
+ movaps -0x98(%rax),%xmm8
+ movaps -0x88(%rax),%xmm9
+ movaps -0x78(%rax),%xmm10
+ movaps -0x68(%rax),%xmm11
+ movaps -0x58(%rax),%xmm12
+ movaps -0x48(%rax),%xmm13
+ movaps -0x38(%rax),%xmm14
+ movaps -0x28(%rax),%xmm15
+___
+$code.=<<___;
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue_avx:
+ ret
+.size sha256_multi_block_avx,.-sha256_multi_block_avx
+___
+ if ($avx>1) {
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+$REG_SZ=32;
+@ptr=map("%r$_",(12..15,8..11));
+
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("%ymm$_",(8..15));
+($t1,$t2,$t3,$axb,$bxc,$Xi,$Xn,$sigma)=map("%ymm$_",(0..7));
+
+$code.=<<___;
+.type sha256_multi_block_avx2,\@function,3
+.align 32
+sha256_multi_block_avx2:
+_avx2_shortcut:
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+ movaps %xmm10,0x40(%rsp)
+ movaps %xmm11,0x50(%rsp)
+ movaps %xmm12,-0x78(%rax)
+ movaps %xmm13,-0x68(%rax)
+ movaps %xmm14,-0x58(%rax)
+ movaps %xmm15,-0x48(%rax)
+___
+$code.=<<___;
+ sub \$`$REG_SZ*18`, %rsp
+ and \$-256,%rsp
+ mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
+.Lbody_avx2:
+ lea K256+128(%rip),$Tbl
+ lea 0x80($ctx),$ctx # size optimization
+
+.Loop_grande_avx2:
+ mov $num,`$REG_SZ*17+8`(%rsp) # original $num
+ xor $num,$num
+ lea `$REG_SZ*16`(%rsp),%rbx
+___
+for($i=0;$i<8;$i++) {
+ $code.=<<___;
+ mov `16*$i+0`($inp),@ptr[$i] # input pointer
+ mov `16*$i+8`($inp),%ecx # number of blocks
+ cmp $num,%ecx
+ cmovg %ecx,$num # find maximum
+ test %ecx,%ecx
+ mov %ecx,`4*$i`(%rbx) # initialize counters
+ cmovle $Tbl,@ptr[$i] # cancel input
+___
+}
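+# Lanes that are out of work have their input pointer redirected at the
+# K256 table ("cancel input" above), so subsequent loads stay valid while
+# the vpand masking at the end of each iteration discards their output.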
+$code.=<<___;
+ vmovdqu 0x00-0x80($ctx),$A # load context
+ lea 128(%rsp),%rax
+ vmovdqu 0x20-0x80($ctx),$B
+ lea 256+128(%rsp),%rbx
+ vmovdqu 0x40-0x80($ctx),$C
+ vmovdqu 0x60-0x80($ctx),$D
+ vmovdqu 0x80-0x80($ctx),$E
+ vmovdqu 0xa0-0x80($ctx),$F
+ vmovdqu 0xc0-0x80($ctx),$G
+ vmovdqu 0xe0-0x80($ctx),$H
+ vmovdqu .Lpbswap(%rip),$Xn
+ jmp .Loop_avx2
+
+.align 32
+.Loop_avx2:
+ vpxor $B,$C,$bxc # magic seed
+___
+for($i=0;$i<16;$i++) { &ROUND_00_15_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ vmovdqu `&Xi_off($i)`,$Xi
+ mov \$3,%ecx
+ jmp .Loop_16_xx_avx2
+.align 32
+.Loop_16_xx_avx2:
+___
+for(;$i<32;$i++) { &ROUND_16_XX_avx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ dec %ecx
+ jnz .Loop_16_xx_avx2
+
+ mov \$1,%ecx
+ lea `$REG_SZ*16`(%rsp),%rbx
+ lea K256+128(%rip),$Tbl
+___
+for($i=0;$i<8;$i++) {
+ $code.=<<___;
+ cmp `4*$i`(%rbx),%ecx # examine counters
+ cmovge $Tbl,@ptr[$i] # cancel input
+___
+}
+$code.=<<___;
+ vmovdqa (%rbx),$sigma # pull counters
+ vpxor $t1,$t1,$t1
+ vmovdqa $sigma,$Xn
+ vpcmpgtd $t1,$Xn,$Xn # mask value
+ vpaddd $Xn,$sigma,$sigma # counters--
+
+ vmovdqu 0x00-0x80($ctx),$t1
+ vpand $Xn,$A,$A
+ vmovdqu 0x20-0x80($ctx),$t2
+ vpand $Xn,$B,$B
+ vmovdqu 0x40-0x80($ctx),$t3
+ vpand $Xn,$C,$C
+ vmovdqu 0x60-0x80($ctx),$Xi
+ vpand $Xn,$D,$D
+ vpaddd $t1,$A,$A
+ vmovdqu 0x80-0x80($ctx),$t1
+ vpand $Xn,$E,$E
+ vpaddd $t2,$B,$B
+ vmovdqu 0xa0-0x80($ctx),$t2
+ vpand $Xn,$F,$F
+ vpaddd $t3,$C,$C
+ vmovdqu 0xc0-0x80($ctx),$t3
+ vpand $Xn,$G,$G
+ vpaddd $Xi,$D,$D
+ vmovdqu 0xe0-0x80($ctx),$Xi
+ vpand $Xn,$H,$H
+ vpaddd $t1,$E,$E
+ vpaddd $t2,$F,$F
+ vmovdqu $A,0x00-0x80($ctx)
+ vpaddd $t3,$G,$G
+ vmovdqu $B,0x20-0x80($ctx)
+ vpaddd $Xi,$H,$H
+ vmovdqu $C,0x40-0x80($ctx)
+ vmovdqu $D,0x60-0x80($ctx)
+ vmovdqu $E,0x80-0x80($ctx)
+ vmovdqu $F,0xa0-0x80($ctx)
+ vmovdqu $G,0xc0-0x80($ctx)
+ vmovdqu $H,0xe0-0x80($ctx)
+
+ vmovdqu $sigma,(%rbx) # save counters
+ lea 256+128(%rsp),%rbx
+ vmovdqu .Lpbswap(%rip),$Xn
+ dec $num
+ jnz .Loop_avx2
+
+ #mov `$REG_SZ*17+8`(%rsp),$num
+ #lea $REG_SZ($ctx),$ctx
+ #lea `16*$REG_SZ/4`($inp),$inp
+ #dec $num
+ #jnz .Loop_grande_avx2
+
+.Ldone_avx2:
+	mov	`$REG_SZ*17`(%rsp),%rax		# original %rsp
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps -0xd8(%rax),%xmm6
+ movaps -0xc8(%rax),%xmm7
+ movaps -0xb8(%rax),%xmm8
+ movaps -0xa8(%rax),%xmm9
+ movaps -0x98(%rax),%xmm10
+ movaps -0x88(%rax),%xmm11
+ movaps -0x78(%rax),%xmm12
+ movaps -0x68(%rax),%xmm13
+ movaps -0x58(%rax),%xmm14
+ movaps -0x48(%rax),%xmm15
+___
+$code.=<<___;
+ mov -48(%rax),%r15
+ mov -40(%rax),%r14
+ mov -32(%rax),%r13
+ mov -24(%rax),%r12
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp
+.Lepilogue_avx2:
+ ret
+.size sha256_multi_block_avx2,.-sha256_multi_block_avx2
+___
+ } }}}
+$code.=<<___;
+.align 256
+K256:
+___
+sub TABLE {
+ foreach (@_) {
+ $code.=<<___;
+ .long $_,$_,$_,$_
+ .long $_,$_,$_,$_
+___
+ }
+}
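+# Each 32-bit constant is emitted eight times so that one aligned vector
+# load broadcasts K[i] to all lanes; the table is addressed as K256+128
+# (see the lea above), presumably so that round displacements fit in a
+# signed byte. A hypothetical round reference would look like:
+#	vpaddd	`$i*32-128`($Tbl),$Xi,$Xi	# X[i]+K[i] for all lanes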
+&TABLE( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
+ 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
+ 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
+ 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
+ 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
+ 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
+ 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
+ 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
+ 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
+ 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
+ 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
+ 0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
+ 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
+ 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
+ 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
+ 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
+$code.=<<___;
+.Lpbswap:
+ .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
+ .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
+K256_shaext:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+ .asciz "SHA256 multi-block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<.Lbody
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lin_prologue
+
+ mov `16*17`(%rax),%rax # pull saved stack pointer
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+
+ lea -24-10*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+___
+$code.=<<___ if ($avx>1);
+.type avx2_handler,\@abi-omnipotent
+.align 16
+avx2_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<body label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+	mov	`32*17`(%rax),%rax		# pull saved stack pointer
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+ lea -56-10*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+ jmp .Lin_prologue
+.size avx2_handler,.-avx2_handler
+___
+$code.=<<___;
+.section .pdata
+.align 4
+ .rva .LSEH_begin_sha256_multi_block
+ .rva .LSEH_end_sha256_multi_block
+ .rva .LSEH_info_sha256_multi_block
+ .rva .LSEH_begin_sha256_multi_block_shaext
+ .rva .LSEH_end_sha256_multi_block_shaext
+ .rva .LSEH_info_sha256_multi_block_shaext
+___
+$code.=<<___ if ($avx);
+ .rva .LSEH_begin_sha256_multi_block_avx
+ .rva .LSEH_end_sha256_multi_block_avx
+ .rva .LSEH_info_sha256_multi_block_avx
+___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_sha256_multi_block_avx2
+ .rva .LSEH_end_sha256_multi_block_avx2
+ .rva .LSEH_info_sha256_multi_block_avx2
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_sha256_multi_block:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbody,.Lepilogue # HandlerData[]
+.LSEH_info_sha256_multi_block_shaext:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbody_shaext,.Lepilogue_shaext # HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_sha256_multi_block_avx:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbody_avx,.Lepilogue_avx # HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_sha256_multi_block_avx2:
+ .byte 9,0,0,0
+ .rva avx2_handler
+ .rva .Lbody_avx2,.Lepilogue_avx2 # HandlerData[]
+___
+}
+####################################################################
+
+sub rex {
+ local *opcode=shift;
+ my ($dst,$src)=@_;
+ my $rex=0;
+
+ $rex|=0x04 if ($dst>=8);
+ $rex|=0x01 if ($src>=8);
+ unshift @opcode,$rex|0x40 if ($rex);
+}
+
+sub sha256op38 {
+ my $instr = shift;
+ my %opcodelet = (
+ "sha256rnds2" => 0xcb,
+ "sha256msg1" => 0xcc,
+ "sha256msg2" => 0xcd );
+
+  if (defined($opcodelet{$instr}) && $_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ my @opcode=(0x0f,0x38);
+ rex(\@opcode,$2,$1);
+ push @opcode,$opcodelet{$instr};
+ push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ } else {
+    return $instr."\t".$_[0];
+ }
+}
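+# For example, a worked instance of the encoder above:
+#	sha256rnds2 %xmm0,%xmm1		# $1=0 (src), $2=1 (dst), no REX
+# is emitted as
+#	.byte	0x0f,0x38,0xcb,0xc8	# ModR/M 0xc8 = 0xc0|0|(1<<3)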
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval($1)/ge;
+
+ s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo or
+
+ s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go or
+ s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go or
+ s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha512-586.pl b/openssl/crypto/sha/asm/sha512-586.pl
index 7eab6a5b8..e96ec0031 100644
--- a/openssl/crypto/sha/asm/sha512-586.pl
+++ b/openssl/crypto/sha/asm/sha512-586.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -9,17 +9,31 @@
#
# SHA512 block transform for x86. September 2007.
#
+# May 2013.
+#
+# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
+#
# Performance in clock cycles per processed byte (less is better):
#
-# Pentium PIII P4 AMD K8 Core2
-# gcc 100 75 116 54 66
-# icc 97 77 95 55 57
-# x86 asm 61 56 82 36 40
-# SSE2 asm - - 38 24 20
-# x86_64 asm(*) - - 30 10.0 10.5
+# gcc icc x86 asm SIMD(*) x86_64(**)
+# Pentium 100 97 61 - -
+# PIII 75 77 56 - -
+# P4 116 95 82 34.6 30.8
+# AMD K8 54 55 36 20.7 9.57
+# Core2 66 57 40 15.9 9.97
+# Westmere 70 - 38 12.2 9.58
+# Sandy Bridge 58 - 35 11.9 11.2
+# Ivy Bridge 50 - 33 11.5 8.17
+# Haswell 46 - 29 11.3 7.66
+# Bulldozer 121 - 50 14.0 13.5
+# VIA Nano 91 - 52 33 14.7
+# Atom 126 - 68 48(***) 14.7
+# Silvermont 97 - 58 42(***) 17.5
#
-# (*) x86_64 assembler performance is presented for reference
-# purposes.
+# (*) whichever best applicable.
+# (**) x86_64 assembler performance is presented for reference
+# purposes, the results are for integer-only code.
+# (***)	paddq is incredibly slow on Atom.
#
# IALU code-path is optimized for elder Pentiums. On vanilla Pentium
# performance improvement over compiler generated code reaches ~60%,
@@ -66,72 +80,77 @@ $Hsse2=&QWP(56,"esp");
$A="mm0"; # B-D and
$E="mm4"; # F-H are commonly loaded to respectively mm1-mm3 and
# mm5-mm7, but it's done on on-demand basis...
+$BxC="mm2"; # ... except for B^C
sub BODY_00_15_sse2 {
- my $prefetch=shift;
+ my $phase=shift;
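+	# $phase (inferred from the conditionals below): 0 for rounds
+	# 0..14, where f and g are still fetched from memory; 1 for
+	# round 15, which starts prefetching the schedule; 2 for rounds
+	# 16..79, where the a and b^c accumulators rotate instead of
+	# being copied.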
- &movq ("mm5",$Fsse2); # load f
- &movq ("mm6",$Gsse2); # load g
- &movq ("mm7",$Hsse2); # load h
+ #&movq ("mm5",$Fsse2); # load f
+ #&movq ("mm6",$Gsse2); # load g
&movq ("mm1",$E); # %mm1 is sliding right
- &movq ("mm2",$E); # %mm2 is sliding left
+ &pxor ("mm5","mm6"); # f^=g
&psrlq ("mm1",14);
- &movq ($Esse2,$E); # modulo-scheduled save e
- &psllq ("mm2",23);
+ &movq ($Esse2,$E); # modulo-scheduled save e
+ &pand ("mm5",$E); # f&=e
+ &psllq ($E,23); # $E is sliding left
+ &movq ($A,"mm3") if ($phase<2);
+	&movq	(&QWP(8*9,"esp"),"mm7");	# save X[i]
&movq ("mm3","mm1"); # %mm3 is T1
- &psrlq ("mm1",4);
- &pxor ("mm3","mm2");
- &psllq ("mm2",23);
+ &psrlq ("mm1",4);
+ &pxor ("mm5","mm6"); # Ch(e,f,g)
+ &pxor ("mm3",$E);
+ &psllq ($E,23);
&pxor ("mm3","mm1");
- &psrlq ("mm1",23);
- &pxor ("mm3","mm2");
- &psllq ("mm2",4);
+ &movq ($Asse2,$A); # modulo-scheduled save a
+ &paddq ("mm7","mm5"); # X[i]+=Ch(e,f,g)
+ &pxor ("mm3",$E);
+ &psrlq ("mm1",23);
+ &paddq ("mm7",$Hsse2); # X[i]+=h
&pxor ("mm3","mm1");
- &paddq ("mm7",QWP(0,$K512)); # h+=K512[i]
- &pxor ("mm3","mm2"); # T1=Sigma1_512(e)
-
- &pxor ("mm5","mm6"); # f^=g
+ &psllq ($E,4);
+ &paddq ("mm7",QWP(0,$K512)); # X[i]+=K512[i]
+ &pxor ("mm3",$E); # T1=Sigma1_512(e)
+
+ &movq ($E,$Dsse2); # e = load d, e in next round
+ &paddq ("mm3","mm7"); # T1+=X[i]
+ &movq ("mm5",$A); # %mm5 is sliding right
+ &psrlq ("mm5",28);
+ &paddq ($E,"mm3"); # d += T1
+ &movq ("mm6",$A); # %mm6 is sliding left
+ &movq ("mm7","mm5");
+ &psllq ("mm6",25);
&movq ("mm1",$Bsse2); # load b
- &pand ("mm5",$E); # f&=e
- &movq ("mm2",$Csse2); # load c
- &pxor ("mm5","mm6"); # f^=g
- &movq ($E,$Dsse2); # e = load d
- &paddq ("mm3","mm5"); # T1+=Ch(e,f,g)
- &movq (&QWP(0,"esp"),$A); # modulo-scheduled save a
- &paddq ("mm3","mm7"); # T1+=h
-
- &movq ("mm5",$A); # %mm5 is sliding right
- &movq ("mm6",$A); # %mm6 is sliding left
- &paddq ("mm3",&QWP(8*9,"esp")); # T1+=X[0]
- &psrlq ("mm5",28);
- &paddq ($E,"mm3"); # e += T1
- &psllq ("mm6",25);
- &movq ("mm7","mm5"); # %mm7 is T2
- &psrlq ("mm5",6);
- &pxor ("mm7","mm6");
- &psllq ("mm6",5);
- &pxor ("mm7","mm5");
- &psrlq ("mm5",5);
- &pxor ("mm7","mm6");
- &psllq ("mm6",6);
- &pxor ("mm7","mm5");
+ &psrlq ("mm5",6);
+ &pxor ("mm7","mm6");
&sub ("esp",8);
- &pxor ("mm7","mm6"); # T2=Sigma0_512(a)
-
- &movq ("mm5",$A); # %mm5=a
- &por ($A,"mm2"); # a=a|c
- &movq ("mm6",&QWP(8*(9+16-14),"esp")) if ($prefetch);
- &pand ("mm5","mm2"); # %mm5=a&c
- &pand ($A,"mm1"); # a=(a|c)&b
- &movq ("mm2",&QWP(8*(9+16-1),"esp")) if ($prefetch);
- &por ("mm5",$A); # %mm5=(a&c)|((a|c)&b)
- &paddq ("mm7","mm5"); # T2+=Maj(a,b,c)
- &movq ($A,"mm3"); # a=T1
-
- &mov (&LB("edx"),&BP(0,$K512));
- &paddq ($A,"mm7"); # a+=T2
- &add ($K512,8);
+ &psllq ("mm6",5);
+ &pxor ("mm7","mm5");
+ &pxor ($A,"mm1"); # a^b, b^c in next round
+ &psrlq ("mm5",5);
+ &pxor ("mm7","mm6");
+ &pand ($BxC,$A); # (b^c)&(a^b)
+ &psllq ("mm6",6);
+ &pxor ("mm7","mm5");
+ &pxor ($BxC,"mm1"); # [h=]Maj(a,b,c)
+ &pxor ("mm6","mm7"); # Sigma0_512(a)
+ &movq ("mm7",&QWP(8*(9+16-1),"esp")) if ($phase!=0); # pre-fetch
+ &movq ("mm5",$Fsse2) if ($phase==0); # load f
+
+ if ($phase>1) {
+ &paddq ($BxC,"mm6"); # h+=Sigma0(a)
+ &add ($K512,8);
+ #&paddq ($BxC,"mm3"); # h+=T1
+
+ ($A,$BxC) = ($BxC,$A); # rotate registers
+ } else {
+ &paddq ("mm3",$BxC); # T1+=Maj(a,b,c)
+ &movq ($BxC,$A);
+ &add ($K512,8);
+ &paddq ("mm3","mm6"); # T1+=Sigma0(a)
+ &movq ("mm6",$Gsse2) if ($phase==0); # load g
+ #&movq ($A,"mm3"); # h=T1
+ }
}
sub BODY_00_15_x86 {
@@ -284,110 +303,357 @@ sub BODY_00_15_x86 {
if ($sse2) {
&picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
- &bt (&DWP(0,"edx"),26);
- &jnc (&label("loop_x86"));
+ &mov ("ecx",&DWP(0,"edx"));
+ &test ("ecx",1<<26);
+ &jz (&label("loop_x86"));
+
+ &mov ("edx",&DWP(4,"edx"));
# load ctx->h[0-7]
&movq ($A,&QWP(0,"esi"));
+ &and ("ecx",1<<24); # XMM registers availability
&movq ("mm1",&QWP(8,"esi"));
- &movq ("mm2",&QWP(16,"esi"));
+ &and ("edx",1<<9); # SSSE3 bit
+ &movq ($BxC,&QWP(16,"esi"));
+ &or ("ecx","edx");
&movq ("mm3",&QWP(24,"esi"));
&movq ($E,&QWP(32,"esi"));
&movq ("mm5",&QWP(40,"esi"));
&movq ("mm6",&QWP(48,"esi"));
&movq ("mm7",&QWP(56,"esi"));
+ &cmp ("ecx",1<<24|1<<9);
+ &je (&label("SSSE3"));
&sub ("esp",8*10);
+ &jmp (&label("loop_sse2"));
&set_label("loop_sse2",16);
- # &movq ($Asse2,$A);
+ #&movq ($Asse2,$A);
&movq ($Bsse2,"mm1");
- &movq ($Csse2,"mm2");
+ &movq ($Csse2,$BxC);
&movq ($Dsse2,"mm3");
- # &movq ($Esse2,$E);
+ #&movq ($Esse2,$E);
&movq ($Fsse2,"mm5");
&movq ($Gsse2,"mm6");
+ &pxor ($BxC,"mm1"); # magic
&movq ($Hsse2,"mm7");
+ &movq ("mm3",$A); # magic
- &mov ("ecx",&DWP(0,"edi"));
- &mov ("edx",&DWP(4,"edi"));
+ &mov ("eax",&DWP(0,"edi"));
+ &mov ("ebx",&DWP(4,"edi"));
&add ("edi",8);
- &bswap ("ecx");
- &bswap ("edx");
- &mov (&DWP(8*9+4,"esp"),"ecx");
- &mov (&DWP(8*9+0,"esp"),"edx");
+ &mov ("edx",15); # counter
+ &bswap ("eax");
+ &bswap ("ebx");
+ &jmp (&label("00_14_sse2"));
&set_label("00_14_sse2",16);
+ &movd ("mm1","eax");
&mov ("eax",&DWP(0,"edi"));
+ &movd ("mm7","ebx");
&mov ("ebx",&DWP(4,"edi"));
&add ("edi",8);
&bswap ("eax");
&bswap ("ebx");
- &mov (&DWP(8*8+4,"esp"),"eax");
- &mov (&DWP(8*8+0,"esp"),"ebx");
+ &punpckldq("mm7","mm1");
&BODY_00_15_sse2();
- &cmp (&LB("edx"),0x35);
- &jne (&label("00_14_sse2"));
+ &dec ("edx");
+ &jnz (&label("00_14_sse2"));
+
+ &movd ("mm1","eax");
+ &movd ("mm7","ebx");
+ &punpckldq("mm7","mm1");
&BODY_00_15_sse2(1);
+ &pxor ($A,$A); # A is in %mm3
+ &mov ("edx",32); # counter
+ &jmp (&label("16_79_sse2"));
+
&set_label("16_79_sse2",16);
- #&movq ("mm2",&QWP(8*(9+16-1),"esp")); #prefetched in BODY_00_15
- #&movq ("mm6",&QWP(8*(9+16-14),"esp"));
- &movq ("mm1","mm2");
+ for ($j=0;$j<2;$j++) { # 2x unroll
+ #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15
+ &movq ("mm5",&QWP(8*(9+16-14),"esp"));
+ &movq ("mm1","mm7");
+ &psrlq ("mm7",1);
+ &movq ("mm6","mm5");
+ &psrlq ("mm5",6);
+ &psllq ("mm1",56);
+ &paddq ($A,"mm3"); # from BODY_00_15
+ &movq ("mm3","mm7");
+ &psrlq ("mm7",7-1);
+ &pxor ("mm3","mm1");
+ &psllq ("mm1",63-56);
+ &pxor ("mm3","mm7");
+ &psrlq ("mm7",8-7);
+ &pxor ("mm3","mm1");
+ &movq ("mm1","mm5");
+ &psrlq ("mm5",19-6);
+ &pxor ("mm7","mm3"); # sigma0
+
+ &psllq ("mm6",3);
+ &pxor ("mm1","mm5");
+ &paddq ("mm7",&QWP(8*(9+16),"esp"));
+ &pxor ("mm1","mm6");
+ &psrlq ("mm5",61-19);
+ &paddq ("mm7",&QWP(8*(9+16-9),"esp"));
+ &pxor ("mm1","mm5");
+ &psllq ("mm6",45-3);
+ &movq ("mm5",$Fsse2); # load f
+ &pxor ("mm1","mm6"); # sigma1
+ &movq ("mm6",$Gsse2); # load g
- &psrlq ("mm2",1);
- &movq ("mm7","mm6");
- &psrlq ("mm6",6);
- &movq ("mm3","mm2");
+ &paddq ("mm7","mm1"); # X[i]
+ #&movq (&QWP(8*9,"esp"),"mm7"); # moved to BODY_00_15
- &psrlq ("mm2",7-1);
- &movq ("mm5","mm6");
- &psrlq ("mm6",19-6);
- &pxor ("mm3","mm2");
+ &BODY_00_15_sse2(2);
+ }
+ &dec ("edx");
+ &jnz (&label("16_79_sse2"));
- &psrlq ("mm2",8-7);
- &pxor ("mm5","mm6");
- &psrlq ("mm6",61-19);
- &pxor ("mm3","mm2");
+ #&movq ($A,$Asse2);
+ &paddq ($A,"mm3"); # from BODY_00_15
+ &movq ("mm1",$Bsse2);
+ #&movq ($BxC,$Csse2);
+ &movq ("mm3",$Dsse2);
+ #&movq ($E,$Esse2);
+ &movq ("mm5",$Fsse2);
+ &movq ("mm6",$Gsse2);
+ &movq ("mm7",$Hsse2);
- &movq ("mm2",&QWP(8*(9+16),"esp"));
+ &pxor ($BxC,"mm1"); # de-magic
+ &paddq ($A,&QWP(0,"esi"));
+ &paddq ("mm1",&QWP(8,"esi"));
+ &paddq ($BxC,&QWP(16,"esi"));
+ &paddq ("mm3",&QWP(24,"esi"));
+ &paddq ($E,&QWP(32,"esi"));
+ &paddq ("mm5",&QWP(40,"esi"));
+ &paddq ("mm6",&QWP(48,"esi"));
+ &paddq ("mm7",&QWP(56,"esi"));
- &psllq ("mm1",56);
- &pxor ("mm5","mm6");
- &psllq ("mm7",3);
- &pxor ("mm3","mm1");
+ &mov ("eax",8*80);
+ &movq (&QWP(0,"esi"),$A);
+ &movq (&QWP(8,"esi"),"mm1");
+ &movq (&QWP(16,"esi"),$BxC);
+ &movq (&QWP(24,"esi"),"mm3");
+ &movq (&QWP(32,"esi"),$E);
+ &movq (&QWP(40,"esi"),"mm5");
+ &movq (&QWP(48,"esi"),"mm6");
+ &movq (&QWP(56,"esi"),"mm7");
- &paddq ("mm2",&QWP(8*(9+16-9),"esp"));
+ &lea ("esp",&DWP(0,"esp","eax")); # destroy frame
+ &sub ($K512,"eax"); # rewind K
- &psllq ("mm1",63-56);
- &pxor ("mm5","mm7");
- &psllq ("mm7",45-3);
- &pxor ("mm3","mm1");
- &pxor ("mm5","mm7");
+ &cmp ("edi",&DWP(8*10+8,"esp")); # are we done yet?
+ &jb (&label("loop_sse2"));
- &paddq ("mm3","mm5");
- &paddq ("mm3","mm2");
- &movq (&QWP(8*9,"esp"),"mm3");
+ &mov ("esp",&DWP(8*10+12,"esp")); # restore sp
+ &emms ();
+&function_end_A();
- &BODY_00_15_sse2(1);
+&set_label("SSSE3",32);
+{ my ($cnt,$frame)=("ecx","edx");
+ my @X=map("xmm$_",(0..7));
+ my $j;
+ my $i=0;
+
+ &lea ($frame,&DWP(-64,"esp"));
+ &sub ("esp",256);
+
+ # fixed stack frame layout
+ #
+ # +0 A B C D E F G H # backing store
+ # +64 X[0]+K[i] .. X[15]+K[i] # XMM->MM xfer area
+ # +192 # XMM off-load ring buffer
+ # +256 # saved parameters
+
+ &movdqa (@X[1],&QWP(80*8,$K512)); # byte swap mask
+ &movdqu (@X[0],&QWP(0,"edi"));
+ &pshufb (@X[0],@X[1]);
+ for ($j=0;$j<8;$j++) {
+ &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
+ &movdqa (@X[3],&QWP(16*($j%8),$K512));
+ &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
+ &movdqu (@X[1],&QWP(16*($j+1),"edi")) if ($j<7); # next input
+ &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);# restore @X[0]
+ &paddq (@X[3],@X[0]);
+ &pshufb (@X[1],@X[2]) if ($j<7);
+ &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]); # xfer X[i]+K[i]
+
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ #&jmp (&label("loop_ssse3"));
+ &nop ();
- &cmp (&LB("edx"),0x17);
- &jne (&label("16_79_sse2"));
+&set_label("loop_ssse3",32);
+ &movdqa (@X[2],&QWP(16*(($j+1)%4),$frame)); # pre-restore @X[1]
+ &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]); # off-load @X[3]
+ &lea ($K512,&DWP(16*8,$K512));
+
+ #&movq ($Asse2,$A); # off-load A-H
+ &movq ($Bsse2,"mm1");
+ &mov ("ebx","edi");
+ &movq ($Csse2,$BxC);
+ &lea ("edi",&DWP(128,"edi")); # advance input
+ &movq ($Dsse2,"mm3");
+ &cmp ("edi","eax");
+ #&movq ($Esse2,$E);
+ &movq ($Fsse2,"mm5");
+ &cmovb ("ebx","edi");
+ &movq ($Gsse2,"mm6");
+ &mov ("ecx",4); # loop counter
+ &pxor ($BxC,"mm1"); # magic
+ &movq ($Hsse2,"mm7");
+ &pxor ("mm3","mm3"); # magic
+
+ &jmp (&label("00_47_ssse3"));
+
+sub BODY_00_15_ssse3 { # "phase-less" copy of BODY_00_15_sse2
+ (
+ '&movq ("mm1",$E)', # %mm1 is sliding right
+ '&movq ("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
+ '&pxor ("mm5","mm6")', # f^=g
+ '&psrlq ("mm1",14)',
+ '&movq (&QWP(8*($i+4)%64,"esp"),$E)', # modulo-scheduled save e
+ '&pand ("mm5",$E)', # f&=e
+ '&psllq ($E,23)', # $E is sliding left
+ '&paddq ($A,"mm3")', # [h+=Maj(a,b,c)]
+ '&movq ("mm3","mm1")', # %mm3 is T1
+ '&psrlq("mm1",4)',
+ '&pxor ("mm5","mm6")', # Ch(e,f,g)
+ '&pxor ("mm3",$E)',
+ '&psllq($E,23)',
+ '&pxor ("mm3","mm1")',
+ '&movq (&QWP(8*$i%64,"esp"),$A)', # modulo-scheduled save a
+ '&paddq("mm7","mm5")', # X[i]+=Ch(e,f,g)
+ '&pxor ("mm3",$E)',
+ '&psrlq("mm1",23)',
+ '&paddq("mm7",&QWP(8*($i+7)%64,"esp"))', # X[i]+=h
+ '&pxor ("mm3","mm1")',
+ '&psllq($E,4)',
+ '&pxor ("mm3",$E)', # T1=Sigma1_512(e)
+
+ '&movq ($E,&QWP(8*($i+3)%64,"esp"))', # e = load d, e in next round
+ '&paddq ("mm3","mm7")', # T1+=X[i]
+ '&movq ("mm5",$A)', # %mm5 is sliding right
+ '&psrlq("mm5",28)',
+ '&paddq ($E,"mm3")', # d += T1
+ '&movq ("mm6",$A)', # %mm6 is sliding left
+ '&movq ("mm7","mm5")',
+ '&psllq("mm6",25)',
+ '&movq ("mm1",&QWP(8*($i+1)%64,"esp"))', # load b
+ '&psrlq("mm5",6)',
+ '&pxor ("mm7","mm6")',
+ '&psllq("mm6",5)',
+ '&pxor ("mm7","mm5")',
+ '&pxor ($A,"mm1")', # a^b, b^c in next round
+ '&psrlq("mm5",5)',
+ '&pxor ("mm7","mm6")',
+ '&pand ($BxC,$A)', # (b^c)&(a^b)
+ '&psllq("mm6",6)',
+ '&pxor ("mm7","mm5")',
+ '&pxor ($BxC,"mm1")', # [h=]Maj(a,b,c)
+ '&pxor ("mm6","mm7")', # Sigma0_512(a)
+ '&movq ("mm5",&QWP(8*($i+5-1)%64,"esp"))', # pre-load f
+ '&paddq ($BxC,"mm6")', # h+=Sigma0(a)
+ '&movq ("mm6",&QWP(8*($i+6-1)%64,"esp"))', # pre-load g
+
+ '($A,$BxC) = ($BxC,$A); $i--;'
+ );
+}
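+# The strings above are eval'ed one at a time so that the scalar rounds
+# can be interleaved with the SIMD message schedule, as done below:
+#	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+#	eval(shift(@insns));		# squeeze in between vector ops
+#	foreach(@insns) { eval; }	# drain the rest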
- # &movq ($A,$Asse2);
+&set_label("00_47_ssse3",32);
+
+ for(;$j<16;$j++) {
+ my ($t0,$t2,$t1)=@X[2..4];
+ my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+ &movdqa ($t2,@X[5]);
+ &movdqa (@X[1],$t0); # restore @X[1]
+ &palignr ($t0,@X[0],8); # X[1..2]
+ &movdqa (&QWP(16*($j%4),$frame),@X[4]); # off-load @X[4]
+ &palignr ($t2,@X[4],8); # X[9..10]
+
+ &movdqa ($t1,$t0);
+ &psrlq ($t0,7);
+ &paddq (@X[0],$t2); # X[0..1] += X[9..10]
+ &movdqa ($t2,$t1);
+ &psrlq ($t1,1);
+ &psllq ($t2,64-8);
+ &pxor ($t0,$t1);
+ &psrlq ($t1,8-1);
+ &pxor ($t0,$t2);
+ &psllq ($t2,8-1);
+ &pxor ($t0,$t1);
+ &movdqa ($t1,@X[7]);
+ &pxor ($t0,$t2); # sigma0(X[1..2])
+ &movdqa ($t2,@X[7]);
+ &psrlq ($t1,6);
+ &paddq (@X[0],$t0); # X[0..1] += sigma0(X[1..2])
+
+ &movdqa ($t0,@X[7]);
+ &psrlq ($t2,19);
+ &psllq ($t0,64-61);
+ &pxor ($t1,$t2);
+ &psrlq ($t2,61-19);
+ &pxor ($t1,$t0);
+ &psllq ($t0,61-19);
+ &pxor ($t1,$t2);
+ &movdqa ($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1]
+	&pxor	($t1,$t0);			# sigma1(X[14..15])
+ &movdqa ($t0,&QWP(16*($j%8),$K512));
+ eval(shift(@insns));
+	&paddq	(@X[0],$t1);			# X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddq ($t0,@X[0]);
+ foreach(@insns) { eval; }
+ &movdqa (&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i]
+
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &lea ($K512,&DWP(16*8,$K512));
+ &dec ("ecx");
+ &jnz (&label("00_47_ssse3"));
+
+ &movdqa (@X[1],&QWP(0,$K512)); # byte swap mask
+ &lea ($K512,&DWP(-80*8,$K512)); # rewind
+ &movdqu (@X[0],&QWP(0,"ebx"));
+ &pshufb (@X[0],@X[1]);
+
+ for ($j=0;$j<8;$j++) { # load next or same block
+ my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+ &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
+ &movdqa (@X[3],&QWP(16*($j%8),$K512));
+ &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
+ &movdqu (@X[1],&QWP(16*($j+1),"ebx")) if ($j<7); # next input
+ &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);# restore @X[0]
+ &paddq (@X[3],@X[0]);
+ &pshufb (@X[1],@X[2]) if ($j<7);
+ foreach(@insns) { eval; }
+ &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i]
+
+ push(@X,shift(@X)); # rotate(@X)
+ }
+
+ #&movq ($A,$Asse2); # load A-H
&movq ("mm1",$Bsse2);
- &movq ("mm2",$Csse2);
+ &paddq ($A,"mm3"); # from BODY_00_15
+ #&movq ($BxC,$Csse2);
&movq ("mm3",$Dsse2);
- # &movq ($E,$Esse2);
- &movq ("mm5",$Fsse2);
- &movq ("mm6",$Gsse2);
+ #&movq ($E,$Esse2);
+ #&movq ("mm5",$Fsse2);
+ #&movq ("mm6",$Gsse2);
&movq ("mm7",$Hsse2);
+ &pxor ($BxC,"mm1"); # de-magic
&paddq ($A,&QWP(0,"esi"));
&paddq ("mm1",&QWP(8,"esi"));
- &paddq ("mm2",&QWP(16,"esi"));
+ &paddq ($BxC,&QWP(16,"esi"));
&paddq ("mm3",&QWP(24,"esi"));
&paddq ($E,&QWP(32,"esi"));
&paddq ("mm5",&QWP(40,"esi"));
@@ -396,21 +662,19 @@ if ($sse2) {
&movq (&QWP(0,"esi"),$A);
&movq (&QWP(8,"esi"),"mm1");
- &movq (&QWP(16,"esi"),"mm2");
+ &movq (&QWP(16,"esi"),$BxC);
&movq (&QWP(24,"esi"),"mm3");
&movq (&QWP(32,"esi"),$E);
&movq (&QWP(40,"esi"),"mm5");
&movq (&QWP(48,"esi"),"mm6");
&movq (&QWP(56,"esi"),"mm7");
- &add ("esp",8*80); # destroy frame
- &sub ($K512,8*80); # rewind K
-
- &cmp ("edi",&DWP(8*10+8,"esp")); # are we done yet?
- &jb (&label("loop_sse2"));
+ &cmp ("edi","eax") # are we done yet?
+ &jb (&label("loop_ssse3"));
+ &mov ("esp",&DWP(64+12,$frame)); # restore sp
&emms ();
- &mov ("esp",&DWP(8*10+12,"esp")); # restore sp
+}
&function_end_A();
}
&set_label("loop_x86",16);
@@ -638,6 +902,9 @@ if ($sse2) {
&data_word(0xfc657e2a,0x597f299c); # u64
&data_word(0x3ad6faec,0x5fcb6fab); # u64
&data_word(0x4a475817,0x6c44198c); # u64
+
+ &data_word(0x04050607,0x00010203); # byte swap
+ &data_word(0x0c0d0e0f,0x08090a0b); # mask
&function_end_B("sha512_block_data_order");
&asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
diff --git a/openssl/crypto/sha/asm/sha512-armv4.pl b/openssl/crypto/sha/asm/sha512-armv4.pl
index 7faf37b14..fb7dc506a 100644
--- a/openssl/crypto/sha/asm/sha512-armv4.pl
+++ b/openssl/crypto/sha/asm/sha512-armv4.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -26,7 +26,24 @@
# March 2011.
#
# Add NEON implementation. On Cortex A8 it was measured to process
-# one byte in 25.5 cycles or 47% faster than integer-only code.
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is a disappointing result.
+# Technical writers asserted that the 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, and for NEON-only sequences IPC(*) was found to
+# be limited to 1:-( 0.33 and 0.66 were measured for sequences with
+# ILPs(*) of 1 and 2 respectively. This in turn means that one can
+# find oneself striving, as I did here, just to achieve an IPC
+# adequate to the one delivered by Cortex A8 [for reference, it's
+# 0.5 for ILP of 1, and 1 for higher ILPs].
+#
+# (*) ILP, instruction-level parallelism, how many instructions
+# *can* execute at the same time. IPC, instructions per cycle,
+# indicates how many instructions actually execute.
# Byte order [in]dependence. =========================================
#
@@ -220,16 +237,20 @@ WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
+#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-sha512_block_data_order
.skip 32-4
+#else
+.skip 32
+#endif
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
sub r3,pc,#8 @ sha512_block_data_order
add $len,$inp,$len,lsl#7 @ len to point at the end of inp
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
tst r12,#1
@@ -457,40 +478,40 @@ $code.=<<___ if ($i<16 || $i&1);
vld1.64 {@X[$i%16]},[$inp]! @ handles unaligned
#endif
vshr.u64 $t1,$e,#@Sigma1[1]
+#if $i>0
+ vadd.i64 $a,$Maj @ h+=Maj from the past
+#endif
vshr.u64 $t2,$e,#@Sigma1[2]
___
$code.=<<___;
vld1.64 {$K},[$Ktbl,:64]! @ K[i++]
vsli.64 $t0,$e,#`64-@Sigma1[0]`
vsli.64 $t1,$e,#`64-@Sigma1[1]`
+ vmov $Ch,$e
vsli.64 $t2,$e,#`64-@Sigma1[2]`
#if $i<16 && defined(__ARMEL__)
vrev64.8 @X[$i],@X[$i]
#endif
- vadd.i64 $T1,$K,$h
- veor $Ch,$f,$g
- veor $t0,$t1
- vand $Ch,$e
- veor $t0,$t2 @ Sigma1(e)
- veor $Ch,$g @ Ch(e,f,g)
- vadd.i64 $T1,$t0
+ veor $t1,$t0
+ vbsl $Ch,$f,$g @ Ch(e,f,g)
vshr.u64 $t0,$a,#@Sigma0[0]
- vadd.i64 $T1,$Ch
+ veor $t2,$t1 @ Sigma1(e)
+ vadd.i64 $T1,$Ch,$h
vshr.u64 $t1,$a,#@Sigma0[1]
- vshr.u64 $t2,$a,#@Sigma0[2]
vsli.64 $t0,$a,#`64-@Sigma0[0]`
+ vadd.i64 $T1,$t2
+ vshr.u64 $t2,$a,#@Sigma0[2]
+ vadd.i64 $K,@X[$i%16]
vsli.64 $t1,$a,#`64-@Sigma0[1]`
+ veor $Maj,$a,$b
vsli.64 $t2,$a,#`64-@Sigma0[2]`
- vadd.i64 $T1,@X[$i%16]
- vorr $Maj,$a,$c
- vand $Ch,$a,$c
veor $h,$t0,$t1
- vand $Maj,$b
+ vadd.i64 $T1,$K
+ vbsl $Maj,$c,$b @ Maj(a,b,c)
veor $h,$t2 @ Sigma0(a)
- vorr $Maj,$Ch @ Maj(a,b,c)
- vadd.i64 $h,$T1
vadd.i64 $d,$T1
- vadd.i64 $h,$Maj
+ vadd.i64 $Maj,$T1
+ @ vadd.i64 $h,$Maj
___
}
@@ -508,6 +529,7 @@ $i /= 2;
$code.=<<___;
vshr.u64 $t0,@X[($i+7)%8],#@sigma1[0]
vshr.u64 $t1,@X[($i+7)%8],#@sigma1[1]
+ vadd.i64 @_[0],d30 @ h+=Maj from the past
vshr.u64 $s1,@X[($i+7)%8],#@sigma1[2]
vsli.64 $t0,@X[($i+7)%8],#`64-@sigma1[0]`
vext.8 $s0,@X[$i%8],@X[($i+1)%8],#8 @ X[i+1]
@@ -533,7 +555,8 @@ ___
}
$code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
.fpu neon
.align 4
@@ -554,6 +577,7 @@ for(;$i<32;$i++) { &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
bne .L16_79_neon
+ vadd.i64 $A,d30 @ h+=Maj from the past
vldmia $ctx,{d24-d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
@@ -565,7 +589,7 @@ $code.=<<___;
bne .Loop_neon
vldmia sp!,{d8-d15} @ epilogue
- bx lr
+ ret @ bx lr
#endif
___
}
@@ -573,10 +597,13 @@ $code.=<<___;
.size sha512_block_data_order,.-sha512_block_data_order
.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
+#if __ARM_MAX_ARCH__>=7
.comm OPENSSL_armcap_P,4,4
+#endif
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx lr/gm;
print $code;
close STDOUT; # enforce flush
diff --git a/openssl/crypto/sha/asm/sha512-armv8.pl b/openssl/crypto/sha/asm/sha512-armv8.pl
new file mode 100755
index 000000000..bd7a0a566
--- /dev/null
+++ b/openssl/crypto/sha/asm/sha512-armv8.pl
@@ -0,0 +1,420 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA256/512 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+# SHA256-hw SHA256(*) SHA512
+# Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+# Cortex-A53 2.38 15.6 (+110%) 10.1 (+190%(***))
+# Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+#
+# (*) Software SHA256 results are of lesser relevance, presented
+# mostly for informational purposes.
+# (**) The result is a trade-off: it's possible to improve it by
+# 10% (or by 1 cycle per round), but at the cost of 20% loss
+# on Cortex-A53 (or by 4 cycles per round).
+# (***)	Super-impressive coefficients over gcc-generated code are
+#	an indication of some compiler "pathology"; most notably,
+#	code generated with -mgeneral-regs-only is significantly
+#	faster and lags behind assembly by only 50-90%.
+
+$flavour=shift;
+$output=shift;
+open STDOUT,">$output";
+
+if ($output =~ /512/) {
+ $BITS=512;
+ $SZ=8;
+ @Sigma0=(28,34,39);
+ @Sigma1=(14,18,41);
+ @sigma0=(1, 8, 7);
+ @sigma1=(19,61, 6);
+ $rounds=80;
+ $reg_t="x";
+} else {
+ $BITS=256;
+ $SZ=4;
+ @Sigma0=( 2,13,22);
+ @Sigma1=( 6,11,25);
+ @sigma0=( 7,18, 3);
+ @sigma1=(17,19,10);
+ $rounds=64;
+ $reg_t="w";
+}
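+# The SHA-256/SHA-512 split is keyed off the output file name, so a
+# typical invocation would be (flavour argument as read above):
+#
+#	perl sha512-armv8.pl <flavour> sha512-armv8.S	# $SZ=8, 80 rounds
+#	perl sha512-armv8.pl <flavour> sha256-armv8.S	# $SZ=4, 64 rounds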
+
+$func="sha${BITS}_block_data_order";
+
+($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
+
+@X=map("$reg_t$_",(3..15,0..2));
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
+($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
+
+sub BODY_00_xx {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my $j=($i+1)&15;
+my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
+ $T0=@X[$i+3] if ($i<11);
+
+$code.=<<___ if ($i<16);
+#ifndef __ARMEB__
+ rev @X[$i],@X[$i] // $i
+#endif
+___
+$code.=<<___ if ($i<13 && ($i&1));
+ ldp @X[$i+1],@X[$i+2],[$inp],#2*$SZ
+___
+$code.=<<___ if ($i==13);
+ ldp @X[14],@X[15],[$inp]
+___
+$code.=<<___ if ($i>=14);
+ ldr @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
+___
+$code.=<<___ if ($i>0 && $i<16);
+ add $a,$a,$t1 // h+=Sigma0(a)
+___
+$code.=<<___ if ($i>=11);
+ str @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
+___
+# While ARMv8 specifies a merged rotate-and-logical operation such as
+# 'eor x,y,z,ror#n', it was found to negatively affect performance
+# on Apple A7. The reason seems to be that it requires even 'y' to
+# be available earlier. This means that such a merged instruction is
+# not necessarily the best choice on the critical path... On the other
+# hand, Cortex-A5x handles merged instructions much better than
+# disjoint rotate and logical... See the (**) footnote above.
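+# E.g. (sketch):	merged		eor	x0,x1,x2,ror#14
+#			vs. disjoint	ror	x3,x2,#14
+#					eor	x0,x1,x3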
+$code.=<<___ if ($i<15);
+ ror $t0,$e,#$Sigma1[0]
+ add $h,$h,$t2 // h+=K[i]
+ eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
+ and $t1,$f,$e
+ bic $t2,$g,$e
+ add $h,$h,@X[$i&15] // h+=X[i]
+ orr $t1,$t1,$t2 // Ch(e,f,g)
+ eor $t2,$a,$b // a^b, b^c in next round
+ eor $t0,$t0,$T0,ror#$Sigma1[1] // Sigma1(e)
+ ror $T0,$a,#$Sigma0[0]
+ add $h,$h,$t1 // h+=Ch(e,f,g)
+ eor $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
+ add $h,$h,$t0 // h+=Sigma1(e)
+ and $t3,$t3,$t2 // (b^c)&=(a^b)
+ add $d,$d,$h // d+=h
+ eor $t3,$t3,$b // Maj(a,b,c)
+ eor $t1,$T0,$t1,ror#$Sigma0[1] // Sigma0(a)
+ add $h,$h,$t3 // h+=Maj(a,b,c)
+ ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round
+ //add $h,$h,$t1 // h+=Sigma0(a)
+___
+$code.=<<___ if ($i>=15);
+ ror $t0,$e,#$Sigma1[0]
+ add $h,$h,$t2 // h+=K[i]
+ ror $T1,@X[($j+1)&15],#$sigma0[0]
+ and $t1,$f,$e
+ ror $T2,@X[($j+14)&15],#$sigma1[0]
+ bic $t2,$g,$e
+ ror $T0,$a,#$Sigma0[0]
+ add $h,$h,@X[$i&15] // h+=X[i]
+ eor $t0,$t0,$e,ror#$Sigma1[1]
+ eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
+ orr $t1,$t1,$t2 // Ch(e,f,g)
+ eor $t2,$a,$b // a^b, b^c in next round
+ eor $t0,$t0,$e,ror#$Sigma1[2] // Sigma1(e)
+ eor $T0,$T0,$a,ror#$Sigma0[1]
+ add $h,$h,$t1 // h+=Ch(e,f,g)
+ and $t3,$t3,$t2 // (b^c)&=(a^b)
+ eor $T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
+ eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1])
+ add $h,$h,$t0 // h+=Sigma1(e)
+ eor $t3,$t3,$b // Maj(a,b,c)
+ eor $t1,$T0,$a,ror#$Sigma0[2] // Sigma0(a)
+ eor $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2] // sigma1(X[i+14])
+ add @X[$j],@X[$j],@X[($j+9)&15]
+ add $d,$d,$h // d+=h
+ add $h,$h,$t3 // h+=Maj(a,b,c)
+ ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round
+ add @X[$j],@X[$j],$T1
+ add $h,$h,$t1 // h+=Sigma0(a)
+ add @X[$j],@X[$j],$T2
+___
+ ($t2,$t3)=($t3,$t2);
+}
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+.globl $func
+.type $func,%function
+.align 6
+$func:
+___
+$code.=<<___ if ($SZ==4);
+ ldr x16,.LOPENSSL_armcap_P
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA256
+ b.ne .Lv8_entry
+___
+$code.=<<___;
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*$SZ
+
+ ldp $A,$B,[$ctx] // load context
+ ldp $C,$D,[$ctx,#2*$SZ]
+ ldp $E,$F,[$ctx,#4*$SZ]
+ add $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input
+ ldp $G,$H,[$ctx,#6*$SZ]
+ adr $Ktbl,K$BITS
+ stp $ctx,$num,[x29,#96]
+
+.Loop:
+ ldp @X[0],@X[1],[$inp],#2*$SZ
+ ldr $t2,[$Ktbl],#$SZ // *K++
+ eor $t3,$B,$C // magic seed
+ str $inp,[x29,#112]
+___
+for ($i=0;$i<16;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=".Loop_16_xx:\n";
+for (;$i<32;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ cbnz $t2,.Loop_16_xx
+
+ ldp $ctx,$num,[x29,#96]
+ ldr $inp,[x29,#112]
+ sub $Ktbl,$Ktbl,#`$SZ*($rounds+1)` // rewind
+
+ ldp @X[0],@X[1],[$ctx]
+ ldp @X[2],@X[3],[$ctx,#2*$SZ]
+ add $inp,$inp,#14*$SZ // advance input pointer
+ ldp @X[4],@X[5],[$ctx,#4*$SZ]
+ add $A,$A,@X[0]
+ ldp @X[6],@X[7],[$ctx,#6*$SZ]
+ add $B,$B,@X[1]
+ add $C,$C,@X[2]
+ add $D,$D,@X[3]
+ stp $A,$B,[$ctx]
+ add $E,$E,@X[4]
+ add $F,$F,@X[5]
+ stp $C,$D,[$ctx,#2*$SZ]
+ add $G,$G,@X[6]
+ add $H,$H,@X[7]
+ cmp $inp,$num
+ stp $E,$F,[$ctx,#4*$SZ]
+ stp $G,$H,[$ctx,#6*$SZ]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*$SZ
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size $func,.-$func
+
+.align 6
+.type K$BITS,%object
+K$BITS:
+___
+$code.=<<___ if ($SZ==8);
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0 // terminator
+___
+$code.=<<___ if ($SZ==4);
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0	// terminator
+___
+$code.=<<___;
+.size K$BITS,.-K$BITS
+.align 3
+.LOPENSSL_armcap_P:
+ .quad OPENSSL_armcap_P-.
+.asciz "SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+if ($SZ==4) {
+my $Ktbl="x3";
+
+my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
+my @MSG=map("v$_.16b",(4..7));
+my ($W0,$W1)=("v16.4s","v17.4s");
+my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");
+
+$code.=<<___;
+.type sha256_block_armv8,%function
+.align 6
+sha256_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ ld1.32 {$ABCD,$EFGH},[$ctx]
+ adr $Ktbl,K256
+
+.Loop_hw:
+ ld1 {@MSG[0]-@MSG[3]},[$inp],#64
+ sub $num,$num,#1
+ ld1.32 {$W0},[$Ktbl],#16
+ rev32 @MSG[0],@MSG[0]
+ rev32 @MSG[1],@MSG[1]
+ rev32 @MSG[2],@MSG[2]
+ rev32 @MSG[3],@MSG[3]
+ orr $ABCD_SAVE,$ABCD,$ABCD // offload
+ orr $EFGH_SAVE,$EFGH,$EFGH
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+ ld1.32 {$W1},[$Ktbl],#16
+ add.i32 $W0,$W0,@MSG[0]
+ sha256su0 @MSG[0],@MSG[1]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+ sha256su1 @MSG[0],@MSG[2],@MSG[3]
+___
+ ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ ld1.32 {$W1},[$Ktbl],#16
+ add.i32 $W0,$W0,@MSG[0]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ ld1.32 {$W0},[$Ktbl],#16
+ add.i32 $W1,$W1,@MSG[1]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ ld1.32 {$W1},[$Ktbl]
+ add.i32 $W0,$W0,@MSG[2]
+ sub $Ktbl,$Ktbl,#$rounds*$SZ-16 // rewind
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ add.i32 $W1,$W1,@MSG[3]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ add.i32 $ABCD,$ABCD,$ABCD_SAVE
+ add.i32 $EFGH,$EFGH,$EFGH_SAVE
+
+ cbnz $num,.Loop_hw
+
+ st1.32 {$ABCD,$EFGH},[$ctx]
+
+ ldr x29,[sp],#16
+ ret
+.size sha256_block_armv8,.-sha256_block_armv8
+___
+}
+
+$code.=<<___;
+.comm OPENSSL_armcap_P,4,4
+___
+
+{ my %opcode = (
+ "sha256h" => 0x5e004000, "sha256h2" => 0x5e005000,
+ "sha256su0" => 0x5e282800, "sha256su1" => 0x5e006000 );
+
+ sub unsha256 {
+ my ($mnemonic,$arg)=@_;
+
+ $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+ &&
+ sprintf ".inst\t0x%08x\t//%s %s",
+ $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+ $mnemonic,$arg;
+ }
+}
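+# For example, a worked instance of the encoder above:
+#	sha256h	v0.16b,v1.16b,v16.4s
+# captures $1=0, $2=1, $3=16 and is emitted as
+#	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s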
+
+foreach(split("\n",$code)) {
+
+ s/\`([^\`]*)\`/eval($1)/geo;
+
+ s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/geo;
+
+ s/\.\w?32\b//o and s/\.16b/\.4s/go;
+ m/(ld|st)1[^\[]+\[0\]/o and s/\.4s/\.s/go;
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha512-ia64.pl b/openssl/crypto/sha/asm/sha512-ia64.pl
index 1c6ce5652..59f889a09 100644
--- a/openssl/crypto/sha/asm/sha512-ia64.pl
+++ b/openssl/crypto/sha/asm/sha512-ia64.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -48,16 +48,22 @@
# because on Itanium 1 stall on MM result is accompanied by
# pipeline flush, which takes 6 cycles:-(
#
-# Resulting performance numbers for 900MHz Itanium 2 system:
+# June 2012
#
-# The 'numbers' are in 1000s of bytes per second processed.
-# type 16 bytes 64 bytes 256 bytes 1024 bytes 8192 bytes
-# sha1(*) 6210.14k 20376.30k 52447.83k 85870.05k 105478.12k
-# sha256 7476.45k 20572.05k 41538.34k 56062.29k 62093.18k
-# sha512 4996.56k 20026.28k 47597.20k 85278.79k 111501.31k
+# Improve performance by 15-20%. Note the "rules of engagement"
+# comment above. Contemporary cores are equipped with an additional
+# shifter, so they should perform even better than shown below,
+# presumably by ~10%.
#
-# (*) SHA1 numbers are for HP-UX compiler and are presented purely
-# for reference purposes. I bet it can improved too...
+######################################################################
+# Current performance in cycles per processed byte for Itanium 2
+# pre-9000 series [little-endian] system:
+#
+# SHA1(*) 5.7
+# SHA256 12.6
+# SHA512 6.7
+#
+# (*) SHA1 result is presented purely for reference purposes.
#
# To generate code, pass the file name with either 256 or 512 in its
# name and compiler flags.
@@ -106,8 +112,8 @@ if (!defined($big_endian))
{ $big_endian=(unpack('L',pack('N',1))==1); }
$code=<<___;
-.ident \"$output, version 1.1\"
-.ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
+.ident \"$output, version 2.0\"
+.ident \"IA-64 ISA artwork by Andy Polyakov <appro\@openssl.org>\"
.explicit
.text
@@ -115,26 +121,25 @@ pfssave=r2;
lcsave=r3;
prsave=r14;
K=r15;
-A=r16; B=r17; C=r18; D=r19;
-E=r20; F=r21; G=r22; H=r23;
+A_=r16; B_=r17; C_=r18; D_=r19;
+E_=r20; F_=r21; G_=r22; H_=r23;
T1=r24; T2=r25;
s0=r26; s1=r27; t0=r28; t1=r29;
Ktbl=r30;
ctx=r31; // 1st arg
-input=r48; // 2nd arg
-num=r49; // 3rd arg
-sgm0=r50; sgm1=r51; // small constants
-A_=r54; B_=r55; C_=r56; D_=r57;
-E_=r58; F_=r59; G_=r60; H_=r61;
+input=r56; // 2nd arg
+num=r57; // 3rd arg
+sgm0=r58; sgm1=r59; // small constants
// void $func (SHA_CTX *ctx, const void *in,size_t num[,int host])
.global $func#
.proc $func#
.align 32
+.skip 16
$func:
.prologue
.save ar.pfs,pfssave
-{ .mmi; alloc pfssave=ar.pfs,3,27,0,16
+{ .mmi; alloc pfssave=ar.pfs,3,25,0,24
$ADDP ctx=0,r32 // 1st arg
.save ar.lc,lcsave
mov lcsave=ar.lc }
@@ -145,11 +150,9 @@ $func:
.body
{ .mib; add r8=0*$SZ,ctx
- add r9=1*$SZ,ctx
- brp.loop.imp .L_first16,.L_first16_end-16 }
+ add r9=1*$SZ,ctx }
{ .mib; add r10=2*$SZ,ctx
- add r11=3*$SZ,ctx
- brp.loop.imp .L_rest,.L_rest_end-16 };;
+ add r11=3*$SZ,ctx };;
// load A-H
.Lpic_point:
@@ -164,7 +167,7 @@ $func:
add Ktbl=($TABLE#-.Lpic_point),Ktbl }
{ .mmi; $LDW G_=[r10]
$LDW H_=[r11]
- cmp.ne p0,p16=0,r0 };; // used in sha256_block
+ cmp.ne p0,p16=0,r0 };;
___
$code.=<<___ if ($BITS==64);
{ .mii; and r8=7,input
@@ -179,50 +182,26 @@ $code.=<<___ if ($BITS==64);
___
$code.=<<___;
.L_outer:
-.rotr X[16]
-{ .mmi; mov A=A_
- mov B=B_
+.rotr R[8],X[16]
+A=R[0]; B=R[1]; C=R[2]; D=R[3]; E=R[4]; F=R[5]; G=R[6]; H=R[7]
+{ .mmi; ld1 X[15]=[input],$SZ // eliminated in sha512
+ mov A=A_
mov ar.lc=14 }
-{ .mmi; mov C=C_
- mov D=D_
- mov E=E_ }
-{ .mmi; mov F=F_
- mov G=G_
- mov ar.ec=2 }
-{ .mmi; ld1 X[15]=[input],$SZ // eliminated in 64-bit
+{ .mmi; mov B=B_
+ mov C=C_
+ mov D=D_ }
+{ .mmi; mov E=E_
+ mov F=F_
+ mov ar.ec=2 };;
+{ .mmi; mov G=G_
mov H=H_
- mov sgm1=$sigma1[2] };;
-
-___
-$t0="t0", $t1="t1", $code.=<<___ if ($BITS==32);
-.align 32
-.L_first16:
-{ .mmi; add r9=1-$SZ,input
- add r10=2-$SZ,input
- add r11=3-$SZ,input };;
-{ .mmi; ld1 r9=[r9]
- ld1 r10=[r10]
- dep.z $t1=E,32,32 }
-{ .mmi; $LDW K=[Ktbl],$SZ
- ld1 r11=[r11]
- zxt4 E=E };;
-{ .mii; or $t1=$t1,E
- dep X[15]=X[15],r9,8,8
- dep r11=r10,r11,8,8 };;
-{ .mmi; and T1=F,E
- and T2=A,B
- dep X[15]=X[15],r11,16,16 }
-{ .mmi; andcm r8=G,E
- and r9=A,C
- mux2 $t0=A,0x44 };; // copy lower half to upper
-{ .mmi; (p16) ld1 X[15-1]=[input],$SZ // prefetch
- xor T1=T1,r8 // T1=((e & f) ^ (~e & g))
- _rotr r11=$t1,$Sigma1[0] } // ROTR(e,14)
-{ .mib; and r10=B,C
- xor T2=T2,r9 };;
+ mov sgm1=$sigma1[2] }
+{ .mib; mov r8=0
+ add r9=1-$SZ,input
+ brp.loop.imp .L_first16,.L_first16_end-16 };;
___
$t0="A", $t1="E", $code.=<<___ if ($BITS==64);
-// in 64-bit mode I load whole X[16] at once and take care of alignment...
+// in the sha512 case I load the whole X[16] at once and take care of alignment...
{ .mmi; add r8=1*$SZ,input
add r9=2*$SZ,input
add r10=3*$SZ,input };;
@@ -248,7 +227,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
$LDW X[ 2]=[r8],4*$SZ
(p15) br.cond.dpnt.many .L7byte };;
{ .mmb; $LDW X[ 1]=[r9],4*$SZ
- $LDW X[ 0]=[r10],4*$SZ
+ $LDW X[ 0]=[r10],4*$SZ }
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
br.many .L_first16 };;
.L1byte:
{ .mmi; $LDW X[13]=[r9],4*$SZ
@@ -281,7 +262,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 3]=X[ 3],X[ 2],56 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],56
shrp X[ 1]=X[ 1],X[ 0],56 }
-{ .mib; shrp X[ 0]=X[ 0],T1,56
+{ .mib; shrp X[ 0]=X[ 0],T1,56 }
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
br.many .L_first16 };;
.L2byte:
{ .mmi; $LDW X[11]=[input],4*$SZ
@@ -313,7 +296,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 2]=X[ 2],X[ 1],48 }
{ .mii; shrp X[ 1]=X[ 1],X[ 0],48
shrp X[ 0]=X[ 0],T1,48 }
-{ .mfb; br.many .L_first16 };;
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
+ br.many .L_first16 };;
.L3byte:
{ .mmi; $LDW X[ 9]=[r9],4*$SZ
$LDW X[ 8]=[r10],4*$SZ
@@ -341,7 +326,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 3]=X[ 3],X[ 2],40 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],40
shrp X[ 1]=X[ 1],X[ 0],40 }
-{ .mib; shrp X[ 0]=X[ 0],T1,40
+{ .mib; shrp X[ 0]=X[ 0],T1,40 }
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
br.many .L_first16 };;
.L4byte:
{ .mmi; $LDW X[ 7]=[input],4*$SZ
@@ -369,7 +356,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 2]=X[ 2],X[ 1],32 }
{ .mii; shrp X[ 1]=X[ 1],X[ 0],32
shrp X[ 0]=X[ 0],T1,32 }
-{ .mfb; br.many .L_first16 };;
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
+ br.many .L_first16 };;
.L5byte:
{ .mmi; $LDW X[ 5]=[r9],4*$SZ
$LDW X[ 4]=[r10],4*$SZ
@@ -393,7 +382,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 3]=X[ 3],X[ 2],24 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],24
shrp X[ 1]=X[ 1],X[ 0],24 }
-{ .mib; shrp X[ 0]=X[ 0],T1,24
+{ .mib; shrp X[ 0]=X[ 0],T1,24 }
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
br.many .L_first16 };;
.L6byte:
{ .mmi; $LDW X[ 3]=[input],4*$SZ
@@ -417,7 +408,9 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 2]=X[ 2],X[ 1],16 }
{ .mii; shrp X[ 1]=X[ 1],X[ 0],16
shrp X[ 0]=X[ 0],T1,16 }
-{ .mfb; br.many .L_first16 };;
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev // eliminated on big-endian
+ br.many .L_first16 };;
.L7byte:
{ .mmi; $LDW X[ 1]=[r9],4*$SZ
$LDW X[ 0]=[r10],4*$SZ
@@ -437,128 +430,146 @@ $t0="A", $t1="E", $code.=<<___ if ($BITS==64);
shrp X[ 3]=X[ 3],X[ 2],8 }
{ .mii; shrp X[ 2]=X[ 2],X[ 1],8
shrp X[ 1]=X[ 1],X[ 0],8 }
-{ .mib; shrp X[ 0]=X[ 0],T1,8
- br.many .L_first16 };;
+{ .mib; shrp X[ 0]=X[ 0],T1,8 }
+{ .mib; mov r8=0
+ mux1 X[15]=X[15],\@rev };; // eliminated on big-endian
.align 32
.L_first16:
{ .mmi; $LDW K=[Ktbl],$SZ
- and T1=F,E
- and T2=A,B }
-{ .mmi; //$LDW X[15]=[input],$SZ // X[i]=*input++
+	add	A=A,r8			// H+=Sigma0(a) from the past
+ _rotr r10=$t1,$Sigma1[0] } // ROTR(e,14)
+{ .mmi; and T1=F,E
andcm r8=G,E
- and r9=A,C };;
-{ .mmi; xor T1=T1,r8 //T1=((e & f) ^ (~e & g))
- and r10=B,C
- _rotr r11=$t1,$Sigma1[0] } // ROTR(e,14)
-{ .mmi; xor T2=T2,r9
- mux1 X[15]=X[15],\@rev };; // eliminated in big-endian
+ (p16) mux1 X[14]=X[14],\@rev };; // eliminated on big-endian
+{ .mmi; and T2=A,B
+ and r9=A,C
+ _rotr r11=$t1,$Sigma1[1] } // ROTR(e,41)
+{ .mmi; xor T1=T1,r8 // T1=((e & f) ^ (~e & g))
+ and r8=B,C };;
+___
+$t0="t0", $t1="t1", $code.=<<___ if ($BITS==32);
+.align 32
+.L_first16:
+{ .mmi;	add	A=A,r8			// H+=Sigma0(a) from the past
+ add r10=2-$SZ,input
+ add r11=3-$SZ,input };;
+{ .mmi; ld1 r9=[r9]
+ ld1 r10=[r10]
+ dep.z $t1=E,32,32 }
+{ .mmi; ld1 r11=[r11]
+ $LDW K=[Ktbl],$SZ
+ zxt4 E=E };;
+{ .mii; or $t1=$t1,E
+ dep X[15]=X[15],r9,8,8
+ mux2 $t0=A,0x44 };; // copy lower half to upper
+{ .mmi; and T1=F,E
+ andcm r8=G,E
+ dep r11=r10,r11,8,8 };;
+{ .mmi; and T2=A,B
+ and r9=A,C
+ dep X[15]=X[15],r11,16,16 };;
+{ .mmi; (p16) ld1 X[15-1]=[input],$SZ // prefetch
+ xor T1=T1,r8 // T1=((e & f) ^ (~e & g))
+ _rotr r10=$t1,$Sigma1[0] } // ROTR(e,14)
+{ .mmi; and r8=B,C
+ _rotr r11=$t1,$Sigma1[1] };; // ROTR(e,18)
___
$code.=<<___;
-{ .mib; add T1=T1,H // T1=Ch(e,f,g)+h
- _rotr r8=$t1,$Sigma1[1] } // ROTR(e,18)
-{ .mib; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c))
- mov H=G };;
-{ .mib; xor r11=r8,r11
- _rotr r9=$t1,$Sigma1[2] } // ROTR(e,41)
-{ .mib; mov G=F
- mov F=E };;
-{ .mib; xor r9=r9,r11 // r9=Sigma1(e)
- _rotr r10=$t0,$Sigma0[0] } // ROTR(a,28)
-{ .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i]
- mov E=D };;
-{ .mib; add T1=T1,r9 // T1+=Sigma1(e)
- _rotr r11=$t0,$Sigma0[1] } // ROTR(a,34)
-{ .mib; mov D=C
- mov C=B };;
-{ .mib; add T1=T1,X[15] // T1+=X[i]
- _rotr r8=$t0,$Sigma0[2] } // ROTR(a,39)
-{ .mib; xor r10=r10,r11
- mux2 X[15]=X[15],0x44 };; // eliminated in 64-bit
-{ .mmi; xor r10=r8,r10 // r10=Sigma0(a)
- mov B=A
- add A=T1,T2 };;
-{ .mib; add E=E,T1
- add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a)
- br.ctop.sptk .L_first16 };;
+{ .mmi; add T1=T1,H // T1=Ch(e,f,g)+h
+ xor r10=r10,r11
+ _rotr r11=$t1,$Sigma1[2] } // ROTR(e,41)
+{ .mmi; xor T2=T2,r9
+ add K=K,X[15] };;
+{ .mmi; add T1=T1,K // T1+=K[i]+X[i]
+ xor T2=T2,r8 // T2=((a & b) ^ (a & c) ^ (b & c))
+ _rotr r8=$t0,$Sigma0[0] } // ROTR(a,28)
+{ .mmi; xor r11=r11,r10 // Sigma1(e)
+ _rotr r9=$t0,$Sigma0[1] };; // ROTR(a,34)
+{ .mmi; add T1=T1,r11 // T+=Sigma1(e)
+ xor r8=r8,r9
+ _rotr r9=$t0,$Sigma0[2] };; // ROTR(a,39)
+{ .mmi; xor r8=r8,r9 // Sigma0(a)
+ add D=D,T1
+ mux2 H=X[15],0x44 } // mov H=X[15] in sha512
+{ .mib; (p16) add r9=1-$SZ,input // not used in sha512
+ add X[15]=T1,T2 // H=T1+Maj(a,b,c)
+ br.ctop.sptk .L_first16 };;
.L_first16_end:
-{ .mii; mov ar.lc=$rounds-17
- mov ar.ec=1 };;
+{ .mib; mov ar.lc=$rounds-17
+ brp.loop.imp .L_rest,.L_rest_end-16 }
+{ .mib; mov ar.ec=1
+ br.many .L_rest };;
.align 32
.L_rest:
-.rotr X[16]
-{ .mib; $LDW K=[Ktbl],$SZ
+{ .mmi; $LDW K=[Ktbl],$SZ
+ add A=A,r8 // H+=Sigma0(a) from the past
_rotr r8=X[15-1],$sigma0[0] } // ROTR(s0,1)
-{ .mib; $ADD X[15]=X[15],X[15-9] // X[i&0xF]+=X[(i+9)&0xF]
- $SHRU s0=X[15-1],sgm0 };; // s0=X[(i+1)&0xF]>>7
+{ .mmi; add X[15]=X[15],X[15-9] // X[i&0xF]+=X[(i+9)&0xF]
+ $SHRU s0=X[15-1],sgm0 };; // s0=X[(i+1)&0xF]>>7
{ .mib; and T1=F,E
_rotr r9=X[15-1],$sigma0[1] } // ROTR(s0,8)
{ .mib; andcm r10=G,E
- $SHRU s1=X[15-14],sgm1 };; // s1=X[(i+14)&0xF]>>6
+ $SHRU s1=X[15-14],sgm1 };; // s1=X[(i+14)&0xF]>>6
+// Pair of mmi; splits on Itanium 1 and prevents pipeline flush
+// upon $SHRU output usage
{ .mmi; xor T1=T1,r10 // T1=((e & f) ^ (~e & g))
xor r9=r8,r9
- _rotr r10=X[15-14],$sigma1[0] };;// ROTR(s1,19)
-{ .mib; and T2=A,B
- _rotr r11=X[15-14],$sigma1[1] }// ROTR(s1,61)
-{ .mib; and r8=A,C };;
+ _rotr r10=X[15-14],$sigma1[0] }// ROTR(s1,19)
+{ .mmi; and T2=A,B
+ and r8=A,C
+ _rotr r11=X[15-14],$sigma1[1] };;// ROTR(s1,61)
___
$t0="t0", $t1="t1", $code.=<<___ if ($BITS==32);
-// I adhere to mmi; in order to hold Itanium 1 back and avoid 6 cycle
-// pipeline flush in last bundle. Note that even on Itanium2 the
-// latter stalls for one clock cycle...
-{ .mmi; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF])
- dep.z $t1=E,32,32 }
-{ .mmi; xor r10=r11,r10
- zxt4 E=E };;
-{ .mmi; or $t1=$t1,E
- xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF])
- mux2 $t0=A,0x44 };; // copy lower half to upper
+{ .mib; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF])
+ dep.z $t1=E,32,32 }
+{ .mib; xor r10=r11,r10
+ zxt4 E=E };;
+{ .mii; xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF])
+ shrp r9=E,$t1,32+$Sigma1[0] // ROTR(e,14)
+ mux2 $t0=A,0x44 };; // copy lower half to upper
+// Pair of mmi; splits on Itanium 1 and prevents pipeline flush
+// upon mux2 output usage
{ .mmi; xor T2=T2,r8
- _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14)
+ shrp r8=E,$t1,32+$Sigma1[1]} // ROTR(e,18)
{ .mmi; and r10=B,C
add T1=T1,H // T1=Ch(e,f,g)+h
- $ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF])
+ or $t1=$t1,E };;
___
$t0="A", $t1="E", $code.=<<___ if ($BITS==64);
{ .mib; xor s0=s0,r9 // s0=sigma0(X[(i+1)&0xF])
- _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14)
+ _rotr r9=$t1,$Sigma1[0] } // ROTR(e,14)
{ .mib; xor r10=r11,r10
- xor T2=T2,r8 };;
+ xor T2=T2,r8 };;
{ .mib; xor s1=s1,r10 // s1=sigma1(X[(i+14)&0xF])
- add T1=T1,H }
+ _rotr r8=$t1,$Sigma1[1] } // ROTR(e,18)
{ .mib; and r10=B,C
- $ADD X[15]=X[15],s0 };; // X[i&0xF]+=sigma0(X[(i+1)&0xF])
+ add T1=T1,H };; // T1+=H
___
$code.=<<___;
-{ .mmi; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c))
- mov H=G
- _rotr r8=$t1,$Sigma1[1] };; // ROTR(e,18)
-{ .mmi; xor r11=r8,r9
- $ADD X[15]=X[15],s1 // X[i&0xF]+=sigma1(X[(i+14)&0xF])
- _rotr r9=$t1,$Sigma1[2] } // ROTR(e,41)
-{ .mmi; mov G=F
- mov F=E };;
-{ .mib; xor r9=r9,r11 // r9=Sigma1(e)
- _rotr r10=$t0,$Sigma0[0] } // ROTR(a,28)
-{ .mib; add T1=T1,K // T1=Ch(e,f,g)+h+K512[i]
- mov E=D };;
-{ .mib; add T1=T1,r9 // T1+=Sigma1(e)
- _rotr r11=$t0,$Sigma0[1] } // ROTR(a,34)
-{ .mib; mov D=C
- mov C=B };;
-{ .mmi; add T1=T1,X[15] // T1+=X[i]
- xor r10=r10,r11
- _rotr r8=$t0,$Sigma0[2] };; // ROTR(a,39)
-{ .mmi; xor r10=r8,r10 // r10=Sigma0(a)
- mov B=A
- add A=T1,T2 };;
-{ .mib; add E=E,T1
- add A=A,r10 // T2=Maj(a,b,c)+Sigma0(a)
- br.ctop.sptk .L_rest };;
+{ .mib; xor r9=r9,r8
+ _rotr r8=$t1,$Sigma1[2] } // ROTR(e,41)
+{ .mib; xor T2=T2,r10 // T2=((a & b) ^ (a & c) ^ (b & c))
+ add X[15]=X[15],s0 };; // X[i]+=sigma0(X[i+1])
+{ .mmi; xor r9=r9,r8 // Sigma1(e)
+	add	X[15]=X[15],s1		// X[i]+=sigma1(X[i+14])
+ _rotr r8=$t0,$Sigma0[0] };; // ROTR(a,28)
+{ .mmi; add K=K,X[15]
+ add T1=T1,r9 // T1+=Sigma1(e)
+ _rotr r9=$t0,$Sigma0[1] };; // ROTR(a,34)
+{ .mmi; add T1=T1,K // T1+=K[i]+X[i]
+ xor r8=r8,r9
+ _rotr r9=$t0,$Sigma0[2] };; // ROTR(a,39)
+{ .mib; add D=D,T1
+ mux2 H=X[15],0x44 } // mov H=X[15] in sha512
+{ .mib; xor r8=r8,r9 // Sigma0(a)
+ add X[15]=T1,T2 // H=T1+Maj(a,b,c)
+ br.ctop.sptk .L_rest };;
.L_rest_end:
+{ .mmi; add A=A,r8 };; // H+=Sigma0(a) from the past
{ .mmi; add A_=A_,A
add B_=B_,B
add C_=C_,C }
@@ -590,17 +601,19 @@ $code.=<<___;
.endp $func#
___
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-$code =~ s/_rotr(\s+)([^=]+)=([^,]+),([0-9]+)/shrp$1$2=$3,$3,$4/gm;
-if ($BITS==64) {
- $code =~ s/mux2(\s+)\S+/nop.i$1 0x0/gm;
- $code =~ s/mux1(\s+)\S+/nop.i$1 0x0/gm if ($big_endian);
- $code =~ s/(shrp\s+X\[[^=]+)=([^,]+),([^,]+),([1-9]+)/$1=$3,$2,64-$4/gm
+foreach(split($/,$code)) {
+ s/\`([^\`]*)\`/eval $1/gem;
+ s/_rotr(\s+)([^=]+)=([^,]+),([0-9]+)/shrp$1$2=$3,$3,$4/gm;
+ if ($BITS==64) {
+ s/mux2(\s+)([^=]+)=([^,]+),\S+/mov$1 $2=$3/gm;
+ s/mux1(\s+)\S+/nop.i$1 0x0/gm if ($big_endian);
+ s/(shrp\s+X\[[^=]+)=([^,]+),([^,]+),([1-9]+)/$1=$3,$2,64-$4/gm
if (!$big_endian);
- $code =~ s/ld1(\s+)X\[\S+/nop.m$1 0x0/gm;
-}
+ s/ld1(\s+)X\[\S+/nop.m$1 0x0/gm;
+ }
-print $code;
+ print $_,"\n";
+}
print<<___ if ($BITS==32);
.align 64
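The substitution pass above rewrites the _rotr pseudo-instruction as "shrp dst=src,src,n", because IA-64 has no rotate instruction: shrp (shift right pair) extracts 64 bits from the concatenation of its two source registers, and feeding the same register twice yields a rotate. A minimal C model of that equivalence (the helper names are illustrative, not from the module):

    #include <assert.h>
    #include <stdint.h>

    /* shrp hi,lo,n: treat hi:lo as a 128-bit value and return bits n..n+63 */
    static uint64_t shrp(uint64_t hi, uint64_t lo, unsigned n)
    {
        return n ? (lo >> n) | (hi << (64 - n)) : lo;
    }

    int main(void)
    {
        uint64_t x = 0x0123456789abcdefULL;
        /* with both sources equal, shrp degenerates to a 64-bit rotate right */
        for (unsigned n = 1; n < 64; n++)
            assert(shrp(x, x, n) == ((x >> n) | (x << (64 - n))));
        return 0;
    }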
diff --git a/openssl/crypto/sha/asm/sha512-mips.pl b/openssl/crypto/sha/asm/sha512-mips.pl
index ffa053bb7..b468cfb45 100644
--- a/openssl/crypto/sha/asm/sha512-mips.pl
+++ b/openssl/crypto/sha/asm/sha512-mips.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -17,6 +17,10 @@
# ~17%, but it comes for free, because it's the same instruction sequence.
# Improvement coefficients are for aligned input.
+# September 2012.
+#
+# Add MIPS[32|64]R2 code (>25% fewer instructions).
+
######################################################################
# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
# widely used. Then there is a new contender: NUBI. It appears that if
@@ -45,7 +49,7 @@
# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
#
-$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+$flavour = shift || "o32"; # supported flavours are o32,n32,64,nubi32,nubi64
if ($flavour =~ /64|n32/i) {
$PTR_ADD="dadd"; # incidentally works even on n32
@@ -68,7 +72,7 @@ $pf = ($flavour =~ /nubi/i) ? $t0 : $t2;
#
######################################################################
-$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0;
+$big_endian=(`echo MIPSEL | $ENV{CC} -E -`=~/MIPSEL/)?1:0 if ($ENV{CC});
for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); }
open STDOUT,">$output";
@@ -83,6 +87,7 @@ if ($output =~ /512/) {
$SLL="dsll"; # shift left logical
$SRL="dsrl"; # shift right logical
$ADDU="daddu";
+ $ROTR="drotr";
@Sigma0=(28,34,39);
@Sigma1=(14,18,41);
@sigma0=( 7, 1, 8); # right shift first
@@ -97,6 +102,7 @@ if ($output =~ /512/) {
$SLL="sll"; # shift left logical
$SRL="srl"; # shift right logical
$ADDU="addu";
+ $ROTR="rotr";
@Sigma0=( 2,13,22);
@Sigma1=( 6,11,25);
@sigma0=( 3, 7,18); # right shift first
@@ -124,6 +130,10 @@ $code.=<<___ if ($i<15);
${LD}r @X[1],`($i+1)*$SZ+$LSB`($inp)
___
$code.=<<___ if (!$big_endian && $i<16 && $SZ==4);
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ wsbh @X[0],@X[0] # byte swap($i)
+ rotr @X[0],@X[0],16
+#else
srl $tmp0,@X[0],24 # byte swap($i)
srl $tmp1,@X[0],8
andi $tmp2,@X[0],0xFF00
@@ -133,8 +143,13 @@ $code.=<<___ if (!$big_endian && $i<16 && $SZ==4);
or @X[0],$tmp0
or $tmp1,$tmp2
or @X[0],$tmp1
+#endif
___
$code.=<<___ if (!$big_endian && $i<16 && $SZ==8);
+#if defined(_MIPS_ARCH_MIPS64R2)
+ dsbh @X[0],@X[0] # byte swap($i)
+ dshd @X[0],@X[0]
+#else
ori $tmp0,$zero,0xFF
dsll $tmp2,$tmp0,32
or $tmp0,$tmp2 # 0x000000FF000000FF
@@ -153,8 +168,31 @@ $code.=<<___ if (!$big_endian && $i<16 && $SZ==8);
dsrl $tmp1,@X[0],32
dsll @X[0],32
or @X[0],$tmp1
+#endif
___
$code.=<<___;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ xor $tmp2,$f,$g # $i
+ $ROTR $tmp0,$e,@Sigma1[0]
+ $ADDU $T1,$X[0],$h
+ $ROTR $tmp1,$e,@Sigma1[1]
+ and $tmp2,$e
+ $ROTR $h,$e,@Sigma1[2]
+ xor $tmp0,$tmp1
+ $ROTR $tmp1,$a,@Sigma0[0]
+ xor $tmp2,$g # Ch(e,f,g)
+ xor $tmp0,$h # Sigma1(e)
+
+ $ROTR $h,$a,@Sigma0[1]
+ $ADDU $T1,$tmp2
+ $LD $tmp2,`$i*$SZ`($Ktbl) # K[$i]
+ xor $h,$tmp1
+ $ROTR $tmp1,$a,@Sigma0[2]
+ $ADDU $T1,$tmp0
+ and $tmp0,$b,$c
+ xor $h,$tmp1 # Sigma0(a)
+ xor $tmp1,$b,$c
+#else
$ADDU $T1,$X[0],$h # $i
$SRL $h,$e,@Sigma1[0]
xor $tmp2,$f,$g
@@ -184,16 +222,15 @@ $code.=<<___;
xor $h,$tmp1
$SLL $tmp1,$a,`$SZ*8-@Sigma0[0]`
xor $h,$tmp0
- $ST @X[0],`($i%16)*$SZ`($sp) # offload to ring buffer
+ and $tmp0,$b,$c
xor $h,$tmp1 # Sigma0(a)
-
- or $tmp0,$a,$b
- and $tmp1,$a,$b
- and $tmp0,$c
- or $tmp1,$tmp0 # Maj(a,b,c)
+ xor $tmp1,$b,$c
+#endif
+ $ST @X[0],`($i%16)*$SZ`($sp) # offload to ring buffer
+ $ADDU $h,$tmp0
+ and $tmp1,$a
$ADDU $T1,$tmp2 # +=K[$i]
- $ADDU $h,$tmp1
-
+ $ADDU $h,$tmp1 # +=Maj(a,b,c)
$ADDU $d,$T1
$ADDU $h,$T1
___
@@ -207,6 +244,20 @@ my $i=@_[0];
my ($tmp0,$tmp1,$tmp2,$tmp3)=(@X[4],@X[5],@X[6],@X[7]);
$code.=<<___;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+ $SRL $tmp2,@X[1],@sigma0[0] # Xupdate($i)
+ $ROTR $tmp0,@X[1],@sigma0[1]
+ $ADDU @X[0],@X[9] # +=X[i+9]
+ xor $tmp2,$tmp0
+ $ROTR $tmp0,@X[1],@sigma0[2]
+
+ $SRL $tmp3,@X[14],@sigma1[0]
+ $ROTR $tmp1,@X[14],@sigma1[1]
+ xor $tmp2,$tmp0 # sigma0(X[i+1])
+ $ROTR $tmp0,@X[14],@sigma1[2]
+ xor $tmp3,$tmp1
+ $ADDU @X[0],$tmp2
+#else
$SRL $tmp2,@X[1],@sigma0[0] # Xupdate($i)
$ADDU @X[0],@X[9] # +=X[i+9]
$SLL $tmp1,@X[1],`$SZ*8-@sigma0[2]`
@@ -227,7 +278,7 @@ $code.=<<___;
xor $tmp3,$tmp0
$SRL $tmp0,@X[14],@sigma1[2]
xor $tmp3,$tmp1
-
+#endif
xor $tmp3,$tmp0 # sigma1(X[i+14])
$ADDU @X[0],$tmp3
___
@@ -242,9 +293,13 @@ $code.=<<___;
# include <openssl/fipssyms.h>
#endif
+#if defined(__mips_smartmips) && !defined(_MIPS_ARCH_MIPS32R2)
+#define _MIPS_ARCH_MIPS32R2
+#endif
+
.text
.set noat
-#if !defined(__vxworks) || defined(__pic__)
+#if !defined(__mips_eabi) && (!defined(__vxworks) || defined(__pic__))
.option pic2
#endif
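The new MIPS32R2 path replaces the seven-instruction shift-and-mask byte swap with wsbh (swap bytes within halfwords) followed by a rotate by 16; dsbh+dshd play the same role for 64-bit words on MIPS64R2. A small C model of the 32-bit sequence (an editorial sketch, not part of the module):

    #include <assert.h>
    #include <stdint.h>

    /* wsbh: swap the two bytes inside each 16-bit halfword */
    static uint32_t wsbh(uint32_t x)
    {
        return ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
    }

    int main(void)
    {
        uint32_t t = wsbh(0x11223344u);      /* 0x22114433     */
        uint32_t r = (t >> 16) | (t << 16);  /* rotr t,16      */
        assert(r == 0x44332211u);            /* full byte swap */
        return 0;
    }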
diff --git a/openssl/crypto/sha/asm/sha512-ppc.pl b/openssl/crypto/sha/asm/sha512-ppc.pl
index 6b44a68e5..734f3c1ca 100644
--- a/openssl/crypto/sha/asm/sha512-ppc.pl
+++ b/openssl/crypto/sha/asm/sha512-ppc.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -9,8 +9,7 @@
# I let hardware handle unaligned input, except on page boundaries
# (see below for details). Otherwise straightforward implementation
-# with X vector in register bank. The module is big-endian [which is
-# not big deal as there're no little-endian targets left around].
+# with X vector in register bank.
# sha256 | sha512
# -m64 -m32 | -m64 -m32
@@ -56,6 +55,8 @@ if ($flavour =~ /64/) {
$PUSH="stw";
} else { die "nonsense $flavour"; }
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
@@ -64,7 +65,7 @@ die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
if ($output =~ /512/) {
- $func="sha512_block_data_order";
+ $func="sha512_block_ppc";
$SZ=8;
@Sigma0=(28,34,39);
@Sigma1=(14,18,41);
@@ -76,7 +77,7 @@ if ($output =~ /512/) {
$ROR="rotrdi";
$SHR="srdi";
} else {
- $func="sha256_block_data_order";
+ $func="sha256_block_ppc";
$SZ=4;
@Sigma0=( 2,13,22);
@Sigma1=( 6,11,25);
@@ -110,7 +111,7 @@ $B ="r9";
$C ="r10";
$D ="r11";
$E ="r12";
-$F ="r13"; $F="r2" if ($SIZE_T==8);# reassigned to exempt TLS pointer
+$F =$t1; $t1 = "r0"; # stay away from "r13";
$G ="r14";
$H ="r15";
@@ -118,24 +119,23 @@ $H ="r15";
@X=("r16","r17","r18","r19","r20","r21","r22","r23",
"r24","r25","r26","r27","r28","r29","r30","r31");
-$inp="r31"; # reassigned $inp! aliases with @X[15]
+$inp="r31" if($SZ==4 || $SIZE_T==8); # reassigned $inp! aliases with @X[15]
sub ROUND_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$code.=<<___;
- $LD $T,`$i*$SZ`($Tbl)
$ROR $a0,$e,$Sigma1[0]
$ROR $a1,$e,$Sigma1[1]
and $t0,$f,$e
- andc $t1,$g,$e
- add $T,$T,$h
xor $a0,$a0,$a1
+ add $h,$h,$t1
+ andc $t1,$g,$e
$ROR $a1,$a1,`$Sigma1[2]-$Sigma1[1]`
or $t0,$t0,$t1 ; Ch(e,f,g)
- add $T,$T,@X[$i]
+ add $h,$h,@X[$i%16]
xor $a0,$a0,$a1 ; Sigma1(e)
- add $T,$T,$t0
- add $T,$T,$a0
+ add $h,$h,$t0
+ add $h,$h,$a0
$ROR $a0,$a,$Sigma0[0]
$ROR $a1,$a,$Sigma0[1]
@@ -146,9 +146,14 @@ $code.=<<___;
xor $t0,$t0,$t1
and $t1,$b,$c
xor $a0,$a0,$a1 ; Sigma0(a)
- add $d,$d,$T
+ add $d,$d,$h
xor $t0,$t0,$t1 ; Maj(a,b,c)
- add $h,$T,$a0
+___
+$code.=<<___ if ($i<15);
+ $LD $t1,`($i+1)*$SZ`($Tbl)
+___
+$code.=<<___;
+ add $h,$h,$a0
add $h,$h,$t0
___
@@ -169,10 +174,11 @@ $code.=<<___;
add @X[$i],@X[$i],@X[($i+9)%16]
xor $a0,$a0,$a1 ; sigma0(X[(i+1)&0x0f])
xor $t0,$t0,$t1 ; sigma1(X[(i+14)&0x0f])
+ $LD $t1,`$i*$SZ`($Tbl)
add @X[$i],@X[$i],$a0
add @X[$i],@X[$i],$t0
___
-&ROUND_00_15($i,$a,$b,$c,$d,$e,$f,$g,$h);
+&ROUND_00_15($i+16,$a,$b,$c,$d,$e,$f,$g,$h);
}
$code=<<___;
@@ -188,8 +194,6 @@ $func:
$PUSH $ctx,`$FRAME-$SIZE_T*22`($sp)
- $PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
- $PUSH r13,`$FRAME-$SIZE_T*19`($sp)
$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
$PUSH r15,`$FRAME-$SIZE_T*17`($sp)
$PUSH r16,`$FRAME-$SIZE_T*16`($sp)
@@ -209,7 +213,10 @@ $func:
$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
$PUSH r0,`$FRAME+$LRSAVE`($sp)
+___
+if ($SZ==4 || $SIZE_T==8) {
+$code.=<<___;
$LD $A,`0*$SZ`($ctx)
mr $inp,r4 ; incarnate $inp
$LD $B,`1*$SZ`($ctx)
@@ -219,7 +226,16 @@ $func:
$LD $F,`5*$SZ`($ctx)
$LD $G,`6*$SZ`($ctx)
$LD $H,`7*$SZ`($ctx)
+___
+} else {
+ for ($i=16;$i<32;$i++) {
+ $code.=<<___;
+ lwz r$i,`$LITTLE_ENDIAN^(4*($i-16))`($ctx)
+___
+ }
+}
+$code.=<<___;
bl LPICmeup
LPICedup:
andi. r0,$inp,3
@@ -255,6 +271,9 @@ Lunaligned:
Lcross_page:
li $t1,`16*$SZ/4`
mtctr $t1
+___
+if ($SZ==4 || $SIZE_T==8) {
+$code.=<<___;
addi r20,$sp,$LOCALS ; aligned spot below the frame
Lmemcpy:
lbz r16,0($inp)
@@ -268,7 +287,26 @@ Lmemcpy:
stb r19,3(r20)
addi r20,r20,4
bdnz Lmemcpy
+___
+} else {
+$code.=<<___;
+ addi r12,$sp,$LOCALS ; aligned spot below the frame
+Lmemcpy:
+ lbz r8,0($inp)
+ lbz r9,1($inp)
+ lbz r10,2($inp)
+ lbz r11,3($inp)
+ addi $inp,$inp,4
+ stb r8,0(r12)
+ stb r9,1(r12)
+ stb r10,2(r12)
+ stb r11,3(r12)
+ addi r12,r12,4
+ bdnz Lmemcpy
+___
+}
+$code.=<<___;
$PUSH $inp,`$FRAME-$SIZE_T*26`($sp) ; save real inp
addi $t1,$sp,`$LOCALS+16*$SZ` ; fictitious end pointer
addi $inp,$sp,$LOCALS ; fictitious inp pointer
@@ -283,8 +321,6 @@ Lmemcpy:
Ldone:
$POP r0,`$FRAME+$LRSAVE`($sp)
- $POP $toc,`$FRAME-$SIZE_T*20`($sp)
- $POP r13,`$FRAME-$SIZE_T*19`($sp)
$POP r14,`$FRAME-$SIZE_T*18`($sp)
$POP r15,`$FRAME-$SIZE_T*17`($sp)
$POP r16,`$FRAME-$SIZE_T*16`($sp)
@@ -309,27 +345,48 @@ Ldone:
.long 0
.byte 0,12,4,1,0x80,18,3,0
.long 0
+___
+if ($SZ==4 || $SIZE_T==8) {
+$code.=<<___;
.align 4
Lsha2_block_private:
+ $LD $t1,0($Tbl)
___
for($i=0;$i<16;$i++) {
-$code.=<<___ if ($SZ==4);
+$code.=<<___ if ($SZ==4 && !$LITTLE_ENDIAN);
lwz @X[$i],`$i*$SZ`($inp)
___
+$code.=<<___ if ($SZ==4 && $LITTLE_ENDIAN);
+ lwz $a0,`$i*$SZ`($inp)
+ rotlwi @X[$i],$a0,8
+ rlwimi @X[$i],$a0,24,0,7
+ rlwimi @X[$i],$a0,24,16,23
+___
# 64-bit loads are split to 2x32-bit ones, as CPU can't handle
# unaligned 64-bit loads, only 32-bit ones...
-$code.=<<___ if ($SZ==8);
+$code.=<<___ if ($SZ==8 && !$LITTLE_ENDIAN);
lwz $t0,`$i*$SZ`($inp)
lwz @X[$i],`$i*$SZ+4`($inp)
insrdi @X[$i],$t0,32,0
___
+$code.=<<___ if ($SZ==8 && $LITTLE_ENDIAN);
+ lwz $a0,`$i*$SZ`($inp)
+ lwz $a1,`$i*$SZ+4`($inp)
+ rotlwi $t0,$a0,8
+ rotlwi @X[$i],$a1,8
+ rlwimi $t0,$a0,24,0,7
+ rlwimi @X[$i],$a1,24,0,7
+ rlwimi $t0,$a0,24,16,23
+ rlwimi @X[$i],$a1,24,16,23
+ insrdi @X[$i],$t0,32,0
+___
&ROUND_00_15($i,@V);
unshift(@V,pop(@V));
}
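On little-endian targets the loop above byte-swaps each 32-bit word with the classic three-instruction PowerPC sequence: rotlwi rotates the word left by 8, then two rlwimi instructions patch in the remaining bytes (IBM bit ranges 0-7 and 16-23) from the source rotated left by 24. A C model of the sequence (hypothetical helper names; the masks follow IBM bit numbering, bit 0 = MSB):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }

    /* rlwimi rd,rs,sh,mb,me: insert rotl(rs,sh) under mask(mb,me) into rd;
       the two masks used here are (0,7)=0xff000000 and (16,23)=0x0000ff00 */
    static uint32_t rlwimi(uint32_t rd, uint32_t rs, unsigned sh, uint32_t mask)
    {
        return (rotl32(rs, sh) & mask) | (rd & ~mask);
    }

    int main(void)
    {
        uint32_t a0 = 0xaabbccddu, x;
        x = rotl32(a0, 8);                   /* rotlwi x,a0,8        */
        x = rlwimi(x, a0, 24, 0xff000000u);  /* rlwimi x,a0,24,0,7   */
        x = rlwimi(x, a0, 24, 0x0000ff00u);  /* rlwimi x,a0,24,16,23 */
        assert(x == 0xddccbbaau);            /* full byte swap       */
        return 0;
    }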
$code.=<<___;
- li $T,`$rounds/16-1`
- mtctr $T
+ li $t0,`$rounds/16-1`
+ mtctr $t0
.align 4
Lrounds:
addi $Tbl,$Tbl,`16*$SZ`
@@ -377,7 +434,282 @@ $code.=<<___;
blr
.long 0
.byte 0,12,0x14,0,0,0,0,0
+.size $func,.-$func
+___
+} else {
+########################################################################
+# SHA512 for PPC32, X vector is off-loaded to stack...
+#
+# | sha512
+# | -m32
+# ----------------------+-----------------------
+# PPC74x0,gcc-4.0.1 | +48%
+# POWER6,gcc-4.4.6 | +124%(*)
+# POWER7,gcc-4.4.6 | +79%(*)
+# e300,gcc-4.1.0 | +167%
+#
+# (*) ~1/3 of -m64 result [and ~20% better than -m32 code generated
+# by xlc-12.1]
+
+my $XOFF=$LOCALS;
+
+my @V=map("r$_",(16..31)); # A..H
+
+my ($s0,$s1,$t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("r$_",(0,5,6,8..12,14,15));
+my ($x0,$x1)=("r3","r4"); # zaps $ctx and $inp
+
+sub ROUND_00_15_ppc32 {
+my ($i, $ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
+ $ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;
+
+$code.=<<___;
+ lwz $t2,`$SZ*($i%16)+($LITTLE_ENDIAN^4)`($Tbl)
+ xor $a0,$flo,$glo
+ lwz $t3,`$SZ*($i%16)+($LITTLE_ENDIAN^0)`($Tbl)
+ xor $a1,$fhi,$ghi
+ addc $hlo,$hlo,$t0 ; h+=x[i]
+ stw $t0,`$XOFF+0+$SZ*($i%16)`($sp) ; save x[i]
+
+ srwi $s0,$elo,$Sigma1[0]
+ srwi $s1,$ehi,$Sigma1[0]
+ and $a0,$a0,$elo
+ adde $hhi,$hhi,$t1
+ and $a1,$a1,$ehi
+ stw $t1,`$XOFF+4+$SZ*($i%16)`($sp)
+ srwi $t0,$elo,$Sigma1[1]
+ srwi $t1,$ehi,$Sigma1[1]
+ addc $hlo,$hlo,$t2 ; h+=K512[i]
+ insrwi $s0,$ehi,$Sigma1[0],0
+ insrwi $s1,$elo,$Sigma1[0],0
+ xor $a0,$a0,$glo ; Ch(e,f,g)
+ adde $hhi,$hhi,$t3
+ xor $a1,$a1,$ghi
+ insrwi $t0,$ehi,$Sigma1[1],0
+ insrwi $t1,$elo,$Sigma1[1],0
+ addc $hlo,$hlo,$a0 ; h+=Ch(e,f,g)
+ srwi $t2,$ehi,$Sigma1[2]-32
+ srwi $t3,$elo,$Sigma1[2]-32
+ xor $s0,$s0,$t0
+ xor $s1,$s1,$t1
+ insrwi $t2,$elo,$Sigma1[2]-32,0
+ insrwi $t3,$ehi,$Sigma1[2]-32,0
+ xor $a0,$alo,$blo ; a^b, b^c in next round
+ adde $hhi,$hhi,$a1
+ xor $a1,$ahi,$bhi
+ xor $s0,$s0,$t2 ; Sigma1(e)
+ xor $s1,$s1,$t3
+
+ srwi $t0,$alo,$Sigma0[0]
+ and $a2,$a2,$a0
+ addc $hlo,$hlo,$s0 ; h+=Sigma1(e)
+ and $a3,$a3,$a1
+ srwi $t1,$ahi,$Sigma0[0]
+ srwi $s0,$ahi,$Sigma0[1]-32
+ adde $hhi,$hhi,$s1
+ srwi $s1,$alo,$Sigma0[1]-32
+ insrwi $t0,$ahi,$Sigma0[0],0
+ insrwi $t1,$alo,$Sigma0[0],0
+ xor $a2,$a2,$blo ; Maj(a,b,c)
+ addc $dlo,$dlo,$hlo ; d+=h
+ xor $a3,$a3,$bhi
+ insrwi $s0,$alo,$Sigma0[1]-32,0
+ insrwi $s1,$ahi,$Sigma0[1]-32,0
+ adde $dhi,$dhi,$hhi
+ srwi $t2,$ahi,$Sigma0[2]-32
+ srwi $t3,$alo,$Sigma0[2]-32
+ xor $s0,$s0,$t0
+ addc $hlo,$hlo,$a2 ; h+=Maj(a,b,c)
+ xor $s1,$s1,$t1
+ insrwi $t2,$alo,$Sigma0[2]-32,0
+ insrwi $t3,$ahi,$Sigma0[2]-32,0
+ adde $hhi,$hhi,$a3
+___
+$code.=<<___ if ($i>=15);
+ lwz $t0,`$XOFF+0+$SZ*(($i+2)%16)`($sp)
+ lwz $t1,`$XOFF+4+$SZ*(($i+2)%16)`($sp)
+___
+$code.=<<___ if ($i<15 && !$LITTLE_ENDIAN);
+ lwz $t1,`$SZ*($i+1)+0`($inp)
+ lwz $t0,`$SZ*($i+1)+4`($inp)
___
+$code.=<<___ if ($i<15 && $LITTLE_ENDIAN);
+ lwz $a2,`$SZ*($i+1)+0`($inp)
+ lwz $a3,`$SZ*($i+1)+4`($inp)
+ rotlwi $t1,$a2,8
+ rotlwi $t0,$a3,8
+ rlwimi $t1,$a2,24,0,7
+ rlwimi $t0,$a3,24,0,7
+ rlwimi $t1,$a2,24,16,23
+ rlwimi $t0,$a3,24,16,23
+___
+$code.=<<___;
+ xor $s0,$s0,$t2 ; Sigma0(a)
+ xor $s1,$s1,$t3
+ addc $hlo,$hlo,$s0 ; h+=Sigma0(a)
+ adde $hhi,$hhi,$s1
+___
+$code.=<<___ if ($i==15);
+ lwz $x0,`$XOFF+0+$SZ*(($i+1)%16)`($sp)
+ lwz $x1,`$XOFF+4+$SZ*(($i+1)%16)`($sp)
+___
+}
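Since this path targets 32-bit PowerPC, every 64-bit SHA-512 addition above is split into an addc on the low halves, which sets the carry, and an adde on the high halves, which consumes it; that is why the h updates come in $hlo/$hhi pairs. A C sketch of the limb arithmetic (a model of that pattern, not the module's code):

    #include <assert.h>
    #include <stdint.h>

    /* add a 64-bit value held as 32-bit (hi,lo) limbs, chaining the
       carry the way addc/adde do on PPC32 */
    static void add64(uint32_t *hi, uint32_t *lo, uint32_t bhi, uint32_t blo)
    {
        uint32_t t = *lo + blo;        /* addc: low halves ...      */
        uint32_t carry = t < *lo;      /* ... with carry out        */
        *lo = t;
        *hi += bhi + carry;            /* adde: high halves + carry */
    }

    int main(void)
    {
        uint32_t hi = 1, lo = 0xffffffffu;  /* 0x1ffffffff */
        add64(&hi, &lo, 0, 1);              /* += 1        */
        assert(hi == 2 && lo == 0);
        return 0;
    }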
+sub ROUND_16_xx_ppc32 {
+my ($i, $ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
+ $ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;
+
+$code.=<<___;
+ srwi $s0,$t0,$sigma0[0]
+ srwi $s1,$t1,$sigma0[0]
+ srwi $t2,$t0,$sigma0[1]
+ srwi $t3,$t1,$sigma0[1]
+ insrwi $s0,$t1,$sigma0[0],0
+ insrwi $s1,$t0,$sigma0[0],0
+ srwi $a0,$t0,$sigma0[2]
+ insrwi $t2,$t1,$sigma0[1],0
+ insrwi $t3,$t0,$sigma0[1],0
+ insrwi $a0,$t1,$sigma0[2],0
+ xor $s0,$s0,$t2
+ lwz $t2,`$XOFF+0+$SZ*(($i+14)%16)`($sp)
+ srwi $a1,$t1,$sigma0[2]
+ xor $s1,$s1,$t3
+ lwz $t3,`$XOFF+4+$SZ*(($i+14)%16)`($sp)
+ xor $a0,$a0,$s0
+ srwi $s0,$t2,$sigma1[0]
+ xor $a1,$a1,$s1
+ srwi $s1,$t3,$sigma1[0]
+ addc $x0,$x0,$a0 ; x[i]+=sigma0(x[i+1])
+ srwi $a0,$t3,$sigma1[1]-32
+ insrwi $s0,$t3,$sigma1[0],0
+ insrwi $s1,$t2,$sigma1[0],0
+ adde $x1,$x1,$a1
+ srwi $a1,$t2,$sigma1[1]-32
+
+ insrwi $a0,$t2,$sigma1[1]-32,0
+ srwi $t2,$t2,$sigma1[2]
+ insrwi $a1,$t3,$sigma1[1]-32,0
+ insrwi $t2,$t3,$sigma1[2],0
+ xor $s0,$s0,$a0
+ lwz $a0,`$XOFF+0+$SZ*(($i+9)%16)`($sp)
+ srwi $t3,$t3,$sigma1[2]
+ xor $s1,$s1,$a1
+ lwz $a1,`$XOFF+4+$SZ*(($i+9)%16)`($sp)
+ xor $s0,$s0,$t2
+ addc $x0,$x0,$a0 ; x[i]+=x[i+9]
+ xor $s1,$s1,$t3
+ adde $x1,$x1,$a1
+ addc $x0,$x0,$s0 ; x[i]+=sigma1(x[i+14])
+ adde $x1,$x1,$s1
+___
+ ($t0,$t1,$x0,$x1) = ($x0,$x1,$t0,$t1);
+ &ROUND_00_15_ppc32(@_);
+}
+
+$code.=<<___;
+.align 4
+Lsha2_block_private:
+___
+$code.=<<___ if (!$LITTLE_ENDIAN);
+ lwz $t1,0($inp)
+ xor $a2,@V[3],@V[5] ; B^C, magic seed
+ lwz $t0,4($inp)
+ xor $a3,@V[2],@V[4]
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+ lwz $a1,0($inp)
+ xor $a2,@V[3],@V[5] ; B^C, magic seed
+ lwz $a0,4($inp)
+ xor $a3,@V[2],@V[4]
+ rotlwi $t1,$a1,8
+ rotlwi $t0,$a0,8
+ rlwimi $t1,$a1,24,0,7
+ rlwimi $t0,$a0,24,0,7
+ rlwimi $t1,$a1,24,16,23
+ rlwimi $t0,$a0,24,16,23
+___
+for($i=0;$i<16;$i++) {
+ &ROUND_00_15_ppc32($i,@V);
+ unshift(@V,pop(@V)); unshift(@V,pop(@V));
+ ($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
+}
+$code.=<<___;
+ li $a0,`$rounds/16-1`
+ mtctr $a0
+.align 4
+Lrounds:
+ addi $Tbl,$Tbl,`16*$SZ`
+___
+for(;$i<32;$i++) {
+ &ROUND_16_xx_ppc32($i,@V);
+ unshift(@V,pop(@V)); unshift(@V,pop(@V));
+ ($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
+}
+$code.=<<___;
+ bdnz- Lrounds
+
+ $POP $ctx,`$FRAME-$SIZE_T*22`($sp)
+ $POP $inp,`$FRAME-$SIZE_T*23`($sp) ; inp pointer
+ $POP $num,`$FRAME-$SIZE_T*24`($sp) ; end pointer
+ subi $Tbl,$Tbl,`($rounds-16)*$SZ` ; rewind Tbl
+
+ lwz $t0,`$LITTLE_ENDIAN^0`($ctx)
+ lwz $t1,`$LITTLE_ENDIAN^4`($ctx)
+ lwz $t2,`$LITTLE_ENDIAN^8`($ctx)
+ lwz $t3,`$LITTLE_ENDIAN^12`($ctx)
+ lwz $a0,`$LITTLE_ENDIAN^16`($ctx)
+ lwz $a1,`$LITTLE_ENDIAN^20`($ctx)
+ lwz $a2,`$LITTLE_ENDIAN^24`($ctx)
+ addc @V[1],@V[1],$t1
+ lwz $a3,`$LITTLE_ENDIAN^28`($ctx)
+ adde @V[0],@V[0],$t0
+ lwz $t0,`$LITTLE_ENDIAN^32`($ctx)
+ addc @V[3],@V[3],$t3
+ lwz $t1,`$LITTLE_ENDIAN^36`($ctx)
+ adde @V[2],@V[2],$t2
+ lwz $t2,`$LITTLE_ENDIAN^40`($ctx)
+ addc @V[5],@V[5],$a1
+ lwz $t3,`$LITTLE_ENDIAN^44`($ctx)
+ adde @V[4],@V[4],$a0
+ lwz $a0,`$LITTLE_ENDIAN^48`($ctx)
+ addc @V[7],@V[7],$a3
+ lwz $a1,`$LITTLE_ENDIAN^52`($ctx)
+ adde @V[6],@V[6],$a2
+ lwz $a2,`$LITTLE_ENDIAN^56`($ctx)
+ addc @V[9],@V[9],$t1
+ lwz $a3,`$LITTLE_ENDIAN^60`($ctx)
+ adde @V[8],@V[8],$t0
+ stw @V[0],`$LITTLE_ENDIAN^0`($ctx)
+ stw @V[1],`$LITTLE_ENDIAN^4`($ctx)
+ addc @V[11],@V[11],$t3
+ stw @V[2],`$LITTLE_ENDIAN^8`($ctx)
+ stw @V[3],`$LITTLE_ENDIAN^12`($ctx)
+ adde @V[10],@V[10],$t2
+ stw @V[4],`$LITTLE_ENDIAN^16`($ctx)
+ stw @V[5],`$LITTLE_ENDIAN^20`($ctx)
+ addc @V[13],@V[13],$a1
+ stw @V[6],`$LITTLE_ENDIAN^24`($ctx)
+ stw @V[7],`$LITTLE_ENDIAN^28`($ctx)
+ adde @V[12],@V[12],$a0
+ stw @V[8],`$LITTLE_ENDIAN^32`($ctx)
+ stw @V[9],`$LITTLE_ENDIAN^36`($ctx)
+ addc @V[15],@V[15],$a3
+ stw @V[10],`$LITTLE_ENDIAN^40`($ctx)
+ stw @V[11],`$LITTLE_ENDIAN^44`($ctx)
+ adde @V[14],@V[14],$a2
+ stw @V[12],`$LITTLE_ENDIAN^48`($ctx)
+ stw @V[13],`$LITTLE_ENDIAN^52`($ctx)
+ stw @V[14],`$LITTLE_ENDIAN^56`($ctx)
+ stw @V[15],`$LITTLE_ENDIAN^60`($ctx)
+
+ addi $inp,$inp,`16*$SZ` ; advance inp
+ $PUSH $inp,`$FRAME-$SIZE_T*23`($sp)
+ $UCMP $inp,$num
+ bne Lsha2_block_private
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+.size $func,.-$func
+___
+}
# Ugly hack here, because PPC assembler syntax seems to vary too
# much from platform to platform...
@@ -395,46 +727,46 @@ LPICmeup:
.space `64-9*4`
___
$code.=<<___ if ($SZ==8);
- .long 0x428a2f98,0xd728ae22,0x71374491,0x23ef65cd
- .long 0xb5c0fbcf,0xec4d3b2f,0xe9b5dba5,0x8189dbbc
- .long 0x3956c25b,0xf348b538,0x59f111f1,0xb605d019
- .long 0x923f82a4,0xaf194f9b,0xab1c5ed5,0xda6d8118
- .long 0xd807aa98,0xa3030242,0x12835b01,0x45706fbe
- .long 0x243185be,0x4ee4b28c,0x550c7dc3,0xd5ffb4e2
- .long 0x72be5d74,0xf27b896f,0x80deb1fe,0x3b1696b1
- .long 0x9bdc06a7,0x25c71235,0xc19bf174,0xcf692694
- .long 0xe49b69c1,0x9ef14ad2,0xefbe4786,0x384f25e3
- .long 0x0fc19dc6,0x8b8cd5b5,0x240ca1cc,0x77ac9c65
- .long 0x2de92c6f,0x592b0275,0x4a7484aa,0x6ea6e483
- .long 0x5cb0a9dc,0xbd41fbd4,0x76f988da,0x831153b5
- .long 0x983e5152,0xee66dfab,0xa831c66d,0x2db43210
- .long 0xb00327c8,0x98fb213f,0xbf597fc7,0xbeef0ee4
- .long 0xc6e00bf3,0x3da88fc2,0xd5a79147,0x930aa725
- .long 0x06ca6351,0xe003826f,0x14292967,0x0a0e6e70
- .long 0x27b70a85,0x46d22ffc,0x2e1b2138,0x5c26c926
- .long 0x4d2c6dfc,0x5ac42aed,0x53380d13,0x9d95b3df
- .long 0x650a7354,0x8baf63de,0x766a0abb,0x3c77b2a8
- .long 0x81c2c92e,0x47edaee6,0x92722c85,0x1482353b
- .long 0xa2bfe8a1,0x4cf10364,0xa81a664b,0xbc423001
- .long 0xc24b8b70,0xd0f89791,0xc76c51a3,0x0654be30
- .long 0xd192e819,0xd6ef5218,0xd6990624,0x5565a910
- .long 0xf40e3585,0x5771202a,0x106aa070,0x32bbd1b8
- .long 0x19a4c116,0xb8d2d0c8,0x1e376c08,0x5141ab53
- .long 0x2748774c,0xdf8eeb99,0x34b0bcb5,0xe19b48a8
- .long 0x391c0cb3,0xc5c95a63,0x4ed8aa4a,0xe3418acb
- .long 0x5b9cca4f,0x7763e373,0x682e6ff3,0xd6b2b8a3
- .long 0x748f82ee,0x5defb2fc,0x78a5636f,0x43172f60
- .long 0x84c87814,0xa1f0ab72,0x8cc70208,0x1a6439ec
- .long 0x90befffa,0x23631e28,0xa4506ceb,0xde82bde9
- .long 0xbef9a3f7,0xb2c67915,0xc67178f2,0xe372532b
- .long 0xca273ece,0xea26619c,0xd186b8c7,0x21c0c207
- .long 0xeada7dd6,0xcde0eb1e,0xf57d4f7f,0xee6ed178
- .long 0x06f067aa,0x72176fba,0x0a637dc5,0xa2c898a6
- .long 0x113f9804,0xbef90dae,0x1b710b35,0x131c471b
- .long 0x28db77f5,0x23047d84,0x32caab7b,0x40c72493
- .long 0x3c9ebe0a,0x15c9bebc,0x431d67c4,0x9c100d4c
- .long 0x4cc5d4be,0xcb3e42b6,0x597f299c,0xfc657e2a
- .long 0x5fcb6fab,0x3ad6faec,0x6c44198c,0x4a475817
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
___
$code.=<<___ if ($SZ==4);
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
diff --git a/openssl/crypto/sha/asm/sha512-sparcv9.pl b/openssl/crypto/sha/asm/sha512-sparcv9.pl
index 585740789..5a9c15d1d 100644
--- a/openssl/crypto/sha/asm/sha512-sparcv9.pl
+++ b/openssl/crypto/sha/asm/sha512-sparcv9.pl
@@ -5,6 +5,8 @@
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Hardware SPARC T4 support by David S. Miller <davem@davemloft.net>.
# ====================================================================
# SHA256 performance improvement over compiler generated code varies
@@ -41,11 +43,11 @@
# loads are always slower than one 64-bit load. Once again this
# is unlike pre-T1 UltraSPARC, where, if scheduled appropriately,
# 2x32-bit loads can be as fast as 1x64-bit ones.
-
-$bits=32;
-for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-if ($bits==64) { $bias=2047; $frame=192; }
-else { $bias=0; $frame=112; }
+#
+# SPARC T4 SHA256/512 hardware achieves 3.17/2.01 cycles per byte,
+# which is 9.3x/11.1x faster than software. Multi-process benchmark
+# saturates at 11.5x single-process result on 8-core processor, or
+# ~11/16GBps per 2.85GHz socket.
$output=shift;
open STDOUT,">$output";
@@ -170,6 +172,7 @@ $code.=<<___ if ($i==0);
ld [$inp+16],%l4
ld [$inp+20],%l5
ld [$inp+24],%l6
+ cmp $tmp31,0
ld [$inp+28],%l7
___
$code.=<<___ if ($i<15);
@@ -182,29 +185,29 @@ $code.=<<___ if ($i<15);
or @pair[1],$tmp2,$tmp2
`"ld [$inp+".eval(32+4+$i*8)."],@pair[1]" if ($i<12)`
add $h,$tmp2,$T1
- $ST $tmp2,[%sp+`$bias+$frame+$i*$SZ`]
+ $ST $tmp2,[%sp+STACK_BIAS+STACK_FRAME+`$i*$SZ`]
___
$code.=<<___ if ($i==12);
- brnz,a $tmp31,.+8
+ bnz,a,pn %icc,.+8
ld [$inp+128],%l0
___
$code.=<<___ if ($i==15);
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+0`],%l2
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+0`],%l2
sllx @pair[1],$tmp31,$tmp2 ! Xload($i)
add $tmp31,32,$tmp0
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+4`],%l3
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+4`],%l3
sllx @pair[0],$tmp0,$tmp1
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+0`],%l4
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+0`],%l4
srlx @pair[2],$tmp32,@pair[1]
or $tmp1,$tmp2,$tmp2
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+4`],%l5
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+4`],%l5
or @pair[1],$tmp2,$tmp2
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+0`],%l6
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+0`],%l6
add $h,$tmp2,$T1
- $ST $tmp2,[%sp+`$bias+$frame+$i*$SZ`]
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+4`],%l7
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+0`],%l0
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+4`],%l1
+ $ST $tmp2,[%sp+STACK_BIAS+STACK_FRAME+`$i*$SZ`]
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+4`],%l7
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+0`],%l0
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+4`],%l1
___
} if ($SZ==8);
@@ -340,9 +343,9 @@ $code.=<<___;
or %l3,$tmp0,$tmp0
srlx $tmp0,@sigma0[0],$T1
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+0`],%l2
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+0`],%l2
sllx $tmp0,`64-@sigma0[2]`,$tmp1
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+4`],%l3
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+4`],%l3
srlx $tmp0,@sigma0[1],$tmp0
xor $tmp1,$T1,$T1
sllx $tmp1,`@sigma0[2]-@sigma0[1]`,$tmp1
@@ -354,9 +357,9 @@ $code.=<<___;
or %l7,$tmp2,$tmp2
srlx $tmp2,@sigma1[0],$tmp1
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+0`],%l6
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+0`],%l6
sllx $tmp2,`64-@sigma1[2]`,$tmp0
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+4`],%l7
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+4`],%l7
srlx $tmp2,@sigma1[1],$tmp2
xor $tmp0,$tmp1,$tmp1
sllx $tmp0,`@sigma1[2]-@sigma1[1]`,$tmp0
@@ -365,27 +368,30 @@ $code.=<<___;
xor $tmp0,$tmp1,$tmp1
sllx %l4,32,$tmp0
xor $tmp2,$tmp1,$tmp1 ! sigma1(X[$i+14])
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+0`],%l4
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+0`],%l4
or %l5,$tmp0,$tmp0
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+4`],%l5
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+4`],%l5
sllx %l0,32,$tmp2
add $tmp1,$T1,$T1
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+0`],%l0
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+0`],%l0
or %l1,$tmp2,$tmp2
add $tmp0,$T1,$T1 ! +=X[$i+9]
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+4`],%l1
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+4`],%l1
add $tmp2,$T1,$T1 ! +=X[$i]
- $ST $T1,[%sp+`$bias+$frame+($i%16)*$SZ`]
+ $ST $T1,[%sp+STACK_BIAS+STACK_FRAME+`($i%16)*$SZ`]
___
&BODY_00_15(@_);
} if ($SZ==8);
-$code.=<<___ if ($bits==64);
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef __arch64__
.register %g2,#scratch
.register %g3,#scratch
-___
-$code.=<<___;
+#endif
+
.section ".text",#alloc,#execinstr
.align 64
@@ -457,9 +463,203 @@ ___
}
$code.=<<___;
.size K${label},.-K${label}
+
+#ifdef __PIC__
+SPARC_PIC_THUNK(%g1)
+#endif
+
.globl sha${label}_block_data_order
+.align 32
sha${label}_block_data_order:
- save %sp,`-$frame-$locals`,%sp
+ SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+ ld [%g1+4],%g1 ! OPENSSL_sparcv9cap_P[1]
+
+ andcc %g1, CFR_SHA${label}, %g0
+ be .Lsoftware
+ nop
+___
+$code.=<<___ if ($SZ==8); # SHA512
+ ldd [%o0 + 0x00], %f0 ! load context
+ ldd [%o0 + 0x08], %f2
+ ldd [%o0 + 0x10], %f4
+ ldd [%o0 + 0x18], %f6
+ ldd [%o0 + 0x20], %f8
+ ldd [%o0 + 0x28], %f10
+ andcc %o1, 0x7, %g0
+ ldd [%o0 + 0x30], %f12
+ bne,pn %icc, .Lhwunaligned
+ ldd [%o0 + 0x38], %f14
+
+.Lhwaligned_loop:
+ ldd [%o1 + 0x00], %f16
+ ldd [%o1 + 0x08], %f18
+ ldd [%o1 + 0x10], %f20
+ ldd [%o1 + 0x18], %f22
+ ldd [%o1 + 0x20], %f24
+ ldd [%o1 + 0x28], %f26
+ ldd [%o1 + 0x30], %f28
+ ldd [%o1 + 0x38], %f30
+ ldd [%o1 + 0x40], %f32
+ ldd [%o1 + 0x48], %f34
+ ldd [%o1 + 0x50], %f36
+ ldd [%o1 + 0x58], %f38
+ ldd [%o1 + 0x60], %f40
+ ldd [%o1 + 0x68], %f42
+ ldd [%o1 + 0x70], %f44
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x78], %f46
+ add %o1, 0x80, %o1
+ prefetch [%o1 + 63], 20
+ prefetch [%o1 + 64+63], 20
+
+ .word 0x81b02860 ! SHA512
+
+ bne,pt SIZE_T_CC, .Lhwaligned_loop
+ nop
+
+.Lhwfinish:
+ std %f0, [%o0 + 0x00] ! store context
+ std %f2, [%o0 + 0x08]
+ std %f4, [%o0 + 0x10]
+ std %f6, [%o0 + 0x18]
+ std %f8, [%o0 + 0x20]
+ std %f10, [%o0 + 0x28]
+ std %f12, [%o0 + 0x30]
+ retl
+ std %f14, [%o0 + 0x38]
+
+.align 16
+.Lhwunaligned:
+ alignaddr %o1, %g0, %o1
+
+ ldd [%o1 + 0x00], %f18
+.Lhwunaligned_loop:
+ ldd [%o1 + 0x08], %f20
+ ldd [%o1 + 0x10], %f22
+ ldd [%o1 + 0x18], %f24
+ ldd [%o1 + 0x20], %f26
+ ldd [%o1 + 0x28], %f28
+ ldd [%o1 + 0x30], %f30
+ ldd [%o1 + 0x38], %f32
+ ldd [%o1 + 0x40], %f34
+ ldd [%o1 + 0x48], %f36
+ ldd [%o1 + 0x50], %f38
+ ldd [%o1 + 0x58], %f40
+ ldd [%o1 + 0x60], %f42
+ ldd [%o1 + 0x68], %f44
+ ldd [%o1 + 0x70], %f46
+ ldd [%o1 + 0x78], %f48
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x80], %f50
+ add %o1, 0x80, %o1
+ prefetch [%o1 + 63], 20
+ prefetch [%o1 + 64+63], 20
+
+ faligndata %f18, %f20, %f16
+ faligndata %f20, %f22, %f18
+ faligndata %f22, %f24, %f20
+ faligndata %f24, %f26, %f22
+ faligndata %f26, %f28, %f24
+ faligndata %f28, %f30, %f26
+ faligndata %f30, %f32, %f28
+ faligndata %f32, %f34, %f30
+ faligndata %f34, %f36, %f32
+ faligndata %f36, %f38, %f34
+ faligndata %f38, %f40, %f36
+ faligndata %f40, %f42, %f38
+ faligndata %f42, %f44, %f40
+ faligndata %f44, %f46, %f42
+ faligndata %f46, %f48, %f44
+ faligndata %f48, %f50, %f46
+
+ .word 0x81b02860 ! SHA512
+
+ bne,pt SIZE_T_CC, .Lhwunaligned_loop
+ for %f50, %f50, %f18 ! %f18=%f50
+
+ ba .Lhwfinish
+ nop
+___
+$code.=<<___ if ($SZ==4); # SHA256
+ ld [%o0 + 0x00], %f0
+ ld [%o0 + 0x04], %f1
+ ld [%o0 + 0x08], %f2
+ ld [%o0 + 0x0c], %f3
+ ld [%o0 + 0x10], %f4
+ ld [%o0 + 0x14], %f5
+ andcc %o1, 0x7, %g0
+ ld [%o0 + 0x18], %f6
+ bne,pn %icc, .Lhwunaligned
+ ld [%o0 + 0x1c], %f7
+
+.Lhwloop:
+ ldd [%o1 + 0x00], %f8
+ ldd [%o1 + 0x08], %f10
+ ldd [%o1 + 0x10], %f12
+ ldd [%o1 + 0x18], %f14
+ ldd [%o1 + 0x20], %f16
+ ldd [%o1 + 0x28], %f18
+ ldd [%o1 + 0x30], %f20
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x38], %f22
+ add %o1, 0x40, %o1
+ prefetch [%o1 + 63], 20
+
+ .word 0x81b02840 ! SHA256
+
+ bne,pt SIZE_T_CC, .Lhwloop
+ nop
+
+.Lhwfinish:
+ st %f0, [%o0 + 0x00] ! store context
+ st %f1, [%o0 + 0x04]
+ st %f2, [%o0 + 0x08]
+ st %f3, [%o0 + 0x0c]
+ st %f4, [%o0 + 0x10]
+ st %f5, [%o0 + 0x14]
+ st %f6, [%o0 + 0x18]
+ retl
+ st %f7, [%o0 + 0x1c]
+
+.align 8
+.Lhwunaligned:
+ alignaddr %o1, %g0, %o1
+
+ ldd [%o1 + 0x00], %f10
+.Lhwunaligned_loop:
+ ldd [%o1 + 0x08], %f12
+ ldd [%o1 + 0x10], %f14
+ ldd [%o1 + 0x18], %f16
+ ldd [%o1 + 0x20], %f18
+ ldd [%o1 + 0x28], %f20
+ ldd [%o1 + 0x30], %f22
+ ldd [%o1 + 0x38], %f24
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x40], %f26
+ add %o1, 0x40, %o1
+ prefetch [%o1 + 63], 20
+
+ faligndata %f10, %f12, %f8
+ faligndata %f12, %f14, %f10
+ faligndata %f14, %f16, %f12
+ faligndata %f16, %f18, %f14
+ faligndata %f18, %f20, %f16
+ faligndata %f20, %f22, %f18
+ faligndata %f22, %f24, %f20
+ faligndata %f24, %f26, %f22
+
+ .word 0x81b02840 ! SHA256
+
+ bne,pt SIZE_T_CC, .Lhwunaligned_loop
+ for %f26, %f26, %f10 ! %f10=%f26
+
+ ba .Lhwfinish
+ nop
+___
+$code.=<<___;
+.align 16
+.Lsoftware:
+ save %sp,-STACK_FRAME-$locals,%sp
and $inp,`$align-1`,$tmp31
sllx $len,`log(16*$SZ)/log(2)`,$len
andn $inp,`$align-1`,$inp
@@ -578,7 +778,7 @@ ___
$code.=<<___;
add $inp,`16*$SZ`,$inp ! advance inp
cmp $inp,$len
- bne `$bits==64?"%xcc":"%icc"`,.Lloop
+ bne SIZE_T_CC,.Lloop
sub $Ktbl,`($rounds-16)*$SZ`,$Ktbl ! rewind Ktbl
ret
@@ -589,6 +789,62 @@ $code.=<<___;
.align 4
___
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
+# The purpose of these subroutines is to explicitly encode VIS instructions,
+# so that one can compile the module without having to specify VIS
+# extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
+# The idea is to reserve the option to produce a "universal" binary and let
+# the programmer detect at run-time whether the current CPU is VIS-capable.
+sub unvis {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my ($ref,$opf);
+my %visopf = ( "faligndata" => 0x048,
+ "for" => 0x07c );
+
+ $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+ if ($opf=$visopf{$mnemonic}) {
+ foreach ($rs1,$rs2,$rd) {
+ return $ref if (!/%f([0-9]{1,2})/);
+ $_=$1;
+ if ($1>=32) {
+ return $ref if ($1&1);
+ # re-encode for upper double register addressing
+ $_=($1|$1>>5)&31;
+ }
+ }
+
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
+ $ref;
+ } else {
+ return $ref;
+ }
+}
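As a sanity check of the encoding above (a worked example, not part of the module): faligndata %f18,%f20,%f16 has rd=16, rs1=18, rs2=20 and opf=0x048; all registers are below 32, so no upper-bank re-encoding applies, and the emitted word works out as follows:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* faligndata %f18,%f20,%f16 -> rd=16, rs1=18, rs2=20, opf=0x048 */
        uint32_t rd = 16, rs1 = 18, rs2 = 20, opf = 0x048;
        uint32_t word = 0x81b00000u | rd << 25 | rs1 << 14 | opf << 5 | rs2;
        assert(word == 0xa1b48914u);
        return 0;
    }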
+sub unalignaddr {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
+my $ref="$mnemonic\t$rs1,$rs2,$rd";
+
+ foreach ($rs1,$rs2,$rd) {
+ if (/%([goli])([0-7])/) { $_=$bias{$1}+$2; }
+ else { return $ref; }
+ }
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00300|$rd<<25|$rs1<<14|$rs2,
+ $ref;
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ s/\b(f[^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
+ &unvis($1,$2,$3,$4)
+ /ge;
+ s/\b(alignaddr)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
+ &unalignaddr($1,$2,$3,$4)
+ /ge;
+
+ print $_,"\n";
+}
+
close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha512-x86_64.pl b/openssl/crypto/sha/asm/sha512-x86_64.pl
index 8d5167855..b7b44b441 100644
--- a/openssl/crypto/sha/asm/sha512-x86_64.pl
+++ b/openssl/crypto/sha/asm/sha512-x86_64.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
@@ -39,6 +39,64 @@
# contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
# sha256_block:-( This is presumably because 64-bit shifts/rotates
# apparently are not atomic instructions, but implemented in microcode.
+#
+# May 2012.
+#
+# Optimization including one of Pavel Semjanov's ideas, alternative
+# Maj, resulted in >=5% improvement on most CPUs, +20% SHA256 and
+# unfortunately -2% SHA512 on P4 [which nobody should care about
+# that much].
+#
+# June 2012.
+#
+# Add SIMD code paths; see below for improvement coefficients. An SSSE3
+# code path was not attempted for SHA512, because the estimated
+# improvement, noticeably less than 9%, is not high enough to justify
+# the effort, at least not on pre-AVX processors. [The obvious exception
+# is VIA Nano, but it has a SHA512 instruction that is faster and should
+# be used instead.] For reference, the corresponding estimated upper
+# limit for improvement for SSSE3 SHA256 is 28%. The fact that higher
+# coefficients are observed on VIA Nano and Bulldozer has more to do
+# with specifics of their architecture [which is a topic for separate
+# discussion].
+#
+# November 2012.
+#
+# Add AVX2 code path. Two consecutive input blocks are loaded into
+# 256-bit %ymm registers, with data from the first block in the least
+# significant 128-bit halves and data from the second in the most
+# significant. The data is then processed with the same SIMD instruction
+# sequence as for AVX, but with %ymm as operands. The side effect is an
+# increased stack frame, 448 additional bytes in SHA256 and 1152 in
+# SHA512, plus a 1.2KB code size increase.
+#
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
+######################################################################
+# Current performance in cycles per processed byte (less is better):
+#
+# SHA256 SSSE3 AVX/XOP(*) SHA512 AVX/XOP(*)
+#
+# AMD K8 14.9 - - 9.57 -
+# P4 17.3 - - 30.8 -
+# Core 2 15.6 13.8(+13%) - 9.97 -
+# Westmere 14.8 12.3(+19%) - 9.58 -
+# Sandy Bridge 17.4 14.2(+23%) 11.6(+50%(**)) 11.2 8.10(+38%(**))
+# Ivy Bridge 12.6 10.5(+20%) 10.3(+22%) 8.17 7.22(+13%)
+# Haswell 12.2 9.28(+31%) 7.80(+56%) 7.66 5.40(+42%)
+# Bulldozer 21.1 13.6(+54%) 13.6(+54%(***)) 13.5 8.58(+57%)
+# VIA Nano 23.0 16.5(+39%) - 14.7 -
+# Atom 23.0 18.9(+22%) - 14.7 -
+# Silvermont 27.4 20.6(+33%) - 17.5 -
+#
+# (*) whichever best applicable;
+# (**) switch from ror to shrd stands for fair share of improvement;
+# (***)	execution time is fully determined by the remaining integer-only
+#	part, body_00_15; reducing the amount of SIMD instructions
+#	below a certain limit makes no difference; to conserve space
+#	the SHA256 XOP code path is therefore omitted;
$flavour = shift;
$output = shift;
@@ -51,6 +109,28 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+$shaext=1; ### set to zero if compiling for 1.0.1
+$avx=1 if (!$shaext && $avx);
+
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
@@ -60,7 +140,7 @@ if ($output =~ /512/) {
$SZ=8;
@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
"%r8", "%r9", "%r10","%r11");
- ($T1,$a0,$a1,$a2)=("%r12","%r13","%r14","%r15");
+ ($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
@Sigma0=(28,34,39);
@Sigma1=(14,18,41);
@sigma0=(1, 8, 7);
@@ -72,7 +152,7 @@ if ($output =~ /512/) {
$SZ=4;
@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
"%r8d","%r9d","%r10d","%r11d");
- ($T1,$a0,$a1,$a2)=("%r12d","%r13d","%r14d","%r15d");
+ ($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
@Sigma0=( 2,13,22);
@Sigma1=( 6,11,25);
@sigma0=( 7,18, 3);
@@ -80,8 +160,7 @@ if ($output =~ /512/) {
$rounds=64;
}
-$ctx="%rdi"; # 1st arg
-$round="%rdi"; # zaps $ctx
+$ctx="%rdi"; # 1st arg, zapped by $a3
$inp="%rsi"; # 2nd arg
$Tbl="%rbp";
@@ -94,47 +173,51 @@ $framesz="16*$SZ+4*8";
sub ROUND_00_15()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+ my $STRIDE=$SZ;
+ $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));
$code.=<<___;
ror \$`$Sigma1[2]-$Sigma1[1]`,$a0
mov $f,$a2
- mov $T1,`$SZ*($i&0xf)`(%rsp)
- ror \$`$Sigma0[2]-$Sigma0[1]`,$a1
xor $e,$a0
+ ror \$`$Sigma0[2]-$Sigma0[1]`,$a1
xor $g,$a2 # f^g
- ror \$`$Sigma1[1]-$Sigma1[0]`,$a0
- add $h,$T1 # T1+=h
+ mov $T1,`$SZ*($i&0xf)`(%rsp)
xor $a,$a1
-
- add ($Tbl,$round,$SZ),$T1 # T1+=K[round]
and $e,$a2 # (f^g)&e
- mov $b,$h
+
+ ror \$`$Sigma1[1]-$Sigma1[0]`,$a0
+ add $h,$T1 # T1+=h
+ xor $g,$a2 # Ch(e,f,g)=((f^g)&e)^g
ror \$`$Sigma0[1]-$Sigma0[0]`,$a1
xor $e,$a0
- xor $g,$a2 # Ch(e,f,g)=((f^g)&e)^g
+ add $a2,$T1 # T1+=Ch(e,f,g)
- xor $c,$h # b^c
+ mov $a,$a2
+ add ($Tbl),$T1 # T1+=K[round]
xor $a,$a1
- add $a2,$T1 # T1+=Ch(e,f,g)
- mov $b,$a2
+ xor $b,$a2 # a^b, b^c in next round
ror \$$Sigma1[0],$a0 # Sigma1(e)
- and $a,$h # h=(b^c)&a
- and $c,$a2 # b&c
+ mov $b,$h
+ and $a2,$a3
ror \$$Sigma0[0],$a1 # Sigma0(a)
add $a0,$T1 # T1+=Sigma1(e)
- add $a2,$h # h+=b&c (completes +=Maj(a,b,c)
+ xor $a3,$h # h=Maj(a,b,c)=Ch(a^b,c,b)
add $T1,$d # d+=T1
add $T1,$h # h+=T1
- lea 1($round),$round # round++
- add $a1,$h # h+=Sigma0(a)
+ lea $STRIDE($Tbl),$Tbl # round++
+___
+$code.=<<___ if ($i<15);
+ add $a1,$h # h+=Sigma0(a)
___
+ ($a2,$a3) = ($a3,$a2);
}
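The rewritten round body leans on two identities that the comments allude to: Ch(e,f,g) = ((f^g)&e)^g, which needs one logical operation fewer than (e&f)^(~e&g), and the alternative Maj, Maj(a,b,c) = Ch(a^b,c,b), which lets the a^b value computed in one round be reused as b^c in the next. Both are bitwise, so checking all single-bit inputs proves them (a standalone check, not the module's code):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t Ch(uint32_t x, uint32_t y, uint32_t z)  { return (x & y) ^ (~x & z); }
    static uint32_t Maj(uint32_t a, uint32_t b, uint32_t c) { return (a & b) ^ (a & c) ^ (b & c); }

    int main(void)
    {
        for (uint32_t e = 0; e < 2; e++)
            for (uint32_t f = 0; f < 2; f++)
                for (uint32_t g = 0; g < 2; g++) {
                    assert(Ch(e, f, g) == (((f ^ g) & e) ^ g));  /* cheaper Ch  */
                    assert(Maj(e, f, g) == Ch(e ^ f, g, f));     /* cheaper Maj */
                }
        return 0;
    }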
sub ROUND_16_XX()
@@ -142,29 +225,28 @@ sub ROUND_16_XX()
$code.=<<___;
mov `$SZ*(($i+1)&0xf)`(%rsp),$a0
- mov `$SZ*(($i+14)&0xf)`(%rsp),$a1
- mov $a0,$T1
- mov $a1,$a2
-
- ror \$`$sigma0[1]-$sigma0[0]`,$T1
- xor $a0,$T1
- shr \$$sigma0[2],$a0
-
- ror \$$sigma0[0],$T1
- xor $T1,$a0 # sigma0(X[(i+1)&0xf])
- mov `$SZ*(($i+9)&0xf)`(%rsp),$T1
+ mov `$SZ*(($i+14)&0xf)`(%rsp),$a2
+ mov $a0,$T1
+ ror \$`$sigma0[1]-$sigma0[0]`,$a0
+ add $a1,$a # modulo-scheduled h+=Sigma0(a)
+ mov $a2,$a1
ror \$`$sigma1[1]-$sigma1[0]`,$a2
+
+ xor $T1,$a0
+ shr \$$sigma0[2],$T1
+ ror \$$sigma0[0],$a0
xor $a1,$a2
shr \$$sigma1[2],$a1
ror \$$sigma1[0],$a2
- add $a0,$T1
- xor $a2,$a1 # sigma1(X[(i+14)&0xf])
+ xor $a0,$T1 # sigma0(X[(i+1)&0xf])
+ xor $a1,$a2 # sigma1(X[(i+14)&0xf])
+ add `$SZ*(($i+9)&0xf)`(%rsp),$T1
add `$SZ*($i&0xf)`(%rsp),$T1
mov $e,$a0
- add $a1,$T1
+ add $a2,$T1
mov $a,$a1
___
&ROUND_00_15(@_);
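ROUND_16_XX implements the message expansion W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16] over the 16-slot ring buffer kept on the stack, which is why the loads address (i+1), (i+9) and (i+14) mod 16. A plain C reference for the SHA-256 case (rotation/shift constants per FIPS 180-4, matching the @sigma0/@sigma1 arrays; an editorial sketch, not the generated code):

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

    /* SHA-256: sigma0 uses (7,18,3), sigma1 uses (17,19,10) */
    static uint32_t sigma0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
    static uint32_t sigma1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* expand one schedule word in place, indices mod 16 as in the stack
       ring buffer: W[i] += sigma1(W[i+14]) + W[i+9] + sigma0(W[i+1]) */
    static void expand(uint32_t W[16], unsigned i)
    {
        W[i & 15] += sigma1(W[(i + 14) & 15]) + W[(i + 9) & 15]
                   + sigma0(W[(i + 1) & 15]);
    }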
@@ -173,10 +255,43 @@ ___
$code=<<___;
.text
+.extern OPENSSL_ia32cap_P
.globl $func
-.type $func,\@function,4
+.type $func,\@function,3
.align 16
$func:
+___
+$code.=<<___ if ($SZ==4 || $avx);
+ lea OPENSSL_ia32cap_P(%rip),%r11
+ mov 0(%r11),%r9d
+ mov 4(%r11),%r10d
+ mov 8(%r11),%r11d
+___
+$code.=<<___ if ($SZ==4 && $shaext);
+ test \$`1<<29`,%r11d # check for SHA
+ jnz _shaext_shortcut
+___
+$code.=<<___ if ($avx && $SZ==8);
+ test \$`1<<11`,%r10d # check for XOP
+ jnz .Lxop_shortcut
+___
+$code.=<<___ if ($avx>1);
+ and \$`1<<8|1<<5|1<<3`,%r11d # check for BMI2+AVX2+BMI1
+ cmp \$`1<<8|1<<5|1<<3`,%r11d
+ je .Lavx2_shortcut
+___
+$code.=<<___ if ($avx);
+ and \$`1<<30`,%r9d # mask "Intel CPU" bit
+ and \$`1<<28|1<<9`,%r10d # mask AVX and SSSE3 bits
+ or %r9d,%r10d
+ cmp \$`1<<28|1<<9|1<<30`,%r10d
+ je .Lavx_shortcut
+___
+$code.=<<___ if ($SZ==4);
+ test \$`1<<9`,%r10d
+ jnz .Lssse3_shortcut
+___
+$code.=<<___;
push %rbx
push %rbp
push %r12
@@ -194,8 +309,6 @@ $func:
mov %r11,$_rsp # save copy of %rsp
.Lprologue:
- lea $TABLE(%rip),$Tbl
-
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
@@ -208,7 +321,9 @@ $func:
.align 16
.Lloop:
- xor $round,$round
+ mov $B,$a3
+ lea $TABLE(%rip),$Tbl
+ xor $C,$a3 # magic
___
for($i=0;$i<16;$i++) {
$code.=" mov $SZ*$i($inp),$T1\n";
@@ -229,10 +344,11 @@ ___
}
$code.=<<___;
- cmp \$$rounds,$round
- jb .Lrounds_16_xx
+ cmpb \$0,`$SZ-1`($Tbl)
+ jnz .Lrounds_16_xx
mov $_ctx,$ctx
+ add $a1,$A # modulo-scheduled h+=Sigma0(a)
lea 16*$SZ($inp),$inp
add $SZ*0($ctx),$A
@@ -275,21 +391,45 @@ $code.=<<___;
.type $TABLE,\@object
$TABLE:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+ .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+ .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+ .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+ .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+ .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+ .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+ .asciz "SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
} else {
$code.=<<___;
@@ -297,47 +437,1714 @@ $code.=<<___;
.type $TABLE,\@object
$TABLE:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+ .quad 0x0001020304050607,0x08090a0b0c0d0e0f
+ .quad 0x0001020304050607,0x08090a0b0c0d0e0f
+ .asciz "SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+}
+
+######################################################################
+# SIMD code paths
+#
+if ($SZ==4 && $shaext) {{{
+######################################################################
+# Intel SHA Extensions implementation of SHA256 update function.
+#
+my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");
+
+my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
+my @MSG=map("%xmm$_",(3..6));
+
+$code.=<<___;
+.type sha256_block_data_order_shaext,\@function,3
+.align 64
+sha256_block_data_order_shaext:
+_shaext_shortcut:
+___
+$code.=<<___ if ($win64);
+ lea `-8-5*16`(%rsp),%rsp
+ movaps %xmm6,-8-5*16(%rax)
+ movaps %xmm7,-8-4*16(%rax)
+ movaps %xmm8,-8-3*16(%rax)
+ movaps %xmm9,-8-2*16(%rax)
+ movaps %xmm10,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+ lea K256+0x80(%rip),$Tbl
+ movdqu ($ctx),$ABEF # DCBA
+ movdqu 16($ctx),$CDGH # HGFE
+ movdqa 0x200-0x80($Tbl),$TMP # byte swap mask
+
+ pshufd \$0x1b,$ABEF,$Wi # ABCD
+ pshufd \$0xb1,$ABEF,$ABEF # CDAB
+ pshufd \$0x1b,$CDGH,$CDGH # EFGH
+ movdqa $TMP,$BSWAP # offload
+ palignr \$8,$CDGH,$ABEF # ABEF
+ punpcklqdq $Wi,$CDGH # CDGH
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ movdqu ($inp),@MSG[0]
+ movdqu 0x10($inp),@MSG[1]
+ movdqu 0x20($inp),@MSG[2]
+ pshufb $TMP,@MSG[0]
+ movdqu 0x30($inp),@MSG[3]
+
+ movdqa 0*32-0x80($Tbl),$Wi
+ paddd @MSG[0],$Wi
+ pshufb $TMP,@MSG[1]
+ movdqa $CDGH,$CDGH_SAVE # offload
+ sha256rnds2 $ABEF,$CDGH # 0-3
+ pshufd \$0x0e,$Wi,$Wi
+ nop
+ movdqa $ABEF,$ABEF_SAVE # offload
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 1*32-0x80($Tbl),$Wi
+ paddd @MSG[1],$Wi
+ pshufb $TMP,@MSG[2]
+ sha256rnds2 $ABEF,$CDGH # 4-7
+ pshufd \$0x0e,$Wi,$Wi
+ lea 0x40($inp),$inp
+ sha256msg1 @MSG[1],@MSG[0]
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 2*32-0x80($Tbl),$Wi
+ paddd @MSG[2],$Wi
+ pshufb $TMP,@MSG[3]
+ sha256rnds2 $ABEF,$CDGH # 8-11
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[3],$TMP
+ palignr \$4,@MSG[2],$TMP
+ nop
+ paddd $TMP,@MSG[0]
+ sha256msg1 @MSG[2],@MSG[1]
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 3*32-0x80($Tbl),$Wi
+ paddd @MSG[3],$Wi
+ sha256msg2 @MSG[3],@MSG[0]
+ sha256rnds2 $ABEF,$CDGH # 12-15
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[0],$TMP
+ palignr \$4,@MSG[3],$TMP
+ nop
+ paddd $TMP,@MSG[1]
+ sha256msg1 @MSG[3],@MSG[2]
+ sha256rnds2 $CDGH,$ABEF
+___
+for($i=4;$i<16-3;$i++) {
+$code.=<<___;
+ movdqa $i*32-0x80($Tbl),$Wi
+ paddd @MSG[0],$Wi
+ sha256msg2 @MSG[0],@MSG[1]
+ sha256rnds2 $ABEF,$CDGH # 16-19...
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[1],$TMP
+ palignr \$4,@MSG[0],$TMP
+ nop
+ paddd $TMP,@MSG[2]
+ sha256msg1 @MSG[0],@MSG[3]
+ sha256rnds2 $CDGH,$ABEF
+___
+ push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ movdqa 13*32-0x80($Tbl),$Wi
+ paddd @MSG[0],$Wi
+ sha256msg2 @MSG[0],@MSG[1]
+ sha256rnds2 $ABEF,$CDGH # 52-55
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[1],$TMP
+ palignr \$4,@MSG[0],$TMP
+ sha256rnds2 $CDGH,$ABEF
+ paddd $TMP,@MSG[2]
+
+ movdqa 14*32-0x80($Tbl),$Wi
+ paddd @MSG[1],$Wi
+ sha256rnds2 $ABEF,$CDGH # 56-59
+ pshufd \$0x0e,$Wi,$Wi
+ sha256msg2 @MSG[1],@MSG[2]
+ movdqa $BSWAP,$TMP
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 15*32-0x80($Tbl),$Wi
+ paddd @MSG[2],$Wi
+ nop
+ sha256rnds2 $ABEF,$CDGH # 60-63
+ pshufd \$0x0e,$Wi,$Wi
+ dec $num
+ nop
+ sha256rnds2 $CDGH,$ABEF
+
+ paddd $CDGH_SAVE,$CDGH
+ paddd $ABEF_SAVE,$ABEF
+ jnz .Loop_shaext
+
+ pshufd \$0xb1,$CDGH,$CDGH # DCHG
+ pshufd \$0x1b,$ABEF,$TMP # FEBA
+ pshufd \$0xb1,$ABEF,$ABEF # BAFE
+ punpckhqdq $CDGH,$ABEF # DCBA
+ palignr \$8,$TMP,$CDGH # HGFE
+
+ movdqu $ABEF,($ctx)
+ movdqu $CDGH,16($ctx)
+___
+$code.=<<___ if ($win64);
+ movaps -8-5*16(%rax),%xmm6
+ movaps -8-4*16(%rax),%xmm7
+ movaps -8-3*16(%rax),%xmm8
+ movaps -8-2*16(%rax),%xmm9
+ movaps -8-1*16(%rax),%xmm10
+ mov %rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+ ret
+.size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
+___
+}}}
+{{{
+
+my $a4=$T1;
+my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+ my $arg = pop;
+ $arg = "\$$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
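+#
+# A sketch of what the thunk above does (demo operands only, the
+# block is never executed): a call to an undefined sub becomes one
+# line of assembly appended to $code, with the arguments reversed
+# into AT&T operand order and bare numbers turned into immediates.
+if (0) {
+	&ror	("%eax",2);		# appends "ror \$2,%eax" to $code
+	&xor	("%eax","%ecx");	# appends "xor %ecx,%eax" to $code
+}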
+
+sub body_00_15 () {
+ (
+ '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+ '&ror ($a0,$Sigma1[2]-$Sigma1[1])',
+ '&mov ($a,$a1)',
+ '&mov ($a4,$f)',
+
+ '&ror ($a1,$Sigma0[2]-$Sigma0[1])',
+ '&xor ($a0,$e)',
+ '&xor ($a4,$g)', # f^g
+
+ '&ror ($a0,$Sigma1[1]-$Sigma1[0])',
+ '&xor ($a1,$a)',
+ '&and ($a4,$e)', # (f^g)&e
+
+ '&xor ($a0,$e)',
+ '&add ($h,$SZ*($i&15)."(%rsp)")', # h+=X[i]+K[i]
+ '&mov ($a2,$a)',
+
+ '&xor ($a4,$g)', # Ch(e,f,g)=((f^g)&e)^g
+ '&ror ($a1,$Sigma0[1]-$Sigma0[0])',
+ '&xor ($a2,$b)', # a^b, b^c in next round
+
+ '&add ($h,$a4)', # h+=Ch(e,f,g)
+ '&ror ($a0,$Sigma1[0])', # Sigma1(e)
+ '&and ($a3,$a2)', # (b^c)&(a^b)
+
+ '&xor ($a1,$a)',
+ '&add ($h,$a0)', # h+=Sigma1(e)
+ '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
+
+ '&ror ($a1,$Sigma0[0])', # Sigma0(a)
+ '&add ($d,$h)', # d+=h
+ '&add ($h,$a3)', # h+=Maj(a,b,c)
+
+ '&mov ($a0,$d)',
+ '&add ($a1,$h);'. # h+=Sigma0(a)
+ '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+ );
+}
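+#
+# How the list above is consumed (a sketch mirroring the SIMD drivers
+# below; the block itself is never executed): each string is eval'ed
+# in the gaps between vector instructions, so scalar rounds and
+# message-schedule updates interleave.
+if (0) {
+	my @insns = (&body_00_15(),&body_00_15());	# two rounds' worth
+	eval(shift(@insns)) while (@insns);		# one eval per slot
+}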
+
+######################################################################
+# SSSE3 code path
+#
+if ($SZ==4) { # SHA256 only
+my @X = map("%xmm$_",(0..3));
+my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+.type ${func}_ssse3,\@function,3
+.align 64
+${func}_ssse3:
+.Lssse3_shortcut:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ mov %rsp,%r11 # copy %rsp
+ shl \$4,%rdx # num*16
+ sub \$`$framesz+$win64*16*4`,%rsp
+ lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
+ and \$-64,%rsp # align stack frame
+ mov $ctx,$_ctx # save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+ mov %rdx,$_end # save end pointer, "3rd" arg
+ mov %r11,$_rsp # save copy of %rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,16*$SZ+32(%rsp)
+ movaps %xmm7,16*$SZ+48(%rsp)
+ movaps %xmm8,16*$SZ+64(%rsp)
+ movaps %xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___;
+.Lprologue_ssse3:
+
+ mov $SZ*0($ctx),$A
+ mov $SZ*1($ctx),$B
+ mov $SZ*2($ctx),$C
+ mov $SZ*3($ctx),$D
+ mov $SZ*4($ctx),$E
+ mov $SZ*5($ctx),$F
+ mov $SZ*6($ctx),$G
+ mov $SZ*7($ctx),$H
+___
+
+$code.=<<___;
+ #movdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+ #movdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+ jmp .Lloop_ssse3
+.align 16
+.Lloop_ssse3:
+ movdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ movdqu 0x00($inp),@X[0]
+ movdqu 0x10($inp),@X[1]
+ movdqu 0x20($inp),@X[2]
+ pshufb $t3,@X[0]
+ movdqu 0x30($inp),@X[3]
+ lea $TABLE(%rip),$Tbl
+ pshufb $t3,@X[1]
+ movdqa 0x00($Tbl),$t0
+ movdqa 0x20($Tbl),$t1
+ pshufb $t3,@X[2]
+ paddd @X[0],$t0
+ movdqa 0x40($Tbl),$t2
+ pshufb $t3,@X[3]
+ movdqa 0x60($Tbl),$t3
+ paddd @X[1],$t1
+ paddd @X[2],$t2
+ paddd @X[3],$t3
+ movdqa $t0,0x00(%rsp)
+ mov $A,$a1
+ movdqa $t1,0x10(%rsp)
+ mov $B,$a3
+ movdqa $t2,0x20(%rsp)
+ xor $C,$a3 # magic
+ movdqa $t3,0x30(%rsp)
+ mov $E,$a0
+ jmp .Lssse3_00_47
+
+.align 16
+.Lssse3_00_47:
+ sub \$`-16*2*$SZ`,$Tbl # size optimization
+___
+sub Xupdate_256_SSSE3 () {
+ (
+ '&movdqa ($t0,@X[1]);',
+ '&movdqa ($t3,@X[3])',
+ '&palignr ($t0,@X[0],$SZ)', # X[1..4]
+ '&palignr ($t3,@X[2],$SZ);', # X[9..12]
+ '&movdqa ($t1,$t0)',
+ '&movdqa ($t2,$t0);',
+ '&psrld ($t0,$sigma0[2])',
+ '&paddd (@X[0],$t3);', # X[0..3] += X[9..12]
+ '&psrld ($t2,$sigma0[0])',
+ '&pshufd ($t3,@X[3],0b11111010)',# X[14..15]
+ '&pslld ($t1,8*$SZ-$sigma0[1]);'.
+ '&pxor ($t0,$t2)',
+ '&psrld ($t2,$sigma0[1]-$sigma0[0]);'.
+ '&pxor ($t0,$t1)',
+ '&pslld ($t1,$sigma0[1]-$sigma0[0]);'.
+ '&pxor ($t0,$t2);',
+ '&movdqa ($t2,$t3)',
+ '&pxor ($t0,$t1);', # sigma0(X[1..4])
+ '&psrld ($t3,$sigma1[2])',
+ '&paddd (@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
+ '&psrlq ($t2,$sigma1[0])',
+ '&pxor ($t3,$t2);',
+ '&psrlq ($t2,$sigma1[1]-$sigma1[0])',
+ '&pxor ($t3,$t2)',
+ '&pshufb ($t3,$t4)', # sigma1(X[14..15])
+ '&paddd (@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
+ '&pshufd ($t3,@X[0],0b01010000)',# X[16..17]
+ '&movdqa ($t2,$t3);',
+ '&psrld ($t3,$sigma1[2])',
+ '&psrlq ($t2,$sigma1[0])',
+ '&pxor ($t3,$t2);',
+ '&psrlq ($t2,$sigma1[1]-$sigma1[0])',
+ '&pxor ($t3,$t2);',
+ '&movdqa ($t2,16*2*$j."($Tbl)")',
+ '&pshufb ($t3,$t5)',
+ '&paddd (@X[0],$t3)' # X[2..3] += sigma1(X[16..17])
+ );
+}
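+
+# For reference, a scalar sketch of the recurrence the vector steps
+# above implement (SHA256 message schedule; the demo_* subs are
+# illustration only and are not called by the generator):
+#	W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
+sub demo_rotr32 { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff }
+sub demo_sigma0 { my $x=shift; demo_rotr32($x,7) ^demo_rotr32($x,18)^($x>>3)  }
+sub demo_sigma1 { my $x=shift; demo_rotr32($x,17)^demo_rotr32($x,19)^($x>>10) }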
+
+sub SSSE3_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
+
+ if (0) {
+ foreach (Xupdate_256_SSSE3()) { # 36 instructions
+ eval;
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ }
+ } else { # squeeze extra 4% on Westmere and 19% on Atom
+ eval(shift(@insns)); #@
+ &movdqa ($t0,@X[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa ($t3,@X[3]);
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &palignr ($t0,@X[0],$SZ); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &palignr ($t3,@X[2],$SZ); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &movdqa ($t1,$t0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa ($t2,$t0);
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &psrld ($t0,$sigma0[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddd (@X[0],$t3); # X[0..3] += X[9..12]
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &psrld ($t2,$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+	&pshufd	($t3,@X[3],0b11111010);	# X[14..15]
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &pslld ($t1,8*$SZ-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t0,$t2);
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &psrld ($t2,$sigma0[1]-$sigma0[0]);
+ eval(shift(@insns));
+ &pxor ($t0,$t1);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pslld ($t1,$sigma0[1]-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t0,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &movdqa ($t2,$t3);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t0,$t1); # sigma0(X[1..4])
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrld ($t3,$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &psrlq ($t2,$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t3,$t2);
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &psrlq ($t2,$sigma1[1]-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t3,$t2);
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ #&pshufb ($t3,$t4); # sigma1(X[14..15])
+ &pshufd ($t3,$t3,0b10000000);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrldq ($t3,8);
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pshufd ($t3,@X[0],0b01010000); # X[16..17]
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &movdqa ($t2,$t3);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrld ($t3,$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ &psrlq ($t2,$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t3,$t2);
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &psrlq ($t2,$sigma1[1]-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor ($t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); #@
+ #&pshufb ($t3,$t5);
+ &pshufd ($t3,$t3,0b00001000);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa ($t2,16*2*$j."($Tbl)");
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ &pslldq ($t3,8);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns)); #@
+ eval(shift(@insns));
+ eval(shift(@insns));
+ }
+ &paddd ($t2,@X[0]);
+ foreach (@insns) { eval; } # remaining instructions
+ &movdqa (16*$j."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &SSSE3_256_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
+ &jne (".Lssse3_00_47");
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+$code.=<<___;
+ mov $_ctx,$ctx
+ mov $a1,$A
+
+ add $SZ*0($ctx),$A
+ lea 16*$SZ($inp),$inp
+ add $SZ*1($ctx),$B
+ add $SZ*2($ctx),$C
+ add $SZ*3($ctx),$D
+ add $SZ*4($ctx),$E
+ add $SZ*5($ctx),$F
+ add $SZ*6($ctx),$G
+ add $SZ*7($ctx),$H
+
+ cmp $_end,$inp
+
+ mov $A,$SZ*0($ctx)
+ mov $B,$SZ*1($ctx)
+ mov $C,$SZ*2($ctx)
+ mov $D,$SZ*3($ctx)
+ mov $E,$SZ*4($ctx)
+ mov $F,$SZ*5($ctx)
+ mov $G,$SZ*6($ctx)
+ mov $H,$SZ*7($ctx)
+ jb .Lloop_ssse3
+
+ mov $_rsp,%rsi
+___
+$code.=<<___ if ($win64);
+ movaps 16*$SZ+32(%rsp),%xmm6
+ movaps 16*$SZ+48(%rsp),%xmm7
+ movaps 16*$SZ+64(%rsp),%xmm8
+ movaps 16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_ssse3:
+ ret
+.size ${func}_ssse3,.-${func}_ssse3
+___
+}
+
+if ($avx) {{
+######################################################################
+# XOP code path
+#
+if ($SZ==8) { # SHA512 only
+$code.=<<___;
+.type ${func}_xop,\@function,3
+.align 64
+${func}_xop:
+.Lxop_shortcut:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ mov %rsp,%r11 # copy %rsp
+ shl \$4,%rdx # num*16
+ sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+ lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
+ and \$-64,%rsp # align stack frame
+ mov $ctx,$_ctx # save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+ mov %rdx,$_end # save end pointer, "3rd" arg
+ mov %r11,$_rsp # save copy of %rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,16*$SZ+32(%rsp)
+ movaps %xmm7,16*$SZ+48(%rsp)
+ movaps %xmm8,16*$SZ+64(%rsp)
+ movaps %xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+ movaps %xmm10,16*$SZ+96(%rsp)
+ movaps %xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_xop:
+
+ vzeroupper
+ mov $SZ*0($ctx),$A
+ mov $SZ*1($ctx),$B
+ mov $SZ*2($ctx),$C
+ mov $SZ*3($ctx),$D
+ mov $SZ*4($ctx),$E
+ mov $SZ*5($ctx),$F
+ mov $SZ*6($ctx),$G
+ mov $SZ*7($ctx),$H
+ jmp .Lloop_xop
+___
+ if ($SZ==4) { # SHA256
+ my @X = map("%xmm$_",(0..3));
+ my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+$code.=<<___;
+.align 16
+.Lloop_xop:
+ vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ vmovdqu 0x00($inp),@X[0]
+ vmovdqu 0x10($inp),@X[1]
+ vmovdqu 0x20($inp),@X[2]
+ vmovdqu 0x30($inp),@X[3]
+ vpshufb $t3,@X[0],@X[0]
+ lea $TABLE(%rip),$Tbl
+ vpshufb $t3,@X[1],@X[1]
+ vpshufb $t3,@X[2],@X[2]
+ vpaddd 0x00($Tbl),@X[0],$t0
+ vpshufb $t3,@X[3],@X[3]
+ vpaddd 0x20($Tbl),@X[1],$t1
+ vpaddd 0x40($Tbl),@X[2],$t2
+ vpaddd 0x60($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ mov $A,$a1
+ vmovdqa $t1,0x10(%rsp)
+ mov $B,$a3
+ vmovdqa $t2,0x20(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x30(%rsp)
+ mov $E,$a0
+ jmp .Lxop_00_47
+
+.align 16
+.Lxop_00_47:
+ sub \$`-16*2*$SZ`,$Tbl # size optimization
+___
+sub XOP_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
+
+ &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpalignr ($t3,@X[3],@X[2],$SZ); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotd ($t1,$t0,8*$SZ-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrld ($t0,$t0,$sigma0[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotd ($t2,$t1,$sigma0[1]-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t0,$t0,$t1);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotd ($t3,@X[3],8*$SZ-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t0,$t0,$t2); # sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrld ($t2,@X[3],$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t3,$t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrldq ($t3,$t3,8);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotd ($t3,@X[0],8*$SZ-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrld ($t2,@X[0],$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t3,$t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t3,$t3,$t1); # sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpslldq ($t3,$t3,8); # 22 instructions
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa (16*$j."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &XOP_256_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
+ &jne (".Lxop_00_47");
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+
+ } else { # SHA512
+ my @X = map("%xmm$_",(0..7));
+ my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+.align 16
+.Lloop_xop:
+ vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ vmovdqu 0x00($inp),@X[0]
+ lea $TABLE+0x80(%rip),$Tbl # size optimization
+ vmovdqu 0x10($inp),@X[1]
+ vmovdqu 0x20($inp),@X[2]
+ vpshufb $t3,@X[0],@X[0]
+ vmovdqu 0x30($inp),@X[3]
+ vpshufb $t3,@X[1],@X[1]
+ vmovdqu 0x40($inp),@X[4]
+ vpshufb $t3,@X[2],@X[2]
+ vmovdqu 0x50($inp),@X[5]
+ vpshufb $t3,@X[3],@X[3]
+ vmovdqu 0x60($inp),@X[6]
+ vpshufb $t3,@X[4],@X[4]
+ vmovdqu 0x70($inp),@X[7]
+ vpshufb $t3,@X[5],@X[5]
+ vpaddq -0x80($Tbl),@X[0],$t0
+ vpshufb $t3,@X[6],@X[6]
+ vpaddq -0x60($Tbl),@X[1],$t1
+ vpshufb $t3,@X[7],@X[7]
+ vpaddq -0x40($Tbl),@X[2],$t2
+ vpaddq -0x20($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ vpaddq 0x00($Tbl),@X[4],$t0
+ vmovdqa $t1,0x10(%rsp)
+ vpaddq 0x20($Tbl),@X[5],$t1
+ vmovdqa $t2,0x20(%rsp)
+ vpaddq 0x40($Tbl),@X[6],$t2
+ vmovdqa $t3,0x30(%rsp)
+ vpaddq 0x60($Tbl),@X[7],$t3
+ vmovdqa $t0,0x40(%rsp)
+ mov $A,$a1
+ vmovdqa $t1,0x50(%rsp)
+ mov $B,$a3
+ vmovdqa $t2,0x60(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x70(%rsp)
+ mov $E,$a0
+ jmp .Lxop_00_47
+
+.align 16
+.Lxop_00_47:
+ add \$`16*2*$SZ`,$Tbl
+___
+sub XOP_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body); # 52 instructions
+
+ &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..2]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpalignr ($t3,@X[5],@X[4],$SZ); # X[9..10]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotq ($t1,$t0,8*$SZ-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrlq ($t0,$t0,$sigma0[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddq (@X[0],@X[0],$t3); # X[0..1] += X[9..10]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotq ($t2,$t1,$sigma0[1]-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t0,$t0,$t1);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotq ($t3,@X[7],8*$SZ-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t0,$t0,$t2); # sigma0(X[1..2])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrlq ($t2,@X[7],$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddq (@X[0],@X[0],$t0); # X[0..1] += sigma0(X[1..2])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vprotq ($t1,$t3,$sigma1[1]-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t3,$t3,$t2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddq (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa (16*$j."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<8; $j++) {
+ &XOP_512_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+ &jne (".Lxop_00_47");
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+}
+$code.=<<___;
+ mov $_ctx,$ctx
+ mov $a1,$A
+
+ add $SZ*0($ctx),$A
+ lea 16*$SZ($inp),$inp
+ add $SZ*1($ctx),$B
+ add $SZ*2($ctx),$C
+ add $SZ*3($ctx),$D
+ add $SZ*4($ctx),$E
+ add $SZ*5($ctx),$F
+ add $SZ*6($ctx),$G
+ add $SZ*7($ctx),$H
+
+ cmp $_end,$inp
+
+ mov $A,$SZ*0($ctx)
+ mov $B,$SZ*1($ctx)
+ mov $C,$SZ*2($ctx)
+ mov $D,$SZ*3($ctx)
+ mov $E,$SZ*4($ctx)
+ mov $F,$SZ*5($ctx)
+ mov $G,$SZ*6($ctx)
+ mov $H,$SZ*7($ctx)
+ jb .Lloop_xop
+
+ mov $_rsp,%rsi
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps 16*$SZ+32(%rsp),%xmm6
+ movaps 16*$SZ+48(%rsp),%xmm7
+ movaps 16*$SZ+64(%rsp),%xmm8
+ movaps 16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+ movaps 16*$SZ+96(%rsp),%xmm10
+ movaps 16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_xop:
+ ret
+.size ${func}_xop,.-${func}_xop
+___
+}
+######################################################################
+# AVX+shrd code path
+#
+local *ror = sub { &shrd($_[0],@_) };
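+# (shrd \$n,%reg,%reg is a rotate right by n, so aliasing ror to shrd
+# lets body_00_15 be reused verbatim here; presumably shrd scheduled
+# better than ror on the AVX-era CPUs this path targets.)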
+
+$code.=<<___;
+.type ${func}_avx,\@function,3
+.align 64
+${func}_avx:
+.Lavx_shortcut:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ mov %rsp,%r11 # copy %rsp
+ shl \$4,%rdx # num*16
+ sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+ lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
+ and \$-64,%rsp # align stack frame
+ mov $ctx,$_ctx # save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+ mov %rdx,$_end # save end pointer, "3rd" arg
+ mov %r11,$_rsp # save copy of %rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,16*$SZ+32(%rsp)
+ movaps %xmm7,16*$SZ+48(%rsp)
+ movaps %xmm8,16*$SZ+64(%rsp)
+ movaps %xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+ movaps %xmm10,16*$SZ+96(%rsp)
+ movaps %xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx:
+
+ vzeroupper
+ mov $SZ*0($ctx),$A
+ mov $SZ*1($ctx),$B
+ mov $SZ*2($ctx),$C
+ mov $SZ*3($ctx),$D
+ mov $SZ*4($ctx),$E
+ mov $SZ*5($ctx),$F
+ mov $SZ*6($ctx),$G
+ mov $SZ*7($ctx),$H
+___
+ if ($SZ==4) { # SHA256
+ my @X = map("%xmm$_",(0..3));
+ my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+ vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+ vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+ jmp .Lloop_avx
+.align 16
+.Lloop_avx:
+ vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ vmovdqu 0x00($inp),@X[0]
+ vmovdqu 0x10($inp),@X[1]
+ vmovdqu 0x20($inp),@X[2]
+ vmovdqu 0x30($inp),@X[3]
+ vpshufb $t3,@X[0],@X[0]
+ lea $TABLE(%rip),$Tbl
+ vpshufb $t3,@X[1],@X[1]
+ vpshufb $t3,@X[2],@X[2]
+ vpaddd 0x00($Tbl),@X[0],$t0
+ vpshufb $t3,@X[3],@X[3]
+ vpaddd 0x20($Tbl),@X[1],$t1
+ vpaddd 0x40($Tbl),@X[2],$t2
+ vpaddd 0x60($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ mov $A,$a1
+ vmovdqa $t1,0x10(%rsp)
+ mov $B,$a3
+ vmovdqa $t2,0x20(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x30(%rsp)
+ mov $E,$a0
+ jmp .Lavx_00_47
+
+.align 16
+.Lavx_00_47:
+ sub \$`-16*2*$SZ`,$Tbl # size optimization
+___
+sub Xupdate_256_AVX () {
+ (
+ '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..4]
+ '&vpalignr ($t3,@X[3],@X[2],$SZ)', # X[9..12]
+ '&vpsrld ($t2,$t0,$sigma0[0]);',
+ '&vpaddd (@X[0],@X[0],$t3)', # X[0..3] += X[9..12]
+ '&vpsrld ($t3,$t0,$sigma0[2])',
+ '&vpslld ($t1,$t0,8*$SZ-$sigma0[1]);',
+ '&vpxor ($t0,$t3,$t2)',
+ '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
+ '&vpsrld ($t2,$t2,$sigma0[1]-$sigma0[0]);',
+ '&vpxor ($t0,$t0,$t1)',
+ '&vpslld ($t1,$t1,$sigma0[1]-$sigma0[0]);',
+ '&vpxor ($t0,$t0,$t2)',
+ '&vpsrld ($t2,$t3,$sigma1[2]);',
+ '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..4])
+ '&vpsrlq ($t3,$t3,$sigma1[0]);',
+ '&vpaddd (@X[0],@X[0],$t0)', # X[0..3] += sigma0(X[1..4])
+ '&vpxor ($t2,$t2,$t3);',
+ '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
+ '&vpxor ($t2,$t2,$t3)',
+ '&vpshufb ($t2,$t2,$t4)', # sigma1(X[14..15])
+ '&vpaddd (@X[0],@X[0],$t2)', # X[0..1] += sigma1(X[14..15])
+ '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
+ '&vpsrld ($t2,$t3,$sigma1[2])',
+ '&vpsrlq ($t3,$t3,$sigma1[0])',
+ '&vpxor ($t2,$t2,$t3);',
+ '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
+ '&vpxor ($t2,$t2,$t3)',
+ '&vpshufb ($t2,$t2,$t5)',
+ '&vpaddd (@X[0],@X[0],$t2)' # X[2..3] += sigma1(X[16..17])
+ );
+}
+
+sub AVX_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
+
+ foreach (Xupdate_256_AVX()) { # 29 instructions
+ eval;
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ }
+ &vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa (16*$j."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &AVX_256_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
+ &jne (".Lavx_00_47");
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+
+ } else { # SHA512
+ my @X = map("%xmm$_",(0..7));
+ my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+ jmp .Lloop_avx
+.align 16
+.Lloop_avx:
+ vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ vmovdqu 0x00($inp),@X[0]
+ lea $TABLE+0x80(%rip),$Tbl # size optimization
+ vmovdqu 0x10($inp),@X[1]
+ vmovdqu 0x20($inp),@X[2]
+ vpshufb $t3,@X[0],@X[0]
+ vmovdqu 0x30($inp),@X[3]
+ vpshufb $t3,@X[1],@X[1]
+ vmovdqu 0x40($inp),@X[4]
+ vpshufb $t3,@X[2],@X[2]
+ vmovdqu 0x50($inp),@X[5]
+ vpshufb $t3,@X[3],@X[3]
+ vmovdqu 0x60($inp),@X[6]
+ vpshufb $t3,@X[4],@X[4]
+ vmovdqu 0x70($inp),@X[7]
+ vpshufb $t3,@X[5],@X[5]
+ vpaddq -0x80($Tbl),@X[0],$t0
+ vpshufb $t3,@X[6],@X[6]
+ vpaddq -0x60($Tbl),@X[1],$t1
+ vpshufb $t3,@X[7],@X[7]
+ vpaddq -0x40($Tbl),@X[2],$t2
+ vpaddq -0x20($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ vpaddq 0x00($Tbl),@X[4],$t0
+ vmovdqa $t1,0x10(%rsp)
+ vpaddq 0x20($Tbl),@X[5],$t1
+ vmovdqa $t2,0x20(%rsp)
+ vpaddq 0x40($Tbl),@X[6],$t2
+ vmovdqa $t3,0x30(%rsp)
+ vpaddq 0x60($Tbl),@X[7],$t3
+ vmovdqa $t0,0x40(%rsp)
+ mov $A,$a1
+ vmovdqa $t1,0x50(%rsp)
+ mov $B,$a3
+ vmovdqa $t2,0x60(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x70(%rsp)
+ mov $E,$a0
+ jmp .Lavx_00_47
+
+.align 16
+.Lavx_00_47:
+ add \$`16*2*$SZ`,$Tbl
+___
+sub Xupdate_512_AVX () {
+ (
+ '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..2]
+ '&vpalignr ($t3,@X[5],@X[4],$SZ)', # X[9..10]
+ '&vpsrlq ($t2,$t0,$sigma0[0])',
+ '&vpaddq (@X[0],@X[0],$t3);', # X[0..1] += X[9..10]
+ '&vpsrlq ($t3,$t0,$sigma0[2])',
+ '&vpsllq ($t1,$t0,8*$SZ-$sigma0[1]);',
+ '&vpxor ($t0,$t3,$t2)',
+ '&vpsrlq ($t2,$t2,$sigma0[1]-$sigma0[0]);',
+ '&vpxor ($t0,$t0,$t1)',
+ '&vpsllq ($t1,$t1,$sigma0[1]-$sigma0[0]);',
+ '&vpxor ($t0,$t0,$t2)',
+ '&vpsrlq ($t3,@X[7],$sigma1[2]);',
+ '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..2])
+ '&vpsllq ($t2,@X[7],8*$SZ-$sigma1[1]);',
+ '&vpaddq (@X[0],@X[0],$t0)', # X[0..1] += sigma0(X[1..2])
+ '&vpsrlq ($t1,@X[7],$sigma1[0]);',
+ '&vpxor ($t3,$t3,$t2)',
+ '&vpsllq ($t2,$t2,$sigma1[1]-$sigma1[0]);',
+ '&vpxor ($t3,$t3,$t1)',
+ '&vpsrlq ($t1,$t1,$sigma1[1]-$sigma1[0]);',
+ '&vpxor ($t3,$t3,$t2)',
+ '&vpxor ($t3,$t3,$t1)', # sigma1(X[14..15])
+ '&vpaddq (@X[0],@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
+ );
+}
+
+sub AVX_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body); # 52 instructions
+
+ foreach (Xupdate_512_AVX()) { # 23 instructions
+ eval;
+ eval(shift(@insns));
+ eval(shift(@insns));
+ }
+ &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa (16*$j."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<8; $j++) {
+ &AVX_512_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+ &jne (".Lavx_00_47");
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+}
+$code.=<<___;
+ mov $_ctx,$ctx
+ mov $a1,$A
+
+ add $SZ*0($ctx),$A
+ lea 16*$SZ($inp),$inp
+ add $SZ*1($ctx),$B
+ add $SZ*2($ctx),$C
+ add $SZ*3($ctx),$D
+ add $SZ*4($ctx),$E
+ add $SZ*5($ctx),$F
+ add $SZ*6($ctx),$G
+ add $SZ*7($ctx),$H
+
+ cmp $_end,$inp
+
+ mov $A,$SZ*0($ctx)
+ mov $B,$SZ*1($ctx)
+ mov $C,$SZ*2($ctx)
+ mov $D,$SZ*3($ctx)
+ mov $E,$SZ*4($ctx)
+ mov $F,$SZ*5($ctx)
+ mov $G,$SZ*6($ctx)
+ mov $H,$SZ*7($ctx)
+ jb .Lloop_avx
+
+ mov $_rsp,%rsi
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps 16*$SZ+32(%rsp),%xmm6
+ movaps 16*$SZ+48(%rsp),%xmm7
+ movaps 16*$SZ+64(%rsp),%xmm8
+ movaps 16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+ movaps 16*$SZ+96(%rsp),%xmm10
+ movaps 16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_avx:
+ ret
+.size ${func}_avx,.-${func}_avx
+___
+
+if ($avx>1) {{
+######################################################################
+# AVX2+BMI code path
+#
+my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
+my $PUSH8=8*2*$SZ;
+use integer;
+
+sub bodyx_00_15 () {
+	# at start $a1 should be zero, $a3 should hold $b^$c and $a4 a copy of $f
+ (
+ '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+ '&add ($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)', # h+=X[i]+K[i]
+ '&and ($a4,$e)', # f&e
+ '&rorx ($a0,$e,$Sigma1[2])',
+ '&rorx ($a2,$e,$Sigma1[1])',
+
+ '&lea ($a,"($a,$a1)")', # h+=Sigma0(a) from the past
+ '&lea ($h,"($h,$a4)")',
+ '&andn ($a4,$e,$g)', # ~e&g
+ '&xor ($a0,$a2)',
+
+ '&rorx ($a1,$e,$Sigma1[0])',
+ '&lea ($h,"($h,$a4)")', # h+=Ch(e,f,g)=(e&f)+(~e&g)
+ '&xor ($a0,$a1)', # Sigma1(e)
+ '&mov ($a2,$a)',
+
+ '&rorx ($a4,$a,$Sigma0[2])',
+ '&lea ($h,"($h,$a0)")', # h+=Sigma1(e)
+ '&xor ($a2,$b)', # a^b, b^c in next round
+ '&rorx ($a1,$a,$Sigma0[1])',
+
+ '&rorx ($a0,$a,$Sigma0[0])',
+ '&lea ($d,"($d,$h)")', # d+=h
+ '&and ($a3,$a2)', # (b^c)&(a^b)
+ '&xor ($a1,$a4)',
+
+ '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
+ '&xor ($a1,$a0)', # Sigma0(a)
+ '&lea ($h,"($h,$a3)");'. # h+=Maj(a,b,c)
+ '&mov ($a4,$e)', # copy of f in future
+
+ '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+ );
+	# and at the finish one has to do $a+=$a1
+}
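+
+# Why Ch can be accumulated with two lea's above (a scalar sketch,
+# illustration only): the halves (e&f) and (~e&g) are bitwise
+# disjoint, so their sum equals their OR, i.e. the textbook
+# Ch(e,f,g) = (e&f)|(~e&g).
+sub demo_ch { my ($e,$f,$g)=@_; (($e&$f) + (~$e&$g)) & 0xffffffff }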
+
+$code.=<<___;
+.type ${func}_avx2,\@function,3
+.align 64
+${func}_avx2:
+.Lavx2_shortcut:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ mov %rsp,%r11 # copy %rsp
+ sub \$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
+ shl \$4,%rdx # num*16
+ and \$-256*$SZ,%rsp # align stack frame
+ lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
+ add \$`2*$SZ*($rounds-8)`,%rsp
+ mov $ctx,$_ctx # save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+ mov %rdx,$_end # save end pointer, "3rd" arg
+ mov %r11,$_rsp # save copy of %rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,16*$SZ+32(%rsp)
+ movaps %xmm7,16*$SZ+48(%rsp)
+ movaps %xmm8,16*$SZ+64(%rsp)
+ movaps %xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+ movaps %xmm10,16*$SZ+96(%rsp)
+ movaps %xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx2:
+
+ vzeroupper
+ sub \$-16*$SZ,$inp # inp++, size optimization
+ mov $SZ*0($ctx),$A
+ mov $inp,%r12 # borrow $T1
+ mov $SZ*1($ctx),$B
+ cmp %rdx,$inp # $_end
+ mov $SZ*2($ctx),$C
+ cmove %rsp,%r12 # next block or random data
+ mov $SZ*3($ctx),$D
+ mov $SZ*4($ctx),$E
+ mov $SZ*5($ctx),$F
+ mov $SZ*6($ctx),$G
+ mov $SZ*7($ctx),$H
+___
+ if ($SZ==4) { # SHA256
+ my @X = map("%ymm$_",(0..3));
+ my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
+
+$code.=<<___;
+ vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+ vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+ jmp .Loop_avx2
+.align 16
+.Loop_avx2:
+ vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ vmovdqu -16*$SZ+0($inp),%xmm0
+ vmovdqu -16*$SZ+16($inp),%xmm1
+ vmovdqu -16*$SZ+32($inp),%xmm2
+ vmovdqu -16*$SZ+48($inp),%xmm3
+ #mov $inp,$_inp # offload $inp
+ vinserti128 \$1,(%r12),@X[0],@X[0]
+ vinserti128 \$1,16(%r12),@X[1],@X[1]
+ vpshufb $t3,@X[0],@X[0]
+ vinserti128 \$1,32(%r12),@X[2],@X[2]
+ vpshufb $t3,@X[1],@X[1]
+ vinserti128 \$1,48(%r12),@X[3],@X[3]
+
+ lea $TABLE(%rip),$Tbl
+ vpshufb $t3,@X[2],@X[2]
+ vpaddd 0x00($Tbl),@X[0],$t0
+ vpshufb $t3,@X[3],@X[3]
+ vpaddd 0x20($Tbl),@X[1],$t1
+ vpaddd 0x40($Tbl),@X[2],$t2
+ vpaddd 0x60($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ xor $a1,$a1
+ vmovdqa $t1,0x20(%rsp)
+ lea -$PUSH8(%rsp),%rsp
+ mov $B,$a3
+ vmovdqa $t2,0x00(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x20(%rsp)
+ mov $F,$a4
+ sub \$-16*2*$SZ,$Tbl # size optimization
+ jmp .Lavx2_00_47
+
+.align 16
+.Lavx2_00_47:
+___
+
+sub AVX2_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body); # 96 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+ &lea ("%rsp","-$PUSH8(%rsp)") if (($j%2)==0);
+ foreach (Xupdate_256_AVX()) { # 29 instructions
+ eval;
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ }
+ &vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &AVX2_256_00_47($j,\&bodyx_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &lea ($Tbl,16*2*$SZ."($Tbl)");
+ &cmpb (($SZ-1)."($Tbl)",0);
+ &jne (".Lavx2_00_47");
+
+ for ($i=0; $i<16; ) {
+ my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+ foreach(bodyx_00_15()) { eval; }
+ }
+ } else { # SHA512
+ my @X = map("%ymm$_",(0..7));
+ my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
+
+$code.=<<___;
+ jmp .Loop_avx2
+.align 16
+.Loop_avx2:
+ vmovdqu -16*$SZ($inp),%xmm0
+ vmovdqu -16*$SZ+16($inp),%xmm1
+ vmovdqu -16*$SZ+32($inp),%xmm2
+ lea $TABLE+0x80(%rip),$Tbl # size optimization
+ vmovdqu -16*$SZ+48($inp),%xmm3
+ vmovdqu -16*$SZ+64($inp),%xmm4
+ vmovdqu -16*$SZ+80($inp),%xmm5
+ vmovdqu -16*$SZ+96($inp),%xmm6
+ vmovdqu -16*$SZ+112($inp),%xmm7
+ #mov $inp,$_inp # offload $inp
+ vmovdqa `$SZ*2*$rounds-0x80`($Tbl),$t2
+ vinserti128 \$1,(%r12),@X[0],@X[0]
+ vinserti128 \$1,16(%r12),@X[1],@X[1]
+ vpshufb $t2,@X[0],@X[0]
+ vinserti128 \$1,32(%r12),@X[2],@X[2]
+ vpshufb $t2,@X[1],@X[1]
+ vinserti128 \$1,48(%r12),@X[3],@X[3]
+ vpshufb $t2,@X[2],@X[2]
+ vinserti128 \$1,64(%r12),@X[4],@X[4]
+ vpshufb $t2,@X[3],@X[3]
+ vinserti128 \$1,80(%r12),@X[5],@X[5]
+ vpshufb $t2,@X[4],@X[4]
+ vinserti128 \$1,96(%r12),@X[6],@X[6]
+ vpshufb $t2,@X[5],@X[5]
+ vinserti128 \$1,112(%r12),@X[7],@X[7]
+
+ vpaddq -0x80($Tbl),@X[0],$t0
+ vpshufb $t2,@X[6],@X[6]
+ vpaddq -0x60($Tbl),@X[1],$t1
+ vpshufb $t2,@X[7],@X[7]
+ vpaddq -0x40($Tbl),@X[2],$t2
+ vpaddq -0x20($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ vpaddq 0x00($Tbl),@X[4],$t0
+ vmovdqa $t1,0x20(%rsp)
+ vpaddq 0x20($Tbl),@X[5],$t1
+ vmovdqa $t2,0x40(%rsp)
+ vpaddq 0x40($Tbl),@X[6],$t2
+ vmovdqa $t3,0x60(%rsp)
+ lea -$PUSH8(%rsp),%rsp
+ vpaddq 0x60($Tbl),@X[7],$t3
+ vmovdqa $t0,0x00(%rsp)
+ xor $a1,$a1
+ vmovdqa $t1,0x20(%rsp)
+ mov $B,$a3
+ vmovdqa $t2,0x40(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x60(%rsp)
+ mov $F,$a4
+ add \$16*2*$SZ,$Tbl
+ jmp .Lavx2_00_47
+
+.align 16
+.Lavx2_00_47:
___
+
+sub AVX2_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body); # 48 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+ &lea ("%rsp","-$PUSH8(%rsp)") if (($j%4)==0);
+ foreach (Xupdate_512_AVX()) { # 23 instructions
+ eval;
+ if ($_ !~ /\;$/) {
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ }
+ }
+ &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
+ foreach (@insns) { eval; } # remaining instructions
+ &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<8; $j++) {
+ &AVX2_512_00_47($j,\&bodyx_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &lea ($Tbl,16*2*$SZ."($Tbl)");
+ &cmpb (($SZ-1-0x80)."($Tbl)",0);
+ &jne (".Lavx2_00_47");
+
+ for ($i=0; $i<16; ) {
+ my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+ foreach(bodyx_00_15()) { eval; }
+ }
}
+$code.=<<___;
+ mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
+ add $a1,$A
+ #mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
+ lea `2*$SZ*($rounds-8)`(%rsp),$Tbl
+
+ add $SZ*0($ctx),$A
+ add $SZ*1($ctx),$B
+ add $SZ*2($ctx),$C
+ add $SZ*3($ctx),$D
+ add $SZ*4($ctx),$E
+ add $SZ*5($ctx),$F
+ add $SZ*6($ctx),$G
+ add $SZ*7($ctx),$H
+
+ mov $A,$SZ*0($ctx)
+ mov $B,$SZ*1($ctx)
+ mov $C,$SZ*2($ctx)
+ mov $D,$SZ*3($ctx)
+ mov $E,$SZ*4($ctx)
+ mov $F,$SZ*5($ctx)
+ mov $G,$SZ*6($ctx)
+ mov $H,$SZ*7($ctx)
+
+ cmp `$PUSH8+2*8`($Tbl),$inp # $_end
+ je .Ldone_avx2
+
+ xor $a1,$a1
+ mov $B,$a3
+ xor $C,$a3 # magic
+ mov $F,$a4
+ jmp .Lower_avx2
+.align 16
+.Lower_avx2:
+___
+ for ($i=0; $i<8; ) {
+ my $base="+16($Tbl)";
+ foreach(bodyx_00_15()) { eval; }
+ }
+$code.=<<___;
+ lea -$PUSH8($Tbl),$Tbl
+ cmp %rsp,$Tbl
+ jae .Lower_avx2
+
+ mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
+ add $a1,$A
+ #mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
+ lea `2*$SZ*($rounds-8)`(%rsp),%rsp
+
+ add $SZ*0($ctx),$A
+ add $SZ*1($ctx),$B
+ add $SZ*2($ctx),$C
+ add $SZ*3($ctx),$D
+ add $SZ*4($ctx),$E
+ add $SZ*5($ctx),$F
+ lea `2*16*$SZ`($inp),$inp # inp+=2
+ add $SZ*6($ctx),$G
+ mov $inp,%r12
+ add $SZ*7($ctx),$H
+ cmp $_end,$inp
+
+ mov $A,$SZ*0($ctx)
+ cmove %rsp,%r12 # next block or stale data
+ mov $B,$SZ*1($ctx)
+ mov $C,$SZ*2($ctx)
+ mov $D,$SZ*3($ctx)
+ mov $E,$SZ*4($ctx)
+ mov $F,$SZ*5($ctx)
+ mov $G,$SZ*6($ctx)
+ mov $H,$SZ*7($ctx)
+
+ jbe .Loop_avx2
+ lea (%rsp),$Tbl
+
+.Ldone_avx2:
+ lea ($Tbl),%rsp
+ mov $_rsp,%rsi
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps 16*$SZ+32(%rsp),%xmm6
+ movaps 16*$SZ+48(%rsp),%xmm7
+ movaps 16*$SZ+64(%rsp),%xmm8
+ movaps 16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+ movaps 16*$SZ+96(%rsp),%xmm10
+ movaps 16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_avx2:
+ ret
+.size ${func}_avx2,.-${func}_avx2
+___
+}}
+}}}}}
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
@@ -366,16 +2173,32 @@ se_handler:
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
- lea .Lprologue(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lprologue
+ mov 8($disp),%rsi # disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_prologue
mov 152($context),%rax # pull context->Rsp
- lea .Lepilogue(%rip),%r10
- cmp %r10,%rbx # context->Rip>=.Lepilogue
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_prologue
+___
+$code.=<<___ if ($avx>1);
+ lea .Lavx2_shortcut(%rip),%r10
+ cmp %r10,%rbx # context->Rip<avx2_shortcut
+ jb .Lnot_in_avx2
+ and \$-256*$SZ,%rax
+ add \$`2*$SZ*($rounds-8)`,%rax
+.Lnot_in_avx2:
+___
+$code.=<<___;
+ mov %rax,%rsi # put aside Rsp
mov 16*$SZ+3*8(%rax),%rax # pull $_rsp
lea 48(%rax),%rax
@@ -392,6 +2215,15 @@ se_handler:
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx
+ jb .Lin_prologue # non-AVX code
+
+ lea 16*$SZ+4*8(%rsi),%rsi # Xmm6- save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$`$SZ==4?8:12`,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
.Lin_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
@@ -431,21 +2263,136 @@ se_handler:
pop %rsi
ret
.size se_handler,.-se_handler
+___
+
+$code.=<<___ if ($SZ==4 && $shaext);
+.type shaext_handler,\@abi-omnipotent
+.align 16
+shaext_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+ lea .Lprologue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lin_prologue
+
+ lea .Lepilogue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lin_prologue
+
+ lea -8-5*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$10,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+ jmp .Lin_prologue
+.size shaext_handler,.-shaext_handler
+___
+
+$code.=<<___;
.section .pdata
.align 4
.rva .LSEH_begin_$func
.rva .LSEH_end_$func
.rva .LSEH_info_$func
-
+___
+$code.=<<___ if ($SZ==4 && $shaext);
+ .rva .LSEH_begin_${func}_shaext
+ .rva .LSEH_end_${func}_shaext
+ .rva .LSEH_info_${func}_shaext
+___
+$code.=<<___ if ($SZ==4);
+ .rva .LSEH_begin_${func}_ssse3
+ .rva .LSEH_end_${func}_ssse3
+ .rva .LSEH_info_${func}_ssse3
+___
+$code.=<<___ if ($avx && $SZ==8);
+ .rva .LSEH_begin_${func}_xop
+ .rva .LSEH_end_${func}_xop
+ .rva .LSEH_info_${func}_xop
+___
+$code.=<<___ if ($avx);
+ .rva .LSEH_begin_${func}_avx
+ .rva .LSEH_end_${func}_avx
+ .rva .LSEH_info_${func}_avx
+___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_${func}_avx2
+ .rva .LSEH_end_${func}_avx2
+ .rva .LSEH_info_${func}_avx2
+___
+$code.=<<___;
.section .xdata
.align 8
.LSEH_info_$func:
.byte 9,0,0,0
.rva se_handler
+ .rva .Lprologue,.Lepilogue # HandlerData[]
+___
+$code.=<<___ if ($SZ==4 && $shaext);
+.LSEH_info_${func}_shaext:
+ .byte 9,0,0,0
+ .rva shaext_handler
+___
+$code.=<<___ if ($SZ==4);
+.LSEH_info_${func}_ssse3:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
+___
+$code.=<<___ if ($avx && $SZ==8);
+.LSEH_info_${func}_xop:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lprologue_xop,.Lepilogue_xop # HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_${func}_avx:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_${func}_avx2:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lprologue_avx2,.Lepilogue_avx2 # HandlerData[]
___
}
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
+sub sha256op38 {
+ my $instr = shift;
+ my %opcodelet = (
+ "sha256rnds2" => 0xcb,
+ "sha256msg1" => 0xcc,
+ "sha256msg2" => 0xcd );
+
+	if (defined($opcodelet{$instr}) && $_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
+ my @opcode=(0x0f,0x38);
+ push @opcode,$opcodelet{$instr};
+ push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ } else {
+	return $instr."\t".$_[0];
+ }
+}
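+
+# A worked example of the fallback encoding (illustration only): for
+#	sha256rnds2	%xmm0,%xmm1
+# the ModR/M byte is 0xc0|0|(1<<3) = 0xc8, so the line is rewritten as
+#	.byte	15,56,203,200		# 0x0f,0x38,0xcb,0xc8
+# (in decimal, because join() stringifies the opcode bytes in base 10).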
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;
+
+ print $_,"\n";
+}
close STDOUT;
diff --git a/openssl/crypto/sha/asm/sha512p8-ppc.pl b/openssl/crypto/sha/asm/sha512p8-ppc.pl
new file mode 100755
index 000000000..47189502c
--- /dev/null
+++ b/openssl/crypto/sha/asm/sha512p8-ppc.pl
@@ -0,0 +1,424 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# SHA256/512 for PowerISA v2.07.
+#
+# Accurate performance measurements are problematic, because this is
+# always a virtualized setup with a possibly throttled processor.
+# Relative comparison is therefore more informative. This module is
+# ~60% faster than integer-only sha512-ppc.pl. To anchor it to
+# something else, SHA256 is 24% slower than sha1-ppc.pl and 2.5x
+# slower than hardware-assisted aes-128-cbc encrypt; SHA512 is 20%
+# faster than sha1-ppc.pl and 1.6x slower than aes-128-cbc. Another
+# interesting result is the degree of computational resource
+# utilization: POWER8 is a "massively multi-threaded chip", and the
+# difference between single-process and maximum multi-process
+# benchmark results indicates that utilization is a whopping 94%.
+# For sha512-ppc.pl we get [a not unimpressive] 84%, and for
+# sha1-ppc.pl 73%. Here 100% means that the multi-process result
+# equals the single-process one, given that all threads end up on
+# the same physical core.
+
+$flavour=shift;
+$output =shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $LRSAVE=2*$SIZE_T;
+ $STU="stdu";
+ $POP="ld";
+ $PUSH="std";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $LRSAVE=$SIZE_T;
+ $STU="stwu";
+ $POP="lwz";
+ $PUSH="stw";
+} else { die "nonsense $flavour"; }
+
+$LENDIAN=($flavour=~/le/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";
+
+if ($output =~ /512/) {
+ $bits=512;
+ $SZ=8;
+ $sz="d";
+ $rounds=80;
+} else {
+ $bits=256;
+ $SZ=4;
+ $sz="w";
+ $rounds=64;
+}
+
+$func="sha${bits}_block_p8";
+$FRAME=8*$SIZE_T;
+
+$sp ="r1";
+$toc="r2";
+$ctx="r3";
+$inp="r4";
+$num="r5";
+$Tbl="r6";
+$idx="r7";
+$lrsave="r8";
+$offload="r11";
+$vrsave="r12";
+($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,10,26..31));
+ $x00=0 if ($flavour =~ /osx/);
+
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("v$_",(0..7));
+@X=map("v$_",(8..23));
+($Ki,$Func,$S0,$S1,$s0,$s1,$lemask)=map("v$_",(24..31));
+
+sub ROUND {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my $j=($i+1)%16;
+
+$code.=<<___ if ($i<15 && ($i%(16/$SZ))==(16/$SZ-1));
+ lvx_u @X[$i+1],0,$inp ; load X[i] in advance
+ addi $inp,$inp,16
+___
+$code.=<<___ if ($i<16 && ($i%(16/$SZ)));
+ vsldoi @X[$i],@X[$i-1],@X[$i-1],$SZ
+___
+$code.=<<___ if ($LENDIAN && $i<16 && ($i%(16/$SZ))==0);
+ vperm @X[$i],@X[$i],@X[$i],$lemask
+___
+$code.=<<___;
+ `"vshasigma${sz} $s0,@X[($j+1)%16],0,0" if ($i>=15)`
+ vsel $Func,$g,$f,$e ; Ch(e,f,g)
+ vshasigma${sz} $S1,$e,1,15 ; Sigma1(e)
+ vaddu${sz}m $h,$h,@X[$i%16] ; h+=X[i]
+ vshasigma${sz} $S0,$a,1,0 ; Sigma0(a)
+ `"vshasigma${sz} $s1,@X[($j+14)%16],0,15" if ($i>=15)`
+ vaddu${sz}m $h,$h,$Func ; h+=Ch(e,f,g)
+ vxor $Func,$a,$b
+ `"vaddu${sz}m @X[$j],@X[$j],@X[($j+9)%16]" if ($i>=15)`
+ vaddu${sz}m $h,$h,$S1 ; h+=Sigma1(e)
+ vsel $Func,$b,$c,$Func ; Maj(a,b,c)
+ vaddu${sz}m $g,$g,$Ki ; future h+=K[i]
+ vaddu${sz}m $d,$d,$h ; d+=h
+ vaddu${sz}m $S0,$S0,$Func ; Sigma0(a)+Maj(a,b,c)
+ `"vaddu${sz}m @X[$j],@X[$j],$s0" if ($i>=15)`
+ lvx $Ki,$idx,$Tbl ; load next K[i]
+ addi $idx,$idx,16
+ vaddu${sz}m $h,$h,$S0 ; h+=Sigma0(a)+Maj(a,b,c)
+ `"vaddu${sz}m @X[$j],@X[$j],$s1" if ($i>=15)`
+___
+}
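+
+# How the two vsel's in ROUND compute Ch and Maj (a scalar sketch,
+# illustration only): vsel(a,b,c) takes b's bits where c is 1 and a's
+# bits where c is 0, hence
+#	Ch(e,f,g)  = vsel(g,f,e)
+#	Maj(a,b,c) = vsel(b,c,a^b)	# a^b == 1 selects the tiebreaker c
+sub demo_vsel { my ($a,$b,$c)=@_; ($b & $c) | ($a & ~$c) }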
+
+$code=<<___;
+.machine "any"
+.text
+
+.globl $func
+.align 6
+$func:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr $lrsave
+ li r10,`$FRAME+8*16+15`
+ li r11,`$FRAME+8*16+31`
+ stvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ mfspr $vrsave,256
+ stvx v21,r11,$sp
+ addi r11,r11,32
+ stvx v22,r10,$sp
+ addi r10,r10,32
+ stvx v23,r11,$sp
+ addi r11,r11,32
+ stvx v24,r10,$sp
+ addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
+ stvx v26,r10,$sp
+ addi r10,r10,32
+ stvx v27,r11,$sp
+ addi r11,r11,32
+ stvx v28,r10,$sp
+ addi r10,r10,32
+ stvx v29,r11,$sp
+ addi r11,r11,32
+ stvx v30,r10,$sp
+ stvx v31,r11,$sp
+ li r11,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ $PUSH $lrsave,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ mtspr 256,r11
+
+ bl LPICmeup
+ addi $offload,$sp,$FRAME+15
+___
+$code.=<<___ if ($LENDIAN);
+ li $idx,8
+ lvsl $lemask,0,$idx
+ vspltisb $Ki,0x0f
+ vxor $lemask,$lemask,$Ki
+___
+$code.=<<___ if ($SZ==4);
+ lvx_4w $A,$x00,$ctx
+ lvx_4w $E,$x10,$ctx
+ vsldoi $B,$A,$A,4 # unpack
+ vsldoi $C,$A,$A,8
+ vsldoi $D,$A,$A,12
+ vsldoi $F,$E,$E,4
+ vsldoi $G,$E,$E,8
+ vsldoi $H,$E,$E,12
+___
+$code.=<<___ if ($SZ==8);
+ lvx_u $A,$x00,$ctx
+ lvx_u $C,$x10,$ctx
+ lvx_u $E,$x20,$ctx
+ vsldoi $B,$A,$A,8 # unpack
+ lvx_u $G,$x30,$ctx
+ vsldoi $D,$C,$C,8
+ vsldoi $F,$E,$E,8
+ vsldoi $H,$G,$G,8
+___
+$code.=<<___;
+ li r0,`($rounds-16)/16` # inner loop counter
+ b Loop
+.align 5
+Loop:
+ lvx $Ki,$x00,$Tbl
+ li $idx,16
+ lvx_u @X[0],0,$inp
+ addi $inp,$inp,16
+ stvx $A,$x00,$offload # offload $A-$H
+ stvx $B,$x10,$offload
+ stvx $C,$x20,$offload
+ stvx $D,$x30,$offload
+ stvx $E,$x40,$offload
+ stvx $F,$x50,$offload
+ stvx $G,$x60,$offload
+ stvx $H,$x70,$offload
+ vaddu${sz}m $H,$H,$Ki # h+K[i]
+ lvx $Ki,$idx,$Tbl
+ addi $idx,$idx,16
+___
+for ($i=0;$i<16;$i++) { &ROUND($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ mtctr r0
+ b L16_xx
+.align 5
+L16_xx:
+___
+for (;$i<32;$i++) { &ROUND($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ bdnz L16_xx
+
+ lvx @X[2],$x00,$offload
+ subic. $num,$num,1
+ lvx @X[3],$x10,$offload
+ vaddu${sz}m $A,$A,@X[2]
+ lvx @X[4],$x20,$offload
+ vaddu${sz}m $B,$B,@X[3]
+ lvx @X[5],$x30,$offload
+ vaddu${sz}m $C,$C,@X[4]
+ lvx @X[6],$x40,$offload
+ vaddu${sz}m $D,$D,@X[5]
+ lvx @X[7],$x50,$offload
+ vaddu${sz}m $E,$E,@X[6]
+ lvx @X[8],$x60,$offload
+ vaddu${sz}m $F,$F,@X[7]
+ lvx @X[9],$x70,$offload
+ vaddu${sz}m $G,$G,@X[8]
+ vaddu${sz}m $H,$H,@X[9]
+ bne Loop
+___
+$code.=<<___ if ($SZ==4);
+ lvx @X[0],$idx,$Tbl
+ addi $idx,$idx,16
+ vperm $A,$A,$B,$Ki # pack the answer
+ lvx @X[1],$idx,$Tbl
+ vperm $E,$E,$F,$Ki
+ vperm $A,$A,$C,@X[0]
+ vperm $E,$E,$G,@X[0]
+ vperm $A,$A,$D,@X[1]
+ vperm $E,$E,$H,@X[1]
+ stvx_4w $A,$x00,$ctx
+ stvx_4w $E,$x10,$ctx
+___
+$code.=<<___ if ($SZ==8);
+ vperm $A,$A,$B,$Ki # pack the answer
+ vperm $C,$C,$D,$Ki
+ vperm $E,$E,$F,$Ki
+ vperm $G,$G,$H,$Ki
+ stvx_u $A,$x00,$ctx
+ stvx_u $C,$x10,$ctx
+ stvx_u $E,$x20,$ctx
+ stvx_u $G,$x30,$ctx
+___
+$code.=<<___;
+ li r10,`$FRAME+8*16+15`
+ mtlr $lrsave
+ li r11,`$FRAME+8*16+31`
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,4,1,0x80,6,3,0
+ .long 0
+.size $func,.-$func
+___
+
+# Ugly hack here, because PPC assembler syntax seems to vary too
+# much from platform to platform...
+$code.=<<___;
+.align 6
+LPICmeup:
+ mflr r0
+ bcl 20,31,\$+4
+ mflr $Tbl ; vvvvvv "distance" between . and 1st data entry
+ addi $Tbl,$Tbl,`64-8`
+ mtlr r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+ .space `64-9*4`
+___
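+
+# (bcl 20,31,\$+4 is the usual PIC idiom: it deposits the address of
+# the next instruction in LR without disturbing the link-register
+# prediction stack, and the addi then advances $Tbl to the constant
+# table emitted below.)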
+
+if ($SZ==8) {
+ local *table = sub {
+ foreach(@_) { $code.=".quad $_,$_\n"; }
+ };
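+	# (each K[i] is emitted twice so that a single lvx yields the
+	# constant in both doubleword lanes for vaddudm)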
+ table(
+ "0x428a2f98d728ae22","0x7137449123ef65cd",
+ "0xb5c0fbcfec4d3b2f","0xe9b5dba58189dbbc",
+ "0x3956c25bf348b538","0x59f111f1b605d019",
+ "0x923f82a4af194f9b","0xab1c5ed5da6d8118",
+ "0xd807aa98a3030242","0x12835b0145706fbe",
+ "0x243185be4ee4b28c","0x550c7dc3d5ffb4e2",
+ "0x72be5d74f27b896f","0x80deb1fe3b1696b1",
+ "0x9bdc06a725c71235","0xc19bf174cf692694",
+ "0xe49b69c19ef14ad2","0xefbe4786384f25e3",
+ "0x0fc19dc68b8cd5b5","0x240ca1cc77ac9c65",
+ "0x2de92c6f592b0275","0x4a7484aa6ea6e483",
+ "0x5cb0a9dcbd41fbd4","0x76f988da831153b5",
+ "0x983e5152ee66dfab","0xa831c66d2db43210",
+ "0xb00327c898fb213f","0xbf597fc7beef0ee4",
+ "0xc6e00bf33da88fc2","0xd5a79147930aa725",
+ "0x06ca6351e003826f","0x142929670a0e6e70",
+ "0x27b70a8546d22ffc","0x2e1b21385c26c926",
+ "0x4d2c6dfc5ac42aed","0x53380d139d95b3df",
+ "0x650a73548baf63de","0x766a0abb3c77b2a8",
+ "0x81c2c92e47edaee6","0x92722c851482353b",
+ "0xa2bfe8a14cf10364","0xa81a664bbc423001",
+ "0xc24b8b70d0f89791","0xc76c51a30654be30",
+ "0xd192e819d6ef5218","0xd69906245565a910",
+ "0xf40e35855771202a","0x106aa07032bbd1b8",
+ "0x19a4c116b8d2d0c8","0x1e376c085141ab53",
+ "0x2748774cdf8eeb99","0x34b0bcb5e19b48a8",
+ "0x391c0cb3c5c95a63","0x4ed8aa4ae3418acb",
+ "0x5b9cca4f7763e373","0x682e6ff3d6b2b8a3",
+ "0x748f82ee5defb2fc","0x78a5636f43172f60",
+ "0x84c87814a1f0ab72","0x8cc702081a6439ec",
+ "0x90befffa23631e28","0xa4506cebde82bde9",
+ "0xbef9a3f7b2c67915","0xc67178f2e372532b",
+ "0xca273eceea26619c","0xd186b8c721c0c207",
+ "0xeada7dd6cde0eb1e","0xf57d4f7fee6ed178",
+ "0x06f067aa72176fba","0x0a637dc5a2c898a6",
+ "0x113f9804bef90dae","0x1b710b35131c471b",
+ "0x28db77f523047d84","0x32caab7b40c72493",
+ "0x3c9ebe0a15c9bebc","0x431d67c49c100d4c",
+ "0x4cc5d4becb3e42b6","0x597f299cfc657e2a",
+ "0x5fcb6fab3ad6faec","0x6c44198c4a475817","0");
+$code.=<<___ if (!$LENDIAN);
+.quad 0x0001020304050607,0x1011121314151617
+___
+$code.=<<___ if ($LENDIAN); # quad-swapped
+.quad 0x1011121314151617,0x0001020304050607
+___
+} else {
+ local *table = sub {
+ foreach(@_) { $code.=".long $_,$_,$_,$_\n"; }
+ };
+ table(
+ "0x428a2f98","0x71374491","0xb5c0fbcf","0xe9b5dba5",
+ "0x3956c25b","0x59f111f1","0x923f82a4","0xab1c5ed5",
+ "0xd807aa98","0x12835b01","0x243185be","0x550c7dc3",
+ "0x72be5d74","0x80deb1fe","0x9bdc06a7","0xc19bf174",
+ "0xe49b69c1","0xefbe4786","0x0fc19dc6","0x240ca1cc",
+ "0x2de92c6f","0x4a7484aa","0x5cb0a9dc","0x76f988da",
+ "0x983e5152","0xa831c66d","0xb00327c8","0xbf597fc7",
+ "0xc6e00bf3","0xd5a79147","0x06ca6351","0x14292967",
+ "0x27b70a85","0x2e1b2138","0x4d2c6dfc","0x53380d13",
+ "0x650a7354","0x766a0abb","0x81c2c92e","0x92722c85",
+ "0xa2bfe8a1","0xa81a664b","0xc24b8b70","0xc76c51a3",
+ "0xd192e819","0xd6990624","0xf40e3585","0x106aa070",
+ "0x19a4c116","0x1e376c08","0x2748774c","0x34b0bcb5",
+ "0x391c0cb3","0x4ed8aa4a","0x5b9cca4f","0x682e6ff3",
+ "0x748f82ee","0x78a5636f","0x84c87814","0x8cc70208",
+ "0x90befffa","0xa4506ceb","0xbef9a3f7","0xc67178f2","0");
+$code.=<<___ if (!$LENDIAN);
+.long 0x00010203,0x10111213,0x10111213,0x10111213
+.long 0x00010203,0x04050607,0x10111213,0x10111213
+.long 0x00010203,0x04050607,0x08090a0b,0x10111213
+___
+$code.=<<___ if ($LENDIAN); # word-swapped
+.long 0x10111213,0x10111213,0x10111213,0x00010203
+.long 0x10111213,0x10111213,0x04050607,0x00010203
+.long 0x10111213,0x08090a0b,0x04050607,0x00010203
+___
+}
+$code.=<<___;
+.asciz "SHA${bits} for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/sha/sha.c b/openssl/crypto/sha/sha.c
index 42126551d..cfc12f3ed 100644
--- a/openssl/crypto/sha/sha.c
+++ b/openssl/crypto/sha/sha.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -60,65 +60,59 @@
#include <stdlib.h>
#include <openssl/sha.h>
-#define BUFSIZE 1024*16
+#define BUFSIZE 1024*16
void do_fp(FILE *f);
void pt(unsigned char *md);
int read(int, void *, unsigned int);
int main(int argc, char **argv)
- {
- int i,err=0;
- FILE *IN;
+{
+ int i, err = 0;
+ FILE *IN;
- if (argc == 1)
- {
- do_fp(stdin);
- }
- else
- {
- for (i=1; i<argc; i++)
- {
- IN=fopen(argv[i],"r");
- if (IN == NULL)
- {
- perror(argv[i]);
- err++;
- continue;
- }
- printf("SHA(%s)= ",argv[i]);
- do_fp(IN);
- fclose(IN);
- }
- }
- exit(err);
- }
+ if (argc == 1) {
+ do_fp(stdin);
+ } else {
+ for (i = 1; i < argc; i++) {
+ IN = fopen(argv[i], "r");
+ if (IN == NULL) {
+ perror(argv[i]);
+ err++;
+ continue;
+ }
+ printf("SHA(%s)= ", argv[i]);
+ do_fp(IN);
+ fclose(IN);
+ }
+ }
+ exit(err);
+}
void do_fp(FILE *f)
- {
- SHA_CTX c;
- unsigned char md[SHA_DIGEST_LENGTH];
- int fd;
- int i;
- unsigned char buf[BUFSIZE];
+{
+ SHA_CTX c;
+ unsigned char md[SHA_DIGEST_LENGTH];
+ int fd;
+ int i;
+ unsigned char buf[BUFSIZE];
- fd=fileno(f);
- SHA_Init(&c);
- for (;;)
- {
- i=read(fd,buf,BUFSIZE);
- if (i <= 0) break;
- SHA_Update(&c,buf,(unsigned long)i);
- }
- SHA_Final(&(md[0]),&c);
- pt(md);
- }
+ fd = fileno(f);
+ SHA_Init(&c);
+ for (;;) {
+ i = read(fd, buf, BUFSIZE);
+ if (i <= 0)
+ break;
+ SHA_Update(&c, buf, (unsigned long)i);
+ }
+ SHA_Final(&(md[0]), &c);
+ pt(md);
+}
void pt(unsigned char *md)
- {
- int i;
-
- for (i=0; i<SHA_DIGEST_LENGTH; i++)
- printf("%02x",md[i]);
- printf("\n");
- }
+{
+ int i;
+ for (i = 0; i < SHA_DIGEST_LENGTH; i++)
+ printf("%02x", md[i]);
+ printf("\n");
+}
diff --git a/openssl/crypto/sha/sha.h b/openssl/crypto/sha/sha.h
index 8a6bf4bbb..e5169e4fe 100644
--- a/openssl/crypto/sha/sha.h
+++ b/openssl/crypto/sha/sha.h
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -57,155 +57,155 @@
*/
#ifndef HEADER_SHA_H
-#define HEADER_SHA_H
+# define HEADER_SHA_H
-#include <openssl/e_os2.h>
-#include <stddef.h>
+# include <openssl/e_os2.h>
+# include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
-#if defined(OPENSSL_NO_SHA) || (defined(OPENSSL_NO_SHA0) && defined(OPENSSL_NO_SHA1))
-#error SHA is disabled.
-#endif
+# if defined(OPENSSL_NO_SHA) || (defined(OPENSSL_NO_SHA0) && defined(OPENSSL_NO_SHA1))
+# error SHA is disabled.
+# endif
-#if defined(OPENSSL_FIPS)
-#define FIPS_SHA_SIZE_T size_t
-#endif
+# if defined(OPENSSL_FIPS)
+# define FIPS_SHA_SIZE_T size_t
+# endif
-/*
+/*-
* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
* ! SHA_LONG has to be at least 32 bits wide. If it's wider, then !
* ! SHA_LONG_LOG2 has to be defined along. !
* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
*/
-#if defined(__LP32__)
-#define SHA_LONG unsigned long
-#elif defined(OPENSSL_SYS_CRAY) || defined(__ILP64__)
-#define SHA_LONG unsigned long
-#define SHA_LONG_LOG2 3
-#else
-#define SHA_LONG unsigned int
-#endif
-
-#define SHA_LBLOCK 16
-#define SHA_CBLOCK (SHA_LBLOCK*4) /* SHA treats input data as a
- * contiguous array of 32 bit
- * wide big-endian values. */
-#define SHA_LAST_BLOCK (SHA_CBLOCK-8)
-#define SHA_DIGEST_LENGTH 20
-
-typedef struct SHAstate_st
- {
- SHA_LONG h0,h1,h2,h3,h4;
- SHA_LONG Nl,Nh;
- SHA_LONG data[SHA_LBLOCK];
- unsigned int num;
- } SHA_CTX;
-
-#ifndef OPENSSL_NO_SHA0
-#ifdef OPENSSL_FIPS
+# if defined(__LP32__)
+# define SHA_LONG unsigned long
+# elif defined(OPENSSL_SYS_CRAY) || defined(__ILP64__)
+# define SHA_LONG unsigned long
+# define SHA_LONG_LOG2 3
+# else
+# define SHA_LONG unsigned int
+# endif
+
+# define SHA_LBLOCK 16
+# define SHA_CBLOCK (SHA_LBLOCK*4)/* SHA treats input data as a
+ * contiguous array of 32 bit wide
+ * big-endian values. */
+# define SHA_LAST_BLOCK (SHA_CBLOCK-8)
+# define SHA_DIGEST_LENGTH 20
+
+typedef struct SHAstate_st {
+ SHA_LONG h0, h1, h2, h3, h4;
+ SHA_LONG Nl, Nh;
+ SHA_LONG data[SHA_LBLOCK];
+ unsigned int num;
+} SHA_CTX;
+
+# ifndef OPENSSL_NO_SHA0
+# ifdef OPENSSL_FIPS
int private_SHA_Init(SHA_CTX *c);
-#endif
+# endif
int SHA_Init(SHA_CTX *c);
int SHA_Update(SHA_CTX *c, const void *data, size_t len);
int SHA_Final(unsigned char *md, SHA_CTX *c);
unsigned char *SHA(const unsigned char *d, size_t n, unsigned char *md);
void SHA_Transform(SHA_CTX *c, const unsigned char *data);
-#endif
-#ifndef OPENSSL_NO_SHA1
-#ifdef OPENSSL_FIPS
+# endif
+# ifndef OPENSSL_NO_SHA1
+# ifdef OPENSSL_FIPS
int private_SHA1_Init(SHA_CTX *c);
-#endif
+# endif
int SHA1_Init(SHA_CTX *c);
int SHA1_Update(SHA_CTX *c, const void *data, size_t len);
int SHA1_Final(unsigned char *md, SHA_CTX *c);
unsigned char *SHA1(const unsigned char *d, size_t n, unsigned char *md);
void SHA1_Transform(SHA_CTX *c, const unsigned char *data);
-#endif
-
-#define SHA256_CBLOCK (SHA_LBLOCK*4) /* SHA-256 treats input data as a
- * contiguous array of 32 bit
- * wide big-endian values. */
-#define SHA224_DIGEST_LENGTH 28
-#define SHA256_DIGEST_LENGTH 32
-
-typedef struct SHA256state_st
- {
- SHA_LONG h[8];
- SHA_LONG Nl,Nh;
- SHA_LONG data[SHA_LBLOCK];
- unsigned int num,md_len;
- } SHA256_CTX;
-
-#ifndef OPENSSL_NO_SHA256
-#ifdef OPENSSL_FIPS
+# endif
+
+# define SHA256_CBLOCK (SHA_LBLOCK*4)/* SHA-256 treats input data as a
+ * contiguous array of 32 bit wide
+ * big-endian values. */
+# define SHA224_DIGEST_LENGTH 28
+# define SHA256_DIGEST_LENGTH 32
+
+typedef struct SHA256state_st {
+ SHA_LONG h[8];
+ SHA_LONG Nl, Nh;
+ SHA_LONG data[SHA_LBLOCK];
+ unsigned int num, md_len;
+} SHA256_CTX;
+
+# ifndef OPENSSL_NO_SHA256
+# ifdef OPENSSL_FIPS
int private_SHA224_Init(SHA256_CTX *c);
int private_SHA256_Init(SHA256_CTX *c);
-#endif
+# endif
int SHA224_Init(SHA256_CTX *c);
int SHA224_Update(SHA256_CTX *c, const void *data, size_t len);
int SHA224_Final(unsigned char *md, SHA256_CTX *c);
-unsigned char *SHA224(const unsigned char *d, size_t n,unsigned char *md);
+unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md);
int SHA256_Init(SHA256_CTX *c);
int SHA256_Update(SHA256_CTX *c, const void *data, size_t len);
int SHA256_Final(unsigned char *md, SHA256_CTX *c);
-unsigned char *SHA256(const unsigned char *d, size_t n,unsigned char *md);
+unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md);
void SHA256_Transform(SHA256_CTX *c, const unsigned char *data);
-#endif
+# endif
-#define SHA384_DIGEST_LENGTH 48
-#define SHA512_DIGEST_LENGTH 64
+# define SHA384_DIGEST_LENGTH 48
+# define SHA512_DIGEST_LENGTH 64
-#ifndef OPENSSL_NO_SHA512
+# ifndef OPENSSL_NO_SHA512
/*
* Unlike 32-bit digest algorithms, SHA-512 *relies* on SHA_LONG64
* being exactly 64-bit wide. See Implementation Notes in sha512.c
* for further details.
*/
-#define SHA512_CBLOCK (SHA_LBLOCK*8) /* SHA-512 treats input data as a
- * contiguous array of 64 bit
- * wide big-endian values. */
-#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
-#define SHA_LONG64 unsigned __int64
-#define U64(C) C##UI64
-#elif defined(__arch64__)
-#define SHA_LONG64 unsigned long
-#define U64(C) C##UL
-#else
-#define SHA_LONG64 unsigned long long
-#define U64(C) C##ULL
-#endif
-
-typedef struct SHA512state_st
- {
- SHA_LONG64 h[8];
- SHA_LONG64 Nl,Nh;
- union {
- SHA_LONG64 d[SHA_LBLOCK];
- unsigned char p[SHA512_CBLOCK];
- } u;
- unsigned int num,md_len;
- } SHA512_CTX;
-#endif
-
-#ifndef OPENSSL_NO_SHA512
-#ifdef OPENSSL_FIPS
+/*
+ * SHA-512 treats input data as a
+ * contiguous array of 64 bit
+ * wide big-endian values.
+ */
+# define SHA512_CBLOCK (SHA_LBLOCK*8)
+# if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
+# define SHA_LONG64 unsigned __int64
+# define U64(C) C##UI64
+# elif defined(__arch64__)
+# define SHA_LONG64 unsigned long
+# define U64(C) C##UL
+# else
+# define SHA_LONG64 unsigned long long
+# define U64(C) C##ULL
+# endif
+
+typedef struct SHA512state_st {
+ SHA_LONG64 h[8];
+ SHA_LONG64 Nl, Nh;
+ union {
+ SHA_LONG64 d[SHA_LBLOCK];
+ unsigned char p[SHA512_CBLOCK];
+ } u;
+ unsigned int num, md_len;
+} SHA512_CTX;
+# endif
+
+# ifndef OPENSSL_NO_SHA512
+# ifdef OPENSSL_FIPS
int private_SHA384_Init(SHA512_CTX *c);
int private_SHA512_Init(SHA512_CTX *c);
-#endif
+# endif
int SHA384_Init(SHA512_CTX *c);
int SHA384_Update(SHA512_CTX *c, const void *data, size_t len);
int SHA384_Final(unsigned char *md, SHA512_CTX *c);
-unsigned char *SHA384(const unsigned char *d, size_t n,unsigned char *md);
+unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md);
int SHA512_Init(SHA512_CTX *c);
int SHA512_Update(SHA512_CTX *c, const void *data, size_t len);
int SHA512_Final(unsigned char *md, SHA512_CTX *c);
-unsigned char *SHA512(const unsigned char *d, size_t n,unsigned char *md);
+unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md);
void SHA512_Transform(SHA512_CTX *c, const unsigned char *data);
-#endif
+# endif
#ifdef __cplusplus
}
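
[Editor's note: the header above declares both one-shot (e.g. SHA256()) and incremental (Init/Update/Final) entry points for each digest. A minimal sketch of the incremental form, using the 56-byte FIPS vector that appears in sha256t.c below; hypothetical caller code, not part of the patch:]

#include <openssl/sha.h>

int digest_two_parts(unsigned char out[SHA256_DIGEST_LENGTH])
{
    SHA256_CTX ctx;

    if (!SHA256_Init(&ctx))
        return 0;
    /* Update may be called any number of times, with arbitrary lengths */
    if (!SHA256_Update(&ctx, "abcdbcde" "cdefdefg", 16))
        return 0;
    if (!SHA256_Update(&ctx, "efghfghi" "ghijhijk" "ijkljklm"
                             "klmnlmno" "mnopnopq", 40))
        return 0;
    return SHA256_Final(out, &ctx);     /* equals one-shot SHA256() result */
}
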
diff --git a/openssl/crypto/sha/sha1.c b/openssl/crypto/sha/sha1.c
index d350c88ee..8dd19431b 100644
--- a/openssl/crypto/sha/sha1.c
+++ b/openssl/crypto/sha/sha1.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -60,7 +60,7 @@
#include <stdlib.h>
#include <openssl/sha.h>
-#define BUFSIZE 1024*16
+#define BUFSIZE 1024*16
void do_fp(FILE *f);
void pt(unsigned char *md);
@@ -69,59 +69,53 @@ int read(int, void *, unsigned int);
#endif
int main(int argc, char **argv)
- {
- int i,err=0;
- FILE *IN;
+{
+ int i, err = 0;
+ FILE *IN;
- if (argc == 1)
- {
- do_fp(stdin);
- }
- else
- {
- for (i=1; i<argc; i++)
- {
- IN=fopen(argv[i],"r");
- if (IN == NULL)
- {
- perror(argv[i]);
- err++;
- continue;
- }
- printf("SHA1(%s)= ",argv[i]);
- do_fp(IN);
- fclose(IN);
- }
- }
- exit(err);
- }
+ if (argc == 1) {
+ do_fp(stdin);
+ } else {
+ for (i = 1; i < argc; i++) {
+ IN = fopen(argv[i], "r");
+ if (IN == NULL) {
+ perror(argv[i]);
+ err++;
+ continue;
+ }
+ printf("SHA1(%s)= ", argv[i]);
+ do_fp(IN);
+ fclose(IN);
+ }
+ }
+ exit(err);
+}
void do_fp(FILE *f)
- {
- SHA_CTX c;
- unsigned char md[SHA_DIGEST_LENGTH];
- int fd;
- int i;
- unsigned char buf[BUFSIZE];
+{
+ SHA_CTX c;
+ unsigned char md[SHA_DIGEST_LENGTH];
+ int fd;
+ int i;
+ unsigned char buf[BUFSIZE];
- fd=fileno(f);
- SHA1_Init(&c);
- for (;;)
- {
- i=read(fd,buf,BUFSIZE);
- if (i <= 0) break;
- SHA1_Update(&c,buf,(unsigned long)i);
- }
- SHA1_Final(&(md[0]),&c);
- pt(md);
- }
+ fd = fileno(f);
+ SHA1_Init(&c);
+ for (;;) {
+ i = read(fd, buf, BUFSIZE);
+ if (i <= 0)
+ break;
+ SHA1_Update(&c, buf, (unsigned long)i);
+ }
+ SHA1_Final(&(md[0]), &c);
+ pt(md);
+}
void pt(unsigned char *md)
- {
- int i;
-
- for (i=0; i<SHA_DIGEST_LENGTH; i++)
- printf("%02x",md[i]);
- printf("\n");
- }
+{
+ int i;
+ for (i = 0; i < SHA_DIGEST_LENGTH; i++)
+ printf("%02x", md[i]);
+ printf("\n");
+}
diff --git a/openssl/crypto/sha/sha1_one.c b/openssl/crypto/sha/sha1_one.c
index c56ec9402..a6dd760a1 100644
--- a/openssl/crypto/sha/sha1_one.c
+++ b/openssl/crypto/sha/sha1_one.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -63,16 +63,17 @@
#ifndef OPENSSL_NO_SHA1
unsigned char *SHA1(const unsigned char *d, size_t n, unsigned char *md)
- {
- SHA_CTX c;
- static unsigned char m[SHA_DIGEST_LENGTH];
+{
+ SHA_CTX c;
+ static unsigned char m[SHA_DIGEST_LENGTH];
- if (md == NULL) md=m;
- if (!SHA1_Init(&c))
- return NULL;
- SHA1_Update(&c,d,n);
- SHA1_Final(md,&c);
- OPENSSL_cleanse(&c,sizeof(c));
- return(md);
- }
+ if (md == NULL)
+ md = m;
+ if (!SHA1_Init(&c))
+ return NULL;
+ SHA1_Update(&c, d, n);
+ SHA1_Final(md, &c);
+ OPENSSL_cleanse(&c, sizeof(c));
+ return (md);
+}
#endif
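
[Editor's note: when md is NULL, SHA1() above returns a pointer to the function-local static buffer m, so repeated or concurrent calls overwrite each other's result. Passing a caller-owned buffer sidesteps that; a minimal sketch:]

#include <openssl/sha.h>

int sha1_to_buf(const unsigned char *msg, size_t len,
                unsigned char md[SHA_DIGEST_LENGTH])
{
    /* SHA1() returns NULL only when SHA1_Init() fails */
    return SHA1(msg, len, md) != NULL;
}
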
diff --git a/openssl/crypto/sha/sha1dgst.c b/openssl/crypto/sha/sha1dgst.c
index a98690225..a67f1fe36 100644
--- a/openssl/crypto/sha/sha1dgst.c
+++ b/openssl/crypto/sha/sha1dgst.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -60,16 +60,15 @@
#include <openssl/opensslconf.h>
#if !defined(OPENSSL_NO_SHA1) && !defined(OPENSSL_NO_SHA)
-#undef SHA_0
-#define SHA_1
+# undef SHA_0
+# define SHA_1
-#include <openssl/opensslv.h>
+# include <openssl/opensslv.h>
-const char SHA1_version[]="SHA1" OPENSSL_VERSION_PTEXT;
+const char SHA1_version[] = "SHA1" OPENSSL_VERSION_PTEXT;
/* The implementation is in ../md32_common.h */
-#include "sha_locl.h"
+# include "sha_locl.h"
#endif
-
diff --git a/openssl/crypto/sha/sha1test.c b/openssl/crypto/sha/sha1test.c
index 6feb3964c..0052a95c7 100644
--- a/openssl/crypto/sha/sha1test.c
+++ b/openssl/crypto/sha/sha1test.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -66,113 +66,109 @@
int main(int argc, char *argv[])
{
printf("No SHA support\n");
- return(0);
+ return (0);
}
#else
-#include <openssl/evp.h>
-#include <openssl/sha.h>
+# include <openssl/evp.h>
+# include <openssl/sha.h>
-#ifdef CHARSET_EBCDIC
-#include <openssl/ebcdic.h>
-#endif
+# ifdef CHARSET_EBCDIC
+# include <openssl/ebcdic.h>
+# endif
-#undef SHA_0 /* FIPS 180 */
-#define SHA_1 /* FIPS 180-1 */
-
-static char *test[]={
- "abc",
- "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
- NULL,
- };
-
-#ifdef SHA_0
-static char *ret[]={
- "0164b8a914cd2a5e74c4f7ff082c4d97f1edf880",
- "d2516ee1acfa5baf33dfc1c471e438449ef134c8",
- };
-static char *bigret=
- "3232affa48628a26653b5aaa44541fd90d690603";
-#endif
-#ifdef SHA_1
-static char *ret[]={
- "a9993e364706816aba3e25717850c26c9cd0d89d",
- "84983e441c3bd26ebaae4aa1f95129e5e54670f1",
- };
-static char *bigret=
- "34aa973cd4c4daa4f61eeb2bdbad27316534016f";
-#endif
+# undef SHA_0 /* FIPS 180 */
+# define SHA_1 /* FIPS 180-1 */
+
+static char *test[] = {
+ "abc",
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+ NULL,
+};
+
+# ifdef SHA_0
+static char *ret[] = {
+ "0164b8a914cd2a5e74c4f7ff082c4d97f1edf880",
+ "d2516ee1acfa5baf33dfc1c471e438449ef134c8",
+};
+
+static char *bigret = "3232affa48628a26653b5aaa44541fd90d690603";
+# endif
+# ifdef SHA_1
+static char *ret[] = {
+ "a9993e364706816aba3e25717850c26c9cd0d89d",
+ "84983e441c3bd26ebaae4aa1f95129e5e54670f1",
+};
+
+static char *bigret = "34aa973cd4c4daa4f61eeb2bdbad27316534016f";
+# endif
static char *pt(unsigned char *md);
int main(int argc, char *argv[])
- {
- int i,err=0;
- char **P,**R;
- static unsigned char buf[1000];
- char *p,*r;
- EVP_MD_CTX c;
- unsigned char md[SHA_DIGEST_LENGTH];
-
-#ifdef CHARSET_EBCDIC
- ebcdic2ascii(test[0], test[0], strlen(test[0]));
- ebcdic2ascii(test[1], test[1], strlen(test[1]));
-#endif
+{
+ int i, err = 0;
+ char **P, **R;
+ static unsigned char buf[1000];
+ char *p, *r;
+ EVP_MD_CTX c;
+ unsigned char md[SHA_DIGEST_LENGTH];
- EVP_MD_CTX_init(&c);
- P=test;
- R=ret;
- i=1;
- while (*P != NULL)
- {
- EVP_Digest(*P,strlen((char *)*P),md,NULL,EVP_sha1(), NULL);
- p=pt(md);
- if (strcmp(p,(char *)*R) != 0)
- {
- printf("error calculating SHA1 on '%s'\n",*P);
- printf("got %s instead of %s\n",p,*R);
- err++;
- }
- else
- printf("test %d ok\n",i);
- i++;
- R++;
- P++;
- }
-
- memset(buf,'a',1000);
-#ifdef CHARSET_EBCDIC
- ebcdic2ascii(buf, buf, 1000);
-#endif /*CHARSET_EBCDIC*/
- EVP_DigestInit_ex(&c,EVP_sha1(), NULL);
- for (i=0; i<1000; i++)
- EVP_DigestUpdate(&c,buf,1000);
- EVP_DigestFinal_ex(&c,md,NULL);
- p=pt(md);
-
- r=bigret;
- if (strcmp(p,r) != 0)
- {
- printf("error calculating SHA1 on 'a' * 1000\n");
- printf("got %s instead of %s\n",p,r);
- err++;
- }
- else
- printf("test 3 ok\n");
-
-#ifdef OPENSSL_SYS_NETWARE
- if (err) printf("ERROR: %d\n", err);
-#endif
- EXIT(err);
- EVP_MD_CTX_cleanup(&c);
- return(0);
- }
+# ifdef CHARSET_EBCDIC
+ ebcdic2ascii(test[0], test[0], strlen(test[0]));
+ ebcdic2ascii(test[1], test[1], strlen(test[1]));
+# endif
+
+ EVP_MD_CTX_init(&c);
+ P = test;
+ R = ret;
+ i = 1;
+ while (*P != NULL) {
+ EVP_Digest(*P, strlen((char *)*P), md, NULL, EVP_sha1(), NULL);
+ p = pt(md);
+ if (strcmp(p, (char *)*R) != 0) {
+ printf("error calculating SHA1 on '%s'\n", *P);
+ printf("got %s instead of %s\n", p, *R);
+ err++;
+ } else
+ printf("test %d ok\n", i);
+ i++;
+ R++;
+ P++;
+ }
+
+ memset(buf, 'a', 1000);
+# ifdef CHARSET_EBCDIC
+ ebcdic2ascii(buf, buf, 1000);
+# endif /* CHARSET_EBCDIC */
+ EVP_DigestInit_ex(&c, EVP_sha1(), NULL);
+ for (i = 0; i < 1000; i++)
+ EVP_DigestUpdate(&c, buf, 1000);
+ EVP_DigestFinal_ex(&c, md, NULL);
+ p = pt(md);
+
+ r = bigret;
+ if (strcmp(p, r) != 0) {
+ printf("error calculating SHA1 on 'a' * 1000\n");
+ printf("got %s instead of %s\n", p, r);
+ err++;
+ } else
+ printf("test 3 ok\n");
+
+# ifdef OPENSSL_SYS_NETWARE
+ if (err)
+ printf("ERROR: %d\n", err);
+# endif
+ EXIT(err);
+ EVP_MD_CTX_cleanup(&c);
+ return (0);
+}
static char *pt(unsigned char *md)
- {
- int i;
- static char buf[80];
-
- for (i=0; i<SHA_DIGEST_LENGTH; i++)
- sprintf(&(buf[i*2]),"%02x",md[i]);
- return(buf);
- }
+{
+ int i;
+ static char buf[80];
+
+ for (i = 0; i < SHA_DIGEST_LENGTH; i++)
+ sprintf(&(buf[i * 2]), "%02x", md[i]);
+ return (buf);
+}
#endif
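
[Editor's note: test 3 above streams one million 'a' bytes through the EVP layer in 1000-byte slices. The same vector can be reproduced against the raw SHA1 interface; the expected bytes below are bigret ("34aa973c...") spelled out. Sketch only, not part of the test program:]

#include <string.h>
#include <openssl/sha.h>

int million_a_ok(void)
{
    static const unsigned char expect[SHA_DIGEST_LENGTH] = {
        0x34, 0xaa, 0x97, 0x3c, 0xd4, 0xc4, 0xda, 0xa4, 0xf6, 0x1e,
        0xeb, 0x2b, 0xdb, 0xad, 0x27, 0x31, 0x65, 0x34, 0x01, 0x6f
    };
    SHA_CTX c;
    unsigned char md[SHA_DIGEST_LENGTH], buf[1000];
    int i;

    memset(buf, 'a', sizeof(buf));
    SHA1_Init(&c);
    for (i = 0; i < 1000; i++)          /* 1000 * 1000 = 1,000,000 bytes */
        SHA1_Update(&c, buf, sizeof(buf));
    SHA1_Final(md, &c);
    return memcmp(md, expect, sizeof(expect)) == 0;
}
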
diff --git a/openssl/crypto/sha/sha256.c b/openssl/crypto/sha/sha256.c
index 4eae07484..72a115936 100644
--- a/openssl/crypto/sha/sha256.c
+++ b/openssl/crypto/sha/sha256.c
@@ -7,73 +7,88 @@
#include <openssl/opensslconf.h>
#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)
-#include <stdlib.h>
-#include <string.h>
+# include <stdlib.h>
+# include <string.h>
-#include <openssl/crypto.h>
-#include <openssl/sha.h>
-#include <openssl/opensslv.h>
+# include <openssl/crypto.h>
+# include <openssl/sha.h>
+# include <openssl/opensslv.h>
-const char SHA256_version[]="SHA-256" OPENSSL_VERSION_PTEXT;
+const char SHA256_version[] = "SHA-256" OPENSSL_VERSION_PTEXT;
fips_md_init_ctx(SHA224, SHA256)
- {
- memset (c,0,sizeof(*c));
- c->h[0]=0xc1059ed8UL; c->h[1]=0x367cd507UL;
- c->h[2]=0x3070dd17UL; c->h[3]=0xf70e5939UL;
- c->h[4]=0xffc00b31UL; c->h[5]=0x68581511UL;
- c->h[6]=0x64f98fa7UL; c->h[7]=0xbefa4fa4UL;
- c->md_len=SHA224_DIGEST_LENGTH;
- return 1;
- }
+{
+ memset(c, 0, sizeof(*c));
+ c->h[0] = 0xc1059ed8UL;
+ c->h[1] = 0x367cd507UL;
+ c->h[2] = 0x3070dd17UL;
+ c->h[3] = 0xf70e5939UL;
+ c->h[4] = 0xffc00b31UL;
+ c->h[5] = 0x68581511UL;
+ c->h[6] = 0x64f98fa7UL;
+ c->h[7] = 0xbefa4fa4UL;
+ c->md_len = SHA224_DIGEST_LENGTH;
+ return 1;
+}
fips_md_init(SHA256)
- {
- memset (c,0,sizeof(*c));
- c->h[0]=0x6a09e667UL; c->h[1]=0xbb67ae85UL;
- c->h[2]=0x3c6ef372UL; c->h[3]=0xa54ff53aUL;
- c->h[4]=0x510e527fUL; c->h[5]=0x9b05688cUL;
- c->h[6]=0x1f83d9abUL; c->h[7]=0x5be0cd19UL;
- c->md_len=SHA256_DIGEST_LENGTH;
- return 1;
- }
+{
+ memset(c, 0, sizeof(*c));
+ c->h[0] = 0x6a09e667UL;
+ c->h[1] = 0xbb67ae85UL;
+ c->h[2] = 0x3c6ef372UL;
+ c->h[3] = 0xa54ff53aUL;
+ c->h[4] = 0x510e527fUL;
+ c->h[5] = 0x9b05688cUL;
+ c->h[6] = 0x1f83d9abUL;
+ c->h[7] = 0x5be0cd19UL;
+ c->md_len = SHA256_DIGEST_LENGTH;
+ return 1;
+}
unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md)
- {
- SHA256_CTX c;
- static unsigned char m[SHA224_DIGEST_LENGTH];
-
- if (md == NULL) md=m;
- SHA224_Init(&c);
- SHA256_Update(&c,d,n);
- SHA256_Final(md,&c);
- OPENSSL_cleanse(&c,sizeof(c));
- return(md);
- }
+{
+ SHA256_CTX c;
+ static unsigned char m[SHA224_DIGEST_LENGTH];
+
+ if (md == NULL)
+ md = m;
+ SHA224_Init(&c);
+ SHA256_Update(&c, d, n);
+ SHA256_Final(md, &c);
+ OPENSSL_cleanse(&c, sizeof(c));
+ return (md);
+}
unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md)
- {
- SHA256_CTX c;
- static unsigned char m[SHA256_DIGEST_LENGTH];
-
- if (md == NULL) md=m;
- SHA256_Init(&c);
- SHA256_Update(&c,d,n);
- SHA256_Final(md,&c);
- OPENSSL_cleanse(&c,sizeof(c));
- return(md);
- }
+{
+ SHA256_CTX c;
+ static unsigned char m[SHA256_DIGEST_LENGTH];
+
+ if (md == NULL)
+ md = m;
+ SHA256_Init(&c);
+ SHA256_Update(&c, d, n);
+ SHA256_Final(md, &c);
+ OPENSSL_cleanse(&c, sizeof(c));
+ return (md);
+}
int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
-{ return SHA256_Update (c,data,len); }
-int SHA224_Final (unsigned char *md, SHA256_CTX *c)
-{ return SHA256_Final (md,c); }
+{
+ return SHA256_Update(c, data, len);
+}
-#define DATA_ORDER_IS_BIG_ENDIAN
+int SHA224_Final(unsigned char *md, SHA256_CTX *c)
+{
+ return SHA256_Final(md, c);
+}
+
+# define DATA_ORDER_IS_BIG_ENDIAN
-#define HASH_LONG SHA_LONG
-#define HASH_CTX SHA256_CTX
-#define HASH_CBLOCK SHA_CBLOCK
+# define HASH_LONG SHA_LONG
+# define HASH_CTX SHA256_CTX
+# define HASH_CBLOCK SHA_CBLOCK
/*
* Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
* default: case below covers for it. It's not clear however if it's
@@ -82,201 +97,291 @@ int SHA224_Final (unsigned char *md, SHA256_CTX *c)
 * Idea behind separate cases for pre-defined lengths is to let the
* compiler decide if it's appropriate to unroll small loops.
*/
-#define HASH_MAKE_STRING(c,s) do { \
- unsigned long ll; \
- unsigned int nn; \
- switch ((c)->md_len) \
- { case SHA224_DIGEST_LENGTH: \
- for (nn=0;nn<SHA224_DIGEST_LENGTH/4;nn++) \
- { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
- break; \
- case SHA256_DIGEST_LENGTH: \
- for (nn=0;nn<SHA256_DIGEST_LENGTH/4;nn++) \
- { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
- break; \
- default: \
- if ((c)->md_len > SHA256_DIGEST_LENGTH) \
- return 0; \
- for (nn=0;nn<(c)->md_len/4;nn++) \
- { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
- break; \
- } \
- } while (0)
-
-#define HASH_UPDATE SHA256_Update
-#define HASH_TRANSFORM SHA256_Transform
-#define HASH_FINAL SHA256_Final
-#define HASH_BLOCK_DATA_ORDER sha256_block_data_order
-#ifndef SHA256_ASM
+# define HASH_MAKE_STRING(c,s) do { \
+ unsigned long ll; \
+ unsigned int nn; \
+ switch ((c)->md_len) \
+ { case SHA224_DIGEST_LENGTH: \
+ for (nn=0;nn<SHA224_DIGEST_LENGTH/4;nn++) \
+ { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
+ break; \
+ case SHA256_DIGEST_LENGTH: \
+ for (nn=0;nn<SHA256_DIGEST_LENGTH/4;nn++) \
+ { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
+ break; \
+ default: \
+ if ((c)->md_len > SHA256_DIGEST_LENGTH) \
+ return 0; \
+ for (nn=0;nn<(c)->md_len/4;nn++) \
+ { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
+ break; \
+ } \
+ } while (0)
+
+# define HASH_UPDATE SHA256_Update
+# define HASH_TRANSFORM SHA256_Transform
+# define HASH_FINAL SHA256_Final
+# define HASH_BLOCK_DATA_ORDER sha256_block_data_order
+# ifndef SHA256_ASM
static
-#endif
-void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num);
+# endif
+void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num);
-#include "md32_common.h"
+# include "md32_common.h"
-#ifndef SHA256_ASM
+# ifndef SHA256_ASM
static const SHA_LONG K256[64] = {
- 0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL,
- 0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL,
- 0xd807aa98UL,0x12835b01UL,0x243185beUL,0x550c7dc3UL,
- 0x72be5d74UL,0x80deb1feUL,0x9bdc06a7UL,0xc19bf174UL,
- 0xe49b69c1UL,0xefbe4786UL,0x0fc19dc6UL,0x240ca1ccUL,
- 0x2de92c6fUL,0x4a7484aaUL,0x5cb0a9dcUL,0x76f988daUL,
- 0x983e5152UL,0xa831c66dUL,0xb00327c8UL,0xbf597fc7UL,
- 0xc6e00bf3UL,0xd5a79147UL,0x06ca6351UL,0x14292967UL,
- 0x27b70a85UL,0x2e1b2138UL,0x4d2c6dfcUL,0x53380d13UL,
- 0x650a7354UL,0x766a0abbUL,0x81c2c92eUL,0x92722c85UL,
- 0xa2bfe8a1UL,0xa81a664bUL,0xc24b8b70UL,0xc76c51a3UL,
- 0xd192e819UL,0xd6990624UL,0xf40e3585UL,0x106aa070UL,
- 0x19a4c116UL,0x1e376c08UL,0x2748774cUL,0x34b0bcb5UL,
- 0x391c0cb3UL,0x4ed8aa4aUL,0x5b9cca4fUL,0x682e6ff3UL,
- 0x748f82eeUL,0x78a5636fUL,0x84c87814UL,0x8cc70208UL,
- 0x90befffaUL,0xa4506cebUL,0xbef9a3f7UL,0xc67178f2UL };
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
+ 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
+ 0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
+ 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
+ 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
+ 0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
+ 0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
+ 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
+ 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
+ 0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
+ 0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
+ 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
+ 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
/*
* FIPS specification refers to right rotations, while our ROTATE macro
* is left one. This is why you might notice that rotation coefficients
* differ from those observed in FIPS document by 32-N...
*/
-#define Sigma0(x) (ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
-#define Sigma1(x) (ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
-#define sigma0(x) (ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
-#define sigma1(x) (ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))
-
-#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
-#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
-
-#ifdef OPENSSL_SMALL_FOOTPRINT
-
-static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
- {
- unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1,T2;
- SHA_LONG X[16],l;
- int i;
- const unsigned char *data=in;
-
- while (num--) {
-
- a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
- e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
-
- for (i=0;i<16;i++)
- {
- HOST_c2l(data,l); T1 = X[i] = l;
- T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
- T2 = Sigma0(a) + Maj(a,b,c);
- h = g; g = f; f = e; e = d + T1;
- d = c; c = b; b = a; a = T1 + T2;
- }
-
- for (;i<64;i++)
- {
- s0 = X[(i+1)&0x0f]; s0 = sigma0(s0);
- s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
-
- T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
- T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
- T2 = Sigma0(a) + Maj(a,b,c);
- h = g; g = f; f = e; e = d + T1;
- d = c; c = b; b = a; a = T1 + T2;
- }
-
- ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
- ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
-
- }
+# define Sigma0(x) (ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
+# define Sigma1(x) (ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
+# define sigma0(x) (ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
+# define sigma1(x) (ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))
+
+# define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+# define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+# ifdef OPENSSL_SMALL_FOOTPRINT
+
+static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
+ size_t num)
+{
+ unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1, T2;
+ SHA_LONG X[16], l;
+ int i;
+ const unsigned char *data = in;
+
+ while (num--) {
+
+ a = ctx->h[0];
+ b = ctx->h[1];
+ c = ctx->h[2];
+ d = ctx->h[3];
+ e = ctx->h[4];
+ f = ctx->h[5];
+ g = ctx->h[6];
+ h = ctx->h[7];
+
+ for (i = 0; i < 16; i++) {
+ HOST_c2l(data, l);
+ T1 = X[i] = l;
+ T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i];
+ T2 = Sigma0(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+ }
+
+ for (; i < 64; i++) {
+ s0 = X[(i + 1) & 0x0f];
+ s0 = sigma0(s0);
+ s1 = X[(i + 14) & 0x0f];
+ s1 = sigma1(s1);
+
+ T1 = X[i & 0xf] += s0 + s1 + X[(i + 9) & 0xf];
+ T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i];
+ T2 = Sigma0(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+ }
+
+ ctx->h[0] += a;
+ ctx->h[1] += b;
+ ctx->h[2] += c;
+ ctx->h[3] += d;
+ ctx->h[4] += e;
+ ctx->h[5] += f;
+ ctx->h[6] += g;
+ ctx->h[7] += h;
+
+ }
}
-#else
-
-#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
- T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i]; \
- h = Sigma0(a) + Maj(a,b,c); \
- d += T1; h += T1; } while (0)
-
-#define ROUND_16_63(i,a,b,c,d,e,f,g,h,X) do { \
- s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); \
- s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); \
- T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \
- ROUND_00_15(i,a,b,c,d,e,f,g,h); } while (0)
-
-static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
- {
- unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1;
- SHA_LONG X[16];
- int i;
- const unsigned char *data=in;
- const union { long one; char little; } is_endian = {1};
-
- while (num--) {
-
- a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
- e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
-
- if (!is_endian.little && sizeof(SHA_LONG)==4 && ((size_t)in%4)==0)
- {
- const SHA_LONG *W=(const SHA_LONG *)data;
-
- T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h);
- T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g);
- T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f);
- T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e);
- T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d);
- T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c);
- T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b);
- T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a);
- T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h);
- T1 = X[9] = W[9]; ROUND_00_15(9,h,a,b,c,d,e,f,g);
- T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f);
- T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e);
- T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d);
- T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c);
- T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b);
- T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a);
-
- data += SHA256_CBLOCK;
- }
- else
- {
- SHA_LONG l;
-
- HOST_c2l(data,l); T1 = X[0] = l; ROUND_00_15(0,a,b,c,d,e,f,g,h);
- HOST_c2l(data,l); T1 = X[1] = l; ROUND_00_15(1,h,a,b,c,d,e,f,g);
- HOST_c2l(data,l); T1 = X[2] = l; ROUND_00_15(2,g,h,a,b,c,d,e,f);
- HOST_c2l(data,l); T1 = X[3] = l; ROUND_00_15(3,f,g,h,a,b,c,d,e);
- HOST_c2l(data,l); T1 = X[4] = l; ROUND_00_15(4,e,f,g,h,a,b,c,d);
- HOST_c2l(data,l); T1 = X[5] = l; ROUND_00_15(5,d,e,f,g,h,a,b,c);
- HOST_c2l(data,l); T1 = X[6] = l; ROUND_00_15(6,c,d,e,f,g,h,a,b);
- HOST_c2l(data,l); T1 = X[7] = l; ROUND_00_15(7,b,c,d,e,f,g,h,a);
- HOST_c2l(data,l); T1 = X[8] = l; ROUND_00_15(8,a,b,c,d,e,f,g,h);
- HOST_c2l(data,l); T1 = X[9] = l; ROUND_00_15(9,h,a,b,c,d,e,f,g);
- HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f);
- HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e);
- HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d);
- HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c);
- HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b);
- HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a);
- }
-
- for (i=16;i<64;i+=8)
- {
- ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X);
- ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X);
- ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X);
- ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X);
- ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X);
- ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X);
- ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X);
- ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X);
- }
-
- ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
- ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
-
- }
- }
-
-#endif
-#endif /* SHA256_ASM */
-
-#endif /* OPENSSL_NO_SHA256 */
+# else
+
+# define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i]; \
+ h = Sigma0(a) + Maj(a,b,c); \
+ d += T1; h += T1; } while (0)
+
+# define ROUND_16_63(i,a,b,c,d,e,f,g,h,X) do { \
+ s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); \
+ s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); \
+ T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \
+ ROUND_00_15(i,a,b,c,d,e,f,g,h); } while (0)
+
+static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
+ size_t num)
+{
+ unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1;
+ SHA_LONG X[16];
+ int i;
+ const unsigned char *data = in;
+ const union {
+ long one;
+ char little;
+ } is_endian = {
+ 1
+ };
+
+ while (num--) {
+
+ a = ctx->h[0];
+ b = ctx->h[1];
+ c = ctx->h[2];
+ d = ctx->h[3];
+ e = ctx->h[4];
+ f = ctx->h[5];
+ g = ctx->h[6];
+ h = ctx->h[7];
+
+ if (!is_endian.little && sizeof(SHA_LONG) == 4
+ && ((size_t)in % 4) == 0) {
+ const SHA_LONG *W = (const SHA_LONG *)data;
+
+ T1 = X[0] = W[0];
+ ROUND_00_15(0, a, b, c, d, e, f, g, h);
+ T1 = X[1] = W[1];
+ ROUND_00_15(1, h, a, b, c, d, e, f, g);
+ T1 = X[2] = W[2];
+ ROUND_00_15(2, g, h, a, b, c, d, e, f);
+ T1 = X[3] = W[3];
+ ROUND_00_15(3, f, g, h, a, b, c, d, e);
+ T1 = X[4] = W[4];
+ ROUND_00_15(4, e, f, g, h, a, b, c, d);
+ T1 = X[5] = W[5];
+ ROUND_00_15(5, d, e, f, g, h, a, b, c);
+ T1 = X[6] = W[6];
+ ROUND_00_15(6, c, d, e, f, g, h, a, b);
+ T1 = X[7] = W[7];
+ ROUND_00_15(7, b, c, d, e, f, g, h, a);
+ T1 = X[8] = W[8];
+ ROUND_00_15(8, a, b, c, d, e, f, g, h);
+ T1 = X[9] = W[9];
+ ROUND_00_15(9, h, a, b, c, d, e, f, g);
+ T1 = X[10] = W[10];
+ ROUND_00_15(10, g, h, a, b, c, d, e, f);
+ T1 = X[11] = W[11];
+ ROUND_00_15(11, f, g, h, a, b, c, d, e);
+ T1 = X[12] = W[12];
+ ROUND_00_15(12, e, f, g, h, a, b, c, d);
+ T1 = X[13] = W[13];
+ ROUND_00_15(13, d, e, f, g, h, a, b, c);
+ T1 = X[14] = W[14];
+ ROUND_00_15(14, c, d, e, f, g, h, a, b);
+ T1 = X[15] = W[15];
+ ROUND_00_15(15, b, c, d, e, f, g, h, a);
+
+ data += SHA256_CBLOCK;
+ } else {
+ SHA_LONG l;
+
+ HOST_c2l(data, l);
+ T1 = X[0] = l;
+ ROUND_00_15(0, a, b, c, d, e, f, g, h);
+ HOST_c2l(data, l);
+ T1 = X[1] = l;
+ ROUND_00_15(1, h, a, b, c, d, e, f, g);
+ HOST_c2l(data, l);
+ T1 = X[2] = l;
+ ROUND_00_15(2, g, h, a, b, c, d, e, f);
+ HOST_c2l(data, l);
+ T1 = X[3] = l;
+ ROUND_00_15(3, f, g, h, a, b, c, d, e);
+ HOST_c2l(data, l);
+ T1 = X[4] = l;
+ ROUND_00_15(4, e, f, g, h, a, b, c, d);
+ HOST_c2l(data, l);
+ T1 = X[5] = l;
+ ROUND_00_15(5, d, e, f, g, h, a, b, c);
+ HOST_c2l(data, l);
+ T1 = X[6] = l;
+ ROUND_00_15(6, c, d, e, f, g, h, a, b);
+ HOST_c2l(data, l);
+ T1 = X[7] = l;
+ ROUND_00_15(7, b, c, d, e, f, g, h, a);
+ HOST_c2l(data, l);
+ T1 = X[8] = l;
+ ROUND_00_15(8, a, b, c, d, e, f, g, h);
+ HOST_c2l(data, l);
+ T1 = X[9] = l;
+ ROUND_00_15(9, h, a, b, c, d, e, f, g);
+ HOST_c2l(data, l);
+ T1 = X[10] = l;
+ ROUND_00_15(10, g, h, a, b, c, d, e, f);
+ HOST_c2l(data, l);
+ T1 = X[11] = l;
+ ROUND_00_15(11, f, g, h, a, b, c, d, e);
+ HOST_c2l(data, l);
+ T1 = X[12] = l;
+ ROUND_00_15(12, e, f, g, h, a, b, c, d);
+ HOST_c2l(data, l);
+ T1 = X[13] = l;
+ ROUND_00_15(13, d, e, f, g, h, a, b, c);
+ HOST_c2l(data, l);
+ T1 = X[14] = l;
+ ROUND_00_15(14, c, d, e, f, g, h, a, b);
+ HOST_c2l(data, l);
+ T1 = X[15] = l;
+ ROUND_00_15(15, b, c, d, e, f, g, h, a);
+ }
+
+ for (i = 16; i < 64; i += 8) {
+ ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X);
+ ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X);
+ ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X);
+ ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X);
+ ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X);
+ ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X);
+ ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X);
+ ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X);
+ }
+
+ ctx->h[0] += a;
+ ctx->h[1] += b;
+ ctx->h[2] += c;
+ ctx->h[3] += d;
+ ctx->h[4] += e;
+ ctx->h[5] += f;
+ ctx->h[6] += g;
+ ctx->h[7] += h;
+
+ }
+}
+
+# endif
+# endif /* SHA256_ASM */
+
+#endif /* OPENSSL_NO_SHA256 */
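
[Editor's note: a standalone sketch demonstrating the remark in sha256.c above -- ROTATE is a *left* rotate, so the Sigma coefficients in the code are 32-N of the right rotations FIPS 180-2 specifies (30/19/10 versus ROTR 2/13/22 for Sigma0, 26/21/7 versus ROTR 6/11/25 for Sigma1):]

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)  /* portable ROTATE */
{
    return (x << n) | (x >> (32 - n));
}

static uint32_t rotr32(uint32_t x, unsigned n)  /* FIPS-style ROTR */
{
    return (x >> n) | (x << (32 - n));
}

int main(void)
{
    uint32_t x = 0xdeadbeefU;

    assert((rotl32(x, 30) ^ rotl32(x, 19) ^ rotl32(x, 10)) ==   /* Sigma0 */
           (rotr32(x, 2)  ^ rotr32(x, 13) ^ rotr32(x, 22)));
    assert((rotl32(x, 26) ^ rotl32(x, 21) ^ rotl32(x, 7))  ==   /* Sigma1 */
           (rotr32(x, 6)  ^ rotr32(x, 11) ^ rotr32(x, 25)));
    return 0;
}
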
diff --git a/openssl/crypto/sha/sha256t.c b/openssl/crypto/sha/sha256t.c
index 6b4a3bd00..35dbbc2a9 100644
--- a/openssl/crypto/sha/sha256t.c
+++ b/openssl/crypto/sha/sha256t.c
@@ -14,134 +14,145 @@
int main(int argc, char *argv[])
{
printf("No SHA256 support\n");
- return(0);
+ return (0);
}
#else
unsigned char app_b1[SHA256_DIGEST_LENGTH] = {
- 0xba,0x78,0x16,0xbf,0x8f,0x01,0xcf,0xea,
- 0x41,0x41,0x40,0xde,0x5d,0xae,0x22,0x23,
- 0xb0,0x03,0x61,0xa3,0x96,0x17,0x7a,0x9c,
- 0xb4,0x10,0xff,0x61,0xf2,0x00,0x15,0xad };
+ 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
+ 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
+ 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad
+};
unsigned char app_b2[SHA256_DIGEST_LENGTH] = {
- 0x24,0x8d,0x6a,0x61,0xd2,0x06,0x38,0xb8,
- 0xe5,0xc0,0x26,0x93,0x0c,0x3e,0x60,0x39,
- 0xa3,0x3c,0xe4,0x59,0x64,0xff,0x21,0x67,
- 0xf6,0xec,0xed,0xd4,0x19,0xdb,0x06,0xc1 };
+ 0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
+ 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
+ 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
+ 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1
+};
unsigned char app_b3[SHA256_DIGEST_LENGTH] = {
- 0xcd,0xc7,0x6e,0x5c,0x99,0x14,0xfb,0x92,
- 0x81,0xa1,0xc7,0xe2,0x84,0xd7,0x3e,0x67,
- 0xf1,0x80,0x9a,0x48,0xa4,0x97,0x20,0x0e,
- 0x04,0x6d,0x39,0xcc,0xc7,0x11,0x2c,0xd0 };
+ 0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92,
+ 0x81, 0xa1, 0xc7, 0xe2, 0x84, 0xd7, 0x3e, 0x67,
+ 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97, 0x20, 0x0e,
+ 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0
+};
unsigned char addenum_1[SHA224_DIGEST_LENGTH] = {
- 0x23,0x09,0x7d,0x22,0x34,0x05,0xd8,0x22,
- 0x86,0x42,0xa4,0x77,0xbd,0xa2,0x55,0xb3,
- 0x2a,0xad,0xbc,0xe4,0xbd,0xa0,0xb3,0xf7,
- 0xe3,0x6c,0x9d,0xa7 };
+ 0x23, 0x09, 0x7d, 0x22, 0x34, 0x05, 0xd8, 0x22,
+ 0x86, 0x42, 0xa4, 0x77, 0xbd, 0xa2, 0x55, 0xb3,
+ 0x2a, 0xad, 0xbc, 0xe4, 0xbd, 0xa0, 0xb3, 0xf7,
+ 0xe3, 0x6c, 0x9d, 0xa7
+};
unsigned char addenum_2[SHA224_DIGEST_LENGTH] = {
- 0x75,0x38,0x8b,0x16,0x51,0x27,0x76,0xcc,
- 0x5d,0xba,0x5d,0xa1,0xfd,0x89,0x01,0x50,
- 0xb0,0xc6,0x45,0x5c,0xb4,0xf5,0x8b,0x19,
- 0x52,0x52,0x25,0x25 };
+ 0x75, 0x38, 0x8b, 0x16, 0x51, 0x27, 0x76, 0xcc,
+ 0x5d, 0xba, 0x5d, 0xa1, 0xfd, 0x89, 0x01, 0x50,
+ 0xb0, 0xc6, 0x45, 0x5c, 0xb4, 0xf5, 0x8b, 0x19,
+ 0x52, 0x52, 0x25, 0x25
+};
unsigned char addenum_3[SHA224_DIGEST_LENGTH] = {
- 0x20,0x79,0x46,0x55,0x98,0x0c,0x91,0xd8,
- 0xbb,0xb4,0xc1,0xea,0x97,0x61,0x8a,0x4b,
- 0xf0,0x3f,0x42,0x58,0x19,0x48,0xb2,0xee,
- 0x4e,0xe7,0xad,0x67 };
-
-int main (int argc,char **argv)
-{ unsigned char md[SHA256_DIGEST_LENGTH];
- int i;
- EVP_MD_CTX evp;
-
- fprintf(stdout,"Testing SHA-256 ");
-
- EVP_Digest ("abc",3,md,NULL,EVP_sha256(),NULL);
- if (memcmp(md,app_b1,sizeof(app_b1)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 1 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_Digest ("abcdbcde""cdefdefg""efghfghi""ghijhijk"
- "ijkljklm""klmnlmno""mnopnopq",56,md,NULL,EVP_sha256(),NULL);
- if (memcmp(md,app_b2,sizeof(app_b2)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 2 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_MD_CTX_init (&evp);
- EVP_DigestInit_ex (&evp,EVP_sha256(),NULL);
- for (i=0;i<1000000;i+=160)
- EVP_DigestUpdate (&evp, "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa",
- (1000000-i)<160?1000000-i:160);
- EVP_DigestFinal_ex (&evp,md,NULL);
- EVP_MD_CTX_cleanup (&evp);
-
- if (memcmp(md,app_b3,sizeof(app_b3)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 3 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- fprintf(stdout," passed.\n"); fflush(stdout);
-
- fprintf(stdout,"Testing SHA-224 ");
-
- EVP_Digest ("abc",3,md,NULL,EVP_sha224(),NULL);
- if (memcmp(md,addenum_1,sizeof(addenum_1)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 1 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_Digest ("abcdbcde""cdefdefg""efghfghi""ghijhijk"
- "ijkljklm""klmnlmno""mnopnopq",56,md,NULL,EVP_sha224(),NULL);
- if (memcmp(md,addenum_2,sizeof(addenum_2)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 2 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_MD_CTX_init (&evp);
- EVP_DigestInit_ex (&evp,EVP_sha224(),NULL);
- for (i=0;i<1000000;i+=64)
- EVP_DigestUpdate (&evp, "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa",
- (1000000-i)<64?1000000-i:64);
- EVP_DigestFinal_ex (&evp,md,NULL);
- EVP_MD_CTX_cleanup (&evp);
-
- if (memcmp(md,addenum_3,sizeof(addenum_3)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 3 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- fprintf(stdout," passed.\n"); fflush(stdout);
-
- return 0;
+ 0x20, 0x79, 0x46, 0x55, 0x98, 0x0c, 0x91, 0xd8,
+ 0xbb, 0xb4, 0xc1, 0xea, 0x97, 0x61, 0x8a, 0x4b,
+ 0xf0, 0x3f, 0x42, 0x58, 0x19, 0x48, 0xb2, 0xee,
+ 0x4e, 0xe7, 0xad, 0x67
+};
+
+int main(int argc, char **argv)
+{
+ unsigned char md[SHA256_DIGEST_LENGTH];
+ int i;
+ EVP_MD_CTX evp;
+
+ fprintf(stdout, "Testing SHA-256 ");
+
+ EVP_Digest("abc", 3, md, NULL, EVP_sha256(), NULL);
+ if (memcmp(md, app_b1, sizeof(app_b1))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 1 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_Digest("abcdbcde" "cdefdefg" "efghfghi" "ghijhijk"
+ "ijkljklm" "klmnlmno" "mnopnopq", 56, md, NULL, EVP_sha256(),
+ NULL);
+ if (memcmp(md, app_b2, sizeof(app_b2))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 2 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_MD_CTX_init(&evp);
+ EVP_DigestInit_ex(&evp, EVP_sha256(), NULL);
+ for (i = 0; i < 1000000; i += 160)
+ EVP_DigestUpdate(&evp, "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa",
+ (1000000 - i) < 160 ? 1000000 - i : 160);
+ EVP_DigestFinal_ex(&evp, md, NULL);
+ EVP_MD_CTX_cleanup(&evp);
+
+ if (memcmp(md, app_b3, sizeof(app_b3))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 3 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ fprintf(stdout, " passed.\n");
+ fflush(stdout);
+
+ fprintf(stdout, "Testing SHA-224 ");
+
+ EVP_Digest("abc", 3, md, NULL, EVP_sha224(), NULL);
+ if (memcmp(md, addenum_1, sizeof(addenum_1))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 1 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_Digest("abcdbcde" "cdefdefg" "efghfghi" "ghijhijk"
+ "ijkljklm" "klmnlmno" "mnopnopq", 56, md, NULL, EVP_sha224(),
+ NULL);
+ if (memcmp(md, addenum_2, sizeof(addenum_2))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 2 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_MD_CTX_init(&evp);
+ EVP_DigestInit_ex(&evp, EVP_sha224(), NULL);
+ for (i = 0; i < 1000000; i += 64)
+ EVP_DigestUpdate(&evp, "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa",
+ (1000000 - i) < 64 ? 1000000 - i : 64);
+ EVP_DigestFinal_ex(&evp, md, NULL);
+ EVP_MD_CTX_cleanup(&evp);
+
+ if (memcmp(md, addenum_3, sizeof(addenum_3))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 3 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ fprintf(stdout, " passed.\n");
+ fflush(stdout);
+
+ return 0;
}
#endif
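
[Editor's note: the first vector above (app_b1, SHA-256 of "abc") can also be checked through the raw SHA256() one-shot declared in sha.h rather than EVP; the expected bytes are copied from app_b1. Minimal sketch, not part of the test program:]

#include <string.h>
#include <openssl/sha.h>

int check_abc(void)
{
    static const unsigned char expect[SHA256_DIGEST_LENGTH] = {
        0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
        0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
        0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
        0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad
    };
    unsigned char md[SHA256_DIGEST_LENGTH];

    SHA256((const unsigned char *)"abc", 3, md);
    return memcmp(md, expect, sizeof(expect)) == 0;     /* 1 on success */
}
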
diff --git a/openssl/crypto/sha/sha512.c b/openssl/crypto/sha/sha512.c
index 50c229dde..3bf66ae19 100644
--- a/openssl/crypto/sha/sha512.c
+++ b/openssl/crypto/sha/sha512.c
@@ -6,7 +6,7 @@
*/
#include <openssl/opensslconf.h>
#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512)
-/*
+/*-
* IMPLEMENTATION NOTES.
*
* As you might have noticed 32-bit hash algorithms:
@@ -39,566 +39,646 @@
* As this implementation relies on 64-bit integer type, it's totally
* inappropriate for platforms which don't support it, most notably
* 16-bit platforms.
- * <appro@fy.chalmers.se>
+ * <appro@fy.chalmers.se>
*/
-#include <stdlib.h>
-#include <string.h>
+# include <stdlib.h>
+# include <string.h>
-#include <openssl/crypto.h>
-#include <openssl/sha.h>
-#include <openssl/opensslv.h>
+# include <openssl/crypto.h>
+# include <openssl/sha.h>
+# include <openssl/opensslv.h>
-#include "cryptlib.h"
+# include "cryptlib.h"
-const char SHA512_version[]="SHA-512" OPENSSL_VERSION_PTEXT;
+const char SHA512_version[] = "SHA-512" OPENSSL_VERSION_PTEXT;
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \
defined(__s390__) || defined(__s390x__) || \
+ defined(__aarch64__) || \
defined(SHA512_ASM)
-#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
-#endif
+# define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+# endif
fips_md_init_ctx(SHA384, SHA512)
- {
- c->h[0]=U64(0xcbbb9d5dc1059ed8);
- c->h[1]=U64(0x629a292a367cd507);
- c->h[2]=U64(0x9159015a3070dd17);
- c->h[3]=U64(0x152fecd8f70e5939);
- c->h[4]=U64(0x67332667ffc00b31);
- c->h[5]=U64(0x8eb44a8768581511);
- c->h[6]=U64(0xdb0c2e0d64f98fa7);
- c->h[7]=U64(0x47b5481dbefa4fa4);
-
- c->Nl=0; c->Nh=0;
- c->num=0; c->md_len=SHA384_DIGEST_LENGTH;
- return 1;
- }
+{
+ c->h[0] = U64(0xcbbb9d5dc1059ed8);
+ c->h[1] = U64(0x629a292a367cd507);
+ c->h[2] = U64(0x9159015a3070dd17);
+ c->h[3] = U64(0x152fecd8f70e5939);
+ c->h[4] = U64(0x67332667ffc00b31);
+ c->h[5] = U64(0x8eb44a8768581511);
+ c->h[6] = U64(0xdb0c2e0d64f98fa7);
+ c->h[7] = U64(0x47b5481dbefa4fa4);
+
+ c->Nl = 0;
+ c->Nh = 0;
+ c->num = 0;
+ c->md_len = SHA384_DIGEST_LENGTH;
+ return 1;
+}
fips_md_init(SHA512)
- {
- c->h[0]=U64(0x6a09e667f3bcc908);
- c->h[1]=U64(0xbb67ae8584caa73b);
- c->h[2]=U64(0x3c6ef372fe94f82b);
- c->h[3]=U64(0xa54ff53a5f1d36f1);
- c->h[4]=U64(0x510e527fade682d1);
- c->h[5]=U64(0x9b05688c2b3e6c1f);
- c->h[6]=U64(0x1f83d9abfb41bd6b);
- c->h[7]=U64(0x5be0cd19137e2179);
-
- c->Nl=0; c->Nh=0;
- c->num=0; c->md_len=SHA512_DIGEST_LENGTH;
+{
+ c->h[0] = U64(0x6a09e667f3bcc908);
+ c->h[1] = U64(0xbb67ae8584caa73b);
+ c->h[2] = U64(0x3c6ef372fe94f82b);
+ c->h[3] = U64(0xa54ff53a5f1d36f1);
+ c->h[4] = U64(0x510e527fade682d1);
+ c->h[5] = U64(0x9b05688c2b3e6c1f);
+ c->h[6] = U64(0x1f83d9abfb41bd6b);
+ c->h[7] = U64(0x5be0cd19137e2179);
+
+ c->Nl = 0;
+ c->Nh = 0;
+ c->num = 0;
+ c->md_len = SHA512_DIGEST_LENGTH;
+ return 1;
+}
+
+# ifndef SHA512_ASM
+static
+# endif
+void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);
+
+int SHA512_Final(unsigned char *md, SHA512_CTX *c)
+{
+ unsigned char *p = (unsigned char *)c->u.p;
+ size_t n = c->num;
+
+    p[n] = 0x80;                /* there is always room for one */
+ n++;
+ if (n > (sizeof(c->u) - 16))
+ memset(p + n, 0, sizeof(c->u) - n), n = 0,
+ sha512_block_data_order(c, p, 1);
+
+ memset(p + n, 0, sizeof(c->u) - 16 - n);
+# ifdef B_ENDIAN
+ c->u.d[SHA_LBLOCK - 2] = c->Nh;
+ c->u.d[SHA_LBLOCK - 1] = c->Nl;
+# else
+ p[sizeof(c->u) - 1] = (unsigned char)(c->Nl);
+ p[sizeof(c->u) - 2] = (unsigned char)(c->Nl >> 8);
+ p[sizeof(c->u) - 3] = (unsigned char)(c->Nl >> 16);
+ p[sizeof(c->u) - 4] = (unsigned char)(c->Nl >> 24);
+ p[sizeof(c->u) - 5] = (unsigned char)(c->Nl >> 32);
+ p[sizeof(c->u) - 6] = (unsigned char)(c->Nl >> 40);
+ p[sizeof(c->u) - 7] = (unsigned char)(c->Nl >> 48);
+ p[sizeof(c->u) - 8] = (unsigned char)(c->Nl >> 56);
+ p[sizeof(c->u) - 9] = (unsigned char)(c->Nh);
+ p[sizeof(c->u) - 10] = (unsigned char)(c->Nh >> 8);
+ p[sizeof(c->u) - 11] = (unsigned char)(c->Nh >> 16);
+ p[sizeof(c->u) - 12] = (unsigned char)(c->Nh >> 24);
+ p[sizeof(c->u) - 13] = (unsigned char)(c->Nh >> 32);
+ p[sizeof(c->u) - 14] = (unsigned char)(c->Nh >> 40);
+ p[sizeof(c->u) - 15] = (unsigned char)(c->Nh >> 48);
+ p[sizeof(c->u) - 16] = (unsigned char)(c->Nh >> 56);
+# endif
+
+ sha512_block_data_order(c, p, 1);
+
+ if (md == 0)
+ return 0;
+
+ switch (c->md_len) {
+ /* Let compiler decide if it's appropriate to unroll... */
+ case SHA384_DIGEST_LENGTH:
+ for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) {
+ SHA_LONG64 t = c->h[n];
+
+ *(md++) = (unsigned char)(t >> 56);
+ *(md++) = (unsigned char)(t >> 48);
+ *(md++) = (unsigned char)(t >> 40);
+ *(md++) = (unsigned char)(t >> 32);
+ *(md++) = (unsigned char)(t >> 24);
+ *(md++) = (unsigned char)(t >> 16);
+ *(md++) = (unsigned char)(t >> 8);
+ *(md++) = (unsigned char)(t);
+ }
+ break;
+ case SHA512_DIGEST_LENGTH:
+ for (n = 0; n < SHA512_DIGEST_LENGTH / 8; n++) {
+ SHA_LONG64 t = c->h[n];
+
+ *(md++) = (unsigned char)(t >> 56);
+ *(md++) = (unsigned char)(t >> 48);
+ *(md++) = (unsigned char)(t >> 40);
+ *(md++) = (unsigned char)(t >> 32);
+ *(md++) = (unsigned char)(t >> 24);
+ *(md++) = (unsigned char)(t >> 16);
+ *(md++) = (unsigned char)(t >> 8);
+ *(md++) = (unsigned char)(t);
+ }
+ break;
+ /* ... as well as make sure md_len is not abused. */
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+int SHA384_Final(unsigned char *md, SHA512_CTX *c)
+{
+ return SHA512_Final(md, c);
+}
+
+int SHA512_Update(SHA512_CTX *c, const void *_data, size_t len)
+{
+ SHA_LONG64 l;
+ unsigned char *p = c->u.p;
+ const unsigned char *data = (const unsigned char *)_data;
+
+ if (len == 0)
return 1;
- }
-#ifndef SHA512_ASM
-static
-#endif
-void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num);
-
-int SHA512_Final (unsigned char *md, SHA512_CTX *c)
- {
- unsigned char *p=(unsigned char *)c->u.p;
- size_t n=c->num;
-
- p[n]=0x80; /* There always is a room for one */
- n++;
- if (n > (sizeof(c->u)-16))
- memset (p+n,0,sizeof(c->u)-n), n=0,
- sha512_block_data_order (c,p,1);
-
- memset (p+n,0,sizeof(c->u)-16-n);
-#ifdef B_ENDIAN
- c->u.d[SHA_LBLOCK-2] = c->Nh;
- c->u.d[SHA_LBLOCK-1] = c->Nl;
-#else
- p[sizeof(c->u)-1] = (unsigned char)(c->Nl);
- p[sizeof(c->u)-2] = (unsigned char)(c->Nl>>8);
- p[sizeof(c->u)-3] = (unsigned char)(c->Nl>>16);
- p[sizeof(c->u)-4] = (unsigned char)(c->Nl>>24);
- p[sizeof(c->u)-5] = (unsigned char)(c->Nl>>32);
- p[sizeof(c->u)-6] = (unsigned char)(c->Nl>>40);
- p[sizeof(c->u)-7] = (unsigned char)(c->Nl>>48);
- p[sizeof(c->u)-8] = (unsigned char)(c->Nl>>56);
- p[sizeof(c->u)-9] = (unsigned char)(c->Nh);
- p[sizeof(c->u)-10] = (unsigned char)(c->Nh>>8);
- p[sizeof(c->u)-11] = (unsigned char)(c->Nh>>16);
- p[sizeof(c->u)-12] = (unsigned char)(c->Nh>>24);
- p[sizeof(c->u)-13] = (unsigned char)(c->Nh>>32);
- p[sizeof(c->u)-14] = (unsigned char)(c->Nh>>40);
- p[sizeof(c->u)-15] = (unsigned char)(c->Nh>>48);
- p[sizeof(c->u)-16] = (unsigned char)(c->Nh>>56);
-#endif
-
- sha512_block_data_order (c,p,1);
-
- if (md==0) return 0;
-
- switch (c->md_len)
- {
- /* Let compiler decide if it's appropriate to unroll... */
- case SHA384_DIGEST_LENGTH:
- for (n=0;n<SHA384_DIGEST_LENGTH/8;n++)
- {
- SHA_LONG64 t = c->h[n];
-
- *(md++) = (unsigned char)(t>>56);
- *(md++) = (unsigned char)(t>>48);
- *(md++) = (unsigned char)(t>>40);
- *(md++) = (unsigned char)(t>>32);
- *(md++) = (unsigned char)(t>>24);
- *(md++) = (unsigned char)(t>>16);
- *(md++) = (unsigned char)(t>>8);
- *(md++) = (unsigned char)(t);
- }
- break;
- case SHA512_DIGEST_LENGTH:
- for (n=0;n<SHA512_DIGEST_LENGTH/8;n++)
- {
- SHA_LONG64 t = c->h[n];
-
- *(md++) = (unsigned char)(t>>56);
- *(md++) = (unsigned char)(t>>48);
- *(md++) = (unsigned char)(t>>40);
- *(md++) = (unsigned char)(t>>32);
- *(md++) = (unsigned char)(t>>24);
- *(md++) = (unsigned char)(t>>16);
- *(md++) = (unsigned char)(t>>8);
- *(md++) = (unsigned char)(t);
- }
- break;
- /* ... as well as make sure md_len is not abused. */
- default: return 0;
- }
-
- return 1;
- }
-
-int SHA384_Final (unsigned char *md,SHA512_CTX *c)
-{ return SHA512_Final (md,c); }
-
-int SHA512_Update (SHA512_CTX *c, const void *_data, size_t len)
- {
- SHA_LONG64 l;
- unsigned char *p=c->u.p;
- const unsigned char *data=(const unsigned char *)_data;
-
- if (len==0) return 1;
-
- l = (c->Nl+(((SHA_LONG64)len)<<3))&U64(0xffffffffffffffff);
- if (l < c->Nl) c->Nh++;
- if (sizeof(len)>=8) c->Nh+=(((SHA_LONG64)len)>>61);
- c->Nl=l;
-
- if (c->num != 0)
- {
- size_t n = sizeof(c->u) - c->num;
-
- if (len < n)
- {
- memcpy (p+c->num,data,len), c->num += (unsigned int)len;
- return 1;
- }
- else {
- memcpy (p+c->num,data,n), c->num = 0;
- len-=n, data+=n;
- sha512_block_data_order (c,p,1);
- }
- }
-
- if (len >= sizeof(c->u))
- {
-#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
- if ((size_t)data%sizeof(c->u.d[0]) != 0)
- while (len >= sizeof(c->u))
- memcpy (p,data,sizeof(c->u)),
- sha512_block_data_order (c,p,1),
- len -= sizeof(c->u),
- data += sizeof(c->u);
- else
-#endif
- sha512_block_data_order (c,data,len/sizeof(c->u)),
- data += len,
- len %= sizeof(c->u),
- data -= len;
- }
-
- if (len != 0) memcpy (p,data,len), c->num = (int)len;
-
- return 1;
- }
-
-int SHA384_Update (SHA512_CTX *c, const void *data, size_t len)
-{ return SHA512_Update (c,data,len); }
-
-void SHA512_Transform (SHA512_CTX *c, const unsigned char *data)
- {
-#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
- if ((size_t)data%sizeof(c->u.d[0]) != 0)
- memcpy(c->u.p,data,sizeof(c->u.p)),
- data = c->u.p;
-#endif
- sha512_block_data_order (c,data,1);
- }
+ l = (c->Nl + (((SHA_LONG64) len) << 3)) & U64(0xffffffffffffffff);
+ if (l < c->Nl)
+ c->Nh++;
+ if (sizeof(len) >= 8)
+ c->Nh += (((SHA_LONG64) len) >> 61);
+ c->Nl = l;
+
+ if (c->num != 0) {
+ size_t n = sizeof(c->u) - c->num;
+
+ if (len < n) {
+ memcpy(p + c->num, data, len), c->num += (unsigned int)len;
+ return 1;
+ } else {
+ memcpy(p + c->num, data, n), c->num = 0;
+ len -= n, data += n;
+ sha512_block_data_order(c, p, 1);
+ }
+ }
+
+ if (len >= sizeof(c->u)) {
+# ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+ if ((size_t)data % sizeof(c->u.d[0]) != 0)
+ while (len >= sizeof(c->u))
+ memcpy(p, data, sizeof(c->u)),
+ sha512_block_data_order(c, p, 1),
+ len -= sizeof(c->u), data += sizeof(c->u);
+ else
+# endif
+ sha512_block_data_order(c, data, len / sizeof(c->u)),
+ data += len, len %= sizeof(c->u), data -= len;
+ }
+
+ if (len != 0)
+ memcpy(p, data, len), c->num = (int)len;
+
+ return 1;
+}
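
The opening arithmetic of SHA512_Update maintains a 128-bit bit counter split across Nl and Nh: the byte count is converted to bits by the <<3, a wrap of the low word carries into Nh, and on platforms with a 64-bit size_t the bits shifted off the top (len >> 61) are added to Nh directly. The same arithmetic as a self-contained sketch (add_bits is an illustrative stand-in, not an OpenSSL function):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the Nl/Nh update above: add len bytes (len*8 bits)
     * to a 128-bit counter held in two 64-bit halves. */
    static void add_bits(uint64_t *Nl, uint64_t *Nh, size_t len)
    {
        uint64_t l = *Nl + ((uint64_t)len << 3); /* low 64 bits of the sum */

        if (l < *Nl)              /* wrap-around => carry into the high word */
            (*Nh)++;
        if (sizeof(len) >= 8)     /* bits 64.. of len*8 when size_t is 64-bit */
            *Nh += (uint64_t)len >> 61;
        *Nl = l;
    }
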
+
+int SHA384_Update(SHA512_CTX *c, const void *data, size_t len)
+{
+ return SHA512_Update(c, data, len);
+}
+
+void SHA512_Transform(SHA512_CTX *c, const unsigned char *data)
+{
+# ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+ if ((size_t)data % sizeof(c->u.d[0]) != 0)
+ memcpy(c->u.p, data, sizeof(c->u.p)), data = c->u.p;
+# endif
+ sha512_block_data_order(c, data, 1);
+}
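
Together, Init/Update/Final form the usual streaming interface. A minimal caller, for illustration only (error checks elided):

    #include <stdio.h>
    #include <openssl/sha.h>

    int main(void)
    {
        SHA512_CTX ctx;
        unsigned char md[SHA512_DIGEST_LENGTH];
        int i;

        SHA512_Init(&ctx);
        SHA512_Update(&ctx, "abc", 3);  /* may be called repeatedly */
        SHA512_Final(md, &ctx);

        for (i = 0; i < SHA512_DIGEST_LENGTH; i++)
            printf("%02x", md[i]);
        printf("\n");
        return 0;
    }
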
unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md)
- {
- SHA512_CTX c;
- static unsigned char m[SHA384_DIGEST_LENGTH];
-
- if (md == NULL) md=m;
- SHA384_Init(&c);
- SHA512_Update(&c,d,n);
- SHA512_Final(md,&c);
- OPENSSL_cleanse(&c,sizeof(c));
- return(md);
- }
+{
+ SHA512_CTX c;
+ static unsigned char m[SHA384_DIGEST_LENGTH];
+
+ if (md == NULL)
+ md = m;
+ SHA384_Init(&c);
+ SHA512_Update(&c, d, n);
+ SHA512_Final(md, &c);
+ OPENSSL_cleanse(&c, sizeof(c));
+ return (md);
+}
unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md)
- {
- SHA512_CTX c;
- static unsigned char m[SHA512_DIGEST_LENGTH];
-
- if (md == NULL) md=m;
- SHA512_Init(&c);
- SHA512_Update(&c,d,n);
- SHA512_Final(md,&c);
- OPENSSL_cleanse(&c,sizeof(c));
- return(md);
- }
-
-#ifndef SHA512_ASM
+{
+ SHA512_CTX c;
+ static unsigned char m[SHA512_DIGEST_LENGTH];
+
+ if (md == NULL)
+ md = m;
+ SHA512_Init(&c);
+ SHA512_Update(&c, d, n);
+ SHA512_Final(md, &c);
+ OPENSSL_cleanse(&c, sizeof(c));
+ return (md);
+}
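
Note that when md is NULL both one-shot wrappers fall back to a function-local static buffer, so that mode is not safe for concurrent callers; passing a caller-owned buffer avoids the shared state, e.g.:

    #include <openssl/sha.h>

    int main(void)
    {
        unsigned char digest[SHA512_DIGEST_LENGTH];

        /* caller-owned output buffer: no shared static state involved */
        SHA512((const unsigned char *)"abc", 3, digest);
        return 0;
    }
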
+
+# ifndef SHA512_ASM
static const SHA_LONG64 K512[80] = {
- U64(0x428a2f98d728ae22),U64(0x7137449123ef65cd),
- U64(0xb5c0fbcfec4d3b2f),U64(0xe9b5dba58189dbbc),
- U64(0x3956c25bf348b538),U64(0x59f111f1b605d019),
- U64(0x923f82a4af194f9b),U64(0xab1c5ed5da6d8118),
- U64(0xd807aa98a3030242),U64(0x12835b0145706fbe),
- U64(0x243185be4ee4b28c),U64(0x550c7dc3d5ffb4e2),
- U64(0x72be5d74f27b896f),U64(0x80deb1fe3b1696b1),
- U64(0x9bdc06a725c71235),U64(0xc19bf174cf692694),
- U64(0xe49b69c19ef14ad2),U64(0xefbe4786384f25e3),
- U64(0x0fc19dc68b8cd5b5),U64(0x240ca1cc77ac9c65),
- U64(0x2de92c6f592b0275),U64(0x4a7484aa6ea6e483),
- U64(0x5cb0a9dcbd41fbd4),U64(0x76f988da831153b5),
- U64(0x983e5152ee66dfab),U64(0xa831c66d2db43210),
- U64(0xb00327c898fb213f),U64(0xbf597fc7beef0ee4),
- U64(0xc6e00bf33da88fc2),U64(0xd5a79147930aa725),
- U64(0x06ca6351e003826f),U64(0x142929670a0e6e70),
- U64(0x27b70a8546d22ffc),U64(0x2e1b21385c26c926),
- U64(0x4d2c6dfc5ac42aed),U64(0x53380d139d95b3df),
- U64(0x650a73548baf63de),U64(0x766a0abb3c77b2a8),
- U64(0x81c2c92e47edaee6),U64(0x92722c851482353b),
- U64(0xa2bfe8a14cf10364),U64(0xa81a664bbc423001),
- U64(0xc24b8b70d0f89791),U64(0xc76c51a30654be30),
- U64(0xd192e819d6ef5218),U64(0xd69906245565a910),
- U64(0xf40e35855771202a),U64(0x106aa07032bbd1b8),
- U64(0x19a4c116b8d2d0c8),U64(0x1e376c085141ab53),
- U64(0x2748774cdf8eeb99),U64(0x34b0bcb5e19b48a8),
- U64(0x391c0cb3c5c95a63),U64(0x4ed8aa4ae3418acb),
- U64(0x5b9cca4f7763e373),U64(0x682e6ff3d6b2b8a3),
- U64(0x748f82ee5defb2fc),U64(0x78a5636f43172f60),
- U64(0x84c87814a1f0ab72),U64(0x8cc702081a6439ec),
- U64(0x90befffa23631e28),U64(0xa4506cebde82bde9),
- U64(0xbef9a3f7b2c67915),U64(0xc67178f2e372532b),
- U64(0xca273eceea26619c),U64(0xd186b8c721c0c207),
- U64(0xeada7dd6cde0eb1e),U64(0xf57d4f7fee6ed178),
- U64(0x06f067aa72176fba),U64(0x0a637dc5a2c898a6),
- U64(0x113f9804bef90dae),U64(0x1b710b35131c471b),
- U64(0x28db77f523047d84),U64(0x32caab7b40c72493),
- U64(0x3c9ebe0a15c9bebc),U64(0x431d67c49c100d4c),
- U64(0x4cc5d4becb3e42b6),U64(0x597f299cfc657e2a),
- U64(0x5fcb6fab3ad6faec),U64(0x6c44198c4a475817) };
-
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(__x86_64) || defined(__x86_64__)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
- asm ("rorq %1,%0" \
- : "=r"(ret) \
- : "J"(n),"0"(a) \
- : "cc"); ret; })
-# if !defined(B_ENDIAN)
-# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
- asm ("bswapq %0" \
- : "=r"(ret) \
- : "0"(ret)); ret; })
-# endif
-# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
-# if defined(I386_ONLY)
-# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
- unsigned int hi=p[0],lo=p[1]; \
- asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
- "roll $16,%%eax; roll $16,%%edx; "\
- "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \
- : "=a"(lo),"=d"(hi) \
- : "0"(lo),"1"(hi) : "cc"); \
- ((SHA_LONG64)hi)<<32|lo; })
-# else
-# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
- unsigned int hi=p[0],lo=p[1]; \
- asm ("bswapl %0; bswapl %1;" \
- : "=r"(lo),"=r"(hi) \
- : "0"(lo),"1"(hi)); \
- ((SHA_LONG64)hi)<<32|lo; })
+ U64(0x428a2f98d728ae22), U64(0x7137449123ef65cd),
+ U64(0xb5c0fbcfec4d3b2f), U64(0xe9b5dba58189dbbc),
+ U64(0x3956c25bf348b538), U64(0x59f111f1b605d019),
+ U64(0x923f82a4af194f9b), U64(0xab1c5ed5da6d8118),
+ U64(0xd807aa98a3030242), U64(0x12835b0145706fbe),
+ U64(0x243185be4ee4b28c), U64(0x550c7dc3d5ffb4e2),
+ U64(0x72be5d74f27b896f), U64(0x80deb1fe3b1696b1),
+ U64(0x9bdc06a725c71235), U64(0xc19bf174cf692694),
+ U64(0xe49b69c19ef14ad2), U64(0xefbe4786384f25e3),
+ U64(0x0fc19dc68b8cd5b5), U64(0x240ca1cc77ac9c65),
+ U64(0x2de92c6f592b0275), U64(0x4a7484aa6ea6e483),
+ U64(0x5cb0a9dcbd41fbd4), U64(0x76f988da831153b5),
+ U64(0x983e5152ee66dfab), U64(0xa831c66d2db43210),
+ U64(0xb00327c898fb213f), U64(0xbf597fc7beef0ee4),
+ U64(0xc6e00bf33da88fc2), U64(0xd5a79147930aa725),
+ U64(0x06ca6351e003826f), U64(0x142929670a0e6e70),
+ U64(0x27b70a8546d22ffc), U64(0x2e1b21385c26c926),
+ U64(0x4d2c6dfc5ac42aed), U64(0x53380d139d95b3df),
+ U64(0x650a73548baf63de), U64(0x766a0abb3c77b2a8),
+ U64(0x81c2c92e47edaee6), U64(0x92722c851482353b),
+ U64(0xa2bfe8a14cf10364), U64(0xa81a664bbc423001),
+ U64(0xc24b8b70d0f89791), U64(0xc76c51a30654be30),
+ U64(0xd192e819d6ef5218), U64(0xd69906245565a910),
+ U64(0xf40e35855771202a), U64(0x106aa07032bbd1b8),
+ U64(0x19a4c116b8d2d0c8), U64(0x1e376c085141ab53),
+ U64(0x2748774cdf8eeb99), U64(0x34b0bcb5e19b48a8),
+ U64(0x391c0cb3c5c95a63), U64(0x4ed8aa4ae3418acb),
+ U64(0x5b9cca4f7763e373), U64(0x682e6ff3d6b2b8a3),
+ U64(0x748f82ee5defb2fc), U64(0x78a5636f43172f60),
+ U64(0x84c87814a1f0ab72), U64(0x8cc702081a6439ec),
+ U64(0x90befffa23631e28), U64(0xa4506cebde82bde9),
+ U64(0xbef9a3f7b2c67915), U64(0xc67178f2e372532b),
+ U64(0xca273eceea26619c), U64(0xd186b8c721c0c207),
+ U64(0xeada7dd6cde0eb1e), U64(0xf57d4f7fee6ed178),
+ U64(0x06f067aa72176fba), U64(0x0a637dc5a2c898a6),
+ U64(0x113f9804bef90dae), U64(0x1b710b35131c471b),
+ U64(0x28db77f523047d84), U64(0x32caab7b40c72493),
+ U64(0x3c9ebe0a15c9bebc), U64(0x431d67c49c100d4c),
+ U64(0x4cc5d4becb3e42b6), U64(0x597f299cfc657e2a),
+ U64(0x5fcb6fab3ad6faec), U64(0x6c44198c4a475817)
+};
+
+# ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__x86_64) || defined(__x86_64__)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
+ asm ("rorq %1,%0" \
+ : "=r"(ret) \
+ : "J"(n),"0"(a) \
+ : "cc"); ret; })
+# if !defined(B_ENDIAN)
+# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
+ asm ("bswapq %0" \
+ : "=r"(ret) \
+ : "0"(ret)); ret; })
+# endif
+# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
+# if defined(I386_ONLY)
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+ unsigned int hi=p[0],lo=p[1]; \
+ asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
+ "roll $16,%%eax; roll $16,%%edx; "\
+ "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \
+ : "=a"(lo),"=d"(hi) \
+ : "0"(lo),"1"(hi) : "cc"); \
+ ((SHA_LONG64)hi)<<32|lo; })
+# else
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+ unsigned int hi=p[0],lo=p[1]; \
+ asm ("bswapl %0; bswapl %1;" \
+ : "=r"(lo),"=r"(hi) \
+ : "0"(lo),"1"(hi)); \
+ ((SHA_LONG64)hi)<<32|lo; })
+# endif
+# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
+ asm ("rotrdi %0,%1,%2" \
+ : "=r"(ret) \
+ : "r"(a),"K"(n)); ret; })
+# elif defined(__aarch64__)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
+ asm ("ror %0,%1,%2" \
+ : "=r"(ret) \
+ : "r"(a),"I"(n)); ret; })
+# if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
+# define PULL64(x) ({ SHA_LONG64 ret; \
+ asm ("rev %0,%1" \
+ : "=r"(ret) \
+ : "r"(*((const SHA_LONG64 *)(&(x))))); ret; })
+# endif
+# endif
+# elif defined(_MSC_VER)
+# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
+# pragma intrinsic(_rotr64)
+# define ROTR(a,n) _rotr64((a),n)
+# endif
+# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(I386_ONLY)
+static SHA_LONG64 __fastcall __pull64be(const void *x)
+{
+ _asm mov edx,[ecx + 0]
+ _asm mov eax,[ecx + 4]
+    _asm xchg dh, dl
+    _asm xchg ah, al
+    _asm rol edx, 16
+    _asm rol eax, 16
+    _asm xchg dh, dl
+    _asm xchg ah, al
+}
+# else
+static SHA_LONG64 __fastcall __pull64be(const void *x)
+{
+ _asm mov edx,[ecx + 0]
+ _asm mov eax,[ecx + 4]
+    _asm bswap edx
+    _asm bswap eax
+}
+# endif
+# define PULL64(x) __pull64be(&(x))
+# if _MSC_VER<=1200
+# pragma inline_depth(0)
+# endif
+# endif
# endif
-# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
- asm ("rotrdi %0,%1,%2" \
- : "=r"(ret) \
- : "r"(a),"K"(n)); ret; })
# endif
-# elif defined(_MSC_VER)
-# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
-# pragma intrinsic(_rotr64)
-# define ROTR(a,n) _rotr64((a),n)
+# ifndef PULL64
+# define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
+# define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
# endif
-# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(I386_ONLY)
- static SHA_LONG64 __fastcall __pull64be(const void *x)
- { _asm mov edx, [ecx + 0]
- _asm mov eax, [ecx + 4]
- _asm xchg dh,dl
- _asm xchg ah,al
- _asm rol edx,16
- _asm rol eax,16
- _asm xchg dh,dl
- _asm xchg ah,al
- }
+# ifndef ROTR
+# define ROTR(x,s) (((x)>>s) | (x)<<(64-s))
+# endif
+# define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+# define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+# define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+# define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+# define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+# define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
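
Ch() is a bitwise select (take y where x is set, z where it is clear) and Maj() a bitwise majority; the XOR forms above compute the same bits as the more familiar OR forms. A quick self-contained check of both readings (since the identities are bitwise, the eight single-bit combinations cover full words):

    #include <assert.h>
    #include <stdint.h>

    #define Ch(x,y,z)  (((x) & (y)) ^ ((~(x)) & (z)))
    #define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

    int main(void)
    {
        uint64_t x, y, z;

        for (x = 0; x <= 1; x++)
            for (y = 0; y <= 1; y++)
                for (z = 0; z <= 1; z++) {
                    /* select: y where x is 1, z where x is 0 */
                    assert(Ch(x, y, z) == ((x & y) | (~x & z)));
                    /* majority of the three inputs, bit by bit */
                    assert(Maj(x, y, z) == ((x & y) | (x & z) | (y & z)));
                }
        return 0;
    }
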
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+/*
+ * This code should give better results on a 32-bit CPU with fewer than
+ * ~24 registers, both size- and performance-wise...
+ */
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+                                    size_t num)
+{
+ const SHA_LONG64 *W = in;
+ SHA_LONG64 A, E, T;
+ SHA_LONG64 X[9 + 80], *F;
+ int i;
+
+ while (num--) {
+
+ F = X + 80;
+ A = ctx->h[0];
+ F[1] = ctx->h[1];
+ F[2] = ctx->h[2];
+ F[3] = ctx->h[3];
+ E = ctx->h[4];
+ F[5] = ctx->h[5];
+ F[6] = ctx->h[6];
+ F[7] = ctx->h[7];
+
+ for (i = 0; i < 16; i++, F--) {
+# ifdef B_ENDIAN
+ T = W[i];
# else
- static SHA_LONG64 __fastcall __pull64be(const void *x)
- { _asm mov edx, [ecx + 0]
- _asm mov eax, [ecx + 4]
- _asm bswap edx
- _asm bswap eax
+ T = PULL64(W[i]);
+# endif
+ F[0] = A;
+ F[4] = E;
+ F[8] = T;
+ T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i];
+ E = F[3] + T;
+ A = T + Sigma0(A) + Maj(A, F[1], F[2]);
+ }
+
+ for (; i < 80; i++, F--) {
+ T = sigma0(F[8 + 16 - 1]);
+ T += sigma1(F[8 + 16 - 14]);
+ T += F[8 + 16] + F[8 + 16 - 9];
+
+ F[0] = A;
+ F[4] = E;
+ F[8] = T;
+ T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i];
+ E = F[3] + T;
+ A = T + Sigma0(A) + Maj(A, F[1], F[2]);
+ }
+
+ ctx->h[0] += A;
+ ctx->h[1] += F[1];
+ ctx->h[2] += F[2];
+ ctx->h[3] += F[3];
+ ctx->h[4] += E;
+ ctx->h[5] += F[5];
+ ctx->h[6] += F[6];
+ ctx->h[7] += F[7];
+
+ W += SHA_LBLOCK;
}
+}
+
+# elif defined(OPENSSL_SMALL_FOOTPRINT)
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+ size_t num)
+{
+ const SHA_LONG64 *W = in;
+ SHA_LONG64 a, b, c, d, e, f, g, h, s0, s1, T1, T2;
+ SHA_LONG64 X[16];
+ int i;
+
+ while (num--) {
+
+ a = ctx->h[0];
+ b = ctx->h[1];
+ c = ctx->h[2];
+ d = ctx->h[3];
+ e = ctx->h[4];
+ f = ctx->h[5];
+ g = ctx->h[6];
+ h = ctx->h[7];
+
+ for (i = 0; i < 16; i++) {
+# ifdef B_ENDIAN
+ T1 = X[i] = W[i];
+# else
+ T1 = X[i] = PULL64(W[i]);
# endif
-# define PULL64(x) __pull64be(&(x))
-# if _MSC_VER<=1200
-# pragma inline_depth(0)
+ T1 += h + Sigma1(e) + Ch(e, f, g) + K512[i];
+ T2 = Sigma0(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+ }
+
+ for (; i < 80; i++) {
+ s0 = X[(i + 1) & 0x0f];
+ s0 = sigma0(s0);
+ s1 = X[(i + 14) & 0x0f];
+ s1 = sigma1(s1);
+
+ T1 = X[i & 0xf] += s0 + s1 + X[(i + 9) & 0xf];
+ T1 += h + Sigma1(e) + Ch(e, f, g) + K512[i];
+ T2 = Sigma0(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+ }
+
+ ctx->h[0] += a;
+ ctx->h[1] += b;
+ ctx->h[2] += c;
+ ctx->h[3] += d;
+ ctx->h[4] += e;
+ ctx->h[5] += f;
+ ctx->h[6] += g;
+ ctx->h[7] += h;
+
+ W += SHA_LBLOCK;
+ }
+}
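
The small-footprint variant above never materialises the full 80-entry message schedule: X[] is a 16-word ring buffer, and the &0x0f masks pick out exactly W[i-15], W[i-2] and W[i-7] while W[i-16] is overwritten in place. A standalone check of that indexing against the plain recurrence W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], using the same sigma definitions as above:

    #include <assert.h>
    #include <stdint.h>

    #define ROTR(x,s) (((x) >> (s)) | ((x) << (64 - (s))))
    #define s0(x) (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x) >> 7))
    #define s1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x) >> 6))

    int main(void)
    {
        uint64_t W[80], X[16];
        int i;

        for (i = 0; i < 16; i++)        /* arbitrary test words */
            W[i] = X[i] = (uint64_t)(i + 1) * 0x9e3779b97f4a7c15ULL;

        for (i = 16; i < 80; i++) {
            /* the textbook schedule, full 80-word array */
            W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
            /* the in-place 16-word window used above */
            X[i & 0xf] += s0(X[(i + 1) & 0xf]) + s1(X[(i + 14) & 0xf])
                        + X[(i + 9) & 0xf];
            assert(X[i & 0xf] == W[i]);
        }
        return 0;
    }
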
+
+# else
+# define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \
+ h = Sigma0(a) + Maj(a,b,c); \
+ d += T1; h += T1; } while (0)
+# define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
+ s0 = X[(j+1)&0x0f]; s0 = sigma0(s0); \
+ s1 = X[(j+14)&0x0f]; s1 = sigma1(s1); \
+ T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \
+ ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0)
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+ size_t num)
+{
+ const SHA_LONG64 *W = in;
+ SHA_LONG64 a, b, c, d, e, f, g, h, s0, s1, T1;
+ SHA_LONG64 X[16];
+ int i;
+
+ while (num--) {
+
+ a = ctx->h[0];
+ b = ctx->h[1];
+ c = ctx->h[2];
+ d = ctx->h[3];
+ e = ctx->h[4];
+ f = ctx->h[5];
+ g = ctx->h[6];
+ h = ctx->h[7];
+
+# ifdef B_ENDIAN
+ T1 = X[0] = W[0];
+ ROUND_00_15(0, a, b, c, d, e, f, g, h);
+ T1 = X[1] = W[1];
+ ROUND_00_15(1, h, a, b, c, d, e, f, g);
+ T1 = X[2] = W[2];
+ ROUND_00_15(2, g, h, a, b, c, d, e, f);
+ T1 = X[3] = W[3];
+ ROUND_00_15(3, f, g, h, a, b, c, d, e);
+ T1 = X[4] = W[4];
+ ROUND_00_15(4, e, f, g, h, a, b, c, d);
+ T1 = X[5] = W[5];
+ ROUND_00_15(5, d, e, f, g, h, a, b, c);
+ T1 = X[6] = W[6];
+ ROUND_00_15(6, c, d, e, f, g, h, a, b);
+ T1 = X[7] = W[7];
+ ROUND_00_15(7, b, c, d, e, f, g, h, a);
+ T1 = X[8] = W[8];
+ ROUND_00_15(8, a, b, c, d, e, f, g, h);
+ T1 = X[9] = W[9];
+ ROUND_00_15(9, h, a, b, c, d, e, f, g);
+ T1 = X[10] = W[10];
+ ROUND_00_15(10, g, h, a, b, c, d, e, f);
+ T1 = X[11] = W[11];
+ ROUND_00_15(11, f, g, h, a, b, c, d, e);
+ T1 = X[12] = W[12];
+ ROUND_00_15(12, e, f, g, h, a, b, c, d);
+ T1 = X[13] = W[13];
+ ROUND_00_15(13, d, e, f, g, h, a, b, c);
+ T1 = X[14] = W[14];
+ ROUND_00_15(14, c, d, e, f, g, h, a, b);
+ T1 = X[15] = W[15];
+ ROUND_00_15(15, b, c, d, e, f, g, h, a);
+# else
+ T1 = X[0] = PULL64(W[0]);
+ ROUND_00_15(0, a, b, c, d, e, f, g, h);
+ T1 = X[1] = PULL64(W[1]);
+ ROUND_00_15(1, h, a, b, c, d, e, f, g);
+ T1 = X[2] = PULL64(W[2]);
+ ROUND_00_15(2, g, h, a, b, c, d, e, f);
+ T1 = X[3] = PULL64(W[3]);
+ ROUND_00_15(3, f, g, h, a, b, c, d, e);
+ T1 = X[4] = PULL64(W[4]);
+ ROUND_00_15(4, e, f, g, h, a, b, c, d);
+ T1 = X[5] = PULL64(W[5]);
+ ROUND_00_15(5, d, e, f, g, h, a, b, c);
+ T1 = X[6] = PULL64(W[6]);
+ ROUND_00_15(6, c, d, e, f, g, h, a, b);
+ T1 = X[7] = PULL64(W[7]);
+ ROUND_00_15(7, b, c, d, e, f, g, h, a);
+ T1 = X[8] = PULL64(W[8]);
+ ROUND_00_15(8, a, b, c, d, e, f, g, h);
+ T1 = X[9] = PULL64(W[9]);
+ ROUND_00_15(9, h, a, b, c, d, e, f, g);
+ T1 = X[10] = PULL64(W[10]);
+ ROUND_00_15(10, g, h, a, b, c, d, e, f);
+ T1 = X[11] = PULL64(W[11]);
+ ROUND_00_15(11, f, g, h, a, b, c, d, e);
+ T1 = X[12] = PULL64(W[12]);
+ ROUND_00_15(12, e, f, g, h, a, b, c, d);
+ T1 = X[13] = PULL64(W[13]);
+ ROUND_00_15(13, d, e, f, g, h, a, b, c);
+ T1 = X[14] = PULL64(W[14]);
+ ROUND_00_15(14, c, d, e, f, g, h, a, b);
+ T1 = X[15] = PULL64(W[15]);
+ ROUND_00_15(15, b, c, d, e, f, g, h, a);
# endif
-# endif
-# endif
-#endif
-#ifndef PULL64
-#define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
-#define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
-#endif
+ for (i = 16; i < 80; i += 16) {
+ ROUND_16_80(i, 0, a, b, c, d, e, f, g, h, X);
+ ROUND_16_80(i, 1, h, a, b, c, d, e, f, g, X);
+ ROUND_16_80(i, 2, g, h, a, b, c, d, e, f, X);
+ ROUND_16_80(i, 3, f, g, h, a, b, c, d, e, X);
+ ROUND_16_80(i, 4, e, f, g, h, a, b, c, d, X);
+ ROUND_16_80(i, 5, d, e, f, g, h, a, b, c, X);
+ ROUND_16_80(i, 6, c, d, e, f, g, h, a, b, X);
+ ROUND_16_80(i, 7, b, c, d, e, f, g, h, a, X);
+ ROUND_16_80(i, 8, a, b, c, d, e, f, g, h, X);
+ ROUND_16_80(i, 9, h, a, b, c, d, e, f, g, X);
+ ROUND_16_80(i, 10, g, h, a, b, c, d, e, f, X);
+ ROUND_16_80(i, 11, f, g, h, a, b, c, d, e, X);
+ ROUND_16_80(i, 12, e, f, g, h, a, b, c, d, X);
+ ROUND_16_80(i, 13, d, e, f, g, h, a, b, c, X);
+ ROUND_16_80(i, 14, c, d, e, f, g, h, a, b, X);
+ ROUND_16_80(i, 15, b, c, d, e, f, g, h, a, X);
+ }
+
+ ctx->h[0] += a;
+ ctx->h[1] += b;
+ ctx->h[2] += c;
+ ctx->h[3] += d;
+ ctx->h[4] += e;
+ ctx->h[5] += f;
+ ctx->h[6] += g;
+ ctx->h[7] += h;
+
+ W += SHA_LBLOCK;
+ }
+}
-#ifndef ROTR
-#define ROTR(x,s) (((x)>>s) | (x)<<(64-s))
-#endif
+# endif
-#define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
-#define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
-#define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
-#define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+# endif /* SHA512_ASM */
-#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
-#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+#else /* !OPENSSL_NO_SHA512 */
+# if defined(PEDANTIC) || defined(__DECC) || defined(OPENSSL_SYS_MACOSX)
+static void *dummy = &dummy;
+# endif
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
-/*
- * This code should give better results on 32-bit CPU with less than
- * ~24 registers, both size and performance wise...
- */
-static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
- {
- const SHA_LONG64 *W=in;
- SHA_LONG64 A,E,T;
- SHA_LONG64 X[9+80],*F;
- int i;
-
- while (num--) {
-
- F = X+80;
- A = ctx->h[0]; F[1] = ctx->h[1];
- F[2] = ctx->h[2]; F[3] = ctx->h[3];
- E = ctx->h[4]; F[5] = ctx->h[5];
- F[6] = ctx->h[6]; F[7] = ctx->h[7];
-
- for (i=0;i<16;i++,F--)
- {
-#ifdef B_ENDIAN
- T = W[i];
-#else
- T = PULL64(W[i]);
-#endif
- F[0] = A;
- F[4] = E;
- F[8] = T;
- T += F[7] + Sigma1(E) + Ch(E,F[5],F[6]) + K512[i];
- E = F[3] + T;
- A = T + Sigma0(A) + Maj(A,F[1],F[2]);
- }
-
- for (;i<80;i++,F--)
- {
- T = sigma0(F[8+16-1]);
- T += sigma1(F[8+16-14]);
- T += F[8+16] + F[8+16-9];
-
- F[0] = A;
- F[4] = E;
- F[8] = T;
- T += F[7] + Sigma1(E) + Ch(E,F[5],F[6]) + K512[i];
- E = F[3] + T;
- A = T + Sigma0(A) + Maj(A,F[1],F[2]);
- }
-
- ctx->h[0] += A; ctx->h[1] += F[1];
- ctx->h[2] += F[2]; ctx->h[3] += F[3];
- ctx->h[4] += E; ctx->h[5] += F[5];
- ctx->h[6] += F[6]; ctx->h[7] += F[7];
-
- W+=SHA_LBLOCK;
- }
- }
-
-#elif defined(OPENSSL_SMALL_FOOTPRINT)
-
-static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
- {
- const SHA_LONG64 *W=in;
- SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1,T2;
- SHA_LONG64 X[16];
- int i;
-
- while (num--) {
-
- a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
- e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
-
- for (i=0;i<16;i++)
- {
-#ifdef B_ENDIAN
- T1 = X[i] = W[i];
-#else
- T1 = X[i] = PULL64(W[i]);
-#endif
- T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i];
- T2 = Sigma0(a) + Maj(a,b,c);
- h = g; g = f; f = e; e = d + T1;
- d = c; c = b; b = a; a = T1 + T2;
- }
-
- for (;i<80;i++)
- {
- s0 = X[(i+1)&0x0f]; s0 = sigma0(s0);
- s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
-
- T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
- T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i];
- T2 = Sigma0(a) + Maj(a,b,c);
- h = g; g = f; f = e; e = d + T1;
- d = c; c = b; b = a; a = T1 + T2;
- }
-
- ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
- ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
-
- W+=SHA_LBLOCK;
- }
- }
-
-#else
-
-#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
- T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \
- h = Sigma0(a) + Maj(a,b,c); \
- d += T1; h += T1; } while (0)
-
-#define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
- s0 = X[(j+1)&0x0f]; s0 = sigma0(s0); \
- s1 = X[(j+14)&0x0f]; s1 = sigma1(s1); \
- T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \
- ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0)
-
-static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
- {
- const SHA_LONG64 *W=in;
- SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1;
- SHA_LONG64 X[16];
- int i;
-
- while (num--) {
-
- a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
- e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
-
-#ifdef B_ENDIAN
- T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h);
- T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g);
- T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f);
- T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e);
- T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d);
- T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c);
- T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b);
- T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a);
- T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h);
- T1 = X[9] = W[9]; ROUND_00_15(9,h,a,b,c,d,e,f,g);
- T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f);
- T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e);
- T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d);
- T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c);
- T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b);
- T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a);
-#else
- T1 = X[0] = PULL64(W[0]); ROUND_00_15(0,a,b,c,d,e,f,g,h);
- T1 = X[1] = PULL64(W[1]); ROUND_00_15(1,h,a,b,c,d,e,f,g);
- T1 = X[2] = PULL64(W[2]); ROUND_00_15(2,g,h,a,b,c,d,e,f);
- T1 = X[3] = PULL64(W[3]); ROUND_00_15(3,f,g,h,a,b,c,d,e);
- T1 = X[4] = PULL64(W[4]); ROUND_00_15(4,e,f,g,h,a,b,c,d);
- T1 = X[5] = PULL64(W[5]); ROUND_00_15(5,d,e,f,g,h,a,b,c);
- T1 = X[6] = PULL64(W[6]); ROUND_00_15(6,c,d,e,f,g,h,a,b);
- T1 = X[7] = PULL64(W[7]); ROUND_00_15(7,b,c,d,e,f,g,h,a);
- T1 = X[8] = PULL64(W[8]); ROUND_00_15(8,a,b,c,d,e,f,g,h);
- T1 = X[9] = PULL64(W[9]); ROUND_00_15(9,h,a,b,c,d,e,f,g);
- T1 = X[10] = PULL64(W[10]); ROUND_00_15(10,g,h,a,b,c,d,e,f);
- T1 = X[11] = PULL64(W[11]); ROUND_00_15(11,f,g,h,a,b,c,d,e);
- T1 = X[12] = PULL64(W[12]); ROUND_00_15(12,e,f,g,h,a,b,c,d);
- T1 = X[13] = PULL64(W[13]); ROUND_00_15(13,d,e,f,g,h,a,b,c);
- T1 = X[14] = PULL64(W[14]); ROUND_00_15(14,c,d,e,f,g,h,a,b);
- T1 = X[15] = PULL64(W[15]); ROUND_00_15(15,b,c,d,e,f,g,h,a);
-#endif
-
- for (i=16;i<80;i+=16)
- {
- ROUND_16_80(i, 0,a,b,c,d,e,f,g,h,X);
- ROUND_16_80(i, 1,h,a,b,c,d,e,f,g,X);
- ROUND_16_80(i, 2,g,h,a,b,c,d,e,f,X);
- ROUND_16_80(i, 3,f,g,h,a,b,c,d,e,X);
- ROUND_16_80(i, 4,e,f,g,h,a,b,c,d,X);
- ROUND_16_80(i, 5,d,e,f,g,h,a,b,c,X);
- ROUND_16_80(i, 6,c,d,e,f,g,h,a,b,X);
- ROUND_16_80(i, 7,b,c,d,e,f,g,h,a,X);
- ROUND_16_80(i, 8,a,b,c,d,e,f,g,h,X);
- ROUND_16_80(i, 9,h,a,b,c,d,e,f,g,X);
- ROUND_16_80(i,10,g,h,a,b,c,d,e,f,X);
- ROUND_16_80(i,11,f,g,h,a,b,c,d,e,X);
- ROUND_16_80(i,12,e,f,g,h,a,b,c,d,X);
- ROUND_16_80(i,13,d,e,f,g,h,a,b,c,X);
- ROUND_16_80(i,14,c,d,e,f,g,h,a,b,X);
- ROUND_16_80(i,15,b,c,d,e,f,g,h,a,X);
- }
-
- ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
- ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
-
- W+=SHA_LBLOCK;
- }
- }
-
-#endif
-
-#endif /* SHA512_ASM */
-
-#else /* !OPENSSL_NO_SHA512 */
-
-#if defined(PEDANTIC) || defined(__DECC) || defined(OPENSSL_SYS_MACOSX)
-static void *dummy=&dummy;
-#endif
-
-#endif /* !OPENSSL_NO_SHA512 */
+#endif /* !OPENSSL_NO_SHA512 */
diff --git a/openssl/crypto/sha/sha512t.c b/openssl/crypto/sha/sha512t.c
index 210041d43..178882fc7 100644
--- a/openssl/crypto/sha/sha512t.c
+++ b/openssl/crypto/sha/sha512t.c
@@ -15,170 +15,182 @@
int main(int argc, char *argv[])
{
printf("No SHA512 support\n");
- return(0);
+ return (0);
}
#else
unsigned char app_c1[SHA512_DIGEST_LENGTH] = {
- 0xdd,0xaf,0x35,0xa1,0x93,0x61,0x7a,0xba,
- 0xcc,0x41,0x73,0x49,0xae,0x20,0x41,0x31,
- 0x12,0xe6,0xfa,0x4e,0x89,0xa9,0x7e,0xa2,
- 0x0a,0x9e,0xee,0xe6,0x4b,0x55,0xd3,0x9a,
- 0x21,0x92,0x99,0x2a,0x27,0x4f,0xc1,0xa8,
- 0x36,0xba,0x3c,0x23,0xa3,0xfe,0xeb,0xbd,
- 0x45,0x4d,0x44,0x23,0x64,0x3c,0xe8,0x0e,
- 0x2a,0x9a,0xc9,0x4f,0xa5,0x4c,0xa4,0x9f };
+ 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba,
+ 0xcc, 0x41, 0x73, 0x49, 0xae, 0x20, 0x41, 0x31,
+ 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2,
+ 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a,
+ 0x21, 0x92, 0x99, 0x2a, 0x27, 0x4f, 0xc1, 0xa8,
+ 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd,
+ 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e,
+ 0x2a, 0x9a, 0xc9, 0x4f, 0xa5, 0x4c, 0xa4, 0x9f
+};
unsigned char app_c2[SHA512_DIGEST_LENGTH] = {
- 0x8e,0x95,0x9b,0x75,0xda,0xe3,0x13,0xda,
- 0x8c,0xf4,0xf7,0x28,0x14,0xfc,0x14,0x3f,
- 0x8f,0x77,0x79,0xc6,0xeb,0x9f,0x7f,0xa1,
- 0x72,0x99,0xae,0xad,0xb6,0x88,0x90,0x18,
- 0x50,0x1d,0x28,0x9e,0x49,0x00,0xf7,0xe4,
- 0x33,0x1b,0x99,0xde,0xc4,0xb5,0x43,0x3a,
- 0xc7,0xd3,0x29,0xee,0xb6,0xdd,0x26,0x54,
- 0x5e,0x96,0xe5,0x5b,0x87,0x4b,0xe9,0x09 };
+ 0x8e, 0x95, 0x9b, 0x75, 0xda, 0xe3, 0x13, 0xda,
+ 0x8c, 0xf4, 0xf7, 0x28, 0x14, 0xfc, 0x14, 0x3f,
+ 0x8f, 0x77, 0x79, 0xc6, 0xeb, 0x9f, 0x7f, 0xa1,
+ 0x72, 0x99, 0xae, 0xad, 0xb6, 0x88, 0x90, 0x18,
+ 0x50, 0x1d, 0x28, 0x9e, 0x49, 0x00, 0xf7, 0xe4,
+ 0x33, 0x1b, 0x99, 0xde, 0xc4, 0xb5, 0x43, 0x3a,
+ 0xc7, 0xd3, 0x29, 0xee, 0xb6, 0xdd, 0x26, 0x54,
+ 0x5e, 0x96, 0xe5, 0x5b, 0x87, 0x4b, 0xe9, 0x09
+};
unsigned char app_c3[SHA512_DIGEST_LENGTH] = {
- 0xe7,0x18,0x48,0x3d,0x0c,0xe7,0x69,0x64,
- 0x4e,0x2e,0x42,0xc7,0xbc,0x15,0xb4,0x63,
- 0x8e,0x1f,0x98,0xb1,0x3b,0x20,0x44,0x28,
- 0x56,0x32,0xa8,0x03,0xaf,0xa9,0x73,0xeb,
- 0xde,0x0f,0xf2,0x44,0x87,0x7e,0xa6,0x0a,
- 0x4c,0xb0,0x43,0x2c,0xe5,0x77,0xc3,0x1b,
- 0xeb,0x00,0x9c,0x5c,0x2c,0x49,0xaa,0x2e,
- 0x4e,0xad,0xb2,0x17,0xad,0x8c,0xc0,0x9b };
+ 0xe7, 0x18, 0x48, 0x3d, 0x0c, 0xe7, 0x69, 0x64,
+ 0x4e, 0x2e, 0x42, 0xc7, 0xbc, 0x15, 0xb4, 0x63,
+ 0x8e, 0x1f, 0x98, 0xb1, 0x3b, 0x20, 0x44, 0x28,
+ 0x56, 0x32, 0xa8, 0x03, 0xaf, 0xa9, 0x73, 0xeb,
+ 0xde, 0x0f, 0xf2, 0x44, 0x87, 0x7e, 0xa6, 0x0a,
+ 0x4c, 0xb0, 0x43, 0x2c, 0xe5, 0x77, 0xc3, 0x1b,
+ 0xeb, 0x00, 0x9c, 0x5c, 0x2c, 0x49, 0xaa, 0x2e,
+ 0x4e, 0xad, 0xb2, 0x17, 0xad, 0x8c, 0xc0, 0x9b
+};
unsigned char app_d1[SHA384_DIGEST_LENGTH] = {
- 0xcb,0x00,0x75,0x3f,0x45,0xa3,0x5e,0x8b,
- 0xb5,0xa0,0x3d,0x69,0x9a,0xc6,0x50,0x07,
- 0x27,0x2c,0x32,0xab,0x0e,0xde,0xd1,0x63,
- 0x1a,0x8b,0x60,0x5a,0x43,0xff,0x5b,0xed,
- 0x80,0x86,0x07,0x2b,0xa1,0xe7,0xcc,0x23,
- 0x58,0xba,0xec,0xa1,0x34,0xc8,0x25,0xa7 };
+ 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b,
+ 0xb5, 0xa0, 0x3d, 0x69, 0x9a, 0xc6, 0x50, 0x07,
+ 0x27, 0x2c, 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63,
+ 0x1a, 0x8b, 0x60, 0x5a, 0x43, 0xff, 0x5b, 0xed,
+ 0x80, 0x86, 0x07, 0x2b, 0xa1, 0xe7, 0xcc, 0x23,
+ 0x58, 0xba, 0xec, 0xa1, 0x34, 0xc8, 0x25, 0xa7
+};
unsigned char app_d2[SHA384_DIGEST_LENGTH] = {
- 0x09,0x33,0x0c,0x33,0xf7,0x11,0x47,0xe8,
- 0x3d,0x19,0x2f,0xc7,0x82,0xcd,0x1b,0x47,
- 0x53,0x11,0x1b,0x17,0x3b,0x3b,0x05,0xd2,
- 0x2f,0xa0,0x80,0x86,0xe3,0xb0,0xf7,0x12,
- 0xfc,0xc7,0xc7,0x1a,0x55,0x7e,0x2d,0xb9,
- 0x66,0xc3,0xe9,0xfa,0x91,0x74,0x60,0x39 };
+ 0x09, 0x33, 0x0c, 0x33, 0xf7, 0x11, 0x47, 0xe8,
+ 0x3d, 0x19, 0x2f, 0xc7, 0x82, 0xcd, 0x1b, 0x47,
+ 0x53, 0x11, 0x1b, 0x17, 0x3b, 0x3b, 0x05, 0xd2,
+ 0x2f, 0xa0, 0x80, 0x86, 0xe3, 0xb0, 0xf7, 0x12,
+ 0xfc, 0xc7, 0xc7, 0x1a, 0x55, 0x7e, 0x2d, 0xb9,
+ 0x66, 0xc3, 0xe9, 0xfa, 0x91, 0x74, 0x60, 0x39
+};
unsigned char app_d3[SHA384_DIGEST_LENGTH] = {
- 0x9d,0x0e,0x18,0x09,0x71,0x64,0x74,0xcb,
- 0x08,0x6e,0x83,0x4e,0x31,0x0a,0x4a,0x1c,
- 0xed,0x14,0x9e,0x9c,0x00,0xf2,0x48,0x52,
- 0x79,0x72,0xce,0xc5,0x70,0x4c,0x2a,0x5b,
- 0x07,0xb8,0xb3,0xdc,0x38,0xec,0xc4,0xeb,
- 0xae,0x97,0xdd,0xd8,0x7f,0x3d,0x89,0x85 };
-
-int main (int argc,char **argv)
-{ unsigned char md[SHA512_DIGEST_LENGTH];
- int i;
- EVP_MD_CTX evp;
-
-#ifdef OPENSSL_IA32_SSE2
- /* Alternative to this is to call OpenSSL_add_all_algorithms...
- * The below code is retained exclusively for debugging purposes. */
- { char *env;
-
- if ((env=getenv("OPENSSL_ia32cap")))
- OPENSSL_ia32cap = strtoul (env,NULL,0);
- }
-#endif
-
- fprintf(stdout,"Testing SHA-512 ");
-
- EVP_Digest ("abc",3,md,NULL,EVP_sha512(),NULL);
- if (memcmp(md,app_c1,sizeof(app_c1)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 1 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_Digest ("abcdefgh""bcdefghi""cdefghij""defghijk"
- "efghijkl""fghijklm""ghijklmn""hijklmno"
- "ijklmnop""jklmnopq""klmnopqr""lmnopqrs"
- "mnopqrst""nopqrstu",112,md,NULL,EVP_sha512(),NULL);
- if (memcmp(md,app_c2,sizeof(app_c2)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 2 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_MD_CTX_init (&evp);
- EVP_DigestInit_ex (&evp,EVP_sha512(),NULL);
- for (i=0;i<1000000;i+=288)
- EVP_DigestUpdate (&evp, "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa",
- (1000000-i)<288?1000000-i:288);
- EVP_DigestFinal_ex (&evp,md,NULL);
- EVP_MD_CTX_cleanup (&evp);
-
- if (memcmp(md,app_c3,sizeof(app_c3)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 3 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- fprintf(stdout," passed.\n"); fflush(stdout);
-
- fprintf(stdout,"Testing SHA-384 ");
-
- EVP_Digest ("abc",3,md,NULL,EVP_sha384(),NULL);
- if (memcmp(md,app_d1,sizeof(app_d1)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 1 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_Digest ("abcdefgh""bcdefghi""cdefghij""defghijk"
- "efghijkl""fghijklm""ghijklmn""hijklmno"
- "ijklmnop""jklmnopq""klmnopqr""lmnopqrs"
- "mnopqrst""nopqrstu",112,md,NULL,EVP_sha384(),NULL);
- if (memcmp(md,app_d2,sizeof(app_d2)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 2 of 3 failed.\n");
- return 1;
- }
- else
- fprintf(stdout,"."); fflush(stdout);
-
- EVP_MD_CTX_init (&evp);
- EVP_DigestInit_ex (&evp,EVP_sha384(),NULL);
- for (i=0;i<1000000;i+=64)
- EVP_DigestUpdate (&evp, "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa"
- "aaaaaaaa""aaaaaaaa""aaaaaaaa""aaaaaaaa",
- (1000000-i)<64?1000000-i:64);
- EVP_DigestFinal_ex (&evp,md,NULL);
- EVP_MD_CTX_cleanup (&evp);
-
- if (memcmp(md,app_d3,sizeof(app_d3)))
- { fflush(stdout);
- fprintf(stderr,"\nTEST 3 of 3 failed.\n");
- return 1;
+ 0x9d, 0x0e, 0x18, 0x09, 0x71, 0x64, 0x74, 0xcb,
+ 0x08, 0x6e, 0x83, 0x4e, 0x31, 0x0a, 0x4a, 0x1c,
+ 0xed, 0x14, 0x9e, 0x9c, 0x00, 0xf2, 0x48, 0x52,
+ 0x79, 0x72, 0xce, 0xc5, 0x70, 0x4c, 0x2a, 0x5b,
+ 0x07, 0xb8, 0xb3, 0xdc, 0x38, 0xec, 0xc4, 0xeb,
+ 0xae, 0x97, 0xdd, 0xd8, 0x7f, 0x3d, 0x89, 0x85
+};
+
+int main(int argc, char **argv)
+{
+ unsigned char md[SHA512_DIGEST_LENGTH];
+ int i;
+ EVP_MD_CTX evp;
+
+# ifdef OPENSSL_IA32_SSE2
+ /*
+     * An alternative to this is to call OpenSSL_add_all_algorithms... The
+     * code below is retained exclusively for debugging purposes.
+ */
+ {
+ char *env;
+
+ if ((env = getenv("OPENSSL_ia32cap")))
+ OPENSSL_ia32cap = strtoul(env, NULL, 0);
}
- else
- fprintf(stdout,"."); fflush(stdout);
-
- fprintf(stdout," passed.\n"); fflush(stdout);
-
- return 0;
+# endif
+
+ fprintf(stdout, "Testing SHA-512 ");
+
+ EVP_Digest("abc", 3, md, NULL, EVP_sha512(), NULL);
+ if (memcmp(md, app_c1, sizeof(app_c1))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 1 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_Digest("abcdefgh" "bcdefghi" "cdefghij" "defghijk"
+ "efghijkl" "fghijklm" "ghijklmn" "hijklmno"
+ "ijklmnop" "jklmnopq" "klmnopqr" "lmnopqrs"
+ "mnopqrst" "nopqrstu", 112, md, NULL, EVP_sha512(), NULL);
+ if (memcmp(md, app_c2, sizeof(app_c2))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 2 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_MD_CTX_init(&evp);
+ EVP_DigestInit_ex(&evp, EVP_sha512(), NULL);
+ for (i = 0; i < 1000000; i += 288)
+ EVP_DigestUpdate(&evp, "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa",
+ (1000000 - i) < 288 ? 1000000 - i : 288);
+ EVP_DigestFinal_ex(&evp, md, NULL);
+ EVP_MD_CTX_cleanup(&evp);
+
+ if (memcmp(md, app_c3, sizeof(app_c3))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 3 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ fprintf(stdout, " passed.\n");
+ fflush(stdout);
+
+ fprintf(stdout, "Testing SHA-384 ");
+
+ EVP_Digest("abc", 3, md, NULL, EVP_sha384(), NULL);
+ if (memcmp(md, app_d1, sizeof(app_d1))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 1 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_Digest("abcdefgh" "bcdefghi" "cdefghij" "defghijk"
+ "efghijkl" "fghijklm" "ghijklmn" "hijklmno"
+ "ijklmnop" "jklmnopq" "klmnopqr" "lmnopqrs"
+ "mnopqrst" "nopqrstu", 112, md, NULL, EVP_sha384(), NULL);
+ if (memcmp(md, app_d2, sizeof(app_d2))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 2 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ EVP_MD_CTX_init(&evp);
+ EVP_DigestInit_ex(&evp, EVP_sha384(), NULL);
+ for (i = 0; i < 1000000; i += 64)
+ EVP_DigestUpdate(&evp, "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa"
+ "aaaaaaaa" "aaaaaaaa" "aaaaaaaa" "aaaaaaaa",
+ (1000000 - i) < 64 ? 1000000 - i : 64);
+ EVP_DigestFinal_ex(&evp, md, NULL);
+ EVP_MD_CTX_cleanup(&evp);
+
+ if (memcmp(md, app_d3, sizeof(app_d3))) {
+ fflush(stdout);
+ fprintf(stderr, "\nTEST 3 of 3 failed.\n");
+ return 1;
+ } else
+ fprintf(stdout, ".");
+ fflush(stdout);
+
+ fprintf(stdout, " passed.\n");
+ fflush(stdout);
+
+ return 0;
}
#endif
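
For context, app_c1 above is the FIPS 180-2 single-block "abc" test vector; the same value can be cross-checked against the direct (non-EVP) entry point. An illustrative sketch only, with app_c1 as declared in this test:

    #include <assert.h>
    #include <string.h>
    #include <openssl/sha.h>

    /* cross-check the FIPS 180-2 "abc" vector via SHA512() directly */
    static void check_abc(const unsigned char app_c1[SHA512_DIGEST_LENGTH])
    {
        unsigned char md[SHA512_DIGEST_LENGTH];

        SHA512((const unsigned char *)"abc", 3, md);
        assert(memcmp(md, app_c1, SHA512_DIGEST_LENGTH) == 0);
    }
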
diff --git a/openssl/crypto/sha/sha_dgst.c b/openssl/crypto/sha/sha_dgst.c
index fb63b17ff..f77cf5e38 100644
--- a/openssl/crypto/sha/sha_dgst.c
+++ b/openssl/crypto/sha/sha_dgst.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -60,16 +60,15 @@
#include <openssl/opensslconf.h>
#if !defined(OPENSSL_NO_SHA0) && !defined(OPENSSL_NO_SHA)
-#undef SHA_1
-#define SHA_0
+# undef SHA_1
+# define SHA_0
-#include <openssl/opensslv.h>
+# include <openssl/opensslv.h>
-const char SHA_version[]="SHA" OPENSSL_VERSION_PTEXT;
+const char SHA_version[] = "SHA" OPENSSL_VERSION_PTEXT;
/* The implementation is in ../md32_common.h */
-#include "sha_locl.h"
+# include "sha_locl.h"
#endif
-
diff --git a/openssl/crypto/sha/sha_locl.h b/openssl/crypto/sha/sha_locl.h
index d673255f7..03bd411ed 100644
--- a/openssl/crypto/sha/sha_locl.h
+++ b/openssl/crypto/sha/sha_locl.h
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -67,48 +67,48 @@
#define HASH_LONG SHA_LONG
#define HASH_CTX SHA_CTX
#define HASH_CBLOCK SHA_CBLOCK
-#define HASH_MAKE_STRING(c,s) do { \
- unsigned long ll; \
- ll=(c)->h0; (void)HOST_l2c(ll,(s)); \
- ll=(c)->h1; (void)HOST_l2c(ll,(s)); \
- ll=(c)->h2; (void)HOST_l2c(ll,(s)); \
- ll=(c)->h3; (void)HOST_l2c(ll,(s)); \
- ll=(c)->h4; (void)HOST_l2c(ll,(s)); \
- } while (0)
+#define HASH_MAKE_STRING(c,s) do { \
+ unsigned long ll; \
+ ll=(c)->h0; (void)HOST_l2c(ll,(s)); \
+ ll=(c)->h1; (void)HOST_l2c(ll,(s)); \
+ ll=(c)->h2; (void)HOST_l2c(ll,(s)); \
+ ll=(c)->h3; (void)HOST_l2c(ll,(s)); \
+ ll=(c)->h4; (void)HOST_l2c(ll,(s)); \
+ } while (0)
#if defined(SHA_0)
-# define HASH_UPDATE SHA_Update
-# define HASH_TRANSFORM SHA_Transform
-# define HASH_FINAL SHA_Final
-# define HASH_INIT SHA_Init
-# define HASH_BLOCK_DATA_ORDER sha_block_data_order
-# define Xupdate(a,ix,ia,ib,ic,id) (ix=(a)=(ia^ib^ic^id))
+# define HASH_UPDATE SHA_Update
+# define HASH_TRANSFORM SHA_Transform
+# define HASH_FINAL SHA_Final
+# define HASH_INIT SHA_Init
+# define HASH_BLOCK_DATA_ORDER sha_block_data_order
+# define Xupdate(a,ix,ia,ib,ic,id) (ix=(a)=(ia^ib^ic^id))
-static void sha_block_data_order (SHA_CTX *c, const void *p,size_t num);
+static void sha_block_data_order(SHA_CTX *c, const void *p, size_t num);
#elif defined(SHA_1)
-# define HASH_UPDATE SHA1_Update
-# define HASH_TRANSFORM SHA1_Transform
-# define HASH_FINAL SHA1_Final
-# define HASH_INIT SHA1_Init
-# define HASH_BLOCK_DATA_ORDER sha1_block_data_order
+# define HASH_UPDATE SHA1_Update
+# define HASH_TRANSFORM SHA1_Transform
+# define HASH_FINAL SHA1_Final
+# define HASH_INIT SHA1_Init
+# define HASH_BLOCK_DATA_ORDER sha1_block_data_order
# if defined(__MWERKS__) && defined(__MC68K__)
/* Metrowerks for Motorola fails otherwise:-( <appro@fy.chalmers.se> */
-# define Xupdate(a,ix,ia,ib,ic,id) do { (a)=(ia^ib^ic^id); \
- ix=(a)=ROTATE((a),1); \
- } while (0)
+# define Xupdate(a,ix,ia,ib,ic,id) do { (a)=(ia^ib^ic^id); \
+ ix=(a)=ROTATE((a),1); \
+ } while (0)
# else
-# define Xupdate(a,ix,ia,ib,ic,id) ( (a)=(ia^ib^ic^id), \
- ix=(a)=ROTATE((a),1) \
- )
+# define Xupdate(a,ix,ia,ib,ic,id) ( (a)=(ia^ib^ic^id), \
+ ix=(a)=ROTATE((a),1) \
+ )
# endif
-#ifndef SHA1_ASM
+# ifndef SHA1_ASM
static
-#endif
-void sha1_block_data_order (SHA_CTX *c, const void *p,size_t num);
+# endif
+void sha1_block_data_order(SHA_CTX *c, const void *p, size_t num);
#else
# error "Either SHA_0 or SHA_1 must be defined."
@@ -127,68 +127,68 @@ fips_md_init(SHA)
#else
fips_md_init_ctx(SHA1, SHA)
#endif
- {
- memset (c,0,sizeof(*c));
- c->h0=INIT_DATA_h0;
- c->h1=INIT_DATA_h1;
- c->h2=INIT_DATA_h2;
- c->h3=INIT_DATA_h3;
- c->h4=INIT_DATA_h4;
- return 1;
- }
-
-#define K_00_19 0x5a827999UL
+{
+ memset(c, 0, sizeof(*c));
+ c->h0 = INIT_DATA_h0;
+ c->h1 = INIT_DATA_h1;
+ c->h2 = INIT_DATA_h2;
+ c->h3 = INIT_DATA_h3;
+ c->h4 = INIT_DATA_h4;
+ return 1;
+}
+
+#define K_00_19 0x5a827999UL
#define K_20_39 0x6ed9eba1UL
#define K_40_59 0x8f1bbcdcUL
#define K_60_79 0xca62c1d6UL
-/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be
- * simplified to the code in F_00_19. Wei attributes these optimisations
- * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
- * #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
- * I've just become aware of another tweak to be made, again from Wei Dai,
- * in F_40_59, (x&a)|(y&a) -> (x|y)&a
+/*
+ * As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
+ * to the code in F_00_19. Wei attributes these optimisations to Peter
+ * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define
+ * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another
+ * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a
*/
-#define F_00_19(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
-#define F_20_39(b,c,d) ((b) ^ (c) ^ (d))
-#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d)))
-#define F_60_79(b,c,d) F_20_39(b,c,d)
+#define F_00_19(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b,c,d) ((b) ^ (c) ^ (d))
+#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d)))
+#define F_60_79(b,c,d) F_20_39(b,c,d)
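
A quick exhaustive check of the two rewrites described in the comment above (Wei Dai's simplification in F_00_19 and the (x&a)|(y&a) -> (x|y)&a tweak in F_40_59); both are bitwise identities, so the eight single-bit combinations prove them for full words:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t b, c, d;

        for (b = 0; b <= 1; b++)
            for (c = 0; c <= 1; c++)
                for (d = 0; d <= 1; d++) {
                    /* F_00_19: (((c^d)&b)^d) == classic (b&c)|(~b&d) */
                    assert((((c ^ d) & b) ^ d) == ((b & c) | (~b & d)));
                    /* F_40_59: (b&c)|((b|c)&d) == (b&c)|(b&d)|(c&d) */
                    assert(((b & c) | ((b | c) & d)) ==
                           ((b & c) | (b & d) | (c & d)));
                }
        return 0;
    }
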
#ifndef OPENSSL_SMALL_FOOTPRINT
-#define BODY_00_15(i,a,b,c,d,e,f,xi) \
- (f)=xi+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
- (b)=ROTATE((b),30);
-
-#define BODY_16_19(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
- Xupdate(f,xi,xa,xb,xc,xd); \
- (f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
- (b)=ROTATE((b),30);
-
-#define BODY_20_31(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
- Xupdate(f,xi,xa,xb,xc,xd); \
- (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
- (b)=ROTATE((b),30);
-
-#define BODY_32_39(i,a,b,c,d,e,f,xa,xb,xc,xd) \
- Xupdate(f,xa,xa,xb,xc,xd); \
- (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
- (b)=ROTATE((b),30);
-
-#define BODY_40_59(i,a,b,c,d,e,f,xa,xb,xc,xd) \
- Xupdate(f,xa,xa,xb,xc,xd); \
- (f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \
- (b)=ROTATE((b),30);
-
-#define BODY_60_79(i,a,b,c,d,e,f,xa,xb,xc,xd) \
- Xupdate(f,xa,xa,xb,xc,xd); \
- (f)=xa+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \
- (b)=ROTATE((b),30);
-
-#ifdef X
-#undef X
-#endif
-#ifndef MD32_XARRAY
+# define BODY_00_15(i,a,b,c,d,e,f,xi) \
+ (f)=xi+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+# define BODY_16_19(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
+ Xupdate(f,xi,xa,xb,xc,xd); \
+ (f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+# define BODY_20_31(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
+ Xupdate(f,xi,xa,xb,xc,xd); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+# define BODY_32_39(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,xa,xa,xb,xc,xd); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+# define BODY_40_59(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,xa,xa,xb,xc,xd); \
+ (f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+# define BODY_60_79(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,xa,xa,xb,xc,xd); \
+ (f)=xa+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+# ifdef X
+# undef X
+# endif
+# ifndef MD32_XARRAY
/*
* Originally X was an array. As it's automatic it's natural
 * to expect RISC compiler to accommodate at least part of it in
@@ -196,246 +196,305 @@ fips_md_init_ctx(SHA1, SHA)
* "find" this expectation reasonable:-( On order to make such
* compilers generate better code I replace X[] with a bunch of
* X0, X1, etc. See the function body below...
- * <appro@fy.chalmers.se>
+ * <appro@fy.chalmers.se>
*/
-# define X(i) XX##i
-#else
+# define X(i) XX##i
+# else
/*
* However! Some compilers (most notably HP C) get overwhelmed by
* that many local variables so that we have to have the way to
* fall down to the original behavior.
*/
-# define X(i) XX[i]
-#endif
-
-#if !defined(SHA_1) || !defined(SHA1_ASM)
-static void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, size_t num)
- {
- const unsigned char *data=p;
- register unsigned MD32_REG_T A,B,C,D,E,T,l;
-#ifndef MD32_XARRAY
- unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
- XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
-#else
- SHA_LONG XX[16];
-#endif
+# define X(i) XX[i]
+# endif
- A=c->h0;
- B=c->h1;
- C=c->h2;
- D=c->h3;
- E=c->h4;
-
- for (;;)
- {
- const union { long one; char little; } is_endian = {1};
-
- if (!is_endian.little && sizeof(SHA_LONG)==4 && ((size_t)p%4)==0)
- {
- const SHA_LONG *W=(const SHA_LONG *)data;
-
- X( 0) = W[0]; X( 1) = W[ 1];
- BODY_00_15( 0,A,B,C,D,E,T,X( 0)); X( 2) = W[ 2];
- BODY_00_15( 1,T,A,B,C,D,E,X( 1)); X( 3) = W[ 3];
- BODY_00_15( 2,E,T,A,B,C,D,X( 2)); X( 4) = W[ 4];
- BODY_00_15( 3,D,E,T,A,B,C,X( 3)); X( 5) = W[ 5];
- BODY_00_15( 4,C,D,E,T,A,B,X( 4)); X( 6) = W[ 6];
- BODY_00_15( 5,B,C,D,E,T,A,X( 5)); X( 7) = W[ 7];
- BODY_00_15( 6,A,B,C,D,E,T,X( 6)); X( 8) = W[ 8];
- BODY_00_15( 7,T,A,B,C,D,E,X( 7)); X( 9) = W[ 9];
- BODY_00_15( 8,E,T,A,B,C,D,X( 8)); X(10) = W[10];
- BODY_00_15( 9,D,E,T,A,B,C,X( 9)); X(11) = W[11];
- BODY_00_15(10,C,D,E,T,A,B,X(10)); X(12) = W[12];
- BODY_00_15(11,B,C,D,E,T,A,X(11)); X(13) = W[13];
- BODY_00_15(12,A,B,C,D,E,T,X(12)); X(14) = W[14];
- BODY_00_15(13,T,A,B,C,D,E,X(13)); X(15) = W[15];
- BODY_00_15(14,E,T,A,B,C,D,X(14));
- BODY_00_15(15,D,E,T,A,B,C,X(15));
-
- data += SHA_CBLOCK;
- }
- else
- {
- (void)HOST_c2l(data,l); X( 0)=l; (void)HOST_c2l(data,l); X( 1)=l;
- BODY_00_15( 0,A,B,C,D,E,T,X( 0)); (void)HOST_c2l(data,l); X( 2)=l;
- BODY_00_15( 1,T,A,B,C,D,E,X( 1)); (void)HOST_c2l(data,l); X( 3)=l;
- BODY_00_15( 2,E,T,A,B,C,D,X( 2)); (void)HOST_c2l(data,l); X( 4)=l;
- BODY_00_15( 3,D,E,T,A,B,C,X( 3)); (void)HOST_c2l(data,l); X( 5)=l;
- BODY_00_15( 4,C,D,E,T,A,B,X( 4)); (void)HOST_c2l(data,l); X( 6)=l;
- BODY_00_15( 5,B,C,D,E,T,A,X( 5)); (void)HOST_c2l(data,l); X( 7)=l;
- BODY_00_15( 6,A,B,C,D,E,T,X( 6)); (void)HOST_c2l(data,l); X( 8)=l;
- BODY_00_15( 7,T,A,B,C,D,E,X( 7)); (void)HOST_c2l(data,l); X( 9)=l;
- BODY_00_15( 8,E,T,A,B,C,D,X( 8)); (void)HOST_c2l(data,l); X(10)=l;
- BODY_00_15( 9,D,E,T,A,B,C,X( 9)); (void)HOST_c2l(data,l); X(11)=l;
- BODY_00_15(10,C,D,E,T,A,B,X(10)); (void)HOST_c2l(data,l); X(12)=l;
- BODY_00_15(11,B,C,D,E,T,A,X(11)); (void)HOST_c2l(data,l); X(13)=l;
- BODY_00_15(12,A,B,C,D,E,T,X(12)); (void)HOST_c2l(data,l); X(14)=l;
- BODY_00_15(13,T,A,B,C,D,E,X(13)); (void)HOST_c2l(data,l); X(15)=l;
- BODY_00_15(14,E,T,A,B,C,D,X(14));
- BODY_00_15(15,D,E,T,A,B,C,X(15));
- }
-
- BODY_16_19(16,C,D,E,T,A,B,X( 0),X( 0),X( 2),X( 8),X(13));
- BODY_16_19(17,B,C,D,E,T,A,X( 1),X( 1),X( 3),X( 9),X(14));
- BODY_16_19(18,A,B,C,D,E,T,X( 2),X( 2),X( 4),X(10),X(15));
- BODY_16_19(19,T,A,B,C,D,E,X( 3),X( 3),X( 5),X(11),X( 0));
-
- BODY_20_31(20,E,T,A,B,C,D,X( 4),X( 4),X( 6),X(12),X( 1));
- BODY_20_31(21,D,E,T,A,B,C,X( 5),X( 5),X( 7),X(13),X( 2));
- BODY_20_31(22,C,D,E,T,A,B,X( 6),X( 6),X( 8),X(14),X( 3));
- BODY_20_31(23,B,C,D,E,T,A,X( 7),X( 7),X( 9),X(15),X( 4));
- BODY_20_31(24,A,B,C,D,E,T,X( 8),X( 8),X(10),X( 0),X( 5));
- BODY_20_31(25,T,A,B,C,D,E,X( 9),X( 9),X(11),X( 1),X( 6));
- BODY_20_31(26,E,T,A,B,C,D,X(10),X(10),X(12),X( 2),X( 7));
- BODY_20_31(27,D,E,T,A,B,C,X(11),X(11),X(13),X( 3),X( 8));
- BODY_20_31(28,C,D,E,T,A,B,X(12),X(12),X(14),X( 4),X( 9));
- BODY_20_31(29,B,C,D,E,T,A,X(13),X(13),X(15),X( 5),X(10));
- BODY_20_31(30,A,B,C,D,E,T,X(14),X(14),X( 0),X( 6),X(11));
- BODY_20_31(31,T,A,B,C,D,E,X(15),X(15),X( 1),X( 7),X(12));
-
- BODY_32_39(32,E,T,A,B,C,D,X( 0),X( 2),X( 8),X(13));
- BODY_32_39(33,D,E,T,A,B,C,X( 1),X( 3),X( 9),X(14));
- BODY_32_39(34,C,D,E,T,A,B,X( 2),X( 4),X(10),X(15));
- BODY_32_39(35,B,C,D,E,T,A,X( 3),X( 5),X(11),X( 0));
- BODY_32_39(36,A,B,C,D,E,T,X( 4),X( 6),X(12),X( 1));
- BODY_32_39(37,T,A,B,C,D,E,X( 5),X( 7),X(13),X( 2));
- BODY_32_39(38,E,T,A,B,C,D,X( 6),X( 8),X(14),X( 3));
- BODY_32_39(39,D,E,T,A,B,C,X( 7),X( 9),X(15),X( 4));
-
- BODY_40_59(40,C,D,E,T,A,B,X( 8),X(10),X( 0),X( 5));
- BODY_40_59(41,B,C,D,E,T,A,X( 9),X(11),X( 1),X( 6));
- BODY_40_59(42,A,B,C,D,E,T,X(10),X(12),X( 2),X( 7));
- BODY_40_59(43,T,A,B,C,D,E,X(11),X(13),X( 3),X( 8));
- BODY_40_59(44,E,T,A,B,C,D,X(12),X(14),X( 4),X( 9));
- BODY_40_59(45,D,E,T,A,B,C,X(13),X(15),X( 5),X(10));
- BODY_40_59(46,C,D,E,T,A,B,X(14),X( 0),X( 6),X(11));
- BODY_40_59(47,B,C,D,E,T,A,X(15),X( 1),X( 7),X(12));
- BODY_40_59(48,A,B,C,D,E,T,X( 0),X( 2),X( 8),X(13));
- BODY_40_59(49,T,A,B,C,D,E,X( 1),X( 3),X( 9),X(14));
- BODY_40_59(50,E,T,A,B,C,D,X( 2),X( 4),X(10),X(15));
- BODY_40_59(51,D,E,T,A,B,C,X( 3),X( 5),X(11),X( 0));
- BODY_40_59(52,C,D,E,T,A,B,X( 4),X( 6),X(12),X( 1));
- BODY_40_59(53,B,C,D,E,T,A,X( 5),X( 7),X(13),X( 2));
- BODY_40_59(54,A,B,C,D,E,T,X( 6),X( 8),X(14),X( 3));
- BODY_40_59(55,T,A,B,C,D,E,X( 7),X( 9),X(15),X( 4));
- BODY_40_59(56,E,T,A,B,C,D,X( 8),X(10),X( 0),X( 5));
- BODY_40_59(57,D,E,T,A,B,C,X( 9),X(11),X( 1),X( 6));
- BODY_40_59(58,C,D,E,T,A,B,X(10),X(12),X( 2),X( 7));
- BODY_40_59(59,B,C,D,E,T,A,X(11),X(13),X( 3),X( 8));
-
- BODY_60_79(60,A,B,C,D,E,T,X(12),X(14),X( 4),X( 9));
- BODY_60_79(61,T,A,B,C,D,E,X(13),X(15),X( 5),X(10));
- BODY_60_79(62,E,T,A,B,C,D,X(14),X( 0),X( 6),X(11));
- BODY_60_79(63,D,E,T,A,B,C,X(15),X( 1),X( 7),X(12));
- BODY_60_79(64,C,D,E,T,A,B,X( 0),X( 2),X( 8),X(13));
- BODY_60_79(65,B,C,D,E,T,A,X( 1),X( 3),X( 9),X(14));
- BODY_60_79(66,A,B,C,D,E,T,X( 2),X( 4),X(10),X(15));
- BODY_60_79(67,T,A,B,C,D,E,X( 3),X( 5),X(11),X( 0));
- BODY_60_79(68,E,T,A,B,C,D,X( 4),X( 6),X(12),X( 1));
- BODY_60_79(69,D,E,T,A,B,C,X( 5),X( 7),X(13),X( 2));
- BODY_60_79(70,C,D,E,T,A,B,X( 6),X( 8),X(14),X( 3));
- BODY_60_79(71,B,C,D,E,T,A,X( 7),X( 9),X(15),X( 4));
- BODY_60_79(72,A,B,C,D,E,T,X( 8),X(10),X( 0),X( 5));
- BODY_60_79(73,T,A,B,C,D,E,X( 9),X(11),X( 1),X( 6));
- BODY_60_79(74,E,T,A,B,C,D,X(10),X(12),X( 2),X( 7));
- BODY_60_79(75,D,E,T,A,B,C,X(11),X(13),X( 3),X( 8));
- BODY_60_79(76,C,D,E,T,A,B,X(12),X(14),X( 4),X( 9));
- BODY_60_79(77,B,C,D,E,T,A,X(13),X(15),X( 5),X(10));
- BODY_60_79(78,A,B,C,D,E,T,X(14),X( 0),X( 6),X(11));
- BODY_60_79(79,T,A,B,C,D,E,X(15),X( 1),X( 7),X(12));
-
- c->h0=(c->h0+E)&0xffffffffL;
- c->h1=(c->h1+T)&0xffffffffL;
- c->h2=(c->h2+A)&0xffffffffL;
- c->h3=(c->h3+B)&0xffffffffL;
- c->h4=(c->h4+C)&0xffffffffL;
-
- if (--num == 0) break;
-
- A=c->h0;
- B=c->h1;
- C=c->h2;
- D=c->h3;
- E=c->h4;
-
- }
- }
-#endif
+# if !defined(SHA_1) || !defined(SHA1_ASM)
+static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num)
+{
+ const unsigned char *data = p;
+ register unsigned MD32_REG_T A, B, C, D, E, T, l;
+# ifndef MD32_XARRAY
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9, XX10, XX11, XX12, XX13, XX14, XX15;
+# else
+ SHA_LONG XX[16];
+# endif
+
+ A = c->h0;
+ B = c->h1;
+ C = c->h2;
+ D = c->h3;
+ E = c->h4;
+
+ for (;;) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {
+ 1
+ };
+
+ if (!is_endian.little && sizeof(SHA_LONG) == 4
+ && ((size_t)p % 4) == 0) {
+ const SHA_LONG *W = (const SHA_LONG *)data;
+
+ X(0) = W[0];
+ X(1) = W[1];
+ BODY_00_15(0, A, B, C, D, E, T, X(0));
+ X(2) = W[2];
+ BODY_00_15(1, T, A, B, C, D, E, X(1));
+ X(3) = W[3];
+ BODY_00_15(2, E, T, A, B, C, D, X(2));
+ X(4) = W[4];
+ BODY_00_15(3, D, E, T, A, B, C, X(3));
+ X(5) = W[5];
+ BODY_00_15(4, C, D, E, T, A, B, X(4));
+ X(6) = W[6];
+ BODY_00_15(5, B, C, D, E, T, A, X(5));
+ X(7) = W[7];
+ BODY_00_15(6, A, B, C, D, E, T, X(6));
+ X(8) = W[8];
+ BODY_00_15(7, T, A, B, C, D, E, X(7));
+ X(9) = W[9];
+ BODY_00_15(8, E, T, A, B, C, D, X(8));
+ X(10) = W[10];
+ BODY_00_15(9, D, E, T, A, B, C, X(9));
+ X(11) = W[11];
+ BODY_00_15(10, C, D, E, T, A, B, X(10));
+ X(12) = W[12];
+ BODY_00_15(11, B, C, D, E, T, A, X(11));
+ X(13) = W[13];
+ BODY_00_15(12, A, B, C, D, E, T, X(12));
+ X(14) = W[14];
+ BODY_00_15(13, T, A, B, C, D, E, X(13));
+ X(15) = W[15];
+ BODY_00_15(14, E, T, A, B, C, D, X(14));
+ BODY_00_15(15, D, E, T, A, B, C, X(15));
+
+ data += SHA_CBLOCK;
+ } else {
+ (void)HOST_c2l(data, l);
+ X(0) = l;
+ (void)HOST_c2l(data, l);
+ X(1) = l;
+ BODY_00_15(0, A, B, C, D, E, T, X(0));
+ (void)HOST_c2l(data, l);
+ X(2) = l;
+ BODY_00_15(1, T, A, B, C, D, E, X(1));
+ (void)HOST_c2l(data, l);
+ X(3) = l;
+ BODY_00_15(2, E, T, A, B, C, D, X(2));
+ (void)HOST_c2l(data, l);
+ X(4) = l;
+ BODY_00_15(3, D, E, T, A, B, C, X(3));
+ (void)HOST_c2l(data, l);
+ X(5) = l;
+ BODY_00_15(4, C, D, E, T, A, B, X(4));
+ (void)HOST_c2l(data, l);
+ X(6) = l;
+ BODY_00_15(5, B, C, D, E, T, A, X(5));
+ (void)HOST_c2l(data, l);
+ X(7) = l;
+ BODY_00_15(6, A, B, C, D, E, T, X(6));
+ (void)HOST_c2l(data, l);
+ X(8) = l;
+ BODY_00_15(7, T, A, B, C, D, E, X(7));
+ (void)HOST_c2l(data, l);
+ X(9) = l;
+ BODY_00_15(8, E, T, A, B, C, D, X(8));
+ (void)HOST_c2l(data, l);
+ X(10) = l;
+ BODY_00_15(9, D, E, T, A, B, C, X(9));
+ (void)HOST_c2l(data, l);
+ X(11) = l;
+ BODY_00_15(10, C, D, E, T, A, B, X(10));
+ (void)HOST_c2l(data, l);
+ X(12) = l;
+ BODY_00_15(11, B, C, D, E, T, A, X(11));
+ (void)HOST_c2l(data, l);
+ X(13) = l;
+ BODY_00_15(12, A, B, C, D, E, T, X(12));
+ (void)HOST_c2l(data, l);
+ X(14) = l;
+ BODY_00_15(13, T, A, B, C, D, E, X(13));
+ (void)HOST_c2l(data, l);
+ X(15) = l;
+ BODY_00_15(14, E, T, A, B, C, D, X(14));
+ BODY_00_15(15, D, E, T, A, B, C, X(15));
+ }
+
+ BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));
+ BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));
+ BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));
+ BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));
+
+ BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));
+ BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));
+ BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));
+ BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));
+ BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));
+ BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));
+ BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7));
+ BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8));
+ BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9));
+ BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10));
+ BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11));
+ BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12));
+
+ BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13));
+ BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14));
+ BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15));
+ BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0));
+ BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1));
+ BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2));
+ BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3));
+ BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4));
+
+ BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5));
+ BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6));
+ BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7));
+ BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8));
+ BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9));
+ BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10));
+ BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11));
+ BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12));
+ BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13));
+ BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14));
+ BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15));
+ BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0));
+ BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1));
+ BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2));
+ BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3));
+ BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4));
+ BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5));
+ BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6));
+ BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7));
+ BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8));
+
+ BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9));
+ BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10));
+ BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11));
+ BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12));
+ BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13));
+ BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14));
+ BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15));
+ BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0));
+ BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1));
+ BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2));
+ BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3));
+ BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4));
+ BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5));
+ BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6));
+ BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7));
+ BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8));
+ BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9));
+ BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10));
+ BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
+ BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));
+
+ c->h0 = (c->h0 + E) & 0xffffffffL;
+ c->h1 = (c->h1 + T) & 0xffffffffL;
+ c->h2 = (c->h2 + A) & 0xffffffffL;
+ c->h3 = (c->h3 + B) & 0xffffffffL;
+ c->h4 = (c->h4 + C) & 0xffffffffL;
+
+ if (--num == 0)
+ break;
+
+ A = c->h0;
+ B = c->h1;
+ C = c->h2;
+ D = c->h3;
+ E = c->h4;
+
+ }
+}
+# endif
-#else /* OPENSSL_SMALL_FOOTPRINT */
-
-#define BODY_00_15(xi) do { \
- T=E+K_00_19+F_00_19(B,C,D); \
- E=D, D=C, C=ROTATE(B,30), B=A; \
- A=ROTATE(A,5)+T+xi; } while(0)
-
-#define BODY_16_19(xa,xb,xc,xd) do { \
- Xupdate(T,xa,xa,xb,xc,xd); \
- T+=E+K_00_19+F_00_19(B,C,D); \
- E=D, D=C, C=ROTATE(B,30), B=A; \
- A=ROTATE(A,5)+T; } while(0)
-
-#define BODY_20_39(xa,xb,xc,xd) do { \
- Xupdate(T,xa,xa,xb,xc,xd); \
- T+=E+K_20_39+F_20_39(B,C,D); \
- E=D, D=C, C=ROTATE(B,30), B=A; \
- A=ROTATE(A,5)+T; } while(0)
-
-#define BODY_40_59(xa,xb,xc,xd) do { \
- Xupdate(T,xa,xa,xb,xc,xd); \
- T+=E+K_40_59+F_40_59(B,C,D); \
- E=D, D=C, C=ROTATE(B,30), B=A; \
- A=ROTATE(A,5)+T; } while(0)
-
-#define BODY_60_79(xa,xb,xc,xd) do { \
- Xupdate(T,xa,xa,xb,xc,xd); \
- T=E+K_60_79+F_60_79(B,C,D); \
- E=D, D=C, C=ROTATE(B,30), B=A; \
- A=ROTATE(A,5)+T+xa; } while(0)
-
-#if !defined(SHA_1) || !defined(SHA1_ASM)
-static void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, size_t num)
- {
- const unsigned char *data=p;
- register unsigned MD32_REG_T A,B,C,D,E,T,l;
- int i;
- SHA_LONG X[16];
-
- A=c->h0;
- B=c->h1;
- C=c->h2;
- D=c->h3;
- E=c->h4;
-
- for (;;)
- {
- for (i=0;i<16;i++)
- { HOST_c2l(data,l); X[i]=l; BODY_00_15(X[i]); }
- for (i=0;i<4;i++)
- { BODY_16_19(X[i], X[i+2], X[i+8], X[(i+13)&15]); }
- for (;i<24;i++)
- { BODY_20_39(X[i&15], X[(i+2)&15], X[(i+8)&15],X[(i+13)&15]); }
- for (i=0;i<20;i++)
- { BODY_40_59(X[(i+8)&15],X[(i+10)&15],X[i&15], X[(i+5)&15]); }
- for (i=4;i<24;i++)
- { BODY_60_79(X[(i+8)&15],X[(i+10)&15],X[i&15], X[(i+5)&15]); }
-
- c->h0=(c->h0+A)&0xffffffffL;
- c->h1=(c->h1+B)&0xffffffffL;
- c->h2=(c->h2+C)&0xffffffffL;
- c->h3=(c->h3+D)&0xffffffffL;
- c->h4=(c->h4+E)&0xffffffffL;
-
- if (--num == 0) break;
-
- A=c->h0;
- B=c->h1;
- C=c->h2;
- D=c->h3;
- E=c->h4;
-
- }
- }
-#endif
+#else /* OPENSSL_SMALL_FOOTPRINT */
+
+# define BODY_00_15(xi) do { \
+ T=E+K_00_19+F_00_19(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T+xi; } while(0)
+
+# define BODY_16_19(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T+=E+K_00_19+F_00_19(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T; } while(0)
+
+# define BODY_20_39(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T+=E+K_20_39+F_20_39(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T; } while(0)
+
+# define BODY_40_59(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T+=E+K_40_59+F_40_59(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T; } while(0)
+
+# define BODY_60_79(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T=E+K_60_79+F_60_79(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T+xa; } while(0)
+
+# if !defined(SHA_1) || !defined(SHA1_ASM)
+static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num)
+{
+ const unsigned char *data = p;
+ register unsigned MD32_REG_T A, B, C, D, E, T, l;
+ int i;
+ SHA_LONG X[16];
+
+ A = c->h0;
+ B = c->h1;
+ C = c->h2;
+ D = c->h3;
+ E = c->h4;
+
+ for (;;) {
+ for (i = 0; i < 16; i++) {
+ HOST_c2l(data, l);
+ X[i] = l;
+ BODY_00_15(X[i]);
+ }
+ for (i = 0; i < 4; i++) {
+ BODY_16_19(X[i], X[i + 2], X[i + 8], X[(i + 13) & 15]);
+ }
+ for (; i < 24; i++) {
+ BODY_20_39(X[i & 15], X[(i + 2) & 15], X[(i + 8) & 15],
+ X[(i + 13) & 15]);
+ }
+ for (i = 0; i < 20; i++) {
+ BODY_40_59(X[(i + 8) & 15], X[(i + 10) & 15], X[i & 15],
+ X[(i + 5) & 15]);
+ }
+ for (i = 4; i < 24; i++) {
+ BODY_60_79(X[(i + 8) & 15], X[(i + 10) & 15], X[i & 15],
+ X[(i + 5) & 15]);
+ }
+
+ c->h0 = (c->h0 + A) & 0xffffffffL;
+ c->h1 = (c->h1 + B) & 0xffffffffL;
+ c->h2 = (c->h2 + C) & 0xffffffffL;
+ c->h3 = (c->h3 + D) & 0xffffffffL;
+ c->h4 = (c->h4 + E) & 0xffffffffL;
+
+ if (--num == 0)
+ break;
+
+ A = c->h0;
+ B = c->h1;
+ C = c->h2;
+ D = c->h3;
+ E = c->h4;
+
+ }
+}
+# endif
#endif
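
The sha_locl.h hunk above only reindents the two C fallbacks used when no SHA1_ASM implementation is available: the fully unrolled path, which rotates the roles of the six working variables (A..E, T) through the macro argument lists instead of moving data, plus an aligned big-endian fast path that reads SHA_LONG words straight from the input; and the OPENSSL_SMALL_FOOTPRINT path, which keeps only a 16-word window of the 80-word message schedule, recycling slots modulo 16 (the X(i), X(i+2), X(i+8), X(i+13) pattern visible in the BODY_16_19 through BODY_60_79 calls). The following stand-alone sketch shows that circular-buffer schedule in isolation. It is illustrative only: sha1_compress_sketch and rol are not OpenSSL names, and it assumes C99 with <stdint.h>.

/*
 * Minimal sketch of the circular-buffer message schedule used by the
 * OPENSSL_SMALL_FOOTPRINT path: the 80 expanded words W[0..79] are
 * never stored, only a 16-word window indexed modulo 16.
 */
#include <stdint.h>

static uint32_t rol(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }

static void sha1_compress_sketch(uint32_t h[5], const uint8_t blk[64])
{
    uint32_t X[16], a = h[0], b = h[1], c = h[2], d = h[3], e = h[4];
    int i;

    for (i = 0; i < 16; i++)      /* big-endian load, as HOST_c2l does */
        X[i] = (uint32_t)blk[4 * i] << 24 | (uint32_t)blk[4 * i + 1] << 16 |
               (uint32_t)blk[4 * i + 2] << 8 | blk[4 * i + 3];

    for (i = 0; i < 80; i++) {
        uint32_t f, k, t;
        if (i < 20) {
            f = (b & c) | (~b & d);             /* F_00_19 */
            k = 0x5a827999;                     /* K_00_19 */
        } else if (i < 40) {
            f = b ^ c ^ d;                      /* F_20_39 */
            k = 0x6ed9eba1;                     /* K_20_39 */
        } else if (i < 60) {
            f = (b & c) | (b & d) | (c & d);    /* F_40_59 */
            k = 0x8f1bbcdc;                     /* K_40_59 */
        } else {
            f = b ^ c ^ d;                      /* F_60_79 */
            k = 0xca62c1d6;                     /* K_60_79 */
        }
        if (i >= 16)            /* Xupdate: one slot recycled per round,
                                 * W[i-3]^W[i-8]^W[i-14]^W[i-16] rotated by 1 */
            X[i & 15] = rol(X[(i + 13) & 15] ^ X[(i + 8) & 15] ^
                            X[(i + 2) & 15] ^ X[i & 15], 1);
        t = rol(a, 5) + f + e + k + X[i & 15];
        e = d; d = c; c = rol(b, 30); b = a; a = t;
    }
    h[0] += a; h[1] += b; h[2] += c; h[3] += d; h[4] += e;
}

The unrolled variant computes exactly the same rounds; it just bakes the register rotation and the modulo-16 indices into 80 macro invocations so the compiler never emits the data moves.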
diff --git a/openssl/crypto/sha/sha_one.c b/openssl/crypto/sha/sha_one.c
index 3bae623ce..0930b98a6 100644
--- a/openssl/crypto/sha/sha_one.c
+++ b/openssl/crypto/sha/sha_one.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -63,16 +63,17 @@
#ifndef OPENSSL_NO_SHA0
unsigned char *SHA(const unsigned char *d, size_t n, unsigned char *md)
- {
- SHA_CTX c;
- static unsigned char m[SHA_DIGEST_LENGTH];
+{
+ SHA_CTX c;
+ static unsigned char m[SHA_DIGEST_LENGTH];
- if (md == NULL) md=m;
- if (!SHA_Init(&c))
- return NULL;
- SHA_Update(&c,d,n);
- SHA_Final(md,&c);
- OPENSSL_cleanse(&c,sizeof(c));
- return(md);
- }
+ if (md == NULL)
+ md = m;
+ if (!SHA_Init(&c))
+ return NULL;
+ SHA_Update(&c, d, n);
+ SHA_Final(md, &c);
+ OPENSSL_cleanse(&c, sizeof(c));
+ return (md);
+}
#endif
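
The sha_one.c change is likewise whitespace-only: SHA() still falls back to a static internal buffer when md is NULL, so that returned pointer is overwritten by the next call and is not thread-safe. A hedged caller-side sketch (the main() below is hypothetical; it assumes a build with SHA-0 enabled, i.e. OPENSSL_NO_SHA0 not defined) that sidesteps the static buffer by supplying its own:

/*
 * One-shot SHA() usage sketch.  Passing a caller-owned buffer avoids
 * the non-thread-safe static fallback; SHA() returns NULL only when
 * SHA_Init() fails.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

int main(void)
{
    const unsigned char msg[] = "abc";
    unsigned char md[SHA_DIGEST_LENGTH];
    int i;

    if (SHA(msg, strlen((const char *)msg), md) == NULL)
        return 1;                    /* SHA_Init failed */
    for (i = 0; i < SHA_DIGEST_LENGTH; i++)
        printf("%02x", md[i]);
    printf("\n");
    return 0;
}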
diff --git a/openssl/crypto/sha/shatest.c b/openssl/crypto/sha/shatest.c
index 27614646d..105060a7e 100644
--- a/openssl/crypto/sha/shatest.c
+++ b/openssl/crypto/sha/shatest.c
@@ -5,21 +5,21 @@
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
- *
+ *
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
+ *
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -34,10 +34,10 @@
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
+ * 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
+ *
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -49,7 +49,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
@@ -66,113 +66,109 @@
int main(int argc, char *argv[])
{
printf("No SHA0 support\n");
- return(0);
+ return (0);
}
#else
-#include <openssl/evp.h>
-#include <openssl/sha.h>
+# include <openssl/evp.h>
+# include <openssl/sha.h>
-#ifdef CHARSET_EBCDIC
-#include <openssl/ebcdic.h>
-#endif
+# ifdef CHARSET_EBCDIC
+# include <openssl/ebcdic.h>
+# endif
-#define SHA_0 /* FIPS 180 */
-#undef SHA_1 /* FIPS 180-1 */
-
-static char *test[]={
- "abc",
- "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
- NULL,
- };
-
-#ifdef SHA_0
-static char *ret[]={
- "0164b8a914cd2a5e74c4f7ff082c4d97f1edf880",
- "d2516ee1acfa5baf33dfc1c471e438449ef134c8",
- };
-static char *bigret=
- "3232affa48628a26653b5aaa44541fd90d690603";
-#endif
-#ifdef SHA_1
-static char *ret[]={
- "a9993e364706816aba3e25717850c26c9cd0d89d",
- "84983e441c3bd26ebaae4aa1f95129e5e54670f1",
- };
-static char *bigret=
- "34aa973cd4c4daa4f61eeb2bdbad27316534016f";
-#endif
+# define SHA_0 /* FIPS 180 */
+# undef SHA_1 /* FIPS 180-1 */
+
+static char *test[] = {
+ "abc",
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+ NULL,
+};
+
+# ifdef SHA_0
+static char *ret[] = {
+ "0164b8a914cd2a5e74c4f7ff082c4d97f1edf880",
+ "d2516ee1acfa5baf33dfc1c471e438449ef134c8",
+};
+
+static char *bigret = "3232affa48628a26653b5aaa44541fd90d690603";
+# endif
+# ifdef SHA_1
+static char *ret[] = {
+ "a9993e364706816aba3e25717850c26c9cd0d89d",
+ "84983e441c3bd26ebaae4aa1f95129e5e54670f1",
+};
+
+static char *bigret = "34aa973cd4c4daa4f61eeb2bdbad27316534016f";
+# endif
static char *pt(unsigned char *md);
int main(int argc, char *argv[])
- {
- int i,err=0;
- char **P,**R;
- static unsigned char buf[1000];
- char *p,*r;
- EVP_MD_CTX c;
- unsigned char md[SHA_DIGEST_LENGTH];
-
-#ifdef CHARSET_EBCDIC
- ebcdic2ascii(test[0], test[0], strlen(test[0]));
- ebcdic2ascii(test[1], test[1], strlen(test[1]));
-#endif
+{
+ int i, err = 0;
+ char **P, **R;
+ static unsigned char buf[1000];
+ char *p, *r;
+ EVP_MD_CTX c;
+ unsigned char md[SHA_DIGEST_LENGTH];
- EVP_MD_CTX_init(&c);
- P=test;
- R=ret;
- i=1;
- while (*P != NULL)
- {
- EVP_Digest(*P,strlen(*P),md,NULL,EVP_sha(), NULL);
- p=pt(md);
- if (strcmp(p,*R) != 0)
- {
- printf("error calculating SHA on '%s'\n",*P);
- printf("got %s instead of %s\n",p,*R);
- err++;
- }
- else
- printf("test %d ok\n",i);
- i++;
- R++;
- P++;
- }
-
- memset(buf,'a',1000);
-#ifdef CHARSET_EBCDIC
- ebcdic2ascii(buf, buf, 1000);
-#endif /*CHARSET_EBCDIC*/
- EVP_DigestInit_ex(&c,EVP_sha(), NULL);
- for (i=0; i<1000; i++)
- EVP_DigestUpdate(&c,buf,1000);
- EVP_DigestFinal_ex(&c,md,NULL);
- p=pt(md);
-
- r=bigret;
- if (strcmp(p,r) != 0)
- {
- printf("error calculating SHA on '%s'\n",p);
- printf("got %s instead of %s\n",p,r);
- err++;
- }
- else
- printf("test 3 ok\n");
-
-#ifdef OPENSSL_SYS_NETWARE
- if (err) printf("ERROR: %d\n", err);
-#endif
- EVP_MD_CTX_cleanup(&c);
- EXIT(err);
- return(0);
- }
+# ifdef CHARSET_EBCDIC
+ ebcdic2ascii(test[0], test[0], strlen(test[0]));
+ ebcdic2ascii(test[1], test[1], strlen(test[1]));
+# endif
+
+ EVP_MD_CTX_init(&c);
+ P = test;
+ R = ret;
+ i = 1;
+ while (*P != NULL) {
+ EVP_Digest(*P, strlen(*P), md, NULL, EVP_sha(), NULL);
+ p = pt(md);
+ if (strcmp(p, *R) != 0) {
+ printf("error calculating SHA on '%s'\n", *P);
+ printf("got %s instead of %s\n", p, *R);
+ err++;
+ } else
+ printf("test %d ok\n", i);
+ i++;
+ R++;
+ P++;
+ }
+
+ memset(buf, 'a', 1000);
+# ifdef CHARSET_EBCDIC
+ ebcdic2ascii(buf, buf, 1000);
+# endif /* CHARSET_EBCDIC */
+ EVP_DigestInit_ex(&c, EVP_sha(), NULL);
+ for (i = 0; i < 1000; i++)
+ EVP_DigestUpdate(&c, buf, 1000);
+ EVP_DigestFinal_ex(&c, md, NULL);
+ p = pt(md);
+
+ r = bigret;
+ if (strcmp(p, r) != 0) {
+ printf("error calculating SHA on '%s'\n", p);
+ printf("got %s instead of %s\n", p, r);
+ err++;
+ } else
+ printf("test 3 ok\n");
+
+# ifdef OPENSSL_SYS_NETWARE
+ if (err)
+ printf("ERROR: %d\n", err);
+# endif
+ EVP_MD_CTX_cleanup(&c);
+ EXIT(err);
+ return (0);
+}
static char *pt(unsigned char *md)
- {
- int i;
- static char buf[80];
-
- for (i=0; i<SHA_DIGEST_LENGTH; i++)
- sprintf(&(buf[i*2]),"%02x",md[i]);
- return(buf);
- }
+{
+ int i;
+ static char buf[80];
+
+ for (i = 0; i < SHA_DIGEST_LENGTH; i++)
+ sprintf(&(buf[i * 2]), "%02x", md[i]);
+ return (buf);
+}
#endif
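
The reformatted shatest.c keeps the classic three vectors: "abc", the 56-byte block-boundary string, and one million 'a' bytes streamed as 1000 updates of 1000 bytes each. That last vector's streaming pattern, extracted into a minimal sketch (digest_million_a is a hypothetical name; EVP_sha() selects SHA-0 exactly as the test does and exists only on builds without OPENSSL_NO_SHA0, so substitute EVP_sha1() elsewhere):

/*
 * EVP streaming pattern from the test's third vector: init once,
 * update in chunks, finalize once.  Uses the stack-allocated
 * EVP_MD_CTX of the pre-1.1.0 API shown in the diff above.
 */
#include <string.h>
#include <openssl/evp.h>

static int digest_million_a(unsigned char *md, unsigned int *len)
{
    EVP_MD_CTX c;
    unsigned char buf[1000];
    int i, ok = 1;

    memset(buf, 'a', sizeof(buf));
    EVP_MD_CTX_init(&c);
    if (!EVP_DigestInit_ex(&c, EVP_sha(), NULL))
        ok = 0;
    for (i = 0; ok && i < 1000; i++)     /* 1000 x 1000 bytes */
        if (!EVP_DigestUpdate(&c, buf, sizeof(buf)))
            ok = 0;
    if (ok && !EVP_DigestFinal_ex(&c, md, len))
        ok = 0;
    EVP_MD_CTX_cleanup(&c);
    return ok;
}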