path: root/openssl/crypto/sha/asm/sha512-sparcv9.pl
author     marha <marha@users.sourceforge.net>  2015-02-22 14:43:31 +0100
committer  marha <marha@users.sourceforge.net>  2015-02-22 14:43:31 +0100
commit     c9aad1ae6227c434d480d1d3aa8eae3c3c910c18 (patch)
tree       94b917df998c3d547e191b3b9c58bbffc616470e /openssl/crypto/sha/asm/sha512-sparcv9.pl
parent     f1c2db43dcf35d2cf4715390bd2391c28e42a8c2 (diff)
Upgraded to openssl-1.0.2
Diffstat (limited to 'openssl/crypto/sha/asm/sha512-sparcv9.pl')
-rw-r--r--  openssl/crypto/sha/asm/sha512-sparcv9.pl  |  320
1 file changed, 288 insertions(+), 32 deletions(-)
diff --git a/openssl/crypto/sha/asm/sha512-sparcv9.pl b/openssl/crypto/sha/asm/sha512-sparcv9.pl
index 585740789..5a9c15d1d 100644
--- a/openssl/crypto/sha/asm/sha512-sparcv9.pl
+++ b/openssl/crypto/sha/asm/sha512-sparcv9.pl
@@ -5,6 +5,8 @@
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Hardware SPARC T4 support by David S. Miller <davem@davemloft.net>.
# ====================================================================
# SHA256 performance improvement over compiler generated code varies
@@ -41,11 +43,11 @@
# loads are always slower than one 64-bit load. Once again this
# is unlike pre-T1 UltraSPARC, where, if scheduled appropriately,
# 2x32-bit loads can be as fast as 1x64-bit ones.
-
-$bits=32;
-for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-if ($bits==64) { $bias=2047; $frame=192; }
-else { $bias=0; $frame=112; }
+#
+# SPARC T4 SHA256/512 hardware achieves 3.17/2.01 cycles per byte,
+# which is 9.3x/11.1x faster than software. Multi-process benchmark
+# saturates at 11.5x single-process result on 8-core processor, or
+# ~11/16GBps per 2.85GHz socket.
$output=shift;
open STDOUT,">$output";
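
The throughput claim above is plain clock arithmetic: at 2.85GHz, 3.17 and 2.01 cycles per byte work out to roughly 0.90 and 1.42 GBps for a single process, and the quoted 11.5x multi-process saturation factor brings that close to the ~11/16GBps socket figures. A short Perl sanity check, using only the numbers quoted in the comment:

# Sanity check of the figures quoted above; every input is taken from
# the comment itself (clock, cycles/byte, 11.5x saturation factor).
my $hz    = 2.85e9;
my $scale = 11.5;
my %cpb   = (SHA256 => 3.17, SHA512 => 2.01);

for my $alg (sort keys %cpb) {
    my $gbps = $hz / $cpb{$alg} / 1e9;          # single-process GBps
    printf "%s: %.2f GBps single-process, ~%.0f GBps per socket\n",
           $alg, $gbps, $gbps * $scale;
}
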
@@ -170,6 +172,7 @@ $code.=<<___ if ($i==0);
ld [$inp+16],%l4
ld [$inp+20],%l5
ld [$inp+24],%l6
+ cmp $tmp31,0
ld [$inp+28],%l7
___
$code.=<<___ if ($i<15);
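
The added `cmp $tmp31,0` pairs with the branch rewritten in the next hunk: instead of `brnz,a` on register contents, the code now tests the precomputed %icc flags with `bnz,a,pn %icc,.+8`, the standard SPARC idiom for conditionally executing exactly one instruction (the annulled delay slot runs only when the branch is taken, and either way control resumes at .+8). A minimal Perl sketch that emits the idiom, with hypothetical registers that are not part of this diff:

# Conditional execution of a single instruction via an annulled branch.
# Taken: the delay-slot ld executes and control resumes at .+8 (the next
# instruction). Not taken: ",a" annuls the slot, so the ld is skipped.
# %g1, %o1, %l0 here are purely illustrative.
print "\tcmp\t%g1, 0\n";
print "\tbnz,a,pn\t%icc, .+8\n";
print "\tld\t[%o1+128], %l0\t! executes only when %g1 != 0\n";
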
@@ -182,29 +185,29 @@ $code.=<<___ if ($i<15);
or @pair[1],$tmp2,$tmp2
`"ld [$inp+".eval(32+4+$i*8)."],@pair[1]" if ($i<12)`
add $h,$tmp2,$T1
- $ST $tmp2,[%sp+`$bias+$frame+$i*$SZ`]
+ $ST $tmp2,[%sp+STACK_BIAS+STACK_FRAME+`$i*$SZ`]
___
$code.=<<___ if ($i==12);
- brnz,a $tmp31,.+8
+ bnz,a,pn %icc,.+8
ld [$inp+128],%l0
___
$code.=<<___ if ($i==15);
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+0`],%l2
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+0`],%l2
sllx @pair[1],$tmp31,$tmp2 ! Xload($i)
add $tmp31,32,$tmp0
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+4`],%l3
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+4`],%l3
sllx @pair[0],$tmp0,$tmp1
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+0`],%l4
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+0`],%l4
srlx @pair[2],$tmp32,@pair[1]
or $tmp1,$tmp2,$tmp2
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+4`],%l5
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+4`],%l5
or @pair[1],$tmp2,$tmp2
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+0`],%l6
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+0`],%l6
add $h,$tmp2,$T1
- $ST $tmp2,[%sp+`$bias+$frame+$i*$SZ`]
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+4`],%l7
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+0`],%l0
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+4`],%l1
+ $ST $tmp2,[%sp+STACK_BIAS+STACK_FRAME+`$i*$SZ`]
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+4`],%l7
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+0`],%l0
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+4`],%l1
___
} if ($SZ==8);
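
The repeated change in this hunk is mechanical: stack offsets previously folded into a literal constant while the Perl ran (from $bias and $frame, selected by the deleted -m64/-xarch=v9 command-line scan) are now emitted as symbolic STACK_BIAS+STACK_FRAME expressions that sparc_arch.h resolves at assembly time, so one generated file serves both ABIs. A reduced sketch of the two styles, using the v9 constants from the deleted code:

# Old style: ABI constants folded into a literal at generation time.
my ($bias, $frame, $SZ, $i) = (2047, 192, 8, 0);  # v9 values from the deleted code
printf "\tstx\t%%g1,[%%sp+%d]\n", $bias + $frame + $i*$SZ;   # emits [%sp+2239]

# New style: symbolic; sparc_arch.h supplies STACK_BIAS/STACK_FRAME
# when the single generated file is assembled for either ABI.
printf "\tstx\t%%g1,[%%sp+STACK_BIAS+STACK_FRAME+%d]\n", $i*$SZ;
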
@@ -340,9 +343,9 @@ $code.=<<___;
or %l3,$tmp0,$tmp0
srlx $tmp0,@sigma0[0],$T1
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+0`],%l2
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+0`],%l2
sllx $tmp0,`64-@sigma0[2]`,$tmp1
- ld [%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+4`],%l3
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+1)%16)*$SZ+4`],%l3
srlx $tmp0,@sigma0[1],$tmp0
xor $tmp1,$T1,$T1
sllx $tmp1,`@sigma0[2]-@sigma0[1]`,$tmp1
@@ -354,9 +357,9 @@ $code.=<<___;
or %l7,$tmp2,$tmp2
srlx $tmp2,@sigma1[0],$tmp1
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+0`],%l6
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+0`],%l6
sllx $tmp2,`64-@sigma1[2]`,$tmp0
- ld [%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+4`],%l7
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+14)%16)*$SZ+4`],%l7
srlx $tmp2,@sigma1[1],$tmp2
xor $tmp0,$tmp1,$tmp1
sllx $tmp0,`@sigma1[2]-@sigma1[1]`,$tmp0
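
For reference, the srlx/sllx/xor chains in these two hunks evaluate the standard SHA-512 message-schedule functions over 64-bit words reassembled from the %l register pairs: sigma0(x) = ror(x,1) ^ ror(x,8) ^ (x >> 7) and sigma1(x) = ror(x,19) ^ ror(x,61) ^ (x >> 6). A plain-Perl reference version (a sketch, assuming a 64-bit perl) for checking the assembly against:

# Reference SHA-512 message-schedule functions (sketch; assumes 64-bit perl).
sub ror64 {
    my ($x, $n) = @_;
    return (($x >> $n) | ($x << (64 - $n))) & 0xFFFFFFFFFFFFFFFF;
}
sub sigma0 { my $x = shift; ror64($x, 1)  ^ ror64($x, 8)  ^ ($x >> 7) }
sub sigma1 { my $x = shift; ror64($x, 19) ^ ror64($x, 61) ^ ($x >> 6) }

printf "sigma0=%016x sigma1=%016x\n",
       sigma0(0x0123456789abcdef), sigma1(0x0123456789abcdef);
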
@@ -365,27 +368,30 @@ $code.=<<___;
xor $tmp0,$tmp1,$tmp1
sllx %l4,32,$tmp0
xor $tmp2,$tmp1,$tmp1 ! sigma1(X[$i+14])
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+0`],%l4
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+0`],%l4
or %l5,$tmp0,$tmp0
- ld [%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+4`],%l5
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+9)%16)*$SZ+4`],%l5
sllx %l0,32,$tmp2
add $tmp1,$T1,$T1
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+0`],%l0
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+0`],%l0
or %l1,$tmp2,$tmp2
add $tmp0,$T1,$T1 ! +=X[$i+9]
- ld [%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+4`],%l1
+ ld [%sp+STACK_BIAS+STACK_FRAME+`(($i+1+0)%16)*$SZ+4`],%l1
add $tmp2,$T1,$T1 ! +=X[$i]
- $ST $T1,[%sp+`$bias+$frame+($i%16)*$SZ`]
+ $ST $T1,[%sp+STACK_BIAS+STACK_FRAME+`($i%16)*$SZ`]
___
&BODY_00_15(@_);
} if ($SZ==8);
-$code.=<<___ if ($bits==64);
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef __arch64__
.register %g2,#scratch
.register %g3,#scratch
-___
-$code.=<<___;
+#endif
+
.section ".text",#alloc,#execinstr
.align 64
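
The #ifdef follows the same theme as the stack-offset macros: the .register scratch declarations, previously emitted only when the generator had decided $bits==64, are now always present in the output and guarded by __arch64__ instead. A self-contained sketch of the before/after pattern:

my $bits = 64;   # as the deleted -m64/-xarch=v9 command-line scan set it
my $code = "";

# Before: the generator decided, so the output file was ABI-specific.
$code .= <<___ if ($bits==64);
.register	%g2,#scratch
___

# After: always emitted; the assembler's preprocessor decides instead,
# and a single generated file serves both ABIs.
$code .= <<___;
#ifdef __arch64__
.register	%g2,#scratch
#endif
___

print $code;
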
@@ -457,9 +463,203 @@ ___
}
$code.=<<___;
.size K${label},.-K${label}
+
+#ifdef __PIC__
+SPARC_PIC_THUNK(%g1)
+#endif
+
.globl sha${label}_block_data_order
+.align 32
sha${label}_block_data_order:
- save %sp,`-$frame-$locals`,%sp
+ SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+ ld [%g1+4],%g1 ! OPENSSL_sparcv9cap_P[1]
+
+ andcc %g1, CFR_SHA${label}, %g0
+ be .Lsoftware
+ nop
+___
+$code.=<<___ if ($SZ==8); # SHA512
+ ldd [%o0 + 0x00], %f0 ! load context
+ ldd [%o0 + 0x08], %f2
+ ldd [%o0 + 0x10], %f4
+ ldd [%o0 + 0x18], %f6
+ ldd [%o0 + 0x20], %f8
+ ldd [%o0 + 0x28], %f10
+ andcc %o1, 0x7, %g0
+ ldd [%o0 + 0x30], %f12
+ bne,pn %icc, .Lhwunaligned
+ ldd [%o0 + 0x38], %f14
+
+.Lhwaligned_loop:
+ ldd [%o1 + 0x00], %f16
+ ldd [%o1 + 0x08], %f18
+ ldd [%o1 + 0x10], %f20
+ ldd [%o1 + 0x18], %f22
+ ldd [%o1 + 0x20], %f24
+ ldd [%o1 + 0x28], %f26
+ ldd [%o1 + 0x30], %f28
+ ldd [%o1 + 0x38], %f30
+ ldd [%o1 + 0x40], %f32
+ ldd [%o1 + 0x48], %f34
+ ldd [%o1 + 0x50], %f36
+ ldd [%o1 + 0x58], %f38
+ ldd [%o1 + 0x60], %f40
+ ldd [%o1 + 0x68], %f42
+ ldd [%o1 + 0x70], %f44
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x78], %f46
+ add %o1, 0x80, %o1
+ prefetch [%o1 + 63], 20
+ prefetch [%o1 + 64+63], 20
+
+ .word 0x81b02860 ! SHA512
+
+ bne,pt SIZE_T_CC, .Lhwaligned_loop
+ nop
+
+.Lhwfinish:
+ std %f0, [%o0 + 0x00] ! store context
+ std %f2, [%o0 + 0x08]
+ std %f4, [%o0 + 0x10]
+ std %f6, [%o0 + 0x18]
+ std %f8, [%o0 + 0x20]
+ std %f10, [%o0 + 0x28]
+ std %f12, [%o0 + 0x30]
+ retl
+ std %f14, [%o0 + 0x38]
+
+.align 16
+.Lhwunaligned:
+ alignaddr %o1, %g0, %o1
+
+ ldd [%o1 + 0x00], %f18
+.Lhwunaligned_loop:
+ ldd [%o1 + 0x08], %f20
+ ldd [%o1 + 0x10], %f22
+ ldd [%o1 + 0x18], %f24
+ ldd [%o1 + 0x20], %f26
+ ldd [%o1 + 0x28], %f28
+ ldd [%o1 + 0x30], %f30
+ ldd [%o1 + 0x38], %f32
+ ldd [%o1 + 0x40], %f34
+ ldd [%o1 + 0x48], %f36
+ ldd [%o1 + 0x50], %f38
+ ldd [%o1 + 0x58], %f40
+ ldd [%o1 + 0x60], %f42
+ ldd [%o1 + 0x68], %f44
+ ldd [%o1 + 0x70], %f46
+ ldd [%o1 + 0x78], %f48
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x80], %f50
+ add %o1, 0x80, %o1
+ prefetch [%o1 + 63], 20
+ prefetch [%o1 + 64+63], 20
+
+ faligndata %f18, %f20, %f16
+ faligndata %f20, %f22, %f18
+ faligndata %f22, %f24, %f20
+ faligndata %f24, %f26, %f22
+ faligndata %f26, %f28, %f24
+ faligndata %f28, %f30, %f26
+ faligndata %f30, %f32, %f28
+ faligndata %f32, %f34, %f30
+ faligndata %f34, %f36, %f32
+ faligndata %f36, %f38, %f34
+ faligndata %f38, %f40, %f36
+ faligndata %f40, %f42, %f38
+ faligndata %f42, %f44, %f40
+ faligndata %f44, %f46, %f42
+ faligndata %f46, %f48, %f44
+ faligndata %f48, %f50, %f46
+
+ .word 0x81b02860 ! SHA512
+
+ bne,pt SIZE_T_CC, .Lhwunaligned_loop
+ for %f50, %f50, %f18 ! %f18=%f50
+
+ ba .Lhwfinish
+ nop
+___
+$code.=<<___ if ($SZ==4); # SHA256
+ ld [%o0 + 0x00], %f0
+ ld [%o0 + 0x04], %f1
+ ld [%o0 + 0x08], %f2
+ ld [%o0 + 0x0c], %f3
+ ld [%o0 + 0x10], %f4
+ ld [%o0 + 0x14], %f5
+ andcc %o1, 0x7, %g0
+ ld [%o0 + 0x18], %f6
+ bne,pn %icc, .Lhwunaligned
+ ld [%o0 + 0x1c], %f7
+
+.Lhwloop:
+ ldd [%o1 + 0x00], %f8
+ ldd [%o1 + 0x08], %f10
+ ldd [%o1 + 0x10], %f12
+ ldd [%o1 + 0x18], %f14
+ ldd [%o1 + 0x20], %f16
+ ldd [%o1 + 0x28], %f18
+ ldd [%o1 + 0x30], %f20
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x38], %f22
+ add %o1, 0x40, %o1
+ prefetch [%o1 + 63], 20
+
+ .word 0x81b02840 ! SHA256
+
+ bne,pt SIZE_T_CC, .Lhwloop
+ nop
+
+.Lhwfinish:
+ st %f0, [%o0 + 0x00] ! store context
+ st %f1, [%o0 + 0x04]
+ st %f2, [%o0 + 0x08]
+ st %f3, [%o0 + 0x0c]
+ st %f4, [%o0 + 0x10]
+ st %f5, [%o0 + 0x14]
+ st %f6, [%o0 + 0x18]
+ retl
+ st %f7, [%o0 + 0x1c]
+
+.align 8
+.Lhwunaligned:
+ alignaddr %o1, %g0, %o1
+
+ ldd [%o1 + 0x00], %f10
+.Lhwunaligned_loop:
+ ldd [%o1 + 0x08], %f12
+ ldd [%o1 + 0x10], %f14
+ ldd [%o1 + 0x18], %f16
+ ldd [%o1 + 0x20], %f18
+ ldd [%o1 + 0x28], %f20
+ ldd [%o1 + 0x30], %f22
+ ldd [%o1 + 0x38], %f24
+ subcc %o2, 1, %o2 ! done yet?
+ ldd [%o1 + 0x40], %f26
+ add %o1, 0x40, %o1
+ prefetch [%o1 + 63], 20
+
+ faligndata %f10, %f12, %f8
+ faligndata %f12, %f14, %f10
+ faligndata %f14, %f16, %f12
+ faligndata %f16, %f18, %f14
+ faligndata %f18, %f20, %f16
+ faligndata %f20, %f22, %f18
+ faligndata %f22, %f24, %f20
+ faligndata %f24, %f26, %f22
+
+ .word 0x81b02840 ! SHA256
+
+ bne,pt SIZE_T_CC, .Lhwunaligned_loop
+ for %f26, %f26, %f10 ! %f10=%f26
+
+ ba .Lhwfinish
+ nop
+___
+$code.=<<___;
+.align 16
+.Lsoftware:
+ save %sp,-STACK_FRAME-$locals,%sp
and $inp,`$align-1`,$tmp31
sllx $len,`log(16*$SZ)/log(2)`,$len
andn $inp,`$align-1`,$inp
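
In the hardware path above, `.word 0x81b02860` / `.word 0x81b02840` hand-encode the T4 SHA512/SHA256 opcodes (presumably because assemblers of the day lacked mnemonics for them), and the unaligned loops realign each block with a sliding faligndata window: every destination register is built from two adjacent source registers, with the final word carried into the next iteration by the `for %f50,%f50,%f18` move. The sixteen faligndata lines follow a strict pattern; a small Perl generator that reproduces the SHA512 sequence:

# Regenerate the sliding realignment window of the SHA512 unaligned loop:
# destination %f(i-2) is assembled from adjacent sources %f(i) and %f(i+2).
for (my $i = 18; $i <= 48; $i += 2) {
    printf "\tfaligndata\t%%f%d, %%f%d, %%f%d\n", $i, $i + 2, $i - 2;
}
# prints the sixteen faligndata instructions matching those listed above
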
@@ -578,7 +778,7 @@ ___
$code.=<<___;
add $inp,`16*$SZ`,$inp ! advance inp
cmp $inp,$len
- bne `$bits==64?"%xcc":"%icc"`,.Lloop
+ bne SIZE_T_CC,.Lloop
sub $Ktbl,`($rounds-16)*$SZ`,$Ktbl ! rewind Ktbl
ret
@@ -589,6 +789,62 @@ $code.=<<___;
.align 4
___
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
+# The purpose of these subroutines is to explicitly encode VIS instructions,
+# so that one can compile the module without having to specify VIS
+# extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
+# The idea is to reserve the option of producing a "universal" binary and
+# letting the programmer detect at run-time whether the current CPU is VIS-capable.
+sub unvis {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my ($ref,$opf);
+my %visopf = ( "faligndata" => 0x048,
+ "for" => 0x07c );
+
+ $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+ if ($opf=$visopf{$mnemonic}) {
+ foreach ($rs1,$rs2,$rd) {
+ return $ref if (!/%f([0-9]{1,2})/);
+ $_=$1;
+ if ($1>=32) {
+ return $ref if ($1&1);
+ # re-encode for upper double register addressing
+ $_=($1|$1>>5)&31;
+ }
+ }
+
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
+ $ref;
+ } else {
+ return $ref;
+ }
+}
+sub unalignaddr {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
+my $ref="$mnemonic\t$rs1,$rs2,$rd";
+
+ foreach ($rs1,$rs2,$rd) {
+ if (/%([goli])([0-7])/) { $_=$bias{$1}+$2; }
+ else { return $ref; }
+ }
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00300|$rd<<25|$rs1<<14|$rs2,
+ $ref;
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ s/\b(f[^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
+ &unvis($1,$2,$3,$4)
+ /ge;
+ s/\b(alignaddr)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
+ &unalignaddr($1,$2,$3,$4)
+ /ge;
+
+ print $_,"\n";
+}
+
close STDOUT;
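
A few spot checks make the encoders above concrete (a sketch assuming unvis() and unalignaddr() are in scope; the expected words were worked out by hand from the bit layout in the subroutines, so treat them as illustrative):

# Spot checks for the instruction encoders defined above.
print unvis("faligndata", "%f18", "%f20", "%f16"), "\n";
#   expect: .word 0xa1b48914 !faligndata %f18,%f20,%f16
print unvis("for", "%f50", "%f50", "%f18"), "\n";
#   %f50 >= 32 is re-encoded as (50|50>>5)&31 = 19 before packing;
#   expect: .word 0xa5b4cf93 !for %f50,%f50,%f18
print unalignaddr("alignaddr", "%o1", "%g0", "%o1"), "\n";
#   expect: .word 0x93b24300 !alignaddr %o1,%g0,%o1
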