| author | marha <marha@users.sourceforge.net> | 2015-02-22 14:43:31 +0100 |
|---|---|---|
| committer | marha <marha@users.sourceforge.net> | 2015-02-22 14:43:31 +0100 |
| commit | c9aad1ae6227c434d480d1d3aa8eae3c3c910c18 (patch) | |
| tree | 94b917df998c3d547e191b3b9c58bbffc616470e /openssl/crypto/sha/asm/sha512-armv4.pl | |
| parent | f1c2db43dcf35d2cf4715390bd2391c28e42a8c2 (diff) | |
| download | vcxsrv-c9aad1ae6227c434d480d1d3aa8eae3c3c910c18.tar.gz vcxsrv-c9aad1ae6227c434d480d1d3aa8eae3c3c910c18.tar.bz2 vcxsrv-c9aad1ae6227c434d480d1d3aa8eae3c3c910c18.zip | |
Upgraded to openssl-1.0.2
Diffstat (limited to 'openssl/crypto/sha/asm/sha512-armv4.pl')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | openssl/crypto/sha/asm/sha512-armv4.pl | 69 |

1 file changed, 48 insertions, 21 deletions
```diff
diff --git a/openssl/crypto/sha/asm/sha512-armv4.pl b/openssl/crypto/sha/asm/sha512-armv4.pl
index 7faf37b14..fb7dc506a 100644
--- a/openssl/crypto/sha/asm/sha512-armv4.pl
+++ b/openssl/crypto/sha/asm/sha512-armv4.pl
@@ -1,7 +1,7 @@
 #!/usr/bin/env perl
 
 # ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
@@ -26,7 +26,24 @@
 # March 2011.
 #
 # Add NEON implementation. On Cortex A8 it was measured to process
-# one byte in 25.5 cycles or 47% faster than integer-only code.
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is disappointing result.
+# Technical writers asserted that 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, and for NEON-only sequences IPC(*) was found to
+# be limited by 1:-( 0.33 and 0.66 were measured for sequences with
+# ILPs(*) of 1 and 2 respectively. This in turn means that you can
+# even find yourself striving, as I did here, for achieving IPC
+# adequate to one delivered by Cortex A8 [for reference, it's
+# 0.5 for ILP of 1, and 1 for higher ILPs].
+#
+# (*) ILP, instruction-level parallelism, how many instructions
+#     *can* execute at the same time. IPC, instructions per cycle,
+#     indicates how many instructions actually execute.
 
 # Byte order [in]dependence. =========================================
 #
@@ -220,16 +237,20 @@ WORD64(0x3c9ebe0a,0x15c9bebc,	0x431d67c4,0x9c100d4c)
 WORD64(0x4cc5d4be,0xcb3e42b6,	0x597f299c,0xfc657e2a)
 WORD64(0x5fcb6fab,0x3ad6faec,	0x6c44198c,0x4a475817)
 .size	K512,.-K512
+#if __ARM_MAX_ARCH__>=7
 .LOPENSSL_armcap:
 .word	OPENSSL_armcap_P-sha512_block_data_order
 .skip	32-4
+#else
+.skip	32
+#endif
 
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
 	sub	r3,pc,#8		@ sha512_block_data_order
 	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 	ldr	r12,.LOPENSSL_armcap
 	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
 	tst	r12,#1
@@ -457,40 +478,40 @@ $code.=<<___ if ($i<16 || $i&1);
 	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
 #endif
 	vshr.u64	$t1,$e,#@Sigma1[1]
+#if $i>0
+	vadd.i64	$a,$Maj			@ h+=Maj from the past
+#endif
 	vshr.u64	$t2,$e,#@Sigma1[2]
 ___
 $code.=<<___;
 	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
 	vsli.64		$t0,$e,#`64-@Sigma1[0]`
 	vsli.64		$t1,$e,#`64-@Sigma1[1]`
+	vmov		$Ch,$e
 	vsli.64		$t2,$e,#`64-@Sigma1[2]`
 #if $i<16 && defined(__ARMEL__)
 	vrev64.8	@X[$i],@X[$i]
 #endif
-	vadd.i64	$T1,$K,$h
-	veor		$Ch,$f,$g
-	veor		$t0,$t1
-	vand		$Ch,$e
-	veor		$t0,$t2			@ Sigma1(e)
-	veor		$Ch,$g			@ Ch(e,f,g)
-	vadd.i64	$T1,$t0
+	veor		$t1,$t0
+	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
 	vshr.u64	$t0,$a,#@Sigma0[0]
-	vadd.i64	$T1,$Ch
+	veor		$t2,$t1			@ Sigma1(e)
+	vadd.i64	$T1,$Ch,$h
 	vshr.u64	$t1,$a,#@Sigma0[1]
-	vshr.u64	$t2,$a,#@Sigma0[2]
 	vsli.64		$t0,$a,#`64-@Sigma0[0]`
+	vadd.i64	$T1,$t2
+	vshr.u64	$t2,$a,#@Sigma0[2]
+	vadd.i64	$K,@X[$i%16]
 	vsli.64		$t1,$a,#`64-@Sigma0[1]`
+	veor		$Maj,$a,$b
 	vsli.64		$t2,$a,#`64-@Sigma0[2]`
-	vadd.i64	$T1,@X[$i%16]
-	vorr		$Maj,$a,$c
-	vand		$Ch,$a,$c
 	veor		$h,$t0,$t1
-	vand		$Maj,$b
+	vadd.i64	$T1,$K
+	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
 	veor		$h,$t2			@ Sigma0(a)
-	vorr		$Maj,$Ch		@ Maj(a,b,c)
-	vadd.i64	$h,$T1
 	vadd.i64	$d,$T1
-	vadd.i64	$h,$Maj
+	vadd.i64	$Maj,$T1
+	@ vadd.i64	$h,$Maj
 ___
 }
 
@@ -508,6 +529,7 @@ $i /= 2;
 $code.=<<___;
 	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
 	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
+	vadd.i64	@_[0],d30			@ h+=Maj from the past
 	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
 	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
 	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
@@ -533,7 +555,8 @@ ___
 }
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
 .fpu	neon
 
 .align	4
@@ -554,6 +577,7 @@ for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
 	bne		.L16_79_neon
 
+	vadd.i64	$A,d30		@ h+=Maj from the past
 	vldmia		$ctx,{d24-d31}	@ load context to temp
 	vadd.i64	q8,q12		@ vectorized accumulate
 	vadd.i64	q9,q13
@@ -565,7 +589,7 @@ $code.=<<___;
 	bne		.Loop_neon
 
 	vldmia	sp!,{d8-d15}		@ epilogue
-	bx	lr
+	ret				@ bx lr
 #endif
 ___
 }
@@ -573,10 +597,13 @@ $code.=<<___;
 .size	sha512_block_data_order,.-sha512_block_data_order
 .asciz	"SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
 .align	2
+#if __ARM_MAX_ARCH__>=7
 .comm	OPENSSL_armcap_P,4,4
+#endif
 ___
 
 $code =~ s/\`([^\`]*)\`/eval $1/gem;
 $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx	lr/gm;
 print $code;
 close STDOUT;			# enforce flush
```
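The early hunks change the compile-time gate from `__ARM_ARCH__` to `__ARM_MAX_ARCH__` while keeping the run-time check: the prologue loads `OPENSSL_armcap_P` and tests bit 0 (`tst r12,#1`) before branching to the NEON body. Below is a minimal C sketch of that two-level dispatch, assuming OpenSSL's `ARMV7_NEON` flag value of `1<<0`; the function names and printed strings are hypothetical stand-ins for the real hashing paths.

```c
#include <stdio.h>

/* Stand-in for OPENSSL_armcap_P, normally filled at startup (e.g. from HWCAP). */
static unsigned int armcap_p = 0;
#define ARMV7_NEON (1u << 0)   /* bit 0, as tested by "tst r12,#1" in the diff */

/* Toy stand-ins for the two code paths; the real ones hash input blocks. */
static void blocks_scalar(void) { puts("integer-only path"); }
static void blocks_neon(void)   { puts("NEON path"); }

static void sha512_block_data_order_sketch(void)
{
    /* Compile-time gate first, run-time capability test second,
     * mirroring the #if/ldr/tst structure in the patched prologue. */
#if defined(__ARM_MAX_ARCH__) && __ARM_MAX_ARCH__ >= 7
    if (armcap_p & ARMV7_NEON) {
        blocks_neon();
        return;
    }
#endif
    blocks_scalar();
}

int main(void)
{
    sha512_block_data_order_sketch();  /* scalar: capability bit not set */
    armcap_p |= ARMV7_NEON;
    sha512_block_data_order_sketch();  /* NEON, if built for ARMv7 or later */
    return 0;
}
```

The split matters because the same generated file must still assemble for plain ARMv4 (hence the final `bx lr` to `.word 0xe12fff1e` substitution), while an ARMv7 build can pick the NEON body per CPU at run time.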
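The large middle hunk replaces the `veor`/`vand`/`vorr` sequences for the SHA-512 choice and majority functions with single `vbsl` (bitwise select) instructions: `vmov $Ch,$e; vbsl $Ch,$f,$g` for Ch(e,f,g) and `veor $Maj,$a,$b; vbsl $Maj,$c,$b` for Maj(a,b,c). Here is a small self-contained C check of the two bitwise identities the new code relies on; the helper names are mine, not OpenSSL's.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* SHA-512 Ch and Maj exactly as defined in FIPS 180-4. */
static uint64_t ch(uint64_t e, uint64_t f, uint64_t g)  { return (e & f) ^ (~e & g); }
static uint64_t maj(uint64_t a, uint64_t b, uint64_t c) { return (a & b) ^ (a & c) ^ (b & c); }

/* C model of NEON vbsl: per bit, pick x where mask is 1 and y where it is 0. */
static uint64_t bsl(uint64_t mask, uint64_t x, uint64_t y)
{
    return (mask & x) | (~mask & y);
}

int main(void)
{
    /* Arbitrary test words; the identities are bitwise, so any values work. */
    uint64_t e = 0x0123456789abcdefULL, f = 0xfedcba9876543210ULL,
             g = 0xdeadbeefcafef00dULL;

    /* "vmov Ch,e; vbsl Ch,f,g": where e is 1 take f, else take g. */
    assert(ch(e, f, g) == bsl(e, f, g));

    /* "veor Maj,a,b; vbsl Maj,c,b": where a and b agree the result is their
     * common bit; where they differ, c casts the deciding (majority) vote. */
    assert(maj(e, f, g) == bsl(e ^ f, g, f));

    puts("vbsl forms of Ch and Maj verified");
    return 0;
}
```

The related `vadd.i64 $Maj,$T1` / `@ vadd.i64 $h,$Maj` change defers the final `h += Maj` into the following round (the "h+=Maj from the past" additions), which helps hide NEON result latency on the in-order pipelines the August 2012 comment describes.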