Diffstat (limited to 'openssl/crypto/bn/asm')
31 files changed, 21488 insertions, 0 deletions
diff --git a/openssl/crypto/bn/asm/README b/openssl/crypto/bn/asm/README
new file mode 100644
index 000000000..b0f3a68a0
--- /dev/null
+++ b/openssl/crypto/bn/asm/README
@@ -0,0 +1,27 @@
+<OBSOLETE>
+
+All assembler files in this directory are just versions of the file
+crypto/bn/bn_asm.c.
+
+Quite a few of these files are just the assembler output from gcc, since on
+quite a few machines that output is about twice as fast as the system compiler's.
+
+For the x86, I have hand-written assembler because of the bad job all
+compilers seem to do on it.  This normally gives a twofold speed-up in
+the RSA routines.
+
+For the DEC Alpha, I also hand-wrote the assembler (except the division,
+which is just the output from the C compiler pasted onto the end of the
+file).  On the two Alpha C compilers I had access to, it was not possible
+to do 64b x 64b -> 128b calculations (both the long and the long long data
+types were 64 bits), so the hand-written assembler gives access to the
+128-bit result and a twofold speed-up :-).
+
+There are 3 versions of assembler for the HP PA-RISC.
+
+pa-risc.s is the original one, which works fine and was generated using gcc :-)
+
+pa-risc2W.s and pa-risc2.s are 64- and 32-bit PA-RISC 2.0 implementations
+by Chris Ruemmler from HP (with some help from the HP C compiler).
+
+</OBSOLETE>
diff --git a/openssl/crypto/bn/asm/alpha-mont.pl b/openssl/crypto/bn/asm/alpha-mont.pl
new file mode 100644
index 000000000..7a2cc3173
--- /dev/null
+++ b/openssl/crypto/bn/asm/alpha-mont.pl
@@ -0,0 +1,317 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# On 21264 RSA sign performance improves by 70/35/20/15 percent for
+# 512/1024/2048/4096 bit key lengths. This is against vendor compiler
+# instructed to '-tune host' code with in-line assembler. Other
+# benchmarks improve by 15-20%. To anchor it to something else, the
+# code provides approximately the same performance per GHz as AMD64.
+# I.e. if you compare 1GHz 21264 and 2GHz Opteron, you'll observe ~2x
+# difference.
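
For orientation, bn_mul_mont computes rp[] = ap[]*bp[]*R^(-1) mod np[], where R = 2^(64*num) and n0 holds -np[0]^(-1) mod 2^64. Below is a minimal C sketch of those semantics only, not the OpenSSL implementation; the helper name is illustrative, and it leans on a 64-bit BN_ULONG plus unsigned __int128, which is precisely what the hand-written mulq/umulh code avoids needing:

#include <stdint.h>

typedef uint64_t BN_ULONG;              /* 64-bit limb, as on Alpha */

/* Reference sketch only: rp = ap*bp*R^-1 mod np, R = 2^(64*num),
 * n0 = -np[0]^-1 mod 2^64 (the real routine receives n0 via pointer).
 * Returns 1; the real routine bails out with 0 when num < 4. */
static int mont_mul_sketch(BN_ULONG *rp, const BN_ULONG *ap,
                           const BN_ULONG *bp, const BN_ULONG *np,
                           BN_ULONG n0, int num)
{
    BN_ULONG tp[num + 2];               /* C99 VLA, sketch only */
    unsigned __int128 acc;
    int i, j;

    for (i = 0; i < num + 2; i++)
        tp[i] = 0;

    for (i = 0; i < num; i++) {
        acc = 0;                        /* tp += ap[] * bp[i] */
        for (j = 0; j < num; j++) {
            acc += (unsigned __int128)ap[j] * bp[i] + tp[j];
            tp[j] = (BN_ULONG)acc;
            acc >>= 64;
        }
        acc += tp[num];
        tp[num]     = (BN_ULONG)acc;
        tp[num + 1] = (BN_ULONG)(acc >> 64);

        BN_ULONG m = tp[0] * n0;        /* tp += np[] * m, zeroing tp[0] */
        acc = 0;
        for (j = 0; j < num; j++) {
            acc += (unsigned __int128)np[j] * m + tp[j];
            tp[j] = (BN_ULONG)acc;
            acc >>= 64;
        }
        acc += tp[num];
        tp[num]      = (BN_ULONG)acc;
        tp[num + 1] += (BN_ULONG)(acc >> 64);

        for (j = 0; j <= num; j++)      /* tp >>= 64 (drop the zero word) */
            tp[j] = tp[j + 1];
        tp[num + 1] = 0;
    }

    /* Conditional final subtraction, as in the .Lsub/.Lcopy code below. */
    BN_ULONG sub[num];
    BN_ULONG borrow = 0;
    for (j = 0; j < num; j++) {
        sub[j] = tp[j] - np[j] - borrow;
        borrow = (tp[j] < np[j]) || (tp[j] == np[j] && borrow);
    }
    int ge = (tp[num] != 0) || (borrow == 0);   /* tp >= np ? */
    for (j = 0; j < num; j++)
        rp[j] = ge ? sub[j] : tp[j];
    return 1;
}

The assembler below carries out the same recurrence with mulq/umulh pairs and explicit cmpult carries, and its .Lsub/.Lcopy tail is the conditional subtraction from the last loop above.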
+ +# int bn_mul_mont( +$rp="a0"; # BN_ULONG *rp, +$ap="a1"; # const BN_ULONG *ap, +$bp="a2"; # const BN_ULONG *bp, +$np="a3"; # const BN_ULONG *np, +$n0="a4"; # const BN_ULONG *n0, +$num="a5"; # int num); + +$lo0="t0"; +$hi0="t1"; +$lo1="t2"; +$hi1="t3"; +$aj="t4"; +$bi="t5"; +$nj="t6"; +$tp="t7"; +$alo="t8"; +$ahi="t9"; +$nlo="t10"; +$nhi="t11"; +$tj="t12"; +$i="s3"; +$j="s4"; +$m1="s5"; + +$code=<<___; +#include <asm.h> +#include <regdef.h> + +.text + +.set noat +.set noreorder + +.globl bn_mul_mont +.align 5 +.ent bn_mul_mont +bn_mul_mont: + lda sp,-40(sp) + stq ra,0(sp) + stq s3,8(sp) + stq s4,16(sp) + stq s5,24(sp) + stq fp,32(sp) + mov sp,fp + .mask 0x0400f000,-40 + .frame fp,40,ra + .prologue 0 + + .align 4 + .set reorder + sextl $num,$num + mov 0,v0 + cmplt $num,4,AT + bne AT,.Lexit + + ldq $hi0,0($ap) # ap[0] + s8addq $num,16,AT + ldq $aj,8($ap) + subq sp,AT,sp + ldq $bi,0($bp) # bp[0] + mov -4096,AT + ldq $n0,0($n0) + and sp,AT,sp + + mulq $hi0,$bi,$lo0 + ldq $hi1,0($np) # np[0] + umulh $hi0,$bi,$hi0 + ldq $nj,8($np) + + mulq $lo0,$n0,$m1 + + mulq $hi1,$m1,$lo1 + umulh $hi1,$m1,$hi1 + + addq $lo1,$lo0,$lo1 + cmpult $lo1,$lo0,AT + addq $hi1,AT,$hi1 + + mulq $aj,$bi,$alo + mov 2,$j + umulh $aj,$bi,$ahi + mov sp,$tp + + mulq $nj,$m1,$nlo + s8addq $j,$ap,$aj + umulh $nj,$m1,$nhi + s8addq $j,$np,$nj +.align 4 +.L1st: + .set noreorder + ldq $aj,($aj) + addl $j,1,$j + ldq $nj,($nj) + lda $tp,8($tp) + + addq $alo,$hi0,$lo0 + mulq $aj,$bi,$alo + cmpult $lo0,$hi0,AT + addq $nlo,$hi1,$lo1 + + mulq $nj,$m1,$nlo + addq $ahi,AT,$hi0 + cmpult $lo1,$hi1,v0 + cmplt $j,$num,$tj + + umulh $aj,$bi,$ahi + addq $nhi,v0,$hi1 + addq $lo1,$lo0,$lo1 + s8addq $j,$ap,$aj + + umulh $nj,$m1,$nhi + cmpult $lo1,$lo0,v0 + addq $hi1,v0,$hi1 + s8addq $j,$np,$nj + + stq $lo1,-8($tp) + nop + unop + bne $tj,.L1st + .set reorder + + addq $alo,$hi0,$lo0 + addq $nlo,$hi1,$lo1 + cmpult $lo0,$hi0,AT + cmpult $lo1,$hi1,v0 + addq $ahi,AT,$hi0 + addq $nhi,v0,$hi1 + + addq $lo1,$lo0,$lo1 + cmpult $lo1,$lo0,v0 + addq $hi1,v0,$hi1 + + stq $lo1,0($tp) + + addq $hi1,$hi0,$hi1 + cmpult $hi1,$hi0,AT + stq $hi1,8($tp) + stq AT,16($tp) + + mov 1,$i +.align 4 +.Louter: + s8addq $i,$bp,$bi + ldq $hi0,($ap) + ldq $aj,8($ap) + ldq $bi,($bi) + ldq $hi1,($np) + ldq $nj,8($np) + ldq $tj,(sp) + + mulq $hi0,$bi,$lo0 + umulh $hi0,$bi,$hi0 + + addq $lo0,$tj,$lo0 + cmpult $lo0,$tj,AT + addq $hi0,AT,$hi0 + + mulq $lo0,$n0,$m1 + + mulq $hi1,$m1,$lo1 + umulh $hi1,$m1,$hi1 + + addq $lo1,$lo0,$lo1 + cmpult $lo1,$lo0,AT + mov 2,$j + addq $hi1,AT,$hi1 + + mulq $aj,$bi,$alo + mov sp,$tp + umulh $aj,$bi,$ahi + + mulq $nj,$m1,$nlo + s8addq $j,$ap,$aj + umulh $nj,$m1,$nhi +.align 4 +.Linner: + .set noreorder + ldq $tj,8($tp) #L0 + nop #U1 + ldq $aj,($aj) #L1 + s8addq $j,$np,$nj #U0 + + ldq $nj,($nj) #L0 + nop #U1 + addq $alo,$hi0,$lo0 #L1 + lda $tp,8($tp) + + mulq $aj,$bi,$alo #U1 + cmpult $lo0,$hi0,AT #L0 + addq $nlo,$hi1,$lo1 #L1 + addl $j,1,$j + + mulq $nj,$m1,$nlo #U1 + addq $ahi,AT,$hi0 #L0 + addq $lo0,$tj,$lo0 #L1 + cmpult $lo1,$hi1,v0 #U0 + + umulh $aj,$bi,$ahi #U1 + cmpult $lo0,$tj,AT #L0 + addq $lo1,$lo0,$lo1 #L1 + addq $nhi,v0,$hi1 #U0 + + umulh $nj,$m1,$nhi #U1 + s8addq $j,$ap,$aj #L0 + cmpult $lo1,$lo0,v0 #L1 + cmplt $j,$num,$tj #U0 # borrow $tj + + addq $hi0,AT,$hi0 #L0 + addq $hi1,v0,$hi1 #U1 + stq $lo1,-8($tp) #L1 + bne $tj,.Linner #U0 + .set reorder + + ldq $tj,8($tp) + addq $alo,$hi0,$lo0 + addq $nlo,$hi1,$lo1 + cmpult $lo0,$hi0,AT + cmpult $lo1,$hi1,v0 + addq $ahi,AT,$hi0 + addq $nhi,v0,$hi1 + + addq $lo0,$tj,$lo0 + cmpult $lo0,$tj,AT + 
addq $hi0,AT,$hi0 + + ldq $tj,16($tp) + addq $lo1,$lo0,$j + cmpult $j,$lo0,v0 + addq $hi1,v0,$hi1 + + addq $hi1,$hi0,$lo1 + stq $j,($tp) + cmpult $lo1,$hi0,$hi1 + addq $lo1,$tj,$lo1 + cmpult $lo1,$tj,AT + addl $i,1,$i + addq $hi1,AT,$hi1 + stq $lo1,8($tp) + cmplt $i,$num,$tj # borrow $tj + stq $hi1,16($tp) + bne $tj,.Louter + + s8addq $num,sp,$tj # &tp[num] + mov $rp,$bp # put rp aside + mov sp,$tp + mov sp,$ap + mov 0,$hi0 # clear borrow bit + +.align 4 +.Lsub: ldq $lo0,($tp) + ldq $lo1,($np) + lda $tp,8($tp) + lda $np,8($np) + subq $lo0,$lo1,$lo1 # tp[i]-np[i] + cmpult $lo0,$lo1,AT + subq $lo1,$hi0,$lo0 + cmpult $lo1,$lo0,$hi0 + or $hi0,AT,$hi0 + stq $lo0,($rp) + cmpult $tp,$tj,v0 + lda $rp,8($rp) + bne v0,.Lsub + + subq $hi1,$hi0,$hi0 # handle upmost overflow bit + mov sp,$tp + mov $bp,$rp # restore rp + + and sp,$hi0,$ap + bic $bp,$hi0,$bp + bis $bp,$ap,$ap # ap=borrow?tp:rp + +.align 4 +.Lcopy: ldq $aj,($ap) # copy or in-place refresh + lda $tp,8($tp) + lda $rp,8($rp) + lda $ap,8($ap) + stq zero,-8($tp) # zap tp + cmpult $tp,$tj,AT + stq $aj,-8($rp) + bne AT,.Lcopy + mov 1,v0 + +.Lexit: + .set noreorder + mov fp,sp + /*ldq ra,0(sp)*/ + ldq s3,8(sp) + ldq s4,16(sp) + ldq s5,24(sp) + ldq fp,32(sp) + lda sp,40(sp) + ret (ra) +.end bn_mul_mont +.rdata +.asciiz "Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>" +___ + +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/armv4-mont.pl b/openssl/crypto/bn/asm/armv4-mont.pl new file mode 100644 index 000000000..05d5dc1a4 --- /dev/null +++ b/openssl/crypto/bn/asm/armv4-mont.pl @@ -0,0 +1,200 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# January 2007. + +# Montgomery multiplication for ARMv4. +# +# Performance improvement naturally varies among CPU implementations +# and compilers. The code was observed to provide +65-35% improvement +# [depending on key length, less for longer keys] on ARM920T, and +# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code +# base and compiler generated code with in-lined umull and even umlal +# instructions. The latter means that this code didn't really have an +# "advantage" of utilizing some "secret" instruction. +# +# The code is interoperable with Thumb ISA and is rather compact, less +# than 1/2KB. Windows CE port would be trivial, as it's exclusively +# about decorations, ABI and instruction syntax are identical. + +$num="r0"; # starts as num argument, but holds &tp[num-1] +$ap="r1"; +$bp="r2"; $bi="r2"; $rp="r2"; +$np="r3"; +$tp="r4"; +$aj="r5"; +$nj="r6"; +$tj="r7"; +$n0="r8"; +########### # r9 is reserved by ELF as platform specific, e.g. TLS pointer +$alo="r10"; # sl, gcc uses it to keep @GOT +$ahi="r11"; # fp +$nlo="r12"; # ip +########### # r13 is stack pointer +$nhi="r14"; # lr +########### # r15 is program counter + +#### argument block layout relative to &tp[num-1], a.k.a. 
$num +$_rp="$num,#12*4"; +# ap permanently resides in r1 +$_bp="$num,#13*4"; +# np permanently resides in r3 +$_n0="$num,#14*4"; +$_num="$num,#15*4"; $_bpend=$_num; + +$code=<<___; +.text + +.global bn_mul_mont +.type bn_mul_mont,%function + +.align 2 +bn_mul_mont: + stmdb sp!,{r0,r2} @ sp points at argument block + ldr $num,[sp,#3*4] @ load num + cmp $num,#2 + movlt r0,#0 + addlt sp,sp,#2*4 + blt .Labrt + + stmdb sp!,{r4-r12,lr} @ save 10 registers + + mov $num,$num,lsl#2 @ rescale $num for byte count + sub sp,sp,$num @ alloca(4*num) + sub sp,sp,#4 @ +extra dword + sub $num,$num,#4 @ "num=num-1" + add $tp,$bp,$num @ &bp[num-1] + + add $num,sp,$num @ $num to point at &tp[num-1] + ldr $n0,[$_n0] @ &n0 + ldr $bi,[$bp] @ bp[0] + ldr $aj,[$ap],#4 @ ap[0],ap++ + ldr $nj,[$np],#4 @ np[0],np++ + ldr $n0,[$n0] @ *n0 + str $tp,[$_bpend] @ save &bp[num] + + umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0] + str $n0,[$_n0] @ save n0 value + mul $n0,$alo,$n0 @ "tp[0]"*n0 + mov $nlo,#0 + umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]" + mov $tp,sp + +.L1st: + ldr $aj,[$ap],#4 @ ap[j],ap++ + mov $alo,$ahi + mov $ahi,#0 + umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0] + ldr $nj,[$np],#4 @ np[j],np++ + mov $nhi,#0 + umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0 + adds $nlo,$nlo,$alo + str $nlo,[$tp],#4 @ tp[j-1]=,tp++ + adc $nlo,$nhi,#0 + cmp $tp,$num + bne .L1st + + adds $nlo,$nlo,$ahi + mov $nhi,#0 + adc $nhi,$nhi,#0 + ldr $tp,[$_bp] @ restore bp + str $nlo,[$num] @ tp[num-1]= + ldr $n0,[$_n0] @ restore n0 + str $nhi,[$num,#4] @ tp[num]= + +.Louter: + sub $tj,$num,sp @ "original" $num-1 value + sub $ap,$ap,$tj @ "rewind" ap to &ap[1] + sub $np,$np,$tj @ "rewind" np to &np[1] + ldr $bi,[$tp,#4]! @ *(++bp) + ldr $aj,[$ap,#-4] @ ap[0] + ldr $nj,[$np,#-4] @ np[0] + ldr $alo,[sp] @ tp[0] + ldr $tj,[sp,#4] @ tp[1] + + mov $ahi,#0 + umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0] + str $tp,[$_bp] @ save bp + mul $n0,$alo,$n0 + mov $nlo,#0 + umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]" + mov $tp,sp + +.Linner: + ldr $aj,[$ap],#4 @ ap[j],ap++ + adds $alo,$ahi,$tj @ +=tp[j] + mov $ahi,#0 + umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i] + ldr $nj,[$np],#4 @ np[j],np++ + mov $nhi,#0 + umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0 + ldr $tj,[$tp,#8] @ tp[j+1] + adc $ahi,$ahi,#0 + adds $nlo,$nlo,$alo + str $nlo,[$tp],#4 @ tp[j-1]=,tp++ + adc $nlo,$nhi,#0 + cmp $tp,$num + bne .Linner + + adds $nlo,$nlo,$ahi + mov $nhi,#0 + adc $nhi,$nhi,#0 + adds $nlo,$nlo,$tj + adc $nhi,$nhi,#0 + ldr $tp,[$_bp] @ restore bp + ldr $tj,[$_bpend] @ restore &bp[num] + str $nlo,[$num] @ tp[num-1]= + ldr $n0,[$_n0] @ restore n0 + str $nhi,[$num,#4] @ tp[num]= + + cmp $tp,$tj + bne .Louter + + ldr $rp,[$_rp] @ pull rp + add $num,$num,#4 @ $num to point at &tp[num] + sub $aj,$num,sp @ "original" num value + mov $tp,sp @ "rewind" $tp + mov $ap,$tp @ "borrow" $ap + sub $np,$np,$aj @ "rewind" $np to &np[0] + + subs $tj,$tj,$tj @ "clear" carry flag +.Lsub: ldr $tj,[$tp],#4 + ldr $nj,[$np],#4 + sbcs $tj,$tj,$nj @ tp[j]-np[j] + str $tj,[$rp],#4 @ rp[j]= + teq $tp,$num @ preserve carry + bne .Lsub + sbcs $nhi,$nhi,#0 @ upmost carry + mov $tp,sp @ "rewind" $tp + sub $rp,$rp,$aj @ "rewind" $rp + + and $ap,$tp,$nhi + bic $np,$rp,$nhi + orr $ap,$ap,$np @ ap=borrow?tp:rp + +.Lcopy: ldr $tj,[$ap],#4 @ copy or in-place refresh + str sp,[$tp],#4 @ zap tp + str $tj,[$rp],#4 + cmp $tp,$num + bne .Lcopy + + add sp,$num,#4 @ skip over tp[num+1] + ldmia sp!,{r4-r12,lr} @ restore registers + add sp,sp,#2*4 @ skip over {r0,r2} + mov r0,#1 +.Labrt: tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ 
interoperable with Thumb ISA:-) +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>" +___ + +$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/bn-586.pl b/openssl/crypto/bn/asm/bn-586.pl new file mode 100644 index 000000000..26c2685a7 --- /dev/null +++ b/openssl/crypto/bn/asm/bn-586.pl @@ -0,0 +1,675 @@ +#!/usr/local/bin/perl + +push(@INC,"perlasm","../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +$sse2=0; +for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } + +&external_label("OPENSSL_ia32cap_P") if ($sse2); + +&bn_mul_add_words("bn_mul_add_words"); +&bn_mul_words("bn_mul_words"); +&bn_sqr_words("bn_sqr_words"); +&bn_div_words("bn_div_words"); +&bn_add_words("bn_add_words"); +&bn_sub_words("bn_sub_words"); +&bn_sub_part_words("bn_sub_part_words"); + +&asm_finish(); + +sub bn_mul_add_words + { + local($name)=@_; + + &function_begin($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":""); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ebp"; + $r="edi"; + $c="esi"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + + &mov("ecx",&wparam(2)); # + &mov($a,&wparam(1)); # + + &and("ecx",0xfffffff8); # num / 8 + &mov($w,&wparam(3)); # + + &push("ecx"); # Up the stack for a tmp variable + + &jz(&label("maw_finish")); + + if ($sse2) { + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt(&DWP(0,"eax"),26); + &jnc(&label("maw_loop")); + + &movd("mm0",$w); # mm0 = w + &pxor("mm1","mm1"); # mm1 = carry_in + + &set_label("maw_sse2_loop",0); + &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0] + &paddq("mm1","mm3"); # mm1 = carry_in + r[0] + &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0] + &pmuludq("mm2","mm0"); # mm2 = w*a[0] + &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1] + &pmuludq("mm4","mm0"); # mm4 = w*a[1] + &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2] + &pmuludq("mm6","mm0"); # mm6 = w*a[2] + &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3] + &pmuludq("mm7","mm0"); # mm7 = w*a[3] + &paddq("mm1","mm2"); # mm1 = carry_in + r[0] + w*a[0] + &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1] + &paddq("mm3","mm4"); # mm3 = r[1] + w*a[1] + &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2] + &paddq("mm5","mm6"); # mm5 = r[2] + w*a[2] + &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3] + &paddq("mm7","mm4"); # mm7 = r[3] + w*a[3] + &movd(&DWP(0,$r,"",0),"mm1"); + &movd("mm2",&DWP(16,$a,"",0)); # mm2 = a[4] + &pmuludq("mm2","mm0"); # mm2 = w*a[4] + &psrlq("mm1",32); # mm1 = carry0 + &movd("mm4",&DWP(20,$a,"",0)); # mm4 = a[5] + &pmuludq("mm4","mm0"); # mm4 = w*a[5] + &paddq("mm1","mm3"); # mm1 = carry0 + r[1] + w*a[1] + &movd("mm6",&DWP(24,$a,"",0)); # mm6 = a[6] + &pmuludq("mm6","mm0"); # mm6 = w*a[6] + &movd(&DWP(4,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry1 + &movd("mm3",&DWP(28,$a,"",0)); # mm3 = a[7] + &add($a,32); + &pmuludq("mm3","mm0"); # mm3 = w*a[7] + &paddq("mm1","mm5"); # mm1 = carry1 + r[2] + w*a[2] + &movd("mm5",&DWP(16,$r,"",0)); # mm5 = r[4] + &paddq("mm2","mm5"); # mm2 = r[4] + w*a[4] + &movd(&DWP(8,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry2 + &paddq("mm1","mm7"); # mm1 = carry2 + r[3] + w*a[3] + &movd("mm5",&DWP(20,$r,"",0)); # mm5 = r[5] + &paddq("mm4","mm5"); # mm4 = r[5] + w*a[5] + &movd(&DWP(12,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry3 + &paddq("mm1","mm2"); # mm1 = carry3 + r[4] + w*a[4] + &movd("mm5",&DWP(24,$r,"",0)); # mm5 = r[6] + &paddq("mm6","mm5"); # mm6 = r[6] + w*a[6] + 
&movd(&DWP(16,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry4 + &paddq("mm1","mm4"); # mm1 = carry4 + r[5] + w*a[5] + &movd("mm5",&DWP(28,$r,"",0)); # mm5 = r[7] + &paddq("mm3","mm5"); # mm3 = r[7] + w*a[7] + &movd(&DWP(20,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry5 + &paddq("mm1","mm6"); # mm1 = carry5 + r[6] + w*a[6] + &movd(&DWP(24,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry6 + &paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7] + &movd(&DWP(28,$r,"",0),"mm1"); + &add($r,32); + &psrlq("mm1",32); # mm1 = carry_out + + &sub("ecx",8); + &jnz(&label("maw_sse2_loop")); + + &movd($c,"mm1"); # c = carry_out + &emms(); + + &jmp(&label("maw_finish")); + } + + &set_label("maw_loop",0); + + &mov(&swtmp(0),"ecx"); # + + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+= *r + &mov($c,&DWP($i,$r,"",0)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &add("eax",$c); # L(t)+=c + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &mov("ecx",&swtmp(0)); # + &add($a,32); + &add($r,32); + &sub("ecx",8); + &jnz(&label("maw_loop")); + + &set_label("maw_finish",0); + &mov("ecx",&wparam(2)); # get num + &and("ecx",7); + &jnz(&label("maw_finish2")); # helps branch prediction + &jmp(&label("maw_end")); + + &set_label("maw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + &mov($c,&DWP($i*4,$r,"",0)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &add("eax",$c); + &adc("edx",0); # H(t)+=carry + &dec("ecx") if ($i != 7-1); + &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + &jz(&label("maw_end")) if ($i != 7-1); + } + &set_label("maw_end",0); + &mov("eax",$c); + + &pop("ecx"); # clear variable from + + &function_end($name); + } + +sub bn_mul_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ecx"; + $r="edi"; + $c="esi"; + $num="ebp"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + &mov($w,&wparam(3)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("mw_finish")); + + &set_label("mw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jz(&label("mw_finish")); + &jmp(&label("mw_loop")); + + &set_label("mw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jnz(&label("mw_finish2")); + &jmp(&label("mw_end")); + + &set_label("mw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t); + &mov($c,"edx"); # c= H(t); + &dec($num) if ($i != 7-1); + &jz(&label("mw_end")) if ($i != 7-1); + } + &set_label("mw_end",0); + &mov("eax",$c); + + &function_end($name); + } + +sub bn_sqr_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $r="esi"; + $a="edi"; + $num="ebx"; + + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + + &and($num,0xfffffff8); # num 
/ 8 + &jz(&label("sw_finish")); + + &set_label("sw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + &mov("eax",&DWP($i,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*2,$r,"",0),"eax"); # + &mov(&DWP($i*2+4,$r,"",0),"edx");# + } + + &comment(""); + &add($a,32); + &add($r,64); + &sub($num,8); + &jnz(&label("sw_loop")); + + &set_label("sw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jz(&label("sw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*8,$r,"",0),"eax"); # + &dec($num) if ($i != 7-1); + &mov(&DWP($i*8+4,$r,"",0),"edx"); + &jz(&label("sw_end")) if ($i != 7-1); + } + &set_label("sw_end",0); + + &function_end($name); + } + +sub bn_div_words + { + local($name)=@_; + + &function_begin($name,""); + &mov("edx",&wparam(0)); # + &mov("eax",&wparam(1)); # + &mov("ebx",&wparam(2)); # + &div("ebx"); + &function_end($name); + } + +sub bn_add_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +sub bn_sub_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jz(&label("aw_end")) if ($i != 6); + 
} + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +sub bn_sub_part_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP(0,$a,"",0)); # *a + &mov($tmp2,&DWP(0,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP(0,$r,"",0),$tmp1); # *r + &add($a, 4); + &add($b, 4); + &add($r, 4); + &dec($num) if ($i != 6); + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + + &cmp(&wparam(4),0); + &je(&label("pw_end")); + + &mov($num,&wparam(4)); # get dl + &cmp($num,0); + &je(&label("pw_end")); + &jge(&label("pw_pos")); + + &comment("pw_neg"); + &mov($tmp2,0); + &sub($tmp2,$num); + &mov($num,$tmp2); + &and($num,0xfffffff8); # num / 8 + &jz(&label("pw_neg_finish")); + + &set_label("pw_neg_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("dl<0 Round $i"); + + &mov($tmp1,0); + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("pw_neg_loop")); + + &set_label("pw_neg_finish",0); + &mov($tmp2,&wparam(4)); # get dl + &mov($num,0); + &sub($num,$tmp2); + &and($num,7); + &jz(&label("pw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("dl<0 Tail Round $i"); + &mov($tmp1,0); + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jz(&label("pw_end")) if ($i != 6); + } + + &jmp(&label("pw_end")); + + &set_label("pw_pos",0); + + &and($num,0xfffffff8); # num / 8 + &jz(&label("pw_pos_finish")); + + &set_label("pw_pos_loop",0); + + for ($i=0; $i<8; $i++) + { + &comment("dl>0 Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &sub($tmp1,$c); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jnc(&label("pw_nc".$i)); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jnz(&label("pw_pos_loop")); + + &set_label("pw_pos_finish",0); + &mov($num,&wparam(4)); # get dl + &and($num,7); + &jz(&label("pw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("dl>0 Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &sub($tmp1,$c); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jnc(&label("pw_tail_nc".$i)); + &dec($num) if ($i != 6); + &jz(&label("pw_end")) if ($i != 6); + } + &mov($c,1); + &jmp(&label("pw_end")); + + &set_label("pw_nc_loop",0); + for ($i=0; $i<8; $i++) + { + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &set_label("pw_nc".$i,0); + } + + 
&comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jnz(&label("pw_nc_loop")); + + &mov($num,&wparam(4)); # get dl + &and($num,7); + &jz(&label("pw_nc_end")); + + for ($i=0; $i<7; $i++) + { + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &set_label("pw_tail_nc".$i,0); + &dec($num) if ($i != 6); + &jz(&label("pw_nc_end")) if ($i != 6); + } + + &set_label("pw_nc_end",0); + &mov($c,0); + + &set_label("pw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + diff --git a/openssl/crypto/bn/asm/co-586.pl b/openssl/crypto/bn/asm/co-586.pl new file mode 100644 index 000000000..5d962cb95 --- /dev/null +++ b/openssl/crypto/bn/asm/co-586.pl @@ -0,0 +1,286 @@ +#!/usr/local/bin/perl + +push(@INC,"perlasm","../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +&bn_mul_comba("bn_mul_comba8",8); +&bn_mul_comba("bn_mul_comba4",4); +&bn_sqr_comba("bn_sqr_comba8",8); +&bn_sqr_comba("bn_sqr_comba4",4); + +&asm_finish(); + +sub mul_add_c + { + local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("mul a[$ai]*b[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + &mul("edx"); + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # laod next a + &mov("eax",&wparam(0)) if $pos > 0; # load r[] + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0; # laod next b + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1; # laod next b + ### + &adc($c2,0); + # is pos > 1, it means it is the last loop + &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # laod next a + } + +sub sqr_add_c + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb); + ### + &adc($c2,0); + # is pos > 1, it means it is the last loop + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + } + +sub sqr_add_c2 + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. 
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$a,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add("eax","eax"); + ### + &adc("edx","edx"); + ### + &adc($c2,0); + &add($c0,"eax"); + &adc($c1,"edx"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + &adc($c2,0); + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb); + ### + } + +sub bn_mul_comba + { + local($name,$num)=@_; + local($a,$b,$c0,$c1,$c2); + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($tot,$end); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $b="edi"; + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + &push("esi"); + &mov($a,&wparam(1)); + &push("edi"); + &mov($b,&wparam(2)); + &push("ebp"); + &push("ebx"); + + &xor($c0,$c0); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + &xor($c1,$c1); + &mov("edx",&DWP(0,$b,"",0)); # load the first second + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("################## Calculate word $i"); + + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($j+1) == $end) + { + $v=1; + $v=2 if (($i+1) == $tot); + } + else + { $v=0; } + if (($j+1) != $end) + { + $na=($ai-1); + $nb=($bi+1); + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } +#printf STDERR "[$ai,$bi] -> [$na,$nb]\n"; + &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb); + if ($v) + { + &comment("saved r[$i]"); + # &mov("eax",&wparam(0)); + # &mov(&DWP($i*4,"eax","",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &comment("save r[$i]"); + # &mov("eax",&wparam(0)); + &mov(&DWP($i*4,"eax","",0),$c0); + + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + +sub bn_sqr_comba + { + local($name,$num)=@_; + local($r,$a,$c0,$c1,$c2)=@_; + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($b,$tot,$end,$half); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $r="edi"; + + &push("esi"); + &push("edi"); + &push("ebp"); + &push("ebx"); + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &xor($c0,$c0); + &xor($c1,$c1); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("############### Calculate word $i"); + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($ai-1) < ($bi+1)) + { + $v=1; + $v=2 if ($i+1) == $tot; + } + else + { $v=0; } + if (!$v) + { + $na=$ai-1; + $nb=$bi+1; + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } + if ($ai == $bi) + { + &sqr_add_c($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + else + { + &sqr_add_c2($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + if ($v) + { + &comment("saved r[$i]"); + #&mov(&DWP($i*4,$r,"",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + last; + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &mov(&DWP($i*4,$r,"",0),$c0); + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } diff --git a/openssl/crypto/bn/asm/ia64.S 
b/openssl/crypto/bn/asm/ia64.S new file mode 100644 index 000000000..951abc53e --- /dev/null +++ b/openssl/crypto/bn/asm/ia64.S @@ -0,0 +1,1555 @@ +.explicit +.text +.ident "ia64.S, Version 2.1" +.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>" + +// +// ==================================================================== +// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +// project. +// +// Rights for redistribution and usage in source and binary forms are +// granted according to the OpenSSL license. Warranty of any kind is +// disclaimed. +// ==================================================================== +// +// Version 2.x is Itanium2 re-tune. Few words about how Itanum2 is +// different from Itanium to this module viewpoint. Most notably, is it +// "wider" than Itanium? Can you experience loop scalability as +// discussed in commentary sections? Not really:-( Itanium2 has 6 +// integer ALU ports, i.e. it's 2 ports wider, but it's not enough to +// spin twice as fast, as I need 8 IALU ports. Amount of floating point +// ports is the same, i.e. 2, while I need 4. In other words, to this +// module Itanium2 remains effectively as "wide" as Itanium. Yet it's +// essentially different in respect to this module, and a re-tune was +// required. Well, because some intruction latencies has changed. Most +// noticeably those intensively used: +// +// Itanium Itanium2 +// ldf8 9 6 L2 hit +// ld8 2 1 L1 hit +// getf 2 5 +// xma[->getf] 7[+1] 4[+0] +// add[->st8] 1[+1] 1[+0] +// +// What does it mean? You might ratiocinate that the original code +// should run just faster... Because sum of latencies is smaller... +// Wrong! Note that getf latency increased. This means that if a loop is +// scheduled for lower latency (as they were), then it will suffer from +// stall condition and the code will therefore turn anti-scalable, e.g. +// original bn_mul_words spun at 5*n or 2.5 times slower than expected +// on Itanium2! What to do? Reschedule loops for Itanium2? But then +// Itanium would exhibit anti-scalability. So I've chosen to reschedule +// for worst latency for every instruction aiming for best *all-round* +// performance. + +// Q. How much faster does it get? +// A. Here is the output from 'openssl speed rsa dsa' for vanilla +// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat +// Linux 7.1 2.96-81): +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2 +// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1 +// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9 +// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1 +// sign verify sign/s verify/s +// dsa 512 bits 0.0035s 0.0043s 288.3 234.8 +// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2 +// +// And here is similar output but for this assembler +// implementation:-) +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5 +// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1 +// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3 +// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5 +// sign verify sign/s verify/s +// dsa 512 bits 0.0012s 0.0013s 891.9 756.6 +// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2 +// +// Yes, you may argue that it's not fair comparison as it's +// possible to craft the C implementation with BN_UMULT_HIGH +// inline assembler macro. But of course! 
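
(For reference, BN_UMULT_HIGH merely yields the upper half of a BN_ULONG-by-BN_ULONG product. A minimal sketch of that semantics, assuming a 64-bit BN_ULONG and unsigned __int128; the real macro expands to platform-specific inline assembler or a builtin:)

#include <stdint.h>

typedef uint64_t BN_ULONG;

/* Sketch of what BN_UMULT_HIGH provides: the high 64 bits of a*b.
 * On IA-64 the analogous hardware operation is xma.hu / xmpy.hu. */
static inline BN_ULONG umult_high(BN_ULONG a, BN_ULONG b)
{
    return (BN_ULONG)(((unsigned __int128)a * b) >> 64);
}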
Here is the output +// with the macro: +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0 +// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7 +// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3 +// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7 +// sign verify sign/s verify/s +// dsa 512 bits 0.0016s 0.0020s 613.1 510.5 +// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9 +// +// My code is still way faster, huh:-) And I believe that even +// higher performance can be achieved. Note that as keys get +// longer, performance gain is larger. Why? According to the +// profiler there is another player in the field, namely +// BN_from_montgomery consuming larger and larger portion of CPU +// time as keysize decreases. I therefore consider putting effort +// to assembler implementation of the following routine: +// +// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0) +// { +// int i,j; +// BN_ULONG v; +// +// for (i=0; i<nl; i++) +// { +// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2); +// nrp++; +// rp++; +// if (((nrp[-1]+=v)&BN_MASK2) < v) +// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ; +// } +// } +// +// It might as well be beneficial to implement even combaX +// variants, as it appears as it can literally unleash the +// performance (see comment section to bn_mul_comba8 below). +// +// And finally for your reference the output for 0.9.6a compiled +// with SGIcc version 0.01.0-12 (keep in mind that for the moment +// of this writing it's not possible to convince SGIcc to use +// BN_UMULT_HIGH inline assembler macro, yet the code is fast, +// i.e. for a compiler generated one:-): +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3 +// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9 +// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2 +// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5 +// sign verify sign/s verify/s +// dsa 512 bits 0.0018s 0.0022s 547.3 459.6 +// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3 +// +// Oh! Benchmarks were performed on 733MHz Lion-class Itanium +// system running Redhat Linux 7.1 (very special thanks to Ray +// McCaffity of Williams Communications for providing an account). +// +// Q. What's the heck with 'rum 1<<5' at the end of every function? +// A. Well, by clearing the "upper FP registers written" bit of the +// User Mask I want to excuse the kernel from preserving upper +// (f32-f128) FP register bank over process context switch, thus +// minimizing bus bandwidth consumption during the switch (i.e. +// after PKI opration completes and the program is off doing +// something else like bulk symmetric encryption). Having said +// this, I also want to point out that it might be good idea +// to compile the whole toolkit (as well as majority of the +// programs for that matter) with -mfixed-range=f32-f127 command +// line option. No, it doesn't prevent the compiler from writing +// to upper bank, but at least discourages to do so. If you don't +// like the idea you have the option to compile the module with +// -Drum=nop.m in command line. +// + +#if defined(_HPUX_SOURCE) && !defined(_LP64) +#define ADDP addp4 +#else +#define ADDP add +#endif + +#if 1 +// +// bn_[add|sub]_words routines. +// +// Loops are spinning in 2*(n+5) ticks on Itanuim (provided that the +// data reside in L1 cache, i.e. 2 ticks away). 
It's possible to +// compress the epilogue and get down to 2*n+6, but at the cost of +// scalability (the neat feature of this implementation is that it +// shall automagically spin in n+5 on "wider" IA-64 implementations:-) +// I consider that the epilogue is short enough as it is to trade tiny +// performance loss on Itanium for scalability. +// +// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num) +// +.global bn_add_words# +.proc bn_add_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_add_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,4,12,0,16 + cmp4.le p6,p0=r35,r0 };; +{ .mfb; mov r8=r0 // return value +(p6) br.ret.spnt.many b0 };; + +{ .mib; sub r10=r35,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16 + } +{ .mib; ADDP r14=0,r32 // rp + .save pr,r9 + mov r9=pr };; + .body +{ .mii; ADDP r15=0,r33 // ap + mov ar.lc=r10 + mov ar.ec=6 } +{ .mib; ADDP r16=0,r34 // bp + mov pr.rot=1<<16 };; + +.L_bn_add_words_ctop: +{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++) + (p18) add r39=r37,r34 + (p19) cmp.ltu.unc p56,p0=r40,r38 } +{ .mfb; (p0) nop.m 0x0 + (p0) nop.f 0x0 + (p0) nop.b 0x0 } +{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++) + (p58) cmp.eq.or p57,p0=-1,r41 // (p20) + (p58) add r41=1,r41 } // (p20) +{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r + (p0) nop.f 0x0 + br.ctop.sptk .L_bn_add_words_ctop };; +.L_bn_add_words_cend: + +{ .mii; +(p59) add r8=1,r8 // return value + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mbb; nop.b 0x0 + br.ret.sptk.many b0 };; +.endp bn_add_words# + +// +// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num) +// +.global bn_sub_words# +.proc bn_sub_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_sub_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,4,12,0,16 + cmp4.le p6,p0=r35,r0 };; +{ .mfb; mov r8=r0 // return value +(p6) br.ret.spnt.many b0 };; + +{ .mib; sub r10=r35,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16 + } +{ .mib; ADDP r14=0,r32 // rp + .save pr,r9 + mov r9=pr };; + .body +{ .mii; ADDP r15=0,r33 // ap + mov ar.lc=r10 + mov ar.ec=6 } +{ .mib; ADDP r16=0,r34 // bp + mov pr.rot=1<<16 };; + +.L_bn_sub_words_ctop: +{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++) + (p18) sub r39=r37,r34 + (p19) cmp.gtu.unc p56,p0=r40,r38 } +{ .mfb; (p0) nop.m 0x0 + (p0) nop.f 0x0 + (p0) nop.b 0x0 } +{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++) + (p58) cmp.eq.or p57,p0=0,r41 // (p20) + (p58) add r41=-1,r41 } // (p20) +{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r + (p0) nop.b 0x0 + br.ctop.sptk .L_bn_sub_words_ctop };; +.L_bn_sub_words_cend: + +{ .mii; +(p59) add r8=1,r8 // return value + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mbb; nop.b 0x0 + br.ret.sptk.many b0 };; +.endp bn_sub_words# +#endif + +#if 0 +#define XMA_TEMPTATION +#endif + +#if 1 +// +// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +// +.global bn_mul_words# +.proc bn_mul_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_mul_words: + .prologue + .save ar.pfs,r2 +#ifdef XMA_TEMPTATION +{ .mfi; alloc r2=ar.pfs,4,0,0,0 };; +#else +{ .mfi; alloc r2=ar.pfs,4,12,0,16 };; +#endif +{ .mib; mov r8=r0 // return value + cmp4.le p6,p0=r34,r0 +(p6) br.ret.spnt.many b0 };; + +{ .mii; sub r10=r34,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + .save pr,r9 + mov r9=pr };; + + .body +{ .mib; setf.sig f8=r35 // w + mov pr.rot=0x800001<<16 + // ------^----- serves 
as (p50) at first (p27) + brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16 + } + +#ifndef XMA_TEMPTATION + +{ .mmi; ADDP r14=0,r32 // rp + ADDP r15=0,r33 // ap + mov ar.lc=r10 } +{ .mmi; mov r40=0 // serves as r35 at first (p27) + mov ar.ec=13 };; + +// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium +// L2 cache (i.e. 9 ticks away) as floating point load/store instructions +// bypass L1 cache and L2 latency is actually best-case scenario for +// ldf8. The loop is not scalable and shall run in 2*(n+12) even on +// "wider" IA-64 implementations. It's a trade-off here. n+24 loop +// would give us ~5% in *overall* performance improvement on "wider" +// IA-64, but would hurt Itanium for about same because of longer +// epilogue. As it's a matter of few percents in either case I've +// chosen to trade the scalability for development time (you can see +// this very instruction sequence in bn_mul_add_words loop which in +// turn is scalable). +.L_bn_mul_words_ctop: +{ .mfi; (p25) getf.sig r36=f52 // low + (p21) xmpy.lu f48=f37,f8 + (p28) cmp.ltu p54,p50=r41,r39 } +{ .mfi; (p16) ldf8 f32=[r15],8 + (p21) xmpy.hu f40=f37,f8 + (p0) nop.i 0x0 };; +{ .mii; (p25) getf.sig r32=f44 // high + .pred.rel "mutex",p50,p54 + (p50) add r40=r38,r35 // (p27) + (p54) add r40=r38,r35,1 } // (p27) +{ .mfb; (p28) st8 [r14]=r41,8 + (p0) nop.f 0x0 + br.ctop.sptk .L_bn_mul_words_ctop };; +.L_bn_mul_words_cend: + +{ .mii; nop.m 0x0 +.pred.rel "mutex",p51,p55 +(p51) add r8=r36,r0 +(p55) add r8=r36,r0,1 } +{ .mfb; nop.m 0x0 + nop.f 0x0 + nop.b 0x0 } + +#else // XMA_TEMPTATION + + setf.sig f37=r0 // serves as carry at (p18) tick + mov ar.lc=r10 + mov ar.ec=5;; + +// Most of you examining this code very likely wonder why in the name +// of Intel the following loop is commented out? Indeed, it looks so +// neat that you find it hard to believe that it's something wrong +// with it, right? The catch is that every iteration depends on the +// result from previous one and the latter isn't available instantly. +// The loop therefore spins at the latency of xma minus 1, or in other +// words at 6*(n+4) ticks:-( Compare to the "production" loop above +// that runs in 2*(n+11) where the low latency problem is worked around +// by moving the dependency to one-tick latent interger ALU. Note that +// "distance" between ldf8 and xma is not latency of ldf8, but the +// *difference* between xma and ldf8 latencies. 
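
To make the recurrence being discussed explicit, here is a minimal C sketch of the bn_mul_words semantics that both loops implement (rp[] = ap[]*w, returning the final carry); the helper name is illustrative and it assumes a 64-bit BN_ULONG and unsigned __int128, which the assembler of course does not rely on:

#include <stdint.h>

typedef uint64_t BN_ULONG;

/* Reference semantics of bn_mul_words: rp[] = ap[] * w, return final carry.
 * Each iteration needs the carry of the previous one -- the loop-carried
 * dependency the scheduling comments above are about. */
static BN_ULONG mul_words_sketch(BN_ULONG *rp, const BN_ULONG *ap,
                                 int num, BN_ULONG w)
{
    BN_ULONG carry = 0;
    for (int i = 0; i < num; i++) {
        unsigned __int128 acc = (unsigned __int128)ap[i] * w + carry;
        rp[i] = (BN_ULONG)acc;          /* low half goes to the result */
        carry = (BN_ULONG)(acc >> 64);  /* high half feeds the next step */
    }
    return carry;
}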
+.L_bn_mul_words_ctop: +{ .mfi; (p16) ldf8 f32=[r33],8 + (p18) xma.hu f38=f34,f8,f39 } +{ .mfb; (p20) stf8 [r32]=f37,8 + (p18) xma.lu f35=f34,f8,f39 + br.ctop.sptk .L_bn_mul_words_ctop };; +.L_bn_mul_words_cend: + + getf.sig r8=f41 // the return value + +#endif // XMA_TEMPTATION + +{ .mii; nop.m 0x0 + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mfb; rum 1<<5 // clear um.mfh + nop.f 0x0 + br.ret.sptk.many b0 };; +.endp bn_mul_words# +#endif + +#if 1 +// +// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +// +.global bn_mul_add_words# +.proc bn_mul_add_words# +.align 64 +.skip 48 // makes the loop body aligned at 64-byte boundary +bn_mul_add_words: + .prologue + .save ar.pfs,r2 +{ .mmi; alloc r2=ar.pfs,4,4,0,8 + cmp4.le p6,p0=r34,r0 + .save ar.lc,r3 + mov r3=ar.lc };; +{ .mib; mov r8=r0 // return value + sub r10=r34,r0,1 +(p6) br.ret.spnt.many b0 };; + +{ .mib; setf.sig f8=r35 // w + .save pr,r9 + mov r9=pr + brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16 + } + .body +{ .mmi; ADDP r14=0,r32 // rp + ADDP r15=0,r33 // ap + mov ar.lc=r10 } +{ .mii; ADDP r16=0,r32 // rp copy + mov pr.rot=0x2001<<16 + // ------^----- serves as (p40) at first (p27) + mov ar.ec=11 };; + +// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on +// Itanium 2. Yes, unlike previous versions it scales:-) Previous +// version was peforming *all* additions in IALU and was starving +// for those even on Itanium 2. In this version one addition is +// moved to FPU and is folded with multiplication. This is at cost +// of propogating the result from previous call to this subroutine +// to L2 cache... In other words negligible even for shorter keys. +// *Overall* performance improvement [over previous version] varies +// from 11 to 22 percent depending on key length. +.L_bn_mul_add_words_ctop: +.pred.rel "mutex",p40,p42 +{ .mfi; (p23) getf.sig r36=f45 // low + (p20) xma.lu f42=f36,f8,f50 // low + (p40) add r39=r39,r35 } // (p27) +{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++) + (p20) xma.hu f36=f36,f8,f50 // high + (p42) add r39=r39,r35,1 };; // (p27) +{ .mmi; (p24) getf.sig r32=f40 // high + (p16) ldf8 f46=[r16],8 // *(rp1++) + (p40) cmp.ltu p41,p39=r39,r35 } // (p27) +{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++) + (p42) cmp.leu p41,p39=r39,r35 // (p27) + br.ctop.sptk .L_bn_mul_add_words_ctop};; +.L_bn_mul_add_words_cend: + +{ .mmi; .pred.rel "mutex",p40,p42 +(p40) add r8=r35,r0 +(p42) add r8=r35,r0,1 + mov pr=r9,0x1ffff } +{ .mib; rum 1<<5 // clear um.mfh + mov ar.lc=r3 + br.ret.sptk.many b0 };; +.endp bn_mul_add_words# +#endif + +#if 1 +// +// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num) +// +.global bn_sqr_words# +.proc bn_sqr_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_sqr_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,3,0,0,0 + sxt4 r34=r34 };; +{ .mii; cmp.le p6,p0=r34,r0 + mov r8=r0 } // return value +{ .mfb; ADDP r32=0,r32 + nop.f 0x0 +(p6) br.ret.spnt.many b0 };; + +{ .mii; sub r10=r34,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + .save pr,r9 + mov r9=pr };; + + .body +{ .mib; ADDP r33=0,r33 + mov pr.rot=1<<16 + brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16 + } +{ .mii; add r34=8,r32 + mov ar.lc=r10 + mov ar.ec=18 };; + +// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's +// possible to compress the epilogue (I'm getting tired to write this +// comment over and over) and get down to 2*n+16 at the cost of +// scalability. 
The decision will very likely be reconsidered after the +// benchmark program is profiled. I.e. if perfomance gain on Itanium +// will appear larger than loss on "wider" IA-64, then the loop should +// be explicitely split and the epilogue compressed. +.L_bn_sqr_words_ctop: +{ .mfi; (p16) ldf8 f32=[r33],8 + (p25) xmpy.lu f42=f41,f41 + (p0) nop.i 0x0 } +{ .mib; (p33) stf8 [r32]=f50,16 + (p0) nop.i 0x0 + (p0) nop.b 0x0 } +{ .mfi; (p0) nop.m 0x0 + (p25) xmpy.hu f52=f41,f41 + (p0) nop.i 0x0 } +{ .mib; (p33) stf8 [r34]=f60,16 + (p0) nop.i 0x0 + br.ctop.sptk .L_bn_sqr_words_ctop };; +.L_bn_sqr_words_cend: + +{ .mii; nop.m 0x0 + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mfb; rum 1<<5 // clear um.mfh + nop.f 0x0 + br.ret.sptk.many b0 };; +.endp bn_sqr_words# +#endif + +#if 1 +// Apparently we win nothing by implementing special bn_sqr_comba8. +// Yes, it is possible to reduce the number of multiplications by +// almost factor of two, but then the amount of additions would +// increase by factor of two (as we would have to perform those +// otherwise performed by xma ourselves). Normally we would trade +// anyway as multiplications are way more expensive, but not this +// time... Multiplication kernel is fully pipelined and as we drain +// one 128-bit multiplication result per clock cycle multiplications +// are effectively as inexpensive as additions. Special implementation +// might become of interest for "wider" IA-64 implementation as you'll +// be able to get through the multiplication phase faster (there won't +// be any stall issues as discussed in the commentary section below and +// you therefore will be able to employ all 4 FP units)... But these +// Itanium days it's simply too hard to justify the effort so I just +// drop down to bn_mul_comba8 code:-) +// +// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) +// +.global bn_sqr_comba8# +.proc bn_sqr_comba8# +.align 64 +bn_sqr_comba8: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,2,1,0,0 + addp4 r33=0,r33 + addp4 r32=0,r32 };; +{ .mii; +#else +{ .mii; alloc r2=ar.pfs,2,1,0,0 +#endif + mov r34=r33 + add r14=8,r33 };; + .body +{ .mii; add r17=8,r34 + add r15=16,r33 + add r18=16,r34 } +{ .mfb; add r16=24,r33 + br .L_cheat_entry_point8 };; +.endp bn_sqr_comba8# +#endif + +#if 1 +// I've estimated this routine to run in ~120 ticks, but in reality +// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra +// cycles consumed for instructions fetch? Or did I misinterpret some +// clause in Itanium µ-architecture manual? Comments are welcomed and +// highly appreciated. +// +// On Itanium 2 it takes ~190 ticks. This is because of stalls on +// result from getf.sig. I do nothing about it at this point for +// reasons depicted below. +// +// However! It should be noted that even 160 ticks is darn good result +// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the +// C version (compiled with gcc with inline assembler). I really +// kicked compiler's butt here, didn't I? Yeah! This brings us to the +// following statement. It's damn shame that this routine isn't called +// very often nowadays! According to the profiler most CPU time is +// consumed by bn_mul_add_words called from BN_from_montgomery. In +// order to estimate what we're missing, I've compared the performance +// of this routine against "traditional" implementation, i.e. 
against +// following routine: +// +// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]); +// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]); +// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]); +// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]); +// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]); +// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]); +// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]); +// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]); +// } +// +// The one below is over 8 times faster than the one above:-( Even +// more reasons to "combafy" bn_mul_add_mont... +// +// And yes, this routine really made me wish there were an optimizing +// assembler! It also feels like it deserves a dedication. +// +// To my wife for being there and to my kids... +// +// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +// +#define carry1 r14 +#define carry2 r15 +#define carry3 r34 +.global bn_mul_comba8# +.proc bn_mul_comba8# +.align 64 +bn_mul_comba8: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,3,0,0,0 + addp4 r33=0,r33 + addp4 r34=0,r34 };; +{ .mii; addp4 r32=0,r32 +#else +{ .mii; alloc r2=ar.pfs,3,0,0,0 +#endif + add r14=8,r33 + add r17=8,r34 } + .body +{ .mii; add r15=16,r33 + add r18=16,r34 + add r16=24,r33 } +.L_cheat_entry_point8: +{ .mmi; add r19=24,r34 + + ldf8 f32=[r33],32 };; + +{ .mmi; ldf8 f120=[r34],32 + ldf8 f121=[r17],32 } +{ .mmi; ldf8 f122=[r18],32 + ldf8 f123=[r19],32 };; +{ .mmi; ldf8 f124=[r34] + ldf8 f125=[r17] } +{ .mmi; ldf8 f126=[r18] + ldf8 f127=[r19] } + +{ .mmi; ldf8 f33=[r14],32 + ldf8 f34=[r15],32 } +{ .mmi; ldf8 f35=[r16],32;; + ldf8 f36=[r33] } +{ .mmi; ldf8 f37=[r14] + ldf8 f38=[r15] } +{ .mfi; ldf8 f39=[r16] +// -------\ Entering multiplier's heaven /------- +// ------------\ /------------ +// -----------------\ /----------------- +// ----------------------\/---------------------- + xma.hu f41=f32,f120,f0 } +{ .mfi; xma.lu f40=f32,f120,f0 };; // (*) +{ .mfi; xma.hu f51=f32,f121,f0 } +{ .mfi; xma.lu f50=f32,f121,f0 };; +{ .mfi; xma.hu f61=f32,f122,f0 } +{ .mfi; xma.lu f60=f32,f122,f0 };; +{ .mfi; xma.hu f71=f32,f123,f0 } +{ .mfi; xma.lu f70=f32,f123,f0 };; +{ .mfi; xma.hu f81=f32,f124,f0 } +{ .mfi; xma.lu f80=f32,f124,f0 };; +{ .mfi; xma.hu f91=f32,f125,f0 } +{ .mfi; xma.lu f90=f32,f125,f0 };; +{ .mfi; xma.hu f101=f32,f126,f0 } +{ .mfi; xma.lu f100=f32,f126,f0 };; +{ .mfi; xma.hu f111=f32,f127,f0 } +{ .mfi; xma.lu f110=f32,f127,f0 };;// +// (*) You can argue that splitting at every second bundle would +// prevent "wider" IA-64 implementations from achieving the peak +// performance. Well, not really... The catch is that if you +// intend to keep 4 FP units busy by splitting at every fourth +// bundle and thus perform these 16 multiplications in 4 ticks, +// the first bundle *below* would stall because the result from +// the first xma bundle *above* won't be available for another 3 +// ticks (if not more, being an optimist, I assume that "wider" +// implementation will have same latency:-). This stall will hold +// you back and the performance would be as if every second bundle +// were split *anyway*... 
+{ .mfi; getf.sig r16=f40 + xma.hu f42=f33,f120,f41 + add r33=8,r32 } +{ .mfi; xma.lu f41=f33,f120,f41 };; +{ .mfi; getf.sig r24=f50 + xma.hu f52=f33,f121,f51 } +{ .mfi; xma.lu f51=f33,f121,f51 };; +{ .mfi; st8 [r32]=r16,16 + xma.hu f62=f33,f122,f61 } +{ .mfi; xma.lu f61=f33,f122,f61 };; +{ .mfi; xma.hu f72=f33,f123,f71 } +{ .mfi; xma.lu f71=f33,f123,f71 };; +{ .mfi; xma.hu f82=f33,f124,f81 } +{ .mfi; xma.lu f81=f33,f124,f81 };; +{ .mfi; xma.hu f92=f33,f125,f91 } +{ .mfi; xma.lu f91=f33,f125,f91 };; +{ .mfi; xma.hu f102=f33,f126,f101 } +{ .mfi; xma.lu f101=f33,f126,f101 };; +{ .mfi; xma.hu f112=f33,f127,f111 } +{ .mfi; xma.lu f111=f33,f127,f111 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r25=f41 + xma.hu f43=f34,f120,f42 } +{ .mfi; xma.lu f42=f34,f120,f42 };; +{ .mfi; getf.sig r16=f60 + xma.hu f53=f34,f121,f52 } +{ .mfi; xma.lu f52=f34,f121,f52 };; +{ .mfi; getf.sig r17=f51 + xma.hu f63=f34,f122,f62 + add r25=r25,r24 } +{ .mfi; xma.lu f62=f34,f122,f62 + mov carry1=0 };; +{ .mfi; cmp.ltu p6,p0=r25,r24 + xma.hu f73=f34,f123,f72 } +{ .mfi; xma.lu f72=f34,f123,f72 };; +{ .mfi; st8 [r33]=r25,16 + xma.hu f83=f34,f124,f82 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f82=f34,f124,f82 };; +{ .mfi; xma.hu f93=f34,f125,f92 } +{ .mfi; xma.lu f92=f34,f125,f92 };; +{ .mfi; xma.hu f103=f34,f126,f102 } +{ .mfi; xma.lu f102=f34,f126,f102 };; +{ .mfi; xma.hu f113=f34,f127,f112 } +{ .mfi; xma.lu f112=f34,f127,f112 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r18=f42 + xma.hu f44=f35,f120,f43 + add r17=r17,r16 } +{ .mfi; xma.lu f43=f35,f120,f43 };; +{ .mfi; getf.sig r24=f70 + xma.hu f54=f35,f121,f53 } +{ .mfi; mov carry2=0 + xma.lu f53=f35,f121,f53 };; +{ .mfi; getf.sig r25=f61 + xma.hu f64=f35,f122,f63 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; add r18=r18,r17 + xma.lu f63=f35,f122,f63 };; +{ .mfi; getf.sig r26=f52 + xma.hu f74=f35,f123,f73 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,r17 + xma.lu f73=f35,f123,f73 + add r18=r18,carry1 };; +{ .mfi; + xma.hu f84=f35,f124,f83 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,carry1 + xma.lu f83=f35,f124,f83 };; +{ .mfi; st8 [r32]=r18,16 + xma.hu f94=f35,f125,f93 +(p7) add carry2=1,carry2 } +{ .mfi; xma.lu f93=f35,f125,f93 };; +{ .mfi; xma.hu f104=f35,f126,f103 } +{ .mfi; xma.lu f103=f35,f126,f103 };; +{ .mfi; xma.hu f114=f35,f127,f113 } +{ .mfi; mov carry1=0 + xma.lu f113=f35,f127,f113 + add r25=r25,r24 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r27=f43 + xma.hu f45=f36,f120,f44 + cmp.ltu p6,p0=r25,r24 } +{ .mfi; xma.lu f44=f36,f120,f44 + add r26=r26,r25 };; +{ .mfi; getf.sig r16=f80 + xma.hu f55=f36,f121,f54 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f54=f36,f121,f54 };; +{ .mfi; getf.sig r17=f71 + xma.hu f65=f36,f122,f64 + cmp.ltu p6,p0=r26,r25 } +{ .mfi; xma.lu f64=f36,f122,f64 + add r27=r27,r26 };; +{ .mfi; getf.sig r18=f62 + xma.hu f75=f36,f123,f74 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r27,r26 + xma.lu f74=f36,f123,f74 + add r27=r27,carry2 };; +{ .mfi; getf.sig r19=f53 + xma.hu f85=f36,f124,f84 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f84=f36,f124,f84 + cmp.ltu p6,p0=r27,carry2 };; +{ .mfi; st8 [r33]=r27,16 + xma.hu f95=f36,f125,f94 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f94=f36,f125,f94 };; +{ .mfi; xma.hu f105=f36,f126,f104 } +{ .mfi; mov carry2=0 + xma.lu f104=f36,f126,f104 + add r17=r17,r16 };; +{ .mfi; xma.hu f115=f36,f127,f114 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; xma.lu f114=f36,f127,f114 + add r18=r18,r17 };;// 
+//-------------------------------------------------// +{ .mfi; getf.sig r20=f44 + xma.hu f46=f37,f120,f45 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,r17 + xma.lu f45=f37,f120,f45 + add r19=r19,r18 };; +{ .mfi; getf.sig r24=f90 + xma.hu f56=f37,f121,f55 } +{ .mfi; xma.lu f55=f37,f121,f55 };; +{ .mfi; getf.sig r25=f81 + xma.hu f66=f37,f122,f65 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r19,r18 + xma.lu f65=f37,f122,f65 + add r20=r20,r19 };; +{ .mfi; getf.sig r26=f72 + xma.hu f76=f37,f123,f75 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r20,r19 + xma.lu f75=f37,f123,f75 + add r20=r20,carry1 };; +{ .mfi; getf.sig r27=f63 + xma.hu f86=f37,f124,f85 +(p7) add carry2=1,carry2 } +{ .mfi; xma.lu f85=f37,f124,f85 + cmp.ltu p7,p0=r20,carry1 };; +{ .mfi; getf.sig r28=f54 + xma.hu f96=f37,f125,f95 +(p7) add carry2=1,carry2 } +{ .mfi; st8 [r32]=r20,16 + xma.lu f95=f37,f125,f95 };; +{ .mfi; xma.hu f106=f37,f126,f105 } +{ .mfi; mov carry1=0 + xma.lu f105=f37,f126,f105 + add r25=r25,r24 };; +{ .mfi; xma.hu f116=f37,f127,f115 + cmp.ltu p6,p0=r25,r24 } +{ .mfi; xma.lu f115=f37,f127,f115 + add r26=r26,r25 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r29=f45 + xma.hu f47=f38,f120,f46 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r26,r25 + xma.lu f46=f38,f120,f46 + add r27=r27,r26 };; +{ .mfi; getf.sig r16=f100 + xma.hu f57=f38,f121,f56 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r27,r26 + xma.lu f56=f38,f121,f56 + add r28=r28,r27 };; +{ .mfi; getf.sig r17=f91 + xma.hu f67=f38,f122,f66 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r28,r27 + xma.lu f66=f38,f122,f66 + add r29=r29,r28 };; +{ .mfi; getf.sig r18=f82 + xma.hu f77=f38,f123,f76 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r29,r28 + xma.lu f76=f38,f123,f76 + add r29=r29,carry2 };; +{ .mfi; getf.sig r19=f73 + xma.hu f87=f38,f124,f86 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f86=f38,f124,f86 + cmp.ltu p6,p0=r29,carry2 };; +{ .mfi; getf.sig r20=f64 + xma.hu f97=f38,f125,f96 +(p6) add carry1=1,carry1 } +{ .mfi; st8 [r33]=r29,16 + xma.lu f96=f38,f125,f96 };; +{ .mfi; getf.sig r21=f55 + xma.hu f107=f38,f126,f106 } +{ .mfi; mov carry2=0 + xma.lu f106=f38,f126,f106 + add r17=r17,r16 };; +{ .mfi; xma.hu f117=f38,f127,f116 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; xma.lu f116=f38,f127,f116 + add r18=r18,r17 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r22=f46 + xma.hu f48=f39,f120,f47 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,r17 + xma.lu f47=f39,f120,f47 + add r19=r19,r18 };; +{ .mfi; getf.sig r24=f110 + xma.hu f58=f39,f121,f57 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r19,r18 + xma.lu f57=f39,f121,f57 + add r20=r20,r19 };; +{ .mfi; getf.sig r25=f101 + xma.hu f68=f39,f122,f67 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r20,r19 + xma.lu f67=f39,f122,f67 + add r21=r21,r20 };; +{ .mfi; getf.sig r26=f92 + xma.hu f78=f39,f123,f77 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r21,r20 + xma.lu f77=f39,f123,f77 + add r22=r22,r21 };; +{ .mfi; getf.sig r27=f83 + xma.hu f88=f39,f124,f87 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r22,r21 + xma.lu f87=f39,f124,f87 + add r22=r22,carry1 };; +{ .mfi; getf.sig r28=f74 + xma.hu f98=f39,f125,f97 +(p7) add carry2=1,carry2 } +{ .mfi; xma.lu f97=f39,f125,f97 + cmp.ltu p7,p0=r22,carry1 };; +{ .mfi; getf.sig r29=f65 + xma.hu f108=f39,f126,f107 +(p7) add carry2=1,carry2 } +{ .mfi; st8 [r32]=r22,16 + xma.lu f107=f39,f126,f107 };; +{ .mfi; getf.sig r30=f56 + xma.hu f118=f39,f127,f117 } 
+{ .mfi; xma.lu f117=f39,f127,f117 };;// +//-------------------------------------------------// +// Leaving muliplier's heaven... Quite a ride, huh? + +{ .mii; getf.sig r31=f47 + add r25=r25,r24 + mov carry1=0 };; +{ .mii; getf.sig r16=f111 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mfb; getf.sig r17=f102 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r28=r28,r27 };; +{ .mii; getf.sig r18=f93 + add r17=r17,r16 + mov carry3=0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r28,r27 + add r29=r29,r28 };; +{ .mii; getf.sig r19=f84 + cmp.ltu p7,p0=r17,r16 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r29,r28 + add r30=r30,r29 };; +{ .mii; getf.sig r20=f75 + add r18=r18,r17 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r30,r29 + add r31=r31,r30 };; +{ .mfb; getf.sig r21=f66 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 } +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r31,r30 + add r31=r31,carry2 };; +{ .mfb; getf.sig r22=f57 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r19,r18 + add r20=r20,r19 } +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r31,carry2 };; +{ .mfb; getf.sig r23=f48 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r20,r19 + add r21=r21,r20 } +{ .mii; +(p6) add carry1=1,carry1 } +{ .mfb; st8 [r33]=r31,16 };; + +{ .mfb; getf.sig r24=f112 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r21,r20 + add r22=r22,r21 };; +{ .mfb; getf.sig r25=f103 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r22,r21 + add r23=r23,r22 };; +{ .mfb; getf.sig r26=f94 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r23,r22 + add r23=r23,carry1 };; +{ .mfb; getf.sig r27=f85 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p8=r23,carry1};; +{ .mii; getf.sig r28=f76 + add r25=r25,r24 + mov carry1=0 } +{ .mii; st8 [r32]=r23,16 + (p7) add carry2=1,carry3 + (p8) add carry2=0,carry3 };; + +{ .mfb; nop.m 0x0 } +{ .mii; getf.sig r29=f67 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mfb; getf.sig r30=f58 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mfb; getf.sig r16=f113 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r28=r28,r27 };; +{ .mfb; getf.sig r17=f104 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r28,r27 + add r29=r29,r28 };; +{ .mfb; getf.sig r18=f95 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r29,r28 + add r30=r30,r29 };; +{ .mii; getf.sig r19=f86 + add r17=r17,r16 + mov carry3=0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r30,r29 + add r30=r30,carry2 };; +{ .mii; getf.sig r20=f77 + cmp.ltu p7,p0=r17,r16 + add r18=r18,r17 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r30,carry2 };; +{ .mfb; getf.sig r21=f68 } +{ .mii; st8 [r33]=r30,16 +(p6) add carry1=1,carry1 };; + +{ .mfb; getf.sig r24=f114 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 };; +{ .mfb; getf.sig r25=f105 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r19,r18 + add r20=r20,r19 };; +{ .mfb; getf.sig r26=f96 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r20,r19 + add r21=r21,r20 };; +{ .mfb; getf.sig r27=f87 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r21,r20 + add r21=r21,carry1 };; +{ .mib; getf.sig r28=f78 + add r25=r25,r24 } +{ .mib; (p7) add carry3=1,carry3 + cmp.ltu p7,p8=r21,carry1};; +{ .mii; st8 [r32]=r21,16 + (p7) add carry2=1,carry3 + (p8) 
add carry2=0,carry3 } + +{ .mii; mov carry1=0 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mfb; getf.sig r16=f115 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mfb; getf.sig r17=f106 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r28=r28,r27 };; +{ .mfb; getf.sig r18=f97 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r28,r27 + add r28=r28,carry2 };; +{ .mib; getf.sig r19=f88 + add r17=r17,r16 } +{ .mib; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r28,carry2 };; +{ .mii; st8 [r33]=r28,16 +(p6) add carry1=1,carry1 } + +{ .mii; mov carry2=0 + cmp.ltu p7,p0=r17,r16 + add r18=r18,r17 };; +{ .mfb; getf.sig r24=f116 } +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 };; +{ .mfb; getf.sig r25=f107 } +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r19,r18 + add r19=r19,carry1 };; +{ .mfb; getf.sig r26=f98 } +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r19,carry1};; +{ .mii; st8 [r32]=r19,16 + (p7) add carry2=1,carry2 } + +{ .mfb; add r25=r25,r24 };; + +{ .mfb; getf.sig r16=f117 } +{ .mii; mov carry1=0 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mfb; getf.sig r17=f108 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r26=r26,carry2 };; +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,carry2 };; +{ .mii; st8 [r33]=r26,16 +(p6) add carry1=1,carry1 } + +{ .mfb; add r17=r17,r16 };; +{ .mfb; getf.sig r24=f118 } +{ .mii; mov carry2=0 + cmp.ltu p7,p0=r17,r16 + add r17=r17,carry1 };; +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r17,carry1};; +{ .mii; st8 [r32]=r17 + (p7) add carry2=1,carry2 };; +{ .mfb; add r24=r24,carry2 };; +{ .mib; st8 [r33]=r24 } + +{ .mib; rum 1<<5 // clear um.mfh + br.ret.sptk.many b0 };; +.endp bn_mul_comba8# +#undef carry3 +#undef carry2 +#undef carry1 +#endif + +#if 1 +// It's possible to make it faster (see comment to bn_sqr_comba8), but +// I reckon it doesn't worth the effort. Basically because the routine +// (actually both of them) practically never called... So I just play +// same trick as with bn_sqr_comba8. +// +// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) +// +.global bn_sqr_comba4# +.proc bn_sqr_comba4# +.align 64 +bn_sqr_comba4: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,2,1,0,0 + addp4 r32=0,r32 + addp4 r33=0,r33 };; +{ .mii; +#else +{ .mii; alloc r2=ar.pfs,2,1,0,0 +#endif + mov r34=r33 + add r14=8,r33 };; + .body +{ .mii; add r17=8,r34 + add r15=16,r33 + add r18=16,r34 } +{ .mfb; add r16=24,r33 + br .L_cheat_entry_point4 };; +.endp bn_sqr_comba4# +#endif + +#if 1 +// Runs in ~115 cycles and ~4.5 times faster than C. Well, whatever... 
+// +// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +// +#define carry1 r14 +#define carry2 r15 +.global bn_mul_comba4# +.proc bn_mul_comba4# +.align 64 +bn_mul_comba4: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,3,0,0,0 + addp4 r33=0,r33 + addp4 r34=0,r34 };; +{ .mii; addp4 r32=0,r32 +#else +{ .mii; alloc r2=ar.pfs,3,0,0,0 +#endif + add r14=8,r33 + add r17=8,r34 } + .body +{ .mii; add r15=16,r33 + add r18=16,r34 + add r16=24,r33 };; +.L_cheat_entry_point4: +{ .mmi; add r19=24,r34 + + ldf8 f32=[r33] } + +{ .mmi; ldf8 f120=[r34] + ldf8 f121=[r17] };; +{ .mmi; ldf8 f122=[r18] + ldf8 f123=[r19] } + +{ .mmi; ldf8 f33=[r14] + ldf8 f34=[r15] } +{ .mfi; ldf8 f35=[r16] + + xma.hu f41=f32,f120,f0 } +{ .mfi; xma.lu f40=f32,f120,f0 };; +{ .mfi; xma.hu f51=f32,f121,f0 } +{ .mfi; xma.lu f50=f32,f121,f0 };; +{ .mfi; xma.hu f61=f32,f122,f0 } +{ .mfi; xma.lu f60=f32,f122,f0 };; +{ .mfi; xma.hu f71=f32,f123,f0 } +{ .mfi; xma.lu f70=f32,f123,f0 };;// +// Major stall takes place here, and 3 more places below. Result from +// first xma is not available for another 3 ticks. +{ .mfi; getf.sig r16=f40 + xma.hu f42=f33,f120,f41 + add r33=8,r32 } +{ .mfi; xma.lu f41=f33,f120,f41 };; +{ .mfi; getf.sig r24=f50 + xma.hu f52=f33,f121,f51 } +{ .mfi; xma.lu f51=f33,f121,f51 };; +{ .mfi; st8 [r32]=r16,16 + xma.hu f62=f33,f122,f61 } +{ .mfi; xma.lu f61=f33,f122,f61 };; +{ .mfi; xma.hu f72=f33,f123,f71 } +{ .mfi; xma.lu f71=f33,f123,f71 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r25=f41 + xma.hu f43=f34,f120,f42 } +{ .mfi; xma.lu f42=f34,f120,f42 };; +{ .mfi; getf.sig r16=f60 + xma.hu f53=f34,f121,f52 } +{ .mfi; xma.lu f52=f34,f121,f52 };; +{ .mfi; getf.sig r17=f51 + xma.hu f63=f34,f122,f62 + add r25=r25,r24 } +{ .mfi; mov carry1=0 + xma.lu f62=f34,f122,f62 };; +{ .mfi; st8 [r33]=r25,16 + xma.hu f73=f34,f123,f72 + cmp.ltu p6,p0=r25,r24 } +{ .mfi; xma.lu f72=f34,f123,f72 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r18=f42 + xma.hu f44=f35,f120,f43 +(p6) add carry1=1,carry1 } +{ .mfi; add r17=r17,r16 + xma.lu f43=f35,f120,f43 + mov carry2=0 };; +{ .mfi; getf.sig r24=f70 + xma.hu f54=f35,f121,f53 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; xma.lu f53=f35,f121,f53 };; +{ .mfi; getf.sig r25=f61 + xma.hu f64=f35,f122,f63 + add r18=r18,r17 } +{ .mfi; xma.lu f63=f35,f122,f63 +(p7) add carry2=1,carry2 };; +{ .mfi; getf.sig r26=f52 + xma.hu f74=f35,f123,f73 + cmp.ltu p7,p0=r18,r17 } +{ .mfi; xma.lu f73=f35,f123,f73 + add r18=r18,carry1 };; +//-------------------------------------------------// +{ .mii; st8 [r32]=r18,16 +(p7) add carry2=1,carry2 + cmp.ltu p7,p0=r18,carry1 };; + +{ .mfi; getf.sig r27=f43 // last major stall +(p7) add carry2=1,carry2 };; +{ .mii; getf.sig r16=f71 + add r25=r25,r24 + mov carry1=0 };; +{ .mii; getf.sig r17=f62 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r27=r27,carry2 };; +{ .mii; getf.sig r18=f53 +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,carry2 };; +{ .mfi; st8 [r33]=r27,16 +(p6) add carry1=1,carry1 } + +{ .mii; getf.sig r19=f44 + add r17=r17,r16 + mov carry2=0 };; +{ .mii; getf.sig r24=f72 + cmp.ltu p7,p0=r17,r16 + add r18=r18,r17 };; +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 };; +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r19,r18 + add r19=r19,carry1 };; +{ .mii; getf.sig r25=f63 + 
(p7) add carry2=1,carry2 + cmp.ltu p7,p0=r19,carry1};; +{ .mii; st8 [r32]=r19,16 + (p7) add carry2=1,carry2 } + +{ .mii; getf.sig r26=f54 + add r25=r25,r24 + mov carry1=0 };; +{ .mii; getf.sig r16=f73 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r26=r26,carry2 };; +{ .mii; getf.sig r17=f64 +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,carry2 };; +{ .mii; st8 [r33]=r26,16 +(p6) add carry1=1,carry1 } + +{ .mii; getf.sig r24=f74 + add r17=r17,r16 + mov carry2=0 };; +{ .mii; cmp.ltu p7,p0=r17,r16 + add r17=r17,carry1 };; + +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r17,carry1};; +{ .mii; st8 [r32]=r17,16 + (p7) add carry2=1,carry2 };; + +{ .mii; add r24=r24,carry2 };; +{ .mii; st8 [r33]=r24 } + +{ .mib; rum 1<<5 // clear um.mfh + br.ret.sptk.many b0 };; +.endp bn_mul_comba4# +#undef carry2 +#undef carry1 +#endif + +#if 1 +// +// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) +// +// In the nutshell it's a port of my MIPS III/IV implementation. +// +#define AT r14 +#define H r16 +#define HH r20 +#define L r17 +#define D r18 +#define DH r22 +#define I r21 + +#if 0 +// Some preprocessors (most notably HP-UX) appear to be allergic to +// macros enclosed to parenthesis [as these three were]. +#define cont p16 +#define break p0 // p20 +#define equ p24 +#else +cont=p16 +break=p0 +equ=p24 +#endif + +.global abort# +.global bn_div_words# +.proc bn_div_words# +.align 64 +bn_div_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,3,5,0,8 + .save b0,r3 + mov r3=b0 + .save pr,r10 + mov r10=pr };; +{ .mmb; cmp.eq p6,p0=r34,r0 + mov r8=-1 +(p6) br.ret.spnt.many b0 };; + + .body +{ .mii; mov H=r32 // save h + mov ar.ec=0 // don't rotate at exit + mov pr.rot=0 } +{ .mii; mov L=r33 // save l + mov r36=r0 };; + +.L_divw_shift: // -vv- note signed comparison +{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d + (p0) shladd r33=r34,1,r0 } +{ .mfb; (p0) add r35=1,r36 + (p0) nop.f 0x0 +(p16) br.wtop.dpnt .L_divw_shift };; + +{ .mii; mov D=r34 + shr.u DH=r34,32 + sub r35=64,r36 };; +{ .mii; setf.sig f7=DH + shr.u AT=H,r35 + mov I=r36 };; +{ .mib; cmp.ne p6,p0=r0,AT + shl H=H,r36 +(p6) br.call.spnt.clr b0=abort };; // overflow, die... 
+ +{ .mfi; fcvt.xuf.s1 f7=f7 + shr.u AT=L,r35 };; +{ .mii; shl L=L,r36 + or H=H,AT };; + +{ .mii; nop.m 0x0 + cmp.leu p6,p0=D,H;; +(p6) sub H=H,D } + +{ .mlx; setf.sig f14=D + movl AT=0xffffffff };; +/////////////////////////////////////////////////////////// +{ .mii; setf.sig f6=H + shr.u HH=H,32;; + cmp.eq p6,p7=HH,DH };; +{ .mfb; +(p6) setf.sig f8=AT +(p7) fcvt.xuf.s1 f6=f6 +(p7) br.call.sptk b6=.L_udiv64_32_b6 };; + +{ .mfi; getf.sig r33=f8 // q + xmpy.lu f9=f8,f14 } +{ .mfi; xmpy.hu f10=f8,f14 + shrp H=H,L,32 };; + +{ .mmi; getf.sig r35=f9 // tl + getf.sig r31=f10 };; // th + +.L_divw_1st_iter: +{ .mii; (p0) add r32=-1,r33 + (p0) cmp.eq equ,cont=HH,r31 };; +{ .mii; (p0) cmp.ltu p8,p0=r35,D + (p0) sub r34=r35,D + (equ) cmp.leu break,cont=r35,H };; +{ .mib; (cont) cmp.leu cont,break=HH,r31 + (p8) add r31=-1,r31 +(cont) br.wtop.spnt .L_divw_1st_iter };; +/////////////////////////////////////////////////////////// +{ .mii; sub H=H,r35 + shl r8=r33,32 + shl L=L,32 };; +/////////////////////////////////////////////////////////// +{ .mii; setf.sig f6=H + shr.u HH=H,32;; + cmp.eq p6,p7=HH,DH };; +{ .mfb; +(p6) setf.sig f8=AT +(p7) fcvt.xuf.s1 f6=f6 +(p7) br.call.sptk b6=.L_udiv64_32_b6 };; + +{ .mfi; getf.sig r33=f8 // q + xmpy.lu f9=f8,f14 } +{ .mfi; xmpy.hu f10=f8,f14 + shrp H=H,L,32 };; + +{ .mmi; getf.sig r35=f9 // tl + getf.sig r31=f10 };; // th + +.L_divw_2nd_iter: +{ .mii; (p0) add r32=-1,r33 + (p0) cmp.eq equ,cont=HH,r31 };; +{ .mii; (p0) cmp.ltu p8,p0=r35,D + (p0) sub r34=r35,D + (equ) cmp.leu break,cont=r35,H };; +{ .mib; (cont) cmp.leu cont,break=HH,r31 + (p8) add r31=-1,r31 +(cont) br.wtop.spnt .L_divw_2nd_iter };; +/////////////////////////////////////////////////////////// +{ .mii; sub H=H,r35 + or r8=r8,r33 + mov ar.pfs=r2 };; +{ .mii; shr.u r9=H,I // remainder if anybody wants it + mov pr=r10,0x1ffff } +{ .mfb; br.ret.sptk.many b0 };; + +// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division +// procedure. +// +// inputs: f6 = (double)a, f7 = (double)b +// output: f8 = (int)(a/b) +// clobbered: f8,f9,f10,f11,pred +pred=p15 +// One can argue that this snippet is copyrighted to Intel +// Corporation, as it's essentially identical to one of those +// found in "Divide, Square Root and Remainder" section at +// http://www.intel.com/software/products/opensource/libraries/num.htm. +// Yes, I admit that the referred code was used as template, +// but after I realized that there hardly is any other instruction +// sequence which would perform this operation. I mean I figure that +// any independent attempt to implement high-performance division +// will result in code virtually identical to the Intel code. It +// should be noted though that below division kernel is 1 cycle +// faster than Intel one (note commented splits:-), not to mention +// original prologue (rather lack of one) and epilogue. 
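The steps performed by the kernel below are the classical Newton-Raphson refinement of a reciprocal seed. As a purely illustrative aid (not a drop-in replacement: the real code depends on frcpa's guaranteed accuracy and on the 64-bit significands of the Itanium FP registers), the same recurrence can be written in C with long double standing in for those registers:

    #include <stdint.h>

    /* Illustrative only: mirrors the commented steps of .L_udiv64_32_b6
     * (y0 = 1/b, e0 = 1 - b*y0, q0 = a*y0, then two error-squaring rounds
     * and a final remainder-based correction). */
    static uint64_t udiv_newton_sketch(uint64_t a, uint64_t b)
    {
        long double fa = (long double)a, fb = (long double)b;
        long double y = 1.0L / fb;          /* y0: reciprocal seed (frcpa)   */
        long double e = 1.0L - fb * y;      /* e0 = 1 - b*y0                 */
        long double q = fa * y;             /* q0 = a*y0                     */
        long double e2 = e * e;             /* e1 = e0^2                     */
        q += e * q;                         /* q1 = q0 + e0*q0               */
        y += e * y;                         /* y1 = y0 + e0*y0               */
        q += e2 * q;                        /* q2 = q1 + e1*q1               */
        y += e2 * y;                        /* y2 = y1 + e1*y1               */
        long double r = fa - fb * q;        /* r2 = a - b*q2                 */
        q += r * y;                         /* q3 = q2 + r2*y2               */
        return (uint64_t)q;                 /* trunc(q3)                     */
    }

Note also that the caller above skips this kernel entirely when HH==DH (cmp.eq p6,p7=HH,DH) and simply uses 0xffffffff as the quotient-digit estimate.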
+.align 32 +.skip 16 +.L_udiv64_32_b6: + frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b + +(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0 +(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0 +(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0 +(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0 +(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0 +(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1 +(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1 +(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2 +(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2 + + fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3) + br.ret.sptk.many b6;; +.endp bn_div_words# +#endif diff --git a/openssl/crypto/bn/asm/mips3-mont.pl b/openssl/crypto/bn/asm/mips3-mont.pl new file mode 100644 index 000000000..8f9156e02 --- /dev/null +++ b/openssl/crypto/bn/asm/mips3-mont.pl @@ -0,0 +1,327 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# This module doesn't present direct interest for OpenSSL, because it +# doesn't provide better performance for longer keys. While 512-bit +# RSA private key operations are 40% faster, 1024-bit ones are hardly +# faster at all, while longer key operations are slower by up to 20%. +# It might be of interest to embedded system developers though, as +# it's smaller than 1KB, yet offers ~3x improvement over compiler +# generated code. +# +# The module targets N32 and N64 MIPS ABIs and currently is a bit +# IRIX-centric, i.e. is likely to require adaptation for other OSes. 
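All of the *-mont modules added by this patch (Alpha, MIPS, x86, PPC) compute the same thing: a word-serial Montgomery multiplication finished by a single conditional subtraction. The portable sketch below is illustrative only (hypothetical names, 64-bit limbs, and a compiler __int128 type are assumed; it is not the code any of these modules generate):

    #include <stdint.h>

    typedef unsigned __int128 u128;      /* assumed compiler extension */

    /* rp[] = ap[]*bp[]*R^-1 mod np[], R = 2^(64*num), n0 = -np[0]^-1 mod 2^64.
     * tp[] is scratch for num+1 limbs; returns 1 like bn_mul_mont does. */
    static int mul_mont_sketch(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                               const uint64_t *np, uint64_t n0, int num,
                               uint64_t *tp)
    {
        int i, j;

        for (j = 0; j <= num; j++) tp[j] = 0;

        for (i = 0; i < num; i++) {
            u128 c = 0;
            for (j = 0; j < num; j++) {            /* tp += ap[]*bp[i]         */
                c += (u128)ap[j] * bp[i] + tp[j];
                tp[j] = (uint64_t)c;
                c >>= 64;
            }
            c += tp[num];
            tp[num] = (uint64_t)c;
            uint64_t ovf = (uint64_t)(c >> 64);    /* "upmost overflow bit"    */

            uint64_t m = tp[0] * n0;               /* Montgomery factor m1     */
            c = ((u128)np[0] * m + tp[0]) >> 64;   /* low word becomes zero    */
            for (j = 1; j < num; j++) {            /* tp = (tp + np[]*m) >> 64 */
                c += (u128)np[j] * m + tp[j];
                tp[j - 1] = (uint64_t)c;
                c >>= 64;
            }
            c += tp[num];
            tp[num - 1] = (uint64_t)c;
            tp[num] = ovf + (uint64_t)(c >> 64);
        }

        /* conditional final subtraction, cf. the .Lsub/.Lcopy tails */
        uint64_t borrow = 0;
        for (j = 0; j < num; j++) {
            uint64_t t = tp[j], n = np[j], d = t - n - borrow;
            borrow = (t < n) || (borrow && t == n);
            rp[j] = d;
        }
        if (tp[num] < borrow)                      /* tp < np: keep tp         */
            for (j = 0; j < num; j++) rp[j] = tp[j];
        return 1;
    }

The assembly modules fuse the ap[j]*bp[i] and np[j]*m1 products into a single pass per word of bp (their L1st/Linner loops), unlike the two separate inner loops of this sketch, which is what keeps the multiplier busy.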
+ +# int bn_mul_mont( +$rp="a0"; # BN_ULONG *rp, +$ap="a1"; # const BN_ULONG *ap, +$bp="a2"; # const BN_ULONG *bp, +$np="a3"; # const BN_ULONG *np, +$n0="a4"; # const BN_ULONG *n0, +$num="a5"; # int num); + +$lo0="a6"; +$hi0="a7"; +$lo1="v0"; +$hi1="v1"; +$aj="t0"; +$bi="t1"; +$nj="t2"; +$tp="t3"; +$alo="s0"; +$ahi="s1"; +$nlo="s2"; +$nhi="s3"; +$tj="s4"; +$i="s5"; +$j="s6"; +$fp="t8"; +$m1="t9"; + +$FRAME=8*(2+8); + +$code=<<___; +#include <asm.h> +#include <regdef.h> + +.text + +.set noat +.set reorder + +.align 5 +.globl bn_mul_mont +.ent bn_mul_mont +bn_mul_mont: + .set noreorder + PTR_SUB sp,64 + move $fp,sp + .frame $fp,64,ra + slt AT,$num,4 + li v0,0 + beqzl AT,.Lproceed + nop + jr ra + PTR_ADD sp,$fp,64 + .set reorder +.align 5 +.Lproceed: + ld $n0,0($n0) + ld $bi,0($bp) # bp[0] + ld $aj,0($ap) # ap[0] + ld $nj,0($np) # np[0] + PTR_SUB sp,16 # place for two extra words + sll $num,3 + li AT,-4096 + PTR_SUB sp,$num + and sp,AT + + sd s0,0($fp) + sd s1,8($fp) + sd s2,16($fp) + sd s3,24($fp) + sd s4,32($fp) + sd s5,40($fp) + sd s6,48($fp) + sd s7,56($fp) + + dmultu $aj,$bi + ld $alo,8($ap) + ld $nlo,8($np) + mflo $lo0 + mfhi $hi0 + dmultu $lo0,$n0 + mflo $m1 + + dmultu $alo,$bi + mflo $alo + mfhi $ahi + + dmultu $nj,$m1 + mflo $lo1 + mfhi $hi1 + dmultu $nlo,$m1 + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + daddu $hi1,AT + mflo $nlo + mfhi $nhi + + move $tp,sp + li $j,16 +.align 4 +.L1st: + .set noreorder + PTR_ADD $aj,$ap,$j + ld $aj,($aj) + PTR_ADD $nj,$np,$j + ld $nj,($nj) + + dmultu $aj,$bi + daddu $lo0,$alo,$hi0 + daddu $lo1,$nlo,$hi1 + sltu AT,$lo0,$hi0 + sltu s7,$lo1,$hi1 + daddu $hi0,$ahi,AT + daddu $hi1,$nhi,s7 + mflo $alo + mfhi $ahi + + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + dmultu $nj,$m1 + daddu $hi1,AT + addu $j,8 + sd $lo1,($tp) + sltu s7,$j,$num + mflo $nlo + mfhi $nhi + + bnez s7,.L1st + PTR_ADD $tp,8 + .set reorder + + daddu $lo0,$alo,$hi0 + sltu AT,$lo0,$hi0 + daddu $hi0,$ahi,AT + + daddu $lo1,$nlo,$hi1 + sltu s7,$lo1,$hi1 + daddu $hi1,$nhi,s7 + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + daddu $hi1,AT + + sd $lo1,($tp) + + daddu $hi1,$hi0 + sltu AT,$hi1,$hi0 + sd $hi1,8($tp) + sd AT,16($tp) + + li $i,8 +.align 4 +.Louter: + PTR_ADD $bi,$bp,$i + ld $bi,($bi) + ld $aj,($ap) + ld $alo,8($ap) + ld $tj,(sp) + + dmultu $aj,$bi + ld $nj,($np) + ld $nlo,8($np) + mflo $lo0 + mfhi $hi0 + daddu $lo0,$tj + dmultu $lo0,$n0 + sltu AT,$lo0,$tj + daddu $hi0,AT + mflo $m1 + + dmultu $alo,$bi + mflo $alo + mfhi $ahi + + dmultu $nj,$m1 + mflo $lo1 + mfhi $hi1 + + dmultu $nlo,$m1 + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + daddu $hi1,AT + mflo $nlo + mfhi $nhi + + move $tp,sp + li $j,16 + ld $tj,8($tp) +.align 4 +.Linner: + .set noreorder + PTR_ADD $aj,$ap,$j + ld $aj,($aj) + PTR_ADD $nj,$np,$j + ld $nj,($nj) + + dmultu $aj,$bi + daddu $lo0,$alo,$hi0 + daddu $lo1,$nlo,$hi1 + sltu AT,$lo0,$hi0 + sltu s7,$lo1,$hi1 + daddu $hi0,$ahi,AT + daddu $hi1,$nhi,s7 + mflo $alo + mfhi $ahi + + daddu $lo0,$tj + addu $j,8 + dmultu $nj,$m1 + sltu AT,$lo0,$tj + daddu $lo1,$lo0 + daddu $hi0,AT + sltu s7,$lo1,$lo0 + ld $tj,16($tp) + daddu $hi1,s7 + sltu AT,$j,$num + mflo $nlo + mfhi $nhi + sd $lo1,($tp) + bnez AT,.Linner + PTR_ADD $tp,8 + .set reorder + + daddu $lo0,$alo,$hi0 + sltu AT,$lo0,$hi0 + daddu $hi0,$ahi,AT + daddu $lo0,$tj + sltu s7,$lo0,$tj + daddu $hi0,s7 + + ld $tj,16($tp) + daddu $lo1,$nlo,$hi1 + sltu AT,$lo1,$hi1 + daddu $hi1,$nhi,AT + daddu $lo1,$lo0 + sltu s7,$lo1,$lo0 + daddu $hi1,s7 + sd $lo1,($tp) + + daddu $lo1,$hi1,$hi0 + sltu $hi1,$lo1,$hi0 + daddu $lo1,$tj + sltu AT,$lo1,$tj + daddu $hi1,AT + sd 
$lo1,8($tp) + sd $hi1,16($tp) + + addu $i,8 + sltu s7,$i,$num + bnez s7,.Louter + + .set noreorder + PTR_ADD $tj,sp,$num # &tp[num] + move $tp,sp + move $ap,sp + li $hi0,0 # clear borrow bit + +.align 4 +.Lsub: ld $lo0,($tp) + ld $lo1,($np) + PTR_ADD $tp,8 + PTR_ADD $np,8 + dsubu $lo1,$lo0,$lo1 # tp[i]-np[i] + sgtu AT,$lo1,$lo0 + dsubu $lo0,$lo1,$hi0 + sgtu $hi0,$lo0,$lo1 + sd $lo0,($rp) + or $hi0,AT + sltu AT,$tp,$tj + bnez AT,.Lsub + PTR_ADD $rp,8 + + dsubu $hi0,$hi1,$hi0 # handle upmost overflow bit + move $tp,sp + PTR_SUB $rp,$num # restore rp + not $hi1,$hi0 + + and $ap,$hi0,sp + and $bp,$hi1,$rp + or $ap,$ap,$bp # ap=borrow?tp:rp + +.align 4 +.Lcopy: ld $aj,($ap) + PTR_ADD $ap,8 + PTR_ADD $tp,8 + sd zero,-8($tp) + sltu AT,$tp,$tj + sd $aj,($rp) + bnez AT,.Lcopy + PTR_ADD $rp,8 + + ld s0,0($fp) + ld s1,8($fp) + ld s2,16($fp) + ld s3,24($fp) + ld s4,32($fp) + ld s5,40($fp) + ld s6,48($fp) + ld s7,56($fp) + li v0,1 + jr ra + PTR_ADD sp,$fp,64 + .set reorder +END(bn_mul_mont) +.rdata +.asciiz "Montgomery Multiplication for MIPS III/IV, CRYPTOGAMS by <appro\@openssl.org>" +___ + +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/mo-586.pl b/openssl/crypto/bn/asm/mo-586.pl new file mode 100644 index 000000000..098229309 --- /dev/null +++ b/openssl/crypto/bn/asm/mo-586.pl @@ -0,0 +1,603 @@ +#!/usr/bin/env perl + +# This is crypto/bn/asm/x86-mont.pl (with asciz from crypto/perlasm/x86asm.pl) +# from OpenSSL 0.9.9-dev + +sub ::asciz +{ my @str=unpack("C*",shift); + push @str,0; + while ($#str>15) { + &data_byte(@str[0..15]); + foreach (0..15) { shift @str; } + } + &data_byte(@str) if (@str); +} + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005 +# +# This is a "teaser" code, as it can be improved in several ways... +# First of all non-SSE2 path should be implemented (yes, for now it +# performs Montgomery multiplication/convolution only on SSE2-capable +# CPUs such as P4, others fall down to original code). Then inner loop +# can be unrolled and modulo-scheduled to improve ILP and possibly +# moved to 128-bit XMM register bank (though it would require input +# rearrangement and/or increase bus bandwidth utilization). Dedicated +# squaring procedure should give further performance improvement... +# Yet, for being draft, the code improves rsa512 *sign* benchmark by +# 110%(!), rsa1024 one - by 70% and rsa4096 - by 20%:-) + +# December 2006 +# +# Modulo-scheduling SSE2 loops results in further 15-20% improvement. +# Integer-only code [being equipped with dedicated squaring procedure] +# gives ~40% on rsa512 sign benchmark... + +push(@INC,"perlasm","../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +$sse2=0; +for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } + +&external_label("OPENSSL_ia32cap_P") if ($sse2); + +&function_begin("bn_mul_mont"); + +$i="edx"; +$j="ecx"; +$ap="esi"; $tp="esi"; # overlapping variables!!! +$rp="edi"; $bp="edi"; # overlapping variables!!! 
+$np="ebp"; +$num="ebx"; + +$_num=&DWP(4*0,"esp"); # stack top layout +$_rp=&DWP(4*1,"esp"); +$_ap=&DWP(4*2,"esp"); +$_bp=&DWP(4*3,"esp"); +$_np=&DWP(4*4,"esp"); +$_n0=&DWP(4*5,"esp"); $_n0q=&QWP(4*5,"esp"); +$_sp=&DWP(4*6,"esp"); +$_bpend=&DWP(4*7,"esp"); +$frame=32; # size of above frame rounded up to 16n + + &xor ("eax","eax"); + &mov ("edi",&wparam(5)); # int num + &cmp ("edi",4); + &jl (&label("just_leave")); + + &lea ("esi",&wparam(0)); # put aside pointer to argument block + &lea ("edx",&wparam(1)); # load ap + &mov ("ebp","esp"); # saved stack pointer! + &add ("edi",2); # extra two words on top of tp + &neg ("edi"); + &lea ("esp",&DWP(-$frame,"esp","edi",4)); # alloca($frame+4*(num+2)) + &neg ("edi"); + + # minimize cache contention by arraning 2K window between stack + # pointer and ap argument [np is also position sensitive vector, + # but it's assumed to be near ap, as it's allocated at ~same + # time]. + &mov ("eax","esp"); + &sub ("eax","edx"); + &and ("eax",2047); + &sub ("esp","eax"); # this aligns sp and ap modulo 2048 + + &xor ("edx","esp"); + &and ("edx",2048); + &xor ("edx",2048); + &sub ("esp","edx"); # this splits them apart modulo 4096 + + &and ("esp",-64); # align to cache line + + ################################# load argument block... + &mov ("eax",&DWP(0*4,"esi"));# BN_ULONG *rp + &mov ("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap + &mov ("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp + &mov ("edx",&DWP(3*4,"esi"));# const BN_ULONG *np + &mov ("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0 + #&mov ("edi",&DWP(5*4,"esi"));# int num + + &mov ("esi",&DWP(0,"esi")); # pull n0[0] + &mov ($_rp,"eax"); # ... save a copy of argument block + &mov ($_ap,"ebx"); + &mov ($_bp,"ecx"); + &mov ($_np,"edx"); + &mov ($_n0,"esi"); + &lea ($num,&DWP(-3,"edi")); # num=num-1 to assist modulo-scheduling + #&mov ($_num,$num); # redundant as $num is not reused + &mov ($_sp,"ebp"); # saved stack pointer! 
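The 2KB/4KB juggling above is easier to follow in C. A hypothetical sketch of the pointer arithmetic, illustrative only (the real code operates on esp and the address of the argument block, which sits near the ap argument the comment refers to):

    #include <stdint.h>

    /* Illustrative sketch of the stack placement above: give sp the same
     * offset as ap within a 2KB block, force the two into different 2KB
     * halves of a 4KB page, then round down to a 64-byte cache line. */
    static char *place_stack_sketch(char *sp, const char *ap)
    {
        sp -= ((uintptr_t)sp - (uintptr_t)ap) & 2047;     /* same offset mod 2048 */
        uintptr_t d = ((uintptr_t)sp ^ (uintptr_t)ap) & 2048;
        sp -= d ^ 2048;                                   /* make bit 11 differ   */
        return (char *)((uintptr_t)sp & ~(uintptr_t)63);  /* cache-line align     */
    }

This mirrors the source comment's stated goal of minimizing cache contention between the temporary vector on the stack and the input vectors, nothing more.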
+ +if($sse2) { +$acc0="mm0"; # mmx register bank layout +$acc1="mm1"; +$car0="mm2"; +$car1="mm3"; +$mul0="mm4"; +$mul1="mm5"; +$temp="mm6"; +$mask="mm7"; + + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt (&DWP(0,"eax"),26); + &jnc (&label("non_sse2")); + + &mov ("eax",-1); + &movd ($mask,"eax"); # mask 32 lower bits + + &mov ($ap,$_ap); # load input pointers + &mov ($bp,$_bp); + &mov ($np,$_np); + + &xor ($i,$i); # i=0 + &xor ($j,$j); # j=0 + + &movd ($mul0,&DWP(0,$bp)); # bp[0] + &movd ($mul1,&DWP(0,$ap)); # ap[0] + &movd ($car1,&DWP(0,$np)); # np[0] + + &pmuludq($mul1,$mul0); # ap[0]*bp[0] + &movq ($car0,$mul1); + &movq ($acc0,$mul1); # I wish movd worked for + &pand ($acc0,$mask); # inter-register transfers + + &pmuludq($mul1,$_n0q); # *=n0 + + &pmuludq($car1,$mul1); # "t[0]"*np[0]*n0 + &paddq ($car1,$acc0); + + &movd ($acc1,&DWP(4,$np)); # np[1] + &movd ($acc0,&DWP(4,$ap)); # ap[1] + + &psrlq ($car0,32); + &psrlq ($car1,32); + + &inc ($j); # j++ +&set_label("1st",16); + &pmuludq($acc0,$mul0); # ap[j]*bp[0] + &pmuludq($acc1,$mul1); # np[j]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1] + &paddq ($car1,$acc0); # +=ap[j]*bp[0]; + &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1] + &psrlq ($car0,32); + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]= + &psrlq ($car1,32); + + &lea ($j,&DWP(1,$j)); + &cmp ($j,$num); + &jl (&label("1st")); + + &pmuludq($acc0,$mul0); # ap[num-1]*bp[0] + &pmuludq($acc1,$mul1); # np[num-1]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &paddq ($car1,$acc0); # +=ap[num-1]*bp[0]; + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]= + + &psrlq ($car0,32); + &psrlq ($car1,32); + + &paddq ($car1,$car0); + &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1] + + &inc ($i); # i++ +&set_label("outer"); + &xor ($j,$j); # j=0 + + &movd ($mul0,&DWP(0,$bp,$i,4)); # bp[i] + &movd ($mul1,&DWP(0,$ap)); # ap[0] + &movd ($temp,&DWP($frame,"esp")); # tp[0] + &movd ($car1,&DWP(0,$np)); # np[0] + &pmuludq($mul1,$mul0); # ap[0]*bp[i] + + &paddq ($mul1,$temp); # +=tp[0] + &movq ($acc0,$mul1); + &movq ($car0,$mul1); + &pand ($acc0,$mask); + + &pmuludq($mul1,$_n0q); # *=n0 + + &pmuludq($car1,$mul1); + &paddq ($car1,$acc0); + + &movd ($temp,&DWP($frame+4,"esp")); # tp[1] + &movd ($acc1,&DWP(4,$np)); # np[1] + &movd ($acc0,&DWP(4,$ap)); # ap[1] + + &psrlq ($car0,32); + &psrlq ($car1,32); + &paddq ($car0,$temp); # +=tp[1] + + &inc ($j); # j++ + &dec ($num); +&set_label("inner"); + &pmuludq($acc0,$mul0); # ap[j]*bp[i] + &pmuludq($acc1,$mul1); # np[j]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &movd ($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1] + &pand ($acc0,$mask); + &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1] + &paddq ($car1,$acc0); # +=ap[j]*bp[i]+tp[j] + &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1] + &psrlq ($car0,32); + &movd (&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]= + &psrlq ($car1,32); + &paddq ($car0,$temp); # +=tp[j+1] + + &dec ($num); + &lea ($j,&DWP(1,$j)); # j++ + &jnz (&label("inner")); + + &mov ($num,$j); + &pmuludq($acc0,$mul0); # ap[num-1]*bp[i] + &pmuludq($acc1,$mul1); # np[num-1]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &paddq ($car1,$acc0); # +=ap[num-1]*bp[i]+tp[num-1] + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]= + &psrlq ($car0,32); + &psrlq ($car1,32); + + &movd 
($temp,&DWP($frame+4,"esp",$num,4)); # += tp[num] + &paddq ($car1,$car0); + &paddq ($car1,$temp); + &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1] + + &lea ($i,&DWP(1,$i)); # i++ + &cmp ($i,$num); + &jle (&label("outer")); + + &emms (); # done with mmx bank + &jmp (&label("common_tail")); + +&set_label("non_sse2",16); +} + +if (0) { + &mov ("esp",$_sp); + &xor ("eax","eax"); # signal "not fast enough [yet]" + &jmp (&label("just_leave")); + # While the below code provides competitive performance for + # all key lengthes on modern Intel cores, it's still more + # than 10% slower for 4096-bit key elsewhere:-( "Competitive" + # means compared to the original integer-only assembler. + # 512-bit RSA sign is better by ~40%, but that's about all + # one can say about all CPUs... +} else { +$inp="esi"; # integer path uses these registers differently +$word="edi"; +$carry="ebp"; + + &mov ($inp,$_ap); + &lea ($carry,&DWP(1,$num)); + &mov ($word,$_bp); + &xor ($j,$j); # j=0 + &mov ("edx",$inp); + &and ($carry,1); # see if num is even + &sub ("edx",$word); # see if ap==bp + &lea ("eax",&DWP(4,$word,$num,4)); # &bp[num] + &or ($carry,"edx"); + &mov ($word,&DWP(0,$word)); # bp[0] + &jz (&label("bn_sqr_mont")); + &mov ($_bpend,"eax"); + &mov ("eax",&DWP(0,$inp)); + &xor ("edx","edx"); + +&set_label("mull",16); + &mov ($carry,"edx"); + &mul ($word); # ap[j]*bp[0] + &add ($carry,"eax"); + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1] + &cmp ($j,$num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("mull")); + + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*bp[0] + &mov ($word,$_n0); + &add ("eax",$carry); + &mov ($inp,$_np); + &adc ("edx",0); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &mov (&DWP($frame,"esp",$num,4),"eax"); # tp[num-1]= + &xor ($j,$j); + &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]= + &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]= + + &mov ("eax",&DWP(0,$inp)); # np[0] + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ("eax",&DWP(4,$inp)); # np[1] + &adc ("edx",0); + &inc ($j); + + &jmp (&label("2ndmadd")); + +&set_label("1stmadd",16); + &mov ($carry,"edx"); + &mul ($word); # ap[j]*bp[i] + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("1stmadd")); + + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*bp[i] + &add ("eax",&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &mov ($word,$_n0); + &adc ("edx",0); + &mov ($inp,$_np); + &add ($carry,"eax"); + &adc ("edx",0); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &xor ($j,$j); + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &mov (&DWP($frame,"esp",$num,4),$carry); # tp[num-1]= + &adc ($j,0); + &mov ("eax",&DWP(0,$inp)); # np[0] + &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]= + &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ("eax",&DWP(4,$inp)); # np[1] + &adc ("edx",0); + &mov ($j,1); + +&set_label("2ndmadd",16); + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+1] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j-1]= + 
&jl (&label("2ndmadd")); + + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &adc ("edx",0); + &add ($carry,"eax"); + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]= + + &xor ("eax","eax"); + &mov ($j,$_bp); # &bp[i] + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1] + &lea ($j,&DWP(4,$j)); + &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]= + &cmp ($j,$_bpend); + &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]= + &je (&label("common_tail")); + + &mov ($word,&DWP(0,$j)); # bp[i+1] + &mov ($inp,$_ap); + &mov ($_bp,$j); # &bp[++i] + &xor ($j,$j); + &xor ("edx","edx"); + &mov ("eax",&DWP(0,$inp)); + &jmp (&label("1stmadd")); + +&set_label("bn_sqr_mont",16); +$sbit=$num; + &mov ($_num,$num); + &mov ($_bp,$j); # i=0 + + &mov ("eax",$word); # ap[0] + &mul ($word); # ap[0]*ap[0] + &mov (&DWP($frame,"esp"),"eax"); # tp[0]= + &mov ($sbit,"edx"); + &shr ("edx",1); + &and ($sbit,1); + &inc ($j); +&set_label("sqr",16); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j] + &mov ($carry,"edx"); + &mul ($word); # ap[j]*ap[0] + &add ("eax",$carry); + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &lea ($carry,&DWP(0,$sbit,"eax",2)); + &shr ("eax",31); + &cmp ($j,$_num); + &mov ($sbit,"eax"); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("sqr")); + + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[num-1] + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*ap[0] + &add ("eax",$carry); + &mov ($word,$_n0); + &adc ("edx",0); + &mov ($inp,$_np); + &lea ($carry,&DWP(0,$sbit,"eax",2)); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + &shr ("eax",31); + &mov (&DWP($frame,"esp",$j,4),$carry); # tp[num-1]= + + &lea ($carry,&DWP(0,"eax","edx",2)); + &mov ("eax",&DWP(0,$inp)); # np[0] + &shr ("edx",31); + &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num]= + &mov (&DWP($frame+8,"esp",$j,4),"edx"); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ($num,$j); + &adc ("edx",0); + &mov ("eax",&DWP(4,$inp)); # np[1] + &mov ($j,1); + +&set_label("3rdmadd",16); + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(4,$inp,$j,4)); # np[j+1] + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j-1]= + + &mov ($carry,"edx"); + &mul ($word); # np[j+1]*m + &add ($carry,&DWP($frame+4,"esp",$j,4)); # +=tp[j+1] + &lea ($j,&DWP(2,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+2] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j]= + &jl (&label("3rdmadd")); + + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &adc ("edx",0); + &add ($carry,"eax"); + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]= + + &mov ($j,$_bp); # i + &xor ("eax","eax"); + &mov ($inp,$_ap); + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1] + &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]= + &cmp ($j,$num); + &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]= + &je (&label("common_tail")); + + &mov ($word,&DWP(4,$inp,$j,4)); # ap[i] + &lea ($j,&DWP(1,$j)); + &mov ("eax",$word); + &mov ($_bp,$j); # ++i + &mul ($word); # ap[i]*ap[i] + &add ("eax",&DWP($frame,"esp",$j,4)); # +=tp[i] + &adc ("edx",0); + &mov 
(&DWP($frame,"esp",$j,4),"eax"); # tp[i]= + &xor ($carry,$carry); + &cmp ($j,$num); + &lea ($j,&DWP(1,$j)); + &je (&label("sqrlast")); + + &mov ($sbit,"edx"); # zaps $num + &shr ("edx",1); + &and ($sbit,1); +&set_label("sqradd",16); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j] + &mov ($carry,"edx"); + &mul ($word); # ap[j]*ap[i] + &add ("eax",$carry); + &lea ($carry,&DWP(0,"eax","eax")); + &adc ("edx",0); + &shr ("eax",31); + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("eax",0); + &add ($carry,$sbit); + &adc ("eax",0); + &cmp ($j,$_num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &mov ($sbit,"eax"); + &jle (&label("sqradd")); + + &mov ($carry,"edx"); + &lea ("edx",&DWP(0,$sbit,"edx",2)); + &shr ($carry,31); +&set_label("sqrlast"); + &mov ($word,$_n0); + &mov ($inp,$_np); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &add ("edx",&DWP($frame,"esp",$j,4)); # +=tp[num] + &mov ("eax",&DWP(0,$inp)); # np[0] + &adc ($carry,0); + &mov (&DWP($frame,"esp",$j,4),"edx"); # tp[num]= + &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &lea ($num,&DWP(-1,$j)); + &adc ("edx",0); + &mov ($j,1); + &mov ("eax",&DWP(4,$inp)); # np[1] + + &jmp (&label("3rdmadd")); +} + +&set_label("common_tail",16); + &mov ($np,$_np); # load modulus pointer + &mov ($rp,$_rp); # load result pointer + &lea ($tp,&DWP($frame,"esp")); # [$ap and $bp are zapped] + + &mov ("eax",&DWP(0,$tp)); # tp[0] + &mov ($j,$num); # j=num-1 + &xor ($i,$i); # i=0 and clear CF! + +&set_label("sub",16); + &sbb ("eax",&DWP(0,$np,$i,4)); + &mov (&DWP(0,$rp,$i,4),"eax"); # rp[i]=tp[i]-np[i] + &dec ($j); # doesn't affect CF! + &mov ("eax",&DWP(4,$tp,$i,4)); # tp[i+1] + &lea ($i,&DWP(1,$i)); # i++ + &jge (&label("sub")); + + &sbb ("eax",0); # handle upmost overflow bit + &and ($tp,"eax"); + ¬ ("eax"); + &mov ($np,$rp); + &and ($np,"eax"); + &or ($tp,$np); # tp=carry?tp:rp + +&set_label("copy",16); # copy or in-place refresh + &mov ("eax",&DWP(0,$tp,$num,4)); + &mov (&DWP(0,$rp,$num,4),"eax"); # rp[i]=tp[i] + &mov (&DWP($frame,"esp",$num,4),$j); # zap temporary vector + &dec ($num); + &jge (&label("copy")); + + &mov ("esp",$_sp); # pull saved stack pointer + &mov ("eax",1); +&set_label("just_leave"); +&function_end("bn_mul_mont"); + +&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>"); + +&asm_finish(); diff --git a/openssl/crypto/bn/asm/ppc-mont.pl b/openssl/crypto/bn/asm/ppc-mont.pl new file mode 100644 index 000000000..7849eae95 --- /dev/null +++ b/openssl/crypto/bn/asm/ppc-mont.pl @@ -0,0 +1,323 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# April 2006 + +# "Teaser" Montgomery multiplication module for PowerPC. It's possible +# to gain a bit more by modulo-scheduling outer loop, then dedicated +# squaring procedure should give further 20% and code can be adapted +# for 32-bit application running on 64-bit CPU. As for the latter. 
+# It won't be able to achieve "native" 64-bit performance, because in +# 32-bit application context every addc instruction will have to be +# expanded as addc, twice right shift by 32 and finally adde, etc. +# So far RSA *sign* performance improvement over pre-bn_mul_mont asm +# for 64-bit application running on PPC970/G5 is: +# +# 512-bit +65% +# 1024-bit +35% +# 2048-bit +18% +# 4096-bit +4% + +$flavour = shift; + +if ($flavour =~ /32/) { + $BITS= 32; + $BNSZ= $BITS/8; + $SIZE_T=4; + $RZONE= 224; + $FRAME= $SIZE_T*16; + + $LD= "lwz"; # load + $LDU= "lwzu"; # load and update + $LDX= "lwzx"; # load indexed + $ST= "stw"; # store + $STU= "stwu"; # store and update + $STX= "stwx"; # store indexed + $STUX= "stwux"; # store indexed and update + $UMULL= "mullw"; # unsigned multiply low + $UMULH= "mulhwu"; # unsigned multiply high + $UCMP= "cmplw"; # unsigned compare + $SHRI= "srwi"; # unsigned shift right by immediate + $PUSH= $ST; + $POP= $LD; +} elsif ($flavour =~ /64/) { + $BITS= 64; + $BNSZ= $BITS/8; + $SIZE_T=8; + $RZONE= 288; + $FRAME= $SIZE_T*16; + + # same as above, but 64-bit mnemonics... + $LD= "ld"; # load + $LDU= "ldu"; # load and update + $LDX= "ldx"; # load indexed + $ST= "std"; # store + $STU= "stdu"; # store and update + $STX= "stdx"; # store indexed + $STUX= "stdux"; # store indexed and update + $UMULL= "mulld"; # unsigned multiply low + $UMULH= "mulhdu"; # unsigned multiply high + $UCMP= "cmpld"; # unsigned compare + $SHRI= "srdi"; # unsigned shift right by immediate + $PUSH= $ST; + $POP= $LD; +} else { die "nonsense $flavour"; } + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$sp="r1"; +$toc="r2"; +$rp="r3"; $ovf="r3"; +$ap="r4"; +$bp="r5"; +$np="r6"; +$n0="r7"; +$num="r8"; +$rp="r9"; # $rp is reassigned +$aj="r10"; +$nj="r11"; +$tj="r12"; +# non-volatile registers +$i="r14"; +$j="r15"; +$tp="r16"; +$m0="r17"; +$m1="r18"; +$lo0="r19"; +$hi0="r20"; +$lo1="r21"; +$hi1="r22"; +$alo="r23"; +$ahi="r24"; +$nlo="r25"; +# +$nhi="r0"; + +$code=<<___; +.machine "any" +.text + +.globl .bn_mul_mont +.align 4 +.bn_mul_mont: + cmpwi $num,4 + mr $rp,r3 ; $rp is reassigned + li r3,0 + bltlr + + slwi $num,$num,`log($BNSZ)/log(2)` + li $tj,-4096 + addi $ovf,$num,`$FRAME+$RZONE` + subf $ovf,$ovf,$sp ; $sp-$ovf + and $ovf,$ovf,$tj ; minimize TLB usage + subf $ovf,$sp,$ovf ; $ovf-$sp + srwi $num,$num,`log($BNSZ)/log(2)` + $STUX $sp,$sp,$ovf + + $PUSH r14,`4*$SIZE_T`($sp) + $PUSH r15,`5*$SIZE_T`($sp) + $PUSH r16,`6*$SIZE_T`($sp) + $PUSH r17,`7*$SIZE_T`($sp) + $PUSH r18,`8*$SIZE_T`($sp) + $PUSH r19,`9*$SIZE_T`($sp) + $PUSH r20,`10*$SIZE_T`($sp) + $PUSH r21,`11*$SIZE_T`($sp) + $PUSH r22,`12*$SIZE_T`($sp) + $PUSH r23,`13*$SIZE_T`($sp) + $PUSH r24,`14*$SIZE_T`($sp) + $PUSH r25,`15*$SIZE_T`($sp) + + $LD $n0,0($n0) ; pull n0[0] value + addi $num,$num,-2 ; adjust $num for counter register + + $LD $m0,0($bp) ; m0=bp[0] + $LD $aj,0($ap) ; ap[0] + addi $tp,$sp,$FRAME + $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0] + $UMULH $hi0,$aj,$m0 + + $LD $aj,$BNSZ($ap) ; ap[1] + $LD $nj,0($np) ; np[0] + + $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0 + + $UMULL $alo,$aj,$m0 ; ap[1]*bp[0] + $UMULH $ahi,$aj,$m0 + + $UMULL $lo1,$nj,$m1 ; np[0]*m1 + $UMULH $hi1,$nj,$m1 + $LD $nj,$BNSZ($np) ; np[1] + addc $lo1,$lo1,$lo0 + addze $hi1,$hi1 + + $UMULL $nlo,$nj,$m1 ; np[1]*m1 + $UMULH $nhi,$nj,$m1 + + mtctr $num + li $j,`2*$BNSZ` 
+.align 4 +L1st: + $LDX $aj,$ap,$j ; ap[j] + addc $lo0,$alo,$hi0 + $LDX $nj,$np,$j ; np[j] + addze $hi0,$ahi + $UMULL $alo,$aj,$m0 ; ap[j]*bp[0] + addc $lo1,$nlo,$hi1 + $UMULH $ahi,$aj,$m0 + addze $hi1,$nhi + $UMULL $nlo,$nj,$m1 ; np[j]*m1 + addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0] + $UMULH $nhi,$nj,$m1 + addze $hi1,$hi1 + $ST $lo1,0($tp) ; tp[j-1] + + addi $j,$j,$BNSZ ; j++ + addi $tp,$tp,$BNSZ ; tp++ + bdnz- L1st +;L1st + addc $lo0,$alo,$hi0 + addze $hi0,$ahi + + addc $lo1,$nlo,$hi1 + addze $hi1,$nhi + addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0] + addze $hi1,$hi1 + $ST $lo1,0($tp) ; tp[j-1] + + li $ovf,0 + addc $hi1,$hi1,$hi0 + addze $ovf,$ovf ; upmost overflow bit + $ST $hi1,$BNSZ($tp) + + li $i,$BNSZ +.align 4 +Louter: + $LDX $m0,$bp,$i ; m0=bp[i] + $LD $aj,0($ap) ; ap[0] + addi $tp,$sp,$FRAME + $LD $tj,$FRAME($sp) ; tp[0] + $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i] + $UMULH $hi0,$aj,$m0 + $LD $aj,$BNSZ($ap) ; ap[1] + $LD $nj,0($np) ; np[0] + addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0] + $UMULL $alo,$aj,$m0 ; ap[j]*bp[i] + addze $hi0,$hi0 + $UMULL $m1,$lo0,$n0 ; tp[0]*n0 + $UMULH $ahi,$aj,$m0 + $UMULL $lo1,$nj,$m1 ; np[0]*m1 + $UMULH $hi1,$nj,$m1 + $LD $nj,$BNSZ($np) ; np[1] + addc $lo1,$lo1,$lo0 + $UMULL $nlo,$nj,$m1 ; np[1]*m1 + addze $hi1,$hi1 + $UMULH $nhi,$nj,$m1 + + mtctr $num + li $j,`2*$BNSZ` +.align 4 +Linner: + $LDX $aj,$ap,$j ; ap[j] + addc $lo0,$alo,$hi0 + $LD $tj,$BNSZ($tp) ; tp[j] + addze $hi0,$ahi + $LDX $nj,$np,$j ; np[j] + addc $lo1,$nlo,$hi1 + $UMULL $alo,$aj,$m0 ; ap[j]*bp[i] + addze $hi1,$nhi + $UMULH $ahi,$aj,$m0 + addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j] + $UMULL $nlo,$nj,$m1 ; np[j]*m1 + addze $hi0,$hi0 + $UMULH $nhi,$nj,$m1 + addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j] + addi $j,$j,$BNSZ ; j++ + addze $hi1,$hi1 + $ST $lo1,0($tp) ; tp[j-1] + addi $tp,$tp,$BNSZ ; tp++ + bdnz- Linner +;Linner + $LD $tj,$BNSZ($tp) ; tp[j] + addc $lo0,$alo,$hi0 + addze $hi0,$ahi + addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j] + addze $hi0,$hi0 + + addc $lo1,$nlo,$hi1 + addze $hi1,$nhi + addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j] + addze $hi1,$hi1 + $ST $lo1,0($tp) ; tp[j-1] + + addic $ovf,$ovf,-1 ; move upmost overflow to XER[CA] + li $ovf,0 + adde $hi1,$hi1,$hi0 + addze $ovf,$ovf + $ST $hi1,$BNSZ($tp) +; + slwi $tj,$num,`log($BNSZ)/log(2)` + $UCMP $i,$tj + addi $i,$i,$BNSZ + ble- Louter + + addi $num,$num,2 ; restore $num + subfc $j,$j,$j ; j=0 and "clear" XER[CA] + addi $tp,$sp,$FRAME + mtctr $num + +.align 4 +Lsub: $LDX $tj,$tp,$j + $LDX $nj,$np,$j + subfe $aj,$nj,$tj ; tp[j]-np[j] + $STX $aj,$rp,$j + addi $j,$j,$BNSZ + bdnz- Lsub + + li $j,0 + mtctr $num + subfe $ovf,$j,$ovf ; handle upmost overflow bit + and $ap,$tp,$ovf + andc $np,$rp,$ovf + or $ap,$ap,$np ; ap=borrow?tp:rp + +.align 4 +Lcopy: ; copy or in-place refresh + $LDX $tj,$ap,$j + $STX $tj,$rp,$j + $STX $j,$tp,$j ; zap at once + addi $j,$j,$BNSZ + bdnz- Lcopy + + $POP r14,`4*$SIZE_T`($sp) + $POP r15,`5*$SIZE_T`($sp) + $POP r16,`6*$SIZE_T`($sp) + $POP r17,`7*$SIZE_T`($sp) + $POP r18,`8*$SIZE_T`($sp) + $POP r19,`9*$SIZE_T`($sp) + $POP r20,`10*$SIZE_T`($sp) + $POP r21,`11*$SIZE_T`($sp) + $POP r22,`12*$SIZE_T`($sp) + $POP r23,`13*$SIZE_T`($sp) + $POP r24,`14*$SIZE_T`($sp) + $POP r25,`15*$SIZE_T`($sp) + $POP $sp,0($sp) + li r3,1 + blr + .long 0 +.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@fy.chalmers.se>" +___ + +$code =~ s/\`([^\`]*)\`/eval $1/gem; +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/ppc.pl b/openssl/crypto/bn/asm/ppc.pl new file mode 100644 index 000000000..08e005347 --- /dev/null 
+++ b/openssl/crypto/bn/asm/ppc.pl @@ -0,0 +1,2078 @@ +#!/usr/bin/env perl +# +# Implemented as a Perl wrapper as we want to support several different +# architectures with single file. We pick up the target based on the +# file name we are asked to generate. +# +# It should be noted though that this perl code is nothing like +# <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much +# as pre-processor to cover for platform differences in name decoration, +# linker tables, 32-/64-bit instruction sets... +# +# As you might know there're several PowerPC ABI in use. Most notably +# Linux and AIX use different 32-bit ABIs. Good news are that these ABIs +# are similar enough to implement leaf(!) functions, which would be ABI +# neutral. And that's what you find here: ABI neutral leaf functions. +# In case you wonder what that is... +# +# AIX performance +# +# MEASUREMENTS WITH cc ON a 200 MhZ PowerPC 604e. +# +# The following is the performance of 32-bit compiler +# generated code: +# +# OpenSSL 0.9.6c 21 dec 2001 +# built on: Tue Jun 11 11:06:51 EDT 2002 +# options:bn(64,32) ... +#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3 +# sign verify sign/s verify/s +#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6 +#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5 +#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1 +#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4 +#dsa 512 bits 0.0087s 0.0106s 114.3 94.5 +#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0 +# +# Same bechmark with this assembler code: +# +#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2 +#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1 +#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2 +#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7 +#dsa 512 bits 0.0052s 0.0062s 191.6 162.0 +#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5 +# +# Number of operations increases by at almost 75% +# +# Here are performance numbers for 64-bit compiler +# generated code: +# +# OpenSSL 0.9.6g [engine] 9 Aug 2002 +# built on: Fri Apr 18 16:59:20 EDT 2003 +# options:bn(64,64) ... +# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3 +# sign verify sign/s verify/s +#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4 +#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7 +#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0 +#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1 +#dsa 512 bits 0.0026s 0.0032s 382.5 313.7 +#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6 +# +# Same benchmark with this assembler code: +# +#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7 +#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3 +#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5 +#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0 +#dsa 512 bits 0.0016s 0.0020s 610.7 507.1 +#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2 +# +# Again, performance increases by at about 75% +# +# Mac OS X, Apple G5 1.8GHz (Note this is 32 bit code) +# OpenSSL 0.9.7c 30 Sep 2003 +# +# Original code. 
+# +#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5 +#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1 +#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4 +#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4 +#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5 +#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7 +#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6 +# +# Same benchmark with this assembler code: +# +#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9 +#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6 +#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5 +#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6 +#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2 +#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2 +#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8 +# +# Performance increase of ~60% +# +# If you have comments or suggestions to improve code send +# me a note at schari@us.ibm.com +# + +$opf = shift; + +if ($opf =~ /32\.s/) { + $BITS= 32; + $BNSZ= $BITS/8; + $ISA= "\"ppc\""; + + $LD= "lwz"; # load + $LDU= "lwzu"; # load and update + $ST= "stw"; # store + $STU= "stwu"; # store and update + $UMULL= "mullw"; # unsigned multiply low + $UMULH= "mulhwu"; # unsigned multiply high + $UDIV= "divwu"; # unsigned divide + $UCMPI= "cmplwi"; # unsigned compare with immediate + $UCMP= "cmplw"; # unsigned compare + $CNTLZ= "cntlzw"; # count leading zeros + $SHL= "slw"; # shift left + $SHR= "srw"; # unsigned shift right + $SHRI= "srwi"; # unsigned shift right by immediate + $SHLI= "slwi"; # shift left by immediate + $CLRU= "clrlwi"; # clear upper bits + $INSR= "insrwi"; # insert right + $ROTL= "rotlwi"; # rotate left by immediate + $TR= "tw"; # conditional trap +} elsif ($opf =~ /64\.s/) { + $BITS= 64; + $BNSZ= $BITS/8; + $ISA= "\"ppc64\""; + + # same as above, but 64-bit mnemonics... + $LD= "ld"; # load + $LDU= "ldu"; # load and update + $ST= "std"; # store + $STU= "stdu"; # store and update + $UMULL= "mulld"; # unsigned multiply low + $UMULH= "mulhdu"; # unsigned multiply high + $UDIV= "divdu"; # unsigned divide + $UCMPI= "cmpldi"; # unsigned compare with immediate + $UCMP= "cmpld"; # unsigned compare + $CNTLZ= "cntlzd"; # count leading zeros + $SHL= "sld"; # shift left + $SHR= "srd"; # unsigned shift right + $SHRI= "srdi"; # unsigned shift right by immediate + $SHLI= "sldi"; # shift left by immediate + $CLRU= "clrldi"; # clear upper bits + $INSR= "insrdi"; # insert right + $ROTL= "rotldi"; # rotate left by immediate + $TR= "td"; # conditional trap +} else { die "nonsense $opf"; } + +( defined shift || open STDOUT,">$opf" ) || die "can't open $opf: $!"; + +# function entry points from the AIX code +# +# There are other, more elegant, ways to handle this. We (IBM) chose +# this approach as it plays well with scripts we run to 'namespace' +# OpenSSL .i.e. we add a prefix to all the public symbols so we can +# co-exist in the same process with other implementations of OpenSSL. +# 'cleverer' ways of doing these substitutions tend to hide data we +# need to be obvious. 
+# +my @items = ("bn_sqr_comba4", + "bn_sqr_comba8", + "bn_mul_comba4", + "bn_mul_comba8", + "bn_sub_words", + "bn_add_words", + "bn_div_words", + "bn_sqr_words", + "bn_mul_words", + "bn_mul_add_words"); + +if ($opf =~ /linux/) { do_linux(); } +elsif ($opf =~ /aix/) { do_aix(); } +elsif ($opf =~ /osx/) { do_osx(); } +else { do_bsd(); } + +sub do_linux { + $d=&data(); + + if ($BITS==64) { + foreach $t (@items) { + $d =~ s/\.$t:/\ +\t.section\t".opd","aw"\ +\t.align\t3\ +\t.globl\t$t\ +$t:\ +\t.quad\t.$t,.TOC.\@tocbase,0\ +\t.size\t$t,24\ +\t.previous\n\ +\t.type\t.$t,\@function\ +\t.globl\t.$t\ +.$t:/g; + } + } + else { + foreach $t (@items) { + $d=~s/\.$t/$t/g; + } + } + # hide internal labels to avoid pollution of name table... + $d=~s/Lppcasm_/.Lppcasm_/gm; + print $d; +} + +sub do_aix { + # AIX assembler is smart enough to please the linker without + # making us do something special... + print &data(); +} + +# MacOSX 32 bit +sub do_osx { + $d=&data(); + # Change the bn symbol prefix from '.' to '_' + foreach $t (@items) { + $d=~s/\.$t/_$t/g; + } + # Change .machine to something OS X asm will accept + $d=~s/\.machine.*/.text/g; + $d=~s/\#/;/g; # change comment from '#' to ';' + print $d; +} + +# BSD (Untested) +sub do_bsd { + $d=&data(); + foreach $t (@items) { + $d=~s/\.$t/_$t/g; + } + print $d; +} + +sub data { + local($data)=<<EOF; +#-------------------------------------------------------------------- +# +# +# +# +# File: ppc32.s +# +# Created by: Suresh Chari +# IBM Thomas J. Watson Research Library +# Hawthorne, NY +# +# +# Description: Optimized assembly routines for OpenSSL crypto +# on the 32 bitPowerPC platform. +# +# +# Version History +# +# 2. Fixed bn_add,bn_sub and bn_div_words, added comments, +# cleaned up code. Also made a single version which can +# be used for both the AIX and Linux compilers. See NOTE +# below. +# 12/05/03 Suresh Chari +# (with lots of help from) Andy Polyakov +## +# 1. Initial version 10/20/02 Suresh Chari +# +# +# The following file works for the xlc,cc +# and gcc compilers. +# +# NOTE: To get the file to link correctly with the gcc compiler +# you have to change the names of the routines and remove +# the first .(dot) character. This should automatically +# be done in the build process. +# +# Hand optimized assembly code for the following routines +# +# bn_sqr_comba4 +# bn_sqr_comba8 +# bn_mul_comba4 +# bn_mul_comba8 +# bn_sub_words +# bn_add_words +# bn_div_words +# bn_sqr_words +# bn_mul_words +# bn_mul_add_words +# +# NOTE: It is possible to optimize this code more for +# specific PowerPC or Power architectures. On the Northstar +# architecture the optimizations in this file do +# NOT provide much improvement. +# +# If you have comments or suggestions to improve code send +# me a note at schari\@us.ibm.com +# +#-------------------------------------------------------------------------- +# +# Defines to be used in the assembly code. +# +.set r0,0 # we use it as storage for value of 0 +.set SP,1 # preserved +.set RTOC,2 # preserved +.set r3,3 # 1st argument/return value +.set r4,4 # 2nd argument/volatile register +.set r5,5 # 3rd argument/volatile register +.set r6,6 # ... +.set r7,7 +.set r8,8 +.set r9,9 +.set r10,10 +.set r11,11 +.set r12,12 +.set r13,13 # not used, nor any other "below" it... 
+ +.set BO_IF_NOT,4 +.set BO_IF,12 +.set BO_dCTR_NZERO,16 +.set BO_dCTR_ZERO,18 +.set BO_ALWAYS,20 +.set CR0_LT,0; +.set CR0_GT,1; +.set CR0_EQ,2 +.set CR1_FX,4; +.set CR1_FEX,5; +.set CR1_VX,6 +.set LR,8 + +# Declare function names to be global +# NOTE: For gcc these names MUST be changed to remove +# the first . i.e. for example change ".bn_sqr_comba4" +# to "bn_sqr_comba4". This should be automatically done +# in the build. + + .globl .bn_sqr_comba4 + .globl .bn_sqr_comba8 + .globl .bn_mul_comba4 + .globl .bn_mul_comba8 + .globl .bn_sub_words + .globl .bn_add_words + .globl .bn_div_words + .globl .bn_sqr_words + .globl .bn_mul_words + .globl .bn_mul_add_words + +# .text section + + .machine $ISA + +# +# NOTE: The following label name should be changed to +# "bn_sqr_comba4" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_sqr_comba4: +# +# Optimized version of bn_sqr_comba4. +# +# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) +# r3 contains r +# r4 contains a +# +# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows: +# +# r5,r6 are the two BN_ULONGs being multiplied. +# r7,r8 are the results of the 32x32 giving 64 bit multiply. +# r9,r10, r11 are the equivalents of c1,c2, c3. +# Here's the assembly +# +# + xor r0,r0,r0 # set r0 = 0. Used in the addze + # instructions below + + #sqr_add_c(a,0,c1,c2,c3) + $LD r5,`0*$BNSZ`(r4) + $UMULL r9,r5,r5 + $UMULH r10,r5,r5 #in first iteration. No need + #to add since c1=c2=c3=0. + # Note c3(r11) is NOT set to 0 + # but will be. + + $ST r9,`0*$BNSZ`(r3) # r[0]=c1; + # sqr_add_c2(a,1,0,c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8) + adde r8,r8,r8 + addze r9,r0 # catch carry if any. + # r9= r0(=0) and carry + + addc r10,r7,r10 # now add to temp result. 
+ addze r11,r8 # r8 added to r11 which is 0 + addze r9,r9 + + $ST r10,`1*$BNSZ`(r3) #r[1]=c2; + #sqr_add_c(a,1,c3,c1,c2) + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + #sqr_add_c2(a,2,0,c3,c1,c2) + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r7,r7,r7 + adde r8,r8,r8 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`2*$BNSZ`(r3) #r[2]=c3 + #sqr_add_c2(a,3,0,c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r7,r7,r7 + adde r8,r8,r8 + addze r11,r0 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,2,1,c1,c2,c3); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r7,r7,r7 + adde r8,r8,r8 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`3*$BNSZ`(r3) #r[3]=c1 + #sqr_add_c(a,2,c2,c3,c1); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + #sqr_add_c2(a,3,1,c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r7,r7,r7 + adde r8,r8,r8 + addze r9,r9 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`4*$BNSZ`(r3) #r[4]=c2 + #sqr_add_c2(a,3,2,c3,c1,c2); + $LD r5,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r7,r7,r7 + adde r8,r8,r8 + addze r10,r0 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`5*$BNSZ`(r3) #r[5] = c3 + #sqr_add_c(a,3,c1,c2,c3); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r9,r7,r9 + adde r10,r8,r10 + + $ST r9,`6*$BNSZ`(r3) #r[6]=c1 + $ST r10,`7*$BNSZ`(r3) #r[7]=c2 + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_sqr_comba8" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_sqr_comba8: +# +# This is an optimized version of the bn_sqr_comba8 routine. +# Tightly uses the adde instruction +# +# +# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) +# r3 contains r +# r4 contains a +# +# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows: +# +# r5,r6 are the two BN_ULONGs being multiplied. +# r7,r8 are the results of the 32x32 giving 64 bit multiply. +# r9,r10, r11 are the equivalents of c1,c2, c3. +# +# Possible optimization of loading all 8 longs of a into registers +# doesnt provide any speedup +# + + xor r0,r0,r0 #set r0 = 0.Used in addze + #instructions below. + + #sqr_add_c(a,0,c1,c2,c3); + $LD r5,`0*$BNSZ`(r4) + $UMULL r9,r5,r5 #1st iteration: no carries. + $UMULH r10,r5,r5 + $ST r9,`0*$BNSZ`(r3) # r[0]=c1; + #sqr_add_c2(a,1,0,c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 #add the two register number + adde r11,r8,r0 # (r8,r7) to the three register + addze r9,r0 # number (r9,r11,r10).NOTE:r0=0 + + addc r10,r7,r10 #add the two register number + adde r11,r8,r11 # (r8,r7) to the three register + addze r9,r9 # number (r9,r11,r10). + + $ST r10,`1*$BNSZ`(r3) # r[1]=c2 + + #sqr_add_c(a,1,c3,c1,c2); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + #sqr_add_c2(a,2,0,c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + $ST r11,`2*$BNSZ`(r3) #r[2]=c3 + #sqr_add_c2(a,3,0,c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0]. 
+ $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,2,1,c1,c2,c3); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + $ST r9,`3*$BNSZ`(r3) #r[3]=c1; + #sqr_add_c(a,2,c2,c3,c1); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + #sqr_add_c2(a,3,1,c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,4,0,c2,c3,c1); + $LD r5,`0*$BNSZ`(r4) + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`4*$BNSZ`(r3) #r[4]=c2; + #sqr_add_c2(a,5,0,c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,4,1,c3,c1,c2); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,3,2,c3,c1,c2); + $LD r5,`2*$BNSZ`(r4) + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`5*$BNSZ`(r3) #r[5]=c3; + #sqr_add_c(a,3,c1,c2,c3); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + #sqr_add_c2(a,4,2,c1,c2,c3); + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,5,1,c1,c2,c3); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,6,0,c1,c2,c3); + $LD r5,`0*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`6*$BNSZ`(r3) #r[6]=c1; + #sqr_add_c2(a,7,0,c2,c3,c1); + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,6,1,c2,c3,c1); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,5,2,c2,c3,c1); + $LD r5,`2*$BNSZ`(r4) + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,4,3,c2,c3,c1); + $LD r5,`3*$BNSZ`(r4) + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`7*$BNSZ`(r3) #r[7]=c2; + #sqr_add_c(a,4,c3,c1,c2); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + #sqr_add_c2(a,5,3,c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 
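Every #sqr_add_c(...) and #sqr_add_c2(...) group in these comba routines, including the ones just above and below this point, follows the same pattern: a full product is folded into a rolling three-word column accumulator (c1,c2,c3) through an addc/adde/addze chain, and cross terms a[i]*a[j] with i != j are added twice, the doubling carry being caught in the third word. A rough C sketch of what one group computes, written for the 32-bit BN_ULONG case (illustration only, not code taken from OpenSSL):

#include <stdint.h>

typedef struct { uint32_t c1, c2, c3; } acc3;  /* rolling column accumulator */

/* sqr_add_c: acc += a[i]^2, mirroring one addc/adde/addze triple */
static void sqr_add_c(acc3 *c, uint32_t ai)
{
    uint64_t p = (uint64_t)ai * ai;
    uint64_t s = (uint64_t)c->c1 + (uint32_t)p;              /* addc  */
    c->c1 = (uint32_t)s;
    s = (uint64_t)c->c2 + (uint32_t)(p >> 32) + (s >> 32);   /* adde  */
    c->c2 = (uint32_t)s;
    c->c3 += (uint32_t)(s >> 32);                            /* addze */
}

/* sqr_add_c2: acc += 2*a[i]*a[j]; the product is doubled first and the
   carry out of the doubling lands in the third accumulator word */
static void sqr_add_c2(acc3 *c, uint32_t ai, uint32_t aj)
{
    uint64_t p   = (uint64_t)ai * aj;
    uint32_t top = (uint32_t)(p >> 63);                      /* carry of 2*p */
    p <<= 1;
    uint64_t s = (uint64_t)c->c1 + (uint32_t)p;
    c->c1 = (uint32_t)s;
    s = (uint64_t)c->c2 + (uint32_t)(p >> 32) + (s >> 32);
    c->c2 = (uint32_t)s;
    c->c3 += top + (uint32_t)(s >> 32);
}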
+ #sqr_add_c2(a,6,2,c3,c1,c2); + $LD r5,`2*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,7,1,c3,c1,c2); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`8*$BNSZ`(r3) #r[8]=c3; + #sqr_add_c2(a,7,2,c1,c2,c3); + $LD r5,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,6,3,c1,c2,c3); + $LD r5,`3*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,5,4,c1,c2,c3); + $LD r5,`4*$BNSZ`(r4) + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`9*$BNSZ`(r3) #r[9]=c1; + #sqr_add_c(a,5,c2,c3,c1); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + #sqr_add_c2(a,6,4,c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,7,3,c2,c3,c1); + $LD r5,`3*$BNSZ`(r4) + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`10*$BNSZ`(r3) #r[10]=c2; + #sqr_add_c2(a,7,4,c3,c1,c2); + $LD r5,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,6,5,c3,c1,c2); + $LD r5,`5*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`11*$BNSZ`(r3) #r[11]=c3; + #sqr_add_c(a,6,c1,c2,c3); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + #sqr_add_c2(a,7,5,c1,c2,c3) + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`12*$BNSZ`(r3) #r[12]=c1; + + #sqr_add_c2(a,7,6,c2,c3,c1) + $LD r5,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`13*$BNSZ`(r3) #r[13]=c2; + #sqr_add_c(a,7,c3,c1,c2); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + $ST r11,`14*$BNSZ`(r3) #r[14]=c3; + $ST r9, `15*$BNSZ`(r3) #r[15]=c1; + + + bclr BO_ALWAYS,CR0_LT + + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_mul_comba4" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_comba4: +# +# This is an optimized version of the bn_mul_comba4 routine. +# +# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +# r3 contains r +# r4 contains a +# r5 contains b +# r6, r7 are the 2 BN_ULONGs being multiplied. +# r8, r9 are the results of the 32x32 giving 64 multiply. +# r10, r11, r12 are the equivalents of c1, c2, and c3. +# + xor r0,r0,r0 #r0=0. Used in addze below. 
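The #mul_add_c(a[i],b[j],...) groups that follow use the same three-word accumulator, just with two distinct operands and no doubling. In C, for the 32-bit case, one group amounts to roughly this (a sketch for orientation, not the OpenSSL macro itself):

#include <stdint.h>

/* mul_add_c: (c3,c2,c1) += a*b, one addc/adde/addze chain per call */
static void mul_add_c(uint32_t a, uint32_t b,
                      uint32_t *c1, uint32_t *c2, uint32_t *c3)
{
    uint64_t p = (uint64_t)a * b;
    uint64_t s = (uint64_t)*c1 + (uint32_t)p;
    *c1 = (uint32_t)s;
    s = (uint64_t)*c2 + (uint32_t)(p >> 32) + (s >> 32);
    *c2 = (uint32_t)s;
    *c3 += (uint32_t)(s >> 32);
}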
+ #mul_add_c(a[0],b[0],c1,c2,c3); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r10,r6,r7 + $UMULH r11,r6,r7 + $ST r10,`0*$BNSZ`(r3) #r[0]=c1 + #mul_add_c(a[0],b[1],c2,c3,c1); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r0 + addze r10,r0 + #mul_add_c(a[1],b[0],c2,c3,c1); + $LD r6, `1*$BNSZ`(r4) + $LD r7, `0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r10 + $ST r11,`1*$BNSZ`(r3) #r[1]=c2 + #mul_add_c(a[2],b[0],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r0 + #mul_add_c(a[1],b[1],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r11 + #mul_add_c(a[0],b[2],c3,c1,c2); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r11 + $ST r12,`2*$BNSZ`(r3) #r[2]=c3 + #mul_add_c(a[0],b[3],c1,c2,c3); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r0 + #mul_add_c(a[1],b[2],c1,c2,c3); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r12 + #mul_add_c(a[2],b[1],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r12 + #mul_add_c(a[3],b[0],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r12 + $ST r10,`3*$BNSZ`(r3) #r[3]=c1 + #mul_add_c(a[3],b[1],c2,c3,c1); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r0 + #mul_add_c(a[2],b[2],c2,c3,c1); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r10 + #mul_add_c(a[1],b[3],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r10 + $ST r11,`4*$BNSZ`(r3) #r[4]=c2 + #mul_add_c(a[2],b[3],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r0 + #mul_add_c(a[3],b[2],c3,c1,c2); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r11 + $ST r12,`5*$BNSZ`(r3) #r[5]=c3 + #mul_add_c(a[3],b[3],c1,c2,c3); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + + $ST r10,`6*$BNSZ`(r3) #r[6]=c1 + $ST r11,`7*$BNSZ`(r3) #r[7]=c2 + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_mul_comba8" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_comba8: +# +# Optimized version of the bn_mul_comba8 routine. +# +# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +# r3 contains r +# r4 contains a +# r5 contains b +# r6, r7 are the 2 BN_ULONGs being multiplied. +# r8, r9 are the results of the 32x32 giving 64 multiply. +# r10, r11, r12 are the equivalents of c1, c2, and c3. +# + xor r0,r0,r0 #r0=0. Used in addze below. 
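bn_mul_comba4 above and this bn_mul_comba8 both compute a plain schoolbook product; the hand-scheduled code merely walks it column by column (all a[i]*b[j] with i+j constant) so that only the three accumulator words are live at any time. A row-wise C reference for the 4-word case, again assuming 32-bit BN_ULONG and meant only as a sketch of the intended result:

#include <stdint.h>
#include <string.h>

/* r[0..7] = a[0..3] * b[0..3]; same result as the column-ordered code */
static void mul_comba4_ref(uint32_t r[8],
                           const uint32_t a[4], const uint32_t b[4])
{
    memset(r, 0, 8 * sizeof(uint32_t));
    for (int i = 0; i < 4; i++) {
        uint32_t carry = 0;
        for (int j = 0; j < 4; j++) {
            uint64_t t = (uint64_t)a[i] * b[j] + r[i + j] + carry;
            r[i + j] = (uint32_t)t;
            carry    = (uint32_t)(t >> 32);
        }
        r[i + 4] = carry;
    }
}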
+ + #mul_add_c(a[0],b[0],c1,c2,c3); + $LD r6,`0*$BNSZ`(r4) #a[0] + $LD r7,`0*$BNSZ`(r5) #b[0] + $UMULL r10,r6,r7 + $UMULH r11,r6,r7 + $ST r10,`0*$BNSZ`(r3) #r[0]=c1; + #mul_add_c(a[0],b[1],c2,c3,c1); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + addze r12,r9 # since we didnt set r12 to zero before. + addze r10,r0 + #mul_add_c(a[1],b[0],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`1*$BNSZ`(r3) #r[1]=c2; + #mul_add_c(a[2],b[0],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[1],b[1],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[0],b[2],c3,c1,c2); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`2*$BNSZ`(r3) #r[2]=c3; + #mul_add_c(a[0],b[3],c1,c2,c3); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[1],b[2],c1,c2,c3); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + + #mul_add_c(a[2],b[1],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[3],b[0],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`3*$BNSZ`(r3) #r[3]=c1; + #mul_add_c(a[4],b[0],c2,c3,c1); + $LD r6,`4*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[3],b[1],c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[2],b[2],c2,c3,c1); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[1],b[3],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[0],b[4],c2,c3,c1); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`4*$BNSZ`(r3) #r[4]=c2; + #mul_add_c(a[0],b[5],c3,c1,c2); + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[1],b[4],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[2],b[3],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[3],b[2],c3,c1,c2); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[4],b[1],c3,c1,c2); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[5],b[0],c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde 
r10,r10,r9 + addze r11,r11 + $ST r12,`5*$BNSZ`(r3) #r[5]=c3; + #mul_add_c(a[6],b[0],c1,c2,c3); + $LD r6,`6*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[5],b[1],c1,c2,c3); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[4],b[2],c1,c2,c3); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[3],b[3],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[2],b[4],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[1],b[5],c1,c2,c3); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[0],b[6],c1,c2,c3); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`6*$BNSZ`(r3) #r[6]=c1; + #mul_add_c(a[0],b[7],c2,c3,c1); + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[1],b[6],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[2],b[5],c2,c3,c1); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[3],b[4],c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[4],b[3],c2,c3,c1); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[5],b[2],c2,c3,c1); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[6],b[1],c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[7],b[0],c2,c3,c1); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`7*$BNSZ`(r3) #r[7]=c2; + #mul_add_c(a[7],b[1],c3,c1,c2); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[6],b[2],c3,c1,c2); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[5],b[3],c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[4],b[4],c3,c1,c2); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[3],b[5],c3,c1,c2); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[2],b[6],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + 
adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[1],b[7],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`8*$BNSZ`(r3) #r[8]=c3; + #mul_add_c(a[2],b[7],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[3],b[6],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[4],b[5],c1,c2,c3); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[5],b[4],c1,c2,c3); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[6],b[3],c1,c2,c3); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[7],b[2],c1,c2,c3); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`9*$BNSZ`(r3) #r[9]=c1; + #mul_add_c(a[7],b[3],c2,c3,c1); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[6],b[4],c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[5],b[5],c2,c3,c1); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[4],b[6],c2,c3,c1); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[3],b[7],c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`10*$BNSZ`(r3) #r[10]=c2; + #mul_add_c(a[4],b[7],c3,c1,c2); + $LD r6,`4*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[5],b[6],c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[6],b[5],c3,c1,c2); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[7],b[4],c3,c1,c2); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`11*$BNSZ`(r3) #r[11]=c3; + #mul_add_c(a[7],b[5],c1,c2,c3); + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[6],b[6],c1,c2,c3); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[5],b[7],c1,c2,c3); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`12*$BNSZ`(r3) #r[12]=c1; + #mul_add_c(a[6],b[7],c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[7],b[6],c2,c3,c1); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH 
r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`13*$BNSZ`(r3) #r[13]=c2; + #mul_add_c(a[7],b[7],c3,c1,c2); + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + $ST r12,`14*$BNSZ`(r3) #r[14]=c3; + $ST r10,`15*$BNSZ`(r3) #r[15]=c1; + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_sub_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# +# +.align 4 +.bn_sub_words: +# +# Handcoded version of bn_sub_words +# +#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +# +# r3 = r +# r4 = a +# r5 = b +# r6 = n +# +# Note: No loop unrolling done since this is not a performance +# critical loop. + + xor r0,r0,r0 #set r0 = 0 +# +# check for r6 = 0 AND set carry bit. +# + subfc. r7,r0,r6 # If r6 is 0 then result is 0. + # if r6 > 0 then result !=0 + # In either case carry bit is set. + bc BO_IF,CR0_EQ,Lppcasm_sub_adios + addi r4,r4,-$BNSZ + addi r3,r3,-$BNSZ + addi r5,r5,-$BNSZ + mtctr r6 +Lppcasm_sub_mainloop: + $LDU r7,$BNSZ(r4) + $LDU r8,$BNSZ(r5) + subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8) + # if carry = 1 this is r7-r8. Else it + # is r7-r8 -1 as we need. + $STU r6,$BNSZ(r3) + bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sub_mainloop +Lppcasm_sub_adios: + subfze r3,r0 # if carry bit is set then r3 = 0 else -1 + andi. r3,r3,1 # keep only last bit. + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + + +# +# NOTE: The following label name should be changed to +# "bn_add_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_add_words: +# +# Handcoded version of bn_add_words +# +#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +# +# r3 = r +# r4 = a +# r5 = b +# r6 = n +# +# Note: No loop unrolling done since this is not a performance +# critical loop. + + xor r0,r0,r0 +# +# check for r6 = 0. Is this needed? +# + addic. r6,r6,0 #test r6 and clear carry bit. + bc BO_IF,CR0_EQ,Lppcasm_add_adios + addi r4,r4,-$BNSZ + addi r3,r3,-$BNSZ + addi r5,r5,-$BNSZ + mtctr r6 +Lppcasm_add_mainloop: + $LDU r7,$BNSZ(r4) + $LDU r8,$BNSZ(r5) + adde r8,r7,r8 + $STU r8,$BNSZ(r3) + bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_add_mainloop +Lppcasm_add_adios: + addze r3,r0 #return carry bit. + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_div_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_div_words: +# +# This is a cleaned up version of code generated by +# the AIX compiler. The only optimization is to use +# the PPC instruction to count leading zeros instead +# of call to num_bits_word. Since this was compiled +# only at level -O2 we can possibly squeeze it more? +# +# r3 = h +# r4 = l +# r5 = d + + $UCMPI 0,r5,0 # compare r5 and 0 + bc BO_IF_NOT,CR0_EQ,Lppcasm_div1 # proceed if d!=0 + li r3,-1 # d=0 return -1 + bclr BO_ALWAYS,CR0_LT +Lppcasm_div1: + xor r0,r0,r0 #r0=0 + li r8,$BITS + $CNTLZ. r7,r5 #r7 = num leading 0s in d. + bc BO_IF,CR0_EQ,Lppcasm_div2 #proceed if no leading zeros + subf r8,r7,r8 #r8 = BN_num_bits_word(d) + $SHR. r9,r3,r8 #are there any bits above r8'th? + $TR 16,r9,r0 #if there're, signal to dump core... +Lppcasm_div2: + $UCMP 0,r3,r5 #h>=d? + bc BO_IF,CR0_LT,Lppcasm_div3 #goto Lppcasm_div3 if not + subf r3,r5,r3 #h-=d ; +Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i + cmpi 0,0,r7,0 # is (i == 0)? 
+ bc BO_IF,CR0_EQ,Lppcasm_div4 + $SHL r3,r3,r7 # h = (h<< i) + $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i) + $SHL r5,r5,r7 # d<<=i + or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i)) + $SHL r4,r4,r7 # l <<=i +Lppcasm_div4: + $SHRI r9,r5,`$BITS/2` # r9 = dh + # dl will be computed when needed + # as it saves registers. + li r6,2 #r6=2 + mtctr r6 #counter will be in count. +Lppcasm_divouterloop: + $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4) + $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4 + # compute here for innerloop. + $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh + bc BO_IF_NOT,CR0_EQ,Lppcasm_div5 # goto Lppcasm_div5 if not + + li r8,-1 + $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l + b Lppcasm_div6 +Lppcasm_div5: + $UDIV r8,r3,r9 #q = h/dh +Lppcasm_div6: + $UMULL r12,r9,r8 #th = q*dh + $CLRU r10,r5,`$BITS/2` #r10=dl + $UMULL r6,r8,r10 #tl = q*dl + +Lppcasm_divinnerloop: + subf r10,r12,r3 #t = h -th + $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of... + addic. r7,r7,0 #test if r7 == 0. used below. + # now want to compute + # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4) + # the following 2 instructions do that + $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4) + or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4) + $UCMP 1,r6,r7 # compare (tl <= r7) + bc BO_IF_NOT,CR0_EQ,Lppcasm_divinnerexit + bc BO_IF_NOT,CR1_FEX,Lppcasm_divinnerexit + addi r8,r8,-1 #q-- + subf r12,r9,r12 #th -=dh + $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop. + subf r6,r10,r6 #tl -=dl + b Lppcasm_divinnerloop +Lppcasm_divinnerexit: + $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4) + $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h; + $UCMP 1,r4,r11 # compare l and tl + add r12,r12,r10 # th+=t + bc BO_IF_NOT,CR1_FX,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7 + addi r12,r12,1 # th++ +Lppcasm_div7: + subf r11,r11,r4 #r11=l-tl + $UCMP 1,r3,r12 #compare h and th + bc BO_IF_NOT,CR1_FX,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8 + addi r8,r8,-1 # q-- + add r3,r5,r3 # h+=d +Lppcasm_div8: + subf r12,r12,r3 #r12 = h-th + $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4 + # want to compute + # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2 + # the following 2 instructions will do this. + $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2. + $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3 + bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_div9#if (count==0) break ; + $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4 + b Lppcasm_divouterloop +Lppcasm_div9: + or r3,r8,r0 + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_sqr_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# +.align 4 +.bn_sqr_words: +# +# Optimized version of bn_sqr_words +# +# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n) +# +# r3 = r +# r4 = a +# r5 = n +# +# r6 = a[i]. +# r7,r8 = product. +# +# No unrolling done here. Not performance critical. + + addic. r5,r5,0 #test r5. + bc BO_IF,CR0_EQ,Lppcasm_sqr_adios + addi r4,r4,-$BNSZ + addi r3,r3,-$BNSZ + mtctr r5 +Lppcasm_sqr_mainloop: + #sqr(r[0],r[1],a[0]); + $LDU r6,$BNSZ(r4) + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + $STU r7,$BNSZ(r3) + $STU r8,$BNSZ(r3) + bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sqr_mainloop +Lppcasm_sqr_adios: + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + + +# +# NOTE: The following label name should be changed to +# "bn_mul_words" i.e. remove the first dot +# for the gcc compiler. 
This should be automatically +# done in the build +# + +.align 4 +.bn_mul_words: +# +# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +# +# r3 = rp +# r4 = ap +# r5 = num +# r6 = w + xor r0,r0,r0 + xor r12,r12,r12 # used for carry + rlwinm. r7,r5,30,2,31 # num >> 2 + bc BO_IF,CR0_EQ,Lppcasm_mw_REM + mtctr r7 +Lppcasm_mw_LOOP: + #mul(rp[0],ap[0],w,c1); + $LD r8,`0*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + #addze r10,r10 #carry is NOT ignored. + #will be taken care of + #in second spin below + #using adde. + $ST r9,`0*$BNSZ`(r3) + #mul(rp[1],ap[1],w,c1); + $LD r8,`1*$BNSZ`(r4) + $UMULL r11,r6,r8 + $UMULH r12,r6,r8 + adde r11,r11,r10 + #addze r12,r12 + $ST r11,`1*$BNSZ`(r3) + #mul(rp[2],ap[2],w,c1); + $LD r8,`2*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + adde r9,r9,r12 + #addze r10,r10 + $ST r9,`2*$BNSZ`(r3) + #mul_add(rp[3],ap[3],w,c1); + $LD r8,`3*$BNSZ`(r4) + $UMULL r11,r6,r8 + $UMULH r12,r6,r8 + adde r11,r11,r10 + addze r12,r12 #this spin we collect carry into + #r12 + $ST r11,`3*$BNSZ`(r3) + + addi r3,r3,`4*$BNSZ` + addi r4,r4,`4*$BNSZ` + bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_mw_LOOP + +Lppcasm_mw_REM: + andi. r5,r5,0x3 + bc BO_IF,CR0_EQ,Lppcasm_mw_OVER + #mul(rp[0],ap[0],w,c1); + $LD r8,`0*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + addze r10,r10 + $ST r9,`0*$BNSZ`(r3) + addi r12,r10,0 + + addi r5,r5,-1 + cmpli 0,0,r5,0 + bc BO_IF,CR0_EQ,Lppcasm_mw_OVER + + + #mul(rp[1],ap[1],w,c1); + $LD r8,`1*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + addze r10,r10 + $ST r9,`1*$BNSZ`(r3) + addi r12,r10,0 + + addi r5,r5,-1 + cmpli 0,0,r5,0 + bc BO_IF,CR0_EQ,Lppcasm_mw_OVER + + #mul_add(rp[2],ap[2],w,c1); + $LD r8,`2*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + addze r10,r10 + $ST r9,`2*$BNSZ`(r3) + addi r12,r10,0 + +Lppcasm_mw_OVER: + addi r3,r12,0 + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_mul_add_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_add_words: +# +# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +# +# r3 = rp +# r4 = ap +# r5 = num +# r6 = w +# +# empirical evidence suggests that unrolled version performs best!! +# + xor r0,r0,r0 #r0 = 0 + xor r12,r12,r12 #r12 = 0 . used for carry + rlwinm. r7,r5,30,2,31 # num >> 2 + bc BO_IF,CR0_EQ,Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover + mtctr r7 +Lppcasm_maw_mainloop: + #mul_add(rp[0],ap[0],w,c1); + $LD r8,`0*$BNSZ`(r4) + $LD r11,`0*$BNSZ`(r3) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 #r12 is carry. + addze r10,r10 + addc r9,r9,r11 + #addze r10,r10 + #the above instruction addze + #is NOT needed. Carry will NOT + #be ignored. It's not affected + #by multiply and will be collected + #in the next spin + $ST r9,`0*$BNSZ`(r3) + + #mul_add(rp[1],ap[1],w,c1); + $LD r8,`1*$BNSZ`(r4) + $LD r9,`1*$BNSZ`(r3) + $UMULL r11,r6,r8 + $UMULH r12,r6,r8 + adde r11,r11,r10 #r10 is carry. 
+ addze r12,r12 + addc r11,r11,r9 + #addze r12,r12 + $ST r11,`1*$BNSZ`(r3) + + #mul_add(rp[2],ap[2],w,c1); + $LD r8,`2*$BNSZ`(r4) + $UMULL r9,r6,r8 + $LD r11,`2*$BNSZ`(r3) + $UMULH r10,r6,r8 + adde r9,r9,r12 + addze r10,r10 + addc r9,r9,r11 + #addze r10,r10 + $ST r9,`2*$BNSZ`(r3) + + #mul_add(rp[3],ap[3],w,c1); + $LD r8,`3*$BNSZ`(r4) + $UMULL r11,r6,r8 + $LD r9,`3*$BNSZ`(r3) + $UMULH r12,r6,r8 + adde r11,r11,r10 + addze r12,r12 + addc r11,r11,r9 + addze r12,r12 + $ST r11,`3*$BNSZ`(r3) + addi r3,r3,`4*$BNSZ` + addi r4,r4,`4*$BNSZ` + bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_maw_mainloop + +Lppcasm_maw_leftover: + andi. r5,r5,0x3 + bc BO_IF,CR0_EQ,Lppcasm_maw_adios + addi r3,r3,-$BNSZ + addi r4,r4,-$BNSZ + #mul_add(rp[0],ap[0],w,c1); + mtctr r5 + $LDU r8,$BNSZ(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + $LDU r11,$BNSZ(r3) + addc r9,r9,r11 + addze r10,r10 + addc r9,r9,r12 + addze r12,r10 + $ST r9,0(r3) + + bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios + #mul_add(rp[1],ap[1],w,c1); + $LDU r8,$BNSZ(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + $LDU r11,$BNSZ(r3) + addc r9,r9,r11 + addze r10,r10 + addc r9,r9,r12 + addze r12,r10 + $ST r9,0(r3) + + bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios + #mul_add(rp[2],ap[2],w,c1); + $LDU r8,$BNSZ(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + $LDU r11,$BNSZ(r3) + addc r9,r9,r11 + addze r10,r10 + addc r9,r9,r12 + addze r12,r10 + $ST r9,0(r3) + +Lppcasm_maw_adios: + addi r3,r12,0 + bclr BO_ALWAYS,CR0_LT + .long 0x00000000 + .align 4 +EOF + $data =~ s/\`([^\`]*)\`/eval $1/gem; + + # if some assembler chokes on some simplified mnemonic, + # this is the spot to fix it up, e.g.: + # GNU as doesn't seem to accept cmplw, 32-bit unsigned compare + $data =~ s/^(\s*)cmplw(\s+)([^,]+),(.*)/$1cmpl$2$3,0,$4/gm; + # assembler X doesn't accept li, load immediate value + #$data =~ s/^(\s*)li(\s+)([^,]+),(.*)/$1addi$2$3,0,$4/gm; + return($data); +} diff --git a/openssl/crypto/bn/asm/ppc64-mont.pl b/openssl/crypto/bn/asm/ppc64-mont.pl new file mode 100644 index 000000000..3449b3585 --- /dev/null +++ b/openssl/crypto/bn/asm/ppc64-mont.pl @@ -0,0 +1,918 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# December 2007 + +# The reason for undertaken effort is basically following. Even though +# Power 6 CPU operates at incredible 4.7GHz clock frequency, its PKI +# performance was observed to be less than impressive, essentially as +# fast as 1.8GHz PPC970, or 2.6 times(!) slower than one would hope. +# Well, it's not surprising that IBM had to make some sacrifices to +# boost the clock frequency that much, but no overall improvement? +# Having observed how much difference did switching to FPU make on +# UltraSPARC, playing same stunt on Power 6 appeared appropriate... +# Unfortunately the resulting performance improvement is not as +# impressive, ~30%, and in absolute terms is still very far from what +# one would expect from 4.7GHz CPU. There is a chance that I'm doing +# something wrong, but in the lack of assembler level micro-profiling +# data or at least decent platform guide I can't tell... Or better +# results might be achieved with VMX... 
Anyway, this module provides +# *worse* performance on other PowerPC implementations, ~40-15% slower +# on PPC970 depending on key length and ~40% slower on Power 5 for all +# key lengths. As it's obviously inappropriate as "best all-round" +# alternative, it has to be complemented with run-time CPU family +# detection. Oh! It should also be noted that unlike other PowerPC +# implementation IALU ppc-mont.pl module performs *suboptimaly* on +# >=1024-bit key lengths on Power 6. It should also be noted that +# *everything* said so far applies to 64-bit builds! As far as 32-bit +# application executed on 64-bit CPU goes, this module is likely to +# become preferred choice, because it's easy to adapt it for such +# case and *is* faster than 32-bit ppc-mont.pl on *all* processors. + +# February 2008 + +# Micro-profiling assisted optimization results in ~15% improvement +# over original ppc64-mont.pl version, or overall ~50% improvement +# over ppc.pl module on Power 6. If compared to ppc-mont.pl on same +# Power 6 CPU, this module is 5-150% faster depending on key length, +# [hereafter] more for longer keys. But if compared to ppc-mont.pl +# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive +# in absolute terms, but it's apparently the way Power 6 is... + +$flavour = shift; + +if ($flavour =~ /32/) { + $SIZE_T=4; + $RZONE= 224; + $FRAME= $SIZE_T*12+8*12; + $fname= "bn_mul_mont_ppc64"; + + $STUX= "stwux"; # store indexed and update + $PUSH= "stw"; + $POP= "lwz"; + die "not implemented yet"; +} elsif ($flavour =~ /64/) { + $SIZE_T=8; + $RZONE= 288; + $FRAME= $SIZE_T*12+8*12; + $fname= "bn_mul_mont"; + + # same as above, but 64-bit mnemonics... + $STUX= "stdux"; # store indexed and update + $PUSH= "std"; + $POP= "ld"; +} else { die "nonsense $flavour"; } + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$FRAME=($FRAME+63)&~63; +$TRANSFER=16*8; + +$carry="r0"; +$sp="r1"; +$toc="r2"; +$rp="r3"; $ovf="r3"; +$ap="r4"; +$bp="r5"; +$np="r6"; +$n0="r7"; +$num="r8"; +$rp="r9"; # $rp is reassigned +$tp="r10"; +$j="r11"; +$i="r12"; +# non-volatile registers +$nap_d="r14"; # interleaved ap and np in double format +$a0="r15"; # ap[0] +$t0="r16"; # temporary registers +$t1="r17"; +$t2="r18"; +$t3="r19"; +$t4="r20"; +$t5="r21"; +$t6="r22"; +$t7="r23"; + +# PPC offers enough register bank capacity to unroll inner loops twice +# +# ..A3A2A1A0 +# dcba +# ----------- +# A0a +# A0b +# A0c +# A0d +# A1a +# A1b +# A1c +# A1d +# A2a +# A2b +# A2c +# A2d +# A3a +# A3b +# A3c +# A3d +# ..a +# ..b +# +$ba="f0"; $bb="f1"; $bc="f2"; $bd="f3"; +$na="f4"; $nb="f5"; $nc="f6"; $nd="f7"; +$dota="f8"; $dotb="f9"; +$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13"; +$N0="f14"; $N1="f15"; $N2="f16"; $N3="f17"; +$T0a="f18"; $T0b="f19"; +$T1a="f20"; $T1b="f21"; +$T2a="f22"; $T2b="f23"; +$T3a="f24"; $T3b="f25"; + +# sp----------->+-------------------------------+ +# | saved sp | +# +-------------------------------+ +# | | +# +-------------------------------+ +# | 10 saved gpr, r14-r23 | +# . . +# . . +# +12*size_t +-------------------------------+ +# | 12 saved fpr, f14-f25 | +# . . +# . . +# +12*8 +-------------------------------+ +# | padding to 64 byte boundary | +# . . +# +X +-------------------------------+ +# | 16 gpr<->fpr transfer zone | +# . . +# . . 
+# +16*8 +-------------------------------+ +# | __int64 tmp[-1] | +# +-------------------------------+ +# | __int64 tmp[num] | +# . . +# . . +# . . +# +(num+1)*8 +-------------------------------+ +# | padding to 64 byte boundary | +# . . +# +X +-------------------------------+ +# | double nap_d[4*num] | +# . . +# . . +# . . +# +-------------------------------+ + +$code=<<___; +.machine "any" +.text + +.globl .$fname +.align 5 +.$fname: + cmpwi $num,4 + mr $rp,r3 ; $rp is reassigned + li r3,0 ; possible "not handled" return code + bltlr- + andi. r0,$num,1 ; $num has to be even + bnelr- + + slwi $num,$num,3 ; num*=8 + li $i,-4096 + slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num + add $tp,$tp,$num ; place for tp[num+1] + addi $tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE` + subf $tp,$tp,$sp ; $sp-$tp + and $tp,$tp,$i ; minimize TLB usage + subf $tp,$sp,$tp ; $tp-$sp + $STUX $sp,$sp,$tp ; alloca + + $PUSH r14,`2*$SIZE_T`($sp) + $PUSH r15,`3*$SIZE_T`($sp) + $PUSH r16,`4*$SIZE_T`($sp) + $PUSH r17,`5*$SIZE_T`($sp) + $PUSH r18,`6*$SIZE_T`($sp) + $PUSH r19,`7*$SIZE_T`($sp) + $PUSH r20,`8*$SIZE_T`($sp) + $PUSH r21,`9*$SIZE_T`($sp) + $PUSH r22,`10*$SIZE_T`($sp) + $PUSH r23,`11*$SIZE_T`($sp) + stfd f14,`12*$SIZE_T+0`($sp) + stfd f15,`12*$SIZE_T+8`($sp) + stfd f16,`12*$SIZE_T+16`($sp) + stfd f17,`12*$SIZE_T+24`($sp) + stfd f18,`12*$SIZE_T+32`($sp) + stfd f19,`12*$SIZE_T+40`($sp) + stfd f20,`12*$SIZE_T+48`($sp) + stfd f21,`12*$SIZE_T+56`($sp) + stfd f22,`12*$SIZE_T+64`($sp) + stfd f23,`12*$SIZE_T+72`($sp) + stfd f24,`12*$SIZE_T+80`($sp) + stfd f25,`12*$SIZE_T+88`($sp) + + ld $a0,0($ap) ; pull ap[0] value + ld $n0,0($n0) ; pull n0[0] value + ld $t3,0($bp) ; bp[0] + + addi $tp,$sp,`$FRAME+$TRANSFER+8+64` + li $i,-64 + add $nap_d,$tp,$num + and $nap_d,$nap_d,$i ; align to 64 bytes + + mulld $t7,$a0,$t3 ; ap[0]*bp[0] + ; nap_d is off by 1, because it's used with stfdu/lfdu + addi $nap_d,$nap_d,-8 + srwi $j,$num,`3+1` ; counter register, num/2 + mulld $t7,$t7,$n0 ; tp[0]*n0 + addi $j,$j,-1 + addi $tp,$sp,`$FRAME+$TRANSFER-8` + li $carry,0 + mtctr $j + + ; transfer bp[0] to FPU as 4x16-bit values + extrdi $t0,$t3,16,48 + extrdi $t1,$t3,16,32 + extrdi $t2,$t3,16,16 + extrdi $t3,$t3,16,0 + std $t0,`$FRAME+0`($sp) + std $t1,`$FRAME+8`($sp) + std $t2,`$FRAME+16`($sp) + std $t3,`$FRAME+24`($sp) + ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values + extrdi $t4,$t7,16,48 + extrdi $t5,$t7,16,32 + extrdi $t6,$t7,16,16 + extrdi $t7,$t7,16,0 + std $t4,`$FRAME+32`($sp) + std $t5,`$FRAME+40`($sp) + std $t6,`$FRAME+48`($sp) + std $t7,`$FRAME+56`($sp) + lwz $t0,4($ap) ; load a[j] as 32-bit word pair + lwz $t1,0($ap) + lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair + lwz $t3,8($ap) + lwz $t4,4($np) ; load n[j] as 32-bit word pair + lwz $t5,0($np) + lwz $t6,12($np) ; load n[j+1] as 32-bit word pair + lwz $t7,8($np) + lfd $ba,`$FRAME+0`($sp) + lfd $bb,`$FRAME+8`($sp) + lfd $bc,`$FRAME+16`($sp) + lfd $bd,`$FRAME+24`($sp) + lfd $na,`$FRAME+32`($sp) + lfd $nb,`$FRAME+40`($sp) + lfd $nc,`$FRAME+48`($sp) + lfd $nd,`$FRAME+56`($sp) + std $t0,`$FRAME+64`($sp) + std $t1,`$FRAME+72`($sp) + std $t2,`$FRAME+80`($sp) + std $t3,`$FRAME+88`($sp) + std $t4,`$FRAME+96`($sp) + std $t5,`$FRAME+104`($sp) + std $t6,`$FRAME+112`($sp) + std $t7,`$FRAME+120`($sp) + fcfid $ba,$ba + fcfid $bb,$bb + fcfid $bc,$bc + fcfid $bd,$bd + fcfid $na,$na + fcfid $nb,$nb + fcfid $nc,$nc + fcfid $nd,$nd + + lfd $A0,`$FRAME+64`($sp) + lfd $A1,`$FRAME+72`($sp) + lfd $A2,`$FRAME+80`($sp) + lfd $A3,`$FRAME+88`($sp) + lfd $N0,`$FRAME+96`($sp) + lfd 
$N1,`$FRAME+104`($sp) + lfd $N2,`$FRAME+112`($sp) + lfd $N3,`$FRAME+120`($sp) + fcfid $A0,$A0 + fcfid $A1,$A1 + fcfid $A2,$A2 + fcfid $A3,$A3 + fcfid $N0,$N0 + fcfid $N1,$N1 + fcfid $N2,$N2 + fcfid $N3,$N3 + addi $ap,$ap,16 + addi $np,$np,16 + + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + stfd $A0,8($nap_d) ; save a[j] in double format + stfd $A1,16($nap_d) + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + stfd $A2,24($nap_d) ; save a[j+1] in double format + stfd $A3,32($nap_d) + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + stfd $N0,40($nap_d) ; save n[j] in double format + stfd $N1,48($nap_d) + fmul $T0a,$A0,$ba + fmul $T0b,$A0,$bb + stfd $N2,56($nap_d) ; save n[j+1] in double format + stfdu $N3,64($nap_d) + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + + fctid $T0a,$T0a + fctid $T0b,$T0b + fctid $T1a,$T1a + fctid $T1b,$T1b + fctid $T2a,$T2a + fctid $T2b,$T2b + fctid $T3a,$T3a + fctid $T3b,$T3b + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + +.align 5 +L1st: + lwz $t0,4($ap) ; load a[j] as 32-bit word pair + lwz $t1,0($ap) + lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair + lwz $t3,8($ap) + lwz $t4,4($np) ; load n[j] as 32-bit word pair + lwz $t5,0($np) + lwz $t6,12($np) ; load n[j+1] as 32-bit word pair + lwz $t7,8($np) + std $t0,`$FRAME+64`($sp) + std $t1,`$FRAME+72`($sp) + std $t2,`$FRAME+80`($sp) + std $t3,`$FRAME+88`($sp) + std $t4,`$FRAME+96`($sp) + std $t5,`$FRAME+104`($sp) + std $t6,`$FRAME+112`($sp) + std $t7,`$FRAME+120`($sp) + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + lfd $A0,`$FRAME+64`($sp) + lfd $A1,`$FRAME+72`($sp) + lfd $A2,`$FRAME+80`($sp) + lfd $A3,`$FRAME+88`($sp) + lfd $N0,`$FRAME+96`($sp) + lfd $N1,`$FRAME+104`($sp) + lfd $N2,`$FRAME+112`($sp) + lfd $N3,`$FRAME+120`($sp) + fcfid $A0,$A0 + fcfid $A1,$A1 + fcfid $A2,$A2 + fcfid $A3,$A3 + fcfid $N0,$N0 + fcfid $N1,$N1 + fcfid $N2,$N2 + fcfid $N3,$N3 + addi $ap,$ap,16 + addi $np,$np,16 + + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + stfd $A0,8($nap_d) ; save a[j] in double format + stfd $A1,16($nap_d) + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + fmadd $T0a,$A0,$ba,$dota + fmadd $T0b,$A0,$bb,$dotb + stfd $A2,24($nap_d) ; save a[j+1] in double format + stfd $A3,32($nap_d) + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + stfd $N0,40($nap_d) ; save n[j] in double format + stfd $N1,48($nap_d) + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + add $t0,$t0,$carry ; can not overflow + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + stfd $N2,56($nap_d) ; save n[j+1] in double format + stfdu $N3,64($nap_d) + srdi $carry,$t0,16 + add $t1,$t1,$carry + 
srdi $carry,$t1,16 + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + insrdi $t0,$t1,16,32 + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + add $t2,$t2,$carry + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + srdi $carry,$t2,16 + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + insrdi $t0,$t2,16,16 + add $t3,$t3,$carry + srdi $carry,$t3,16 + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + insrdi $t0,$t3,16,0 ; 0..63 bits + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + add $t4,$t4,$carry + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + srdi $carry,$t4,16 + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + add $t5,$t5,$carry + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + + fctid $T0a,$T0a + fctid $T0b,$T0b + add $t6,$t6,$carry + fctid $T1a,$T1a + fctid $T1b,$T1b + srdi $carry,$t6,16 + fctid $T2a,$T2a + fctid $T2b,$T2b + insrdi $t4,$t6,16,16 + fctid $T3a,$T3a + fctid $T3b,$T3b + add $t7,$t7,$carry + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + std $t0,8($tp) ; tp[j-1] + stdu $t4,16($tp) ; tp[j] + bdnz- L1st + + fctid $dota,$dota + fctid $dotb,$dotb + + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + stfd $dota,`$FRAME+64`($sp) + stfd $dotb,`$FRAME+72`($sp) + + add $t0,$t0,$carry ; can not overflow + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + insrdi $t0,$t1,16,32 + add $t2,$t2,$carry + srdi $carry,$t2,16 + insrdi $t0,$t2,16,16 + add $t3,$t3,$carry + srdi $carry,$t3,16 + insrdi $t0,$t3,16,0 ; 0..63 bits + add $t4,$t4,$carry + srdi $carry,$t4,16 + add $t5,$t5,$carry + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + add $t6,$t6,$carry + srdi $carry,$t6,16 + insrdi $t4,$t6,16,16 + add $t7,$t7,$carry + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + ld $t6,`$FRAME+64`($sp) + ld $t7,`$FRAME+72`($sp) + + std $t0,8($tp) ; tp[j-1] + stdu $t4,16($tp) ; tp[j] + + add $t6,$t6,$carry ; can not overflow + srdi $carry,$t6,16 + add $t7,$t7,$carry + insrdi $t6,$t7,48,0 + srdi $ovf,$t7,48 + std $t6,8($tp) ; tp[num-1] + + slwi $t7,$num,2 + subf $nap_d,$t7,$nap_d ; rewind pointer + + li $i,8 ; i=1 +.align 5 +Louter: + ldx $t3,$bp,$i ; bp[i] + ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0] + mulld $t7,$a0,$t3 ; ap[0]*bp[i] + + addi $tp,$sp,`$FRAME+$TRANSFER` + add $t7,$t7,$t6 ; ap[0]*bp[i]+tp[0] + li $carry,0 + mulld $t7,$t7,$n0 ; tp[0]*n0 + mtctr $j + + ; transfer bp[i] to FPU as 4x16-bit values + extrdi $t0,$t3,16,48 + extrdi $t1,$t3,16,32 + extrdi $t2,$t3,16,16 + extrdi $t3,$t3,16,0 + std $t0,`$FRAME+0`($sp) + std $t1,`$FRAME+8`($sp) + std $t2,`$FRAME+16`($sp) + std $t3,`$FRAME+24`($sp) + ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values + extrdi $t4,$t7,16,48 + extrdi $t5,$t7,16,32 + extrdi $t6,$t7,16,16 + extrdi $t7,$t7,16,0 + std $t4,`$FRAME+32`($sp) + std $t5,`$FRAME+40`($sp) + std $t6,`$FRAME+48`($sp) + std $t7,`$FRAME+56`($sp) + + lfd $A0,8($nap_d) ; load a[j] in double format + lfd $A1,16($nap_d) + lfd $A2,24($nap_d) ; load a[j+1] in double format + lfd $A3,32($nap_d) + lfd $N0,40($nap_d) ; load n[j] in double format + lfd $N1,48($nap_d) + lfd $N2,56($nap_d) ; load n[j+1] in double format + lfdu $N3,64($nap_d) 
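The operands are kept as 4x16-bit pieces (both in the transfer slots and in the nap_d cache reloaded above) because a 16x16-bit partial product is below 2^32, so even the sums built up by the fmadd chains stay far below the 2^53 that an IEEE double represents exactly; the srdi/insrdi sequences then reassemble the 16-bit columns with integer carries. A self-contained C illustration of the idea, not the module's code:

#include <stdint.h>

/* split a 64-bit word into four 16-bit digits, each exact as a double */
static void split16(uint64_t x, double d[4])
{
    for (int i = 0; i < 4; i++)
        d[i] = (double)((x >> (16 * i)) & 0xffff);
}

/* r[0] (low) and r[1] (high) receive the 128-bit product a*b, computed
   from exact double-precision partial products plus integer carry
   propagation at 16-bit boundaries */
static void mul64_via_doubles(uint64_t a, uint64_t b, uint64_t r[2])
{
    double   da[4], db[4];
    uint64_t col[8] = {0};

    split16(a, da);
    split16(b, db);
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            col[i + j] += (uint64_t)(da[i] * db[j]);  /* < 2^32, exact */

    uint64_t carry = 0, lo = 0, hi = 0;
    for (int k = 0; k < 8; k++) {
        uint64_t v     = col[k] + carry;
        uint64_t digit = v & 0xffff;
        carry = v >> 16;
        if (k < 4) lo |= digit << (16 * k);
        else       hi |= digit << (16 * (k - 4));
    }
    r[0] = lo;
    r[1] = hi;   /* the final carry is zero because a*b < 2^128 */
}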
+ + lfd $ba,`$FRAME+0`($sp) + lfd $bb,`$FRAME+8`($sp) + lfd $bc,`$FRAME+16`($sp) + lfd $bd,`$FRAME+24`($sp) + lfd $na,`$FRAME+32`($sp) + lfd $nb,`$FRAME+40`($sp) + lfd $nc,`$FRAME+48`($sp) + lfd $nd,`$FRAME+56`($sp) + + fcfid $ba,$ba + fcfid $bb,$bb + fcfid $bc,$bc + fcfid $bd,$bd + fcfid $na,$na + fcfid $nb,$nb + fcfid $nc,$nc + fcfid $nd,$nd + + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + fmul $T0a,$A0,$ba + fmul $T0b,$A0,$bb + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + lfd $A0,8($nap_d) ; load a[j] in double format + lfd $A1,16($nap_d) + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + lfd $A2,24($nap_d) ; load a[j+1] in double format + lfd $A3,32($nap_d) + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + + fctid $T0a,$T0a + fctid $T0b,$T0b + fctid $T1a,$T1a + fctid $T1b,$T1b + fctid $T2a,$T2a + fctid $T2b,$T2b + fctid $T3a,$T3a + fctid $T3b,$T3b + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + +.align 5 +Linner: + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + lfd $N0,40($nap_d) ; load n[j] in double format + lfd $N1,48($nap_d) + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + fmadd $T0a,$A0,$ba,$dota + fmadd $T0b,$A0,$bb,$dotb + lfd $N2,56($nap_d) ; load n[j+1] in double format + lfdu $N3,64($nap_d) + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + lfd $A0,8($nap_d) ; load a[j] in double format + lfd $A1,16($nap_d) + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + lfd $A2,24($nap_d) ; load a[j+1] in double format + lfd $A3,32($nap_d) + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + add $t0,$t0,$carry ; can not overflow + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + insrdi $t0,$t1,16,32 + ld $t1,8($tp) ; tp[j] + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + add $t2,$t2,$carry + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + srdi $carry,$t2,16 + insrdi $t0,$t2,16,16 + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + add $t3,$t3,$carry + ldu $t2,16($tp) ; tp[j+1] + srdi $carry,$t3,16 + insrdi $t0,$t3,16,0 ; 0..63 bits + add $t4,$t4,$carry + + fctid $T0a,$T0a + fctid $T0b,$T0b + srdi $carry,$t4,16 + fctid $T1a,$T1a + fctid $T1b,$T1b + add $t5,$t5,$carry + fctid $T2a,$T2a + fctid $T2b,$T2b + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + fctid $T3a,$T3a + fctid $T3b,$T3b + add 
$t6,$t6,$carry + srdi $carry,$t6,16 + insrdi $t4,$t6,16,16 + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + add $t7,$t7,$carry + addc $t3,$t0,$t1 + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + adde $t5,$t4,$t2 + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + addze $carry,$carry + std $t3,-16($tp) ; tp[j-1] + std $t5,-8($tp) ; tp[j] + bdnz- Linner + + fctid $dota,$dota + fctid $dotb,$dotb + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + stfd $dota,`$FRAME+64`($sp) + stfd $dotb,`$FRAME+72`($sp) + + add $t0,$t0,$carry ; can not overflow + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + insrdi $t0,$t1,16,32 + add $t2,$t2,$carry + ld $t1,8($tp) ; tp[j] + srdi $carry,$t2,16 + insrdi $t0,$t2,16,16 + add $t3,$t3,$carry + ldu $t2,16($tp) ; tp[j+1] + srdi $carry,$t3,16 + insrdi $t0,$t3,16,0 ; 0..63 bits + add $t4,$t4,$carry + srdi $carry,$t4,16 + add $t5,$t5,$carry + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + add $t6,$t6,$carry + srdi $carry,$t6,16 + insrdi $t4,$t6,16,16 + add $t7,$t7,$carry + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + ld $t6,`$FRAME+64`($sp) + ld $t7,`$FRAME+72`($sp) + + addc $t3,$t0,$t1 + adde $t5,$t4,$t2 + addze $carry,$carry + + std $t3,-16($tp) ; tp[j-1] + std $t5,-8($tp) ; tp[j] + + add $carry,$carry,$ovf ; comsume upmost overflow + add $t6,$t6,$carry ; can not overflow + srdi $carry,$t6,16 + add $t7,$t7,$carry + insrdi $t6,$t7,48,0 + srdi $ovf,$t7,48 + std $t6,0($tp) ; tp[num-1] + + slwi $t7,$num,2 + addi $i,$i,8 + subf $nap_d,$t7,$nap_d ; rewind pointer + cmpw $i,$num + blt- Louter + + subf $np,$num,$np ; rewind np + addi $j,$j,1 ; restore counter + subfc $i,$i,$i ; j=0 and "clear" XER[CA] + addi $tp,$sp,`$FRAME+$TRANSFER+8` + addi $t4,$sp,`$FRAME+$TRANSFER+16` + addi $t5,$np,8 + addi $t6,$rp,8 + mtctr $j + +.align 4 +Lsub: ldx $t0,$tp,$i + ldx $t1,$np,$i + ldx $t2,$t4,$i + ldx $t3,$t5,$i + subfe $t0,$t1,$t0 ; tp[j]-np[j] + subfe $t2,$t3,$t2 ; tp[j+1]-np[j+1] + stdx $t0,$rp,$i + stdx $t2,$t6,$i + addi $i,$i,16 + bdnz- Lsub + + li $i,0 + subfe $ovf,$i,$ovf ; handle upmost overflow bit + and $ap,$tp,$ovf + andc $np,$rp,$ovf + or $ap,$ap,$np ; ap=borrow?tp:rp + addi $t7,$ap,8 + mtctr $j + +.align 4 +Lcopy: ; copy or in-place refresh + ldx $t0,$ap,$i + ldx $t1,$t7,$i + std $i,8($nap_d) ; zap nap_d + std $i,16($nap_d) + std $i,24($nap_d) + std $i,32($nap_d) + std $i,40($nap_d) + std $i,48($nap_d) + std $i,56($nap_d) + stdu $i,64($nap_d) + stdx $t0,$rp,$i + stdx $t1,$t6,$i + stdx $i,$tp,$i ; zap tp at once + stdx $i,$t4,$i + addi $i,$i,16 + bdnz- Lcopy + + $POP r14,`2*$SIZE_T`($sp) + $POP r15,`3*$SIZE_T`($sp) + $POP r16,`4*$SIZE_T`($sp) + $POP r17,`5*$SIZE_T`($sp) + $POP r18,`6*$SIZE_T`($sp) + $POP r19,`7*$SIZE_T`($sp) + $POP r20,`8*$SIZE_T`($sp) + $POP r21,`9*$SIZE_T`($sp) + $POP r22,`10*$SIZE_T`($sp) + $POP r23,`11*$SIZE_T`($sp) + lfd f14,`12*$SIZE_T+0`($sp) + lfd f15,`12*$SIZE_T+8`($sp) + lfd f16,`12*$SIZE_T+16`($sp) + lfd f17,`12*$SIZE_T+24`($sp) + lfd f18,`12*$SIZE_T+32`($sp) + lfd f19,`12*$SIZE_T+40`($sp) + lfd f20,`12*$SIZE_T+48`($sp) + lfd f21,`12*$SIZE_T+56`($sp) + lfd f22,`12*$SIZE_T+64`($sp) + lfd f23,`12*$SIZE_T+72`($sp) + lfd f24,`12*$SIZE_T+80`($sp) + lfd f25,`12*$SIZE_T+88`($sp) + $POP $sp,0($sp) + 
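Lsub above subtracts np from tp with borrow propagation straight into rp[], and Lcopy then either keeps that difference or re-copies the unreduced tp[], choosing through the and/andc/or mask built from the final borrow ("ap=borrow?tp:rp") while also zapping tp and nap_d. A minimal C sketch of the selection step, under the assumption that rp[] already holds the subtracted value; result_select is a hypothetical helper, not part of the module, and it selects word by word where the assembly selects between the two buffer pointers before copying:

#include <stdint.h>

/* If the final subtraction tp-np borrowed, the difference is out of range
 * and the unreduced tp[] must be kept; otherwise the difference already in
 * rp[] is the result.  mask is all ones iff borrow != 0, so no conditional
 * branch on the borrow is needed. */
static void result_select(uint64_t *rp, const uint64_t *tp, int borrow, int num)
{
    uint64_t mask = 0 - (uint64_t)borrow;      /* 0/~0 mask from the borrow */
    for (int i = 0; i < num; i++)
        rp[i] = (tp[i] & mask) | (rp[i] & ~mask);
}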
li r3,1 ; signal "handled" + blr + .long 0 +.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@fy.chalmers.se>" +___ + +$code =~ s/\`([^\`]*)\`/eval $1/gem; +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/s390x-mont.pl b/openssl/crypto/bn/asm/s390x-mont.pl new file mode 100644 index 000000000..d23251033 --- /dev/null +++ b/openssl/crypto/bn/asm/s390x-mont.pl @@ -0,0 +1,225 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# April 2007. +# +# Performance improvement over vanilla C code varies from 85% to 45% +# depending on key length and benchmark. Unfortunately in this context +# these are not very impressive results [for code that utilizes "wide" +# 64x64=128-bit multiplication, which is not commonly available to C +# programmers]; a hand-coded bn_asm.c replacement, at least, is known +# to provide 30-40% better results for the longest keys. On second +# thought it's not very surprising, because z-CPUs are single-issue +# and execute _strictly_ in order, while bn_mul_mont depends more or +# less on the CPU's ability to pipeline instructions and keep several +# of them "in flight" at the same time. While other methods, Karatsuba +# for example, aim to minimize the number of multiplications at the +# cost of more of the other operations, bn_mul_mont aims to neatly +# "overlap" multiplications with the other operations [and on most +# platforms even to minimize the number of other operations, in +# particular references to memory]. It should still be possible to +# improve this module's performance by implementing a dedicated +# squaring code path and possibly by unrolling loops... + +# January 2009. +# +# Reschedule to minimize/avoid Address Generation Interlock hazard, +# make inner loops counter-based.
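For reference, the word-serial Montgomery multiplication that the .L1st/.Linner loops below implement can be written in a few lines of C. This is only a sketch of the dataflow, not the module's code: unsigned __int128 stands in for the mlgr 64x64->128-bit multiply, n0 is the usual -np[0]^-1 mod 2^64 pulled from the n0 argument, and the function name, tp[] layout and branching final subtraction are illustrative. Each inner step issues one ap[j]*bp[i] and one np[j]*m1 product and folds both into the running tp[], which is exactly the "overlapping" the comment above describes.

#include <stdint.h>
#include <string.h>

typedef unsigned __int128 u128;        /* stand-in for mlgr's 128-bit product */

/* rp[] = ap[]*bp[] * 2^(-64*num) mod np[], word-serial (CIOS-style) sketch. */
static void mont_mul_sketch(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                            const uint64_t *np, uint64_t n0, int num)
{
    uint64_t tp[num + 2];              /* num words + overflow word + spill bit */
    memset(tp, 0, sizeof(tp));

    for (int i = 0; i < num; i++) {
        u128 t;
        uint64_t c = 0;

        for (int j = 0; j < num; j++) {            /* tp += ap[]*bp[i]         */
            t = (u128)ap[j] * bp[i] + tp[j] + c;
            tp[j] = (uint64_t)t;
            c = (uint64_t)(t >> 64);
        }
        t = (u128)tp[num] + c;
        tp[num] = (uint64_t)t;
        tp[num + 1] = (uint64_t)(t >> 64);

        uint64_t m1 = tp[0] * n0;                  /* "tp[0]"*n0, mod 2^64     */

        t = (u128)np[0] * m1 + tp[0];              /* low word cancels to zero */
        c = (uint64_t)(t >> 64);
        for (int j = 1; j < num; j++) {            /* tp = (tp + np[]*m1)/2^64 */
            t = (u128)np[j] * m1 + tp[j] + c;
            tp[j - 1] = (uint64_t)t;
            c = (uint64_t)(t >> 64);
        }
        t = (u128)tp[num] + c;
        tp[num - 1] = (uint64_t)t;
        tp[num] = tp[num + 1] + (uint64_t)(t >> 64);
    }

    /* final reduction: subtract np once if tp >= np; the assembly makes the
     * same choice branch-free with a borrow-derived mask */
    uint64_t sub[num], b = 0;
    for (int j = 0; j < num; j++) {
        sub[j] = tp[j] - np[j] - b;
        b = (tp[j] < np[j]) || (tp[j] == np[j] && b);
    }
    memcpy(rp, tp[num] >= b ? sub : tp, num * sizeof(uint64_t));
}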
+ +$mn0="%r0"; +$num="%r1"; + +# int bn_mul_mont( +$rp="%r2"; # BN_ULONG *rp, +$ap="%r3"; # const BN_ULONG *ap, +$bp="%r4"; # const BN_ULONG *bp, +$np="%r5"; # const BN_ULONG *np, +$n0="%r6"; # const BN_ULONG *n0, +#$num="160(%r15)" # int num); + +$bi="%r2"; # zaps rp +$j="%r7"; + +$ahi="%r8"; +$alo="%r9"; +$nhi="%r10"; +$nlo="%r11"; +$AHI="%r12"; +$NHI="%r13"; +$count="%r14"; +$sp="%r15"; + +$code.=<<___; +.text +.globl bn_mul_mont +.type bn_mul_mont,\@function +bn_mul_mont: + lgf $num,164($sp) # pull $num + sla $num,3 # $num to enumerate bytes + la $bp,0($num,$bp) + + stg %r2,16($sp) + + cghi $num,16 # + lghi %r2,0 # + blr %r14 # if($num<16) return 0; + cghi $num,128 # + bhr %r14 # if($num>128) return 0; + + stmg %r3,%r15,24($sp) + + lghi $rp,-160-8 # leave room for carry bit + lcgr $j,$num # -$num + lgr %r0,$sp + la $rp,0($rp,$sp) + la $sp,0($j,$rp) # alloca + stg %r0,0($sp) # back chain + + sra $num,3 # restore $num + la $bp,0($j,$bp) # restore $bp + ahi $num,-1 # adjust $num for inner loop + lg $n0,0($n0) # pull n0 + + lg $bi,0($bp) + lg $alo,0($ap) + mlgr $ahi,$bi # ap[0]*bp[0] + lgr $AHI,$ahi + + lgr $mn0,$alo # "tp[0]"*n0 + msgr $mn0,$n0 + + lg $nlo,0($np) # + mlgr $nhi,$mn0 # np[0]*m1 + algr $nlo,$alo # +="tp[0]" + lghi $NHI,0 + alcgr $NHI,$nhi + + la $j,8(%r0) # j=1 + lr $count,$num + +.align 16 +.L1st: + lg $alo,0($j,$ap) + mlgr $ahi,$bi # ap[j]*bp[0] + algr $alo,$AHI + lghi $AHI,0 + alcgr $AHI,$ahi + + lg $nlo,0($j,$np) + mlgr $nhi,$mn0 # np[j]*m1 + algr $nlo,$NHI + lghi $NHI,0 + alcgr $nhi,$NHI # +="tp[j]" + algr $nlo,$alo + alcgr $NHI,$nhi + + stg $nlo,160-8($j,$sp) # tp[j-1]= + la $j,8($j) # j++ + brct $count,.L1st + + algr $NHI,$AHI + lghi $AHI,0 + alcgr $AHI,$AHI # upmost overflow bit + stg $NHI,160-8($j,$sp) + stg $AHI,160($j,$sp) + la $bp,8($bp) # bp++ + +.Louter: + lg $bi,0($bp) # bp[i] + lg $alo,0($ap) + mlgr $ahi,$bi # ap[0]*bp[i] + alg $alo,160($sp) # +=tp[0] + lghi $AHI,0 + alcgr $AHI,$ahi + + lgr $mn0,$alo + msgr $mn0,$n0 # tp[0]*n0 + + lg $nlo,0($np) # np[0] + mlgr $nhi,$mn0 # np[0]*m1 + algr $nlo,$alo # +="tp[0]" + lghi $NHI,0 + alcgr $NHI,$nhi + + la $j,8(%r0) # j=1 + lr $count,$num + +.align 16 +.Linner: + lg $alo,0($j,$ap) + mlgr $ahi,$bi # ap[j]*bp[i] + algr $alo,$AHI + lghi $AHI,0 + alcgr $ahi,$AHI + alg $alo,160($j,$sp)# +=tp[j] + alcgr $AHI,$ahi + + lg $nlo,0($j,$np) + mlgr $nhi,$mn0 # np[j]*m1 + algr $nlo,$NHI + lghi $NHI,0 + alcgr $nhi,$NHI + algr $nlo,$alo # +="tp[j]" + alcgr $NHI,$nhi + + stg $nlo,160-8($j,$sp) # tp[j-1]= + la $j,8($j) # j++ + brct $count,.Linner + + algr $NHI,$AHI + lghi $AHI,0 + alcgr $AHI,$AHI + alg $NHI,160($j,$sp)# accumulate previous upmost overflow bit + lghi $ahi,0 + alcgr $AHI,$ahi # new upmost overflow bit + stg $NHI,160-8($j,$sp) + stg $AHI,160($j,$sp) + + la $bp,8($bp) # bp++ + clg $bp,160+8+32($j,$sp) # compare to &bp[num] + jne .Louter + + lg $rp,160+8+16($j,$sp) # reincarnate rp + la $ap,160($sp) + ahi $num,1 # restore $num, incidentally clears "borrow" + + la $j,0(%r0) + lr $count,$num +.Lsub: lg $alo,0($j,$ap) + slbg $alo,0($j,$np) + stg $alo,0($j,$rp) + la $j,8($j) + brct $count,.Lsub + lghi $ahi,0 + slbgr $AHI,$ahi # handle upmost carry + + ngr $ap,$AHI + lghi $np,-1 + xgr $np,$AHI + ngr $np,$rp + ogr $ap,$np # ap=borrow?tp:rp + + la $j,0(%r0) + lgr $count,$num +.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh + stg $j,160($j,$sp) # zap tp + stg $alo,0($j,$rp) + la $j,8($j) + brct $count,.Lcopy + + la %r1,160+8+48($j,$sp) + lmg %r6,%r15,0(%r1) + lghi %r2,1 # signal "processed" + br %r14 +.size 
bn_mul_mont,.-bn_mul_mont +.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>" +___ + +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/s390x.S b/openssl/crypto/bn/asm/s390x.S new file mode 100644 index 000000000..8f45f5d51 --- /dev/null +++ b/openssl/crypto/bn/asm/s390x.S @@ -0,0 +1,678 @@ +.ident "s390x.S, version 1.0" +// ==================================================================== +// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +// project. +// +// Rights for redistribution and usage in source and binary forms are +// granted according to the OpenSSL license. Warranty of any kind is +// disclaimed. +// ==================================================================== + +.text + +#define zero %r0 + +// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5); +.globl bn_mul_add_words +.type bn_mul_add_words,@function +.align 4 +bn_mul_add_words: + lghi zero,0 // zero = 0 + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0; + ltgfr %r4,%r4 + bler %r14 // if (len<=0) return 0; + + stmg %r6,%r10,48(%r15) + lghi %r8,0 // carry = 0 + srag %r10,%r4,2 // cnt=len/4 + jz .Loop1_madd + +.Loop4_madd: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + algr %r7,%r8 // +=carry + alcgr %r6,zero + alg %r7,0(%r2,%r1) // +=rp[i] + alcgr %r6,zero + stg %r7,0(%r2,%r1) // rp[i]= + + lg %r9,8(%r2,%r3) + mlgr %r8,%r5 + algr %r9,%r6 + alcgr %r8,zero + alg %r9,8(%r2,%r1) + alcgr %r8,zero + stg %r9,8(%r2,%r1) + + lg %r7,16(%r2,%r3) + mlgr %r6,%r5 + algr %r7,%r8 + alcgr %r6,zero + alg %r7,16(%r2,%r1) + alcgr %r6,zero + stg %r7,16(%r2,%r1) + + lg %r9,24(%r2,%r3) + mlgr %r8,%r5 + algr %r9,%r6 + alcgr %r8,zero + alg %r9,24(%r2,%r1) + alcgr %r8,zero + stg %r9,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r10,.Loop4_madd + + lghi %r10,3 + nr %r4,%r10 // cnt=len%4 + jz .Lend_madd + +.Loop1_madd: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + algr %r7,%r8 // +=carry + alcgr %r6,zero + alg %r7,0(%r2,%r1) // +=rp[i] + alcgr %r6,zero + stg %r7,0(%r2,%r1) // rp[i]= + + lgr %r8,%r6 + la %r2,8(%r2) // i++ + brct %r4,.Loop1_madd + +.Lend_madd: + lgr %r2,%r8 + lmg %r6,%r10,48(%r15) + br %r14 +.size bn_mul_add_words,.-bn_mul_add_words + +// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5); +.globl bn_mul_words +.type bn_mul_words,@function +.align 4 +bn_mul_words: + lghi zero,0 // zero = 0 + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0; + ltgfr %r4,%r4 + bler %r14 // if (len<=0) return 0; + + stmg %r6,%r10,48(%r15) + lghi %r8,0 // carry = 0 + srag %r10,%r4,2 // cnt=len/4 + jz .Loop1_mul + +.Loop4_mul: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + algr %r7,%r8 // +=carry + alcgr %r6,zero + stg %r7,0(%r2,%r1) // rp[i]= + + lg %r9,8(%r2,%r3) + mlgr %r8,%r5 + algr %r9,%r6 + alcgr %r8,zero + stg %r9,8(%r2,%r1) + + lg %r7,16(%r2,%r3) + mlgr %r6,%r5 + algr %r7,%r8 + alcgr %r6,zero + stg %r7,16(%r2,%r1) + + lg %r9,24(%r2,%r3) + mlgr %r8,%r5 + algr %r9,%r6 + alcgr %r8,zero + stg %r9,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r10,.Loop4_mul + + lghi %r10,3 + nr %r4,%r10 // cnt=len%4 + jz .Lend_mul + +.Loop1_mul: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + algr %r7,%r8 // +=carry + alcgr %r6,zero + stg %r7,0(%r2,%r1) // rp[i]= + + lgr %r8,%r6 + la %r2,8(%r2) // i++ + brct %r4,.Loop1_mul + +.Lend_mul: + lgr %r2,%r8 + lmg %r6,%r10,48(%r15) + br %r14 +.size bn_mul_words,.-bn_mul_words + +// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r2,int r4) +.globl bn_sqr_words +.type bn_sqr_words,@function +.align 4 
+bn_sqr_words: + ltgfr %r4,%r4 + bler %r14 + + stmg %r6,%r7,48(%r15) + srag %r1,%r4,2 // cnt=len/4 + jz .Loop1_sqr + +.Loop4_sqr: + lg %r7,0(%r3) + mlgr %r6,%r7 + stg %r7,0(%r2) + stg %r6,8(%r2) + + lg %r7,8(%r3) + mlgr %r6,%r7 + stg %r7,16(%r2) + stg %r6,24(%r2) + + lg %r7,16(%r3) + mlgr %r6,%r7 + stg %r7,32(%r2) + stg %r6,40(%r2) + + lg %r7,24(%r3) + mlgr %r6,%r7 + stg %r7,48(%r2) + stg %r6,56(%r2) + + la %r3,32(%r3) + la %r2,64(%r2) + brct %r1,.Loop4_sqr + + lghi %r1,3 + nr %r4,%r1 // cnt=len%4 + jz .Lend_sqr + +.Loop1_sqr: + lg %r7,0(%r3) + mlgr %r6,%r7 + stg %r7,0(%r2) + stg %r6,8(%r2) + + la %r3,8(%r3) + la %r2,16(%r2) + brct %r4,.Loop1_sqr + +.Lend_sqr: + lmg %r6,%r7,48(%r15) + br %r14 +.size bn_sqr_words,.-bn_sqr_words + +// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d); +.globl bn_div_words +.type bn_div_words,@function +.align 4 +bn_div_words: + dlgr %r2,%r4 + lgr %r2,%r3 + br %r14 +.size bn_div_words,.-bn_div_words + +// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5); +.globl bn_add_words +.type bn_add_words,@function +.align 4 +bn_add_words: + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0 + ltgfr %r5,%r5 + bler %r14 // if (len<=0) return 0; + + stg %r6,48(%r15) + lghi %r6,3 + nr %r6,%r5 // len%4 + sra %r5,2 // len/4, use sra because it sets condition code + jz .Loop1_add // carry is incidentally cleared if branch taken + algr %r2,%r2 // clear carry + +.Loop4_add: + lg %r0,0(%r2,%r3) + alcg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + lg %r0,8(%r2,%r3) + alcg %r0,8(%r2,%r4) + stg %r0,8(%r2,%r1) + lg %r0,16(%r2,%r3) + alcg %r0,16(%r2,%r4) + stg %r0,16(%r2,%r1) + lg %r0,24(%r2,%r3) + alcg %r0,24(%r2,%r4) + stg %r0,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r5,.Loop4_add + + la %r6,1(%r6) // see if len%4 is zero ... + brct %r6,.Loop1_add // without touching condition code:-) + +.Lexit_add: + lghi %r2,0 + alcgr %r2,%r2 + lg %r6,48(%r15) + br %r14 + +.Loop1_add: + lg %r0,0(%r2,%r3) + alcg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + + la %r2,8(%r2) // i++ + brct %r6,.Loop1_add + + j .Lexit_add +.size bn_add_words,.-bn_add_words + +// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5); +.globl bn_sub_words +.type bn_sub_words,@function +.align 4 +bn_sub_words: + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0 + ltgfr %r5,%r5 + bler %r14 // if (len<=0) return 0; + + stg %r6,48(%r15) + lghi %r6,3 + nr %r6,%r5 // len%4 + sra %r5,2 // len/4, use sra because it sets condition code + jnz .Loop4_sub // borrow is incidentally cleared if branch taken + slgr %r2,%r2 // clear borrow + +.Loop1_sub: + lg %r0,0(%r2,%r3) + slbg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + + la %r2,8(%r2) // i++ + brct %r6,.Loop1_sub + j .Lexit_sub + +.Loop4_sub: + lg %r0,0(%r2,%r3) + slbg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + lg %r0,8(%r2,%r3) + slbg %r0,8(%r2,%r4) + stg %r0,8(%r2,%r1) + lg %r0,16(%r2,%r3) + slbg %r0,16(%r2,%r4) + stg %r0,16(%r2,%r1) + lg %r0,24(%r2,%r3) + slbg %r0,24(%r2,%r4) + stg %r0,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r5,.Loop4_sub + + la %r6,1(%r6) // see if len%4 is zero ... 
+ brct %r6,.Loop1_sub // without touching condition code:-) + +.Lexit_sub: + lghi %r2,0 + slbgr %r2,%r2 + lcgr %r2,%r2 + lg %r6,48(%r15) + br %r14 +.size bn_sub_words,.-bn_sub_words + +#define c1 %r1 +#define c2 %r5 +#define c3 %r8 + +#define mul_add_c(ai,bi,c1,c2,c3) \ + lg %r7,ai*8(%r3); \ + mlg %r6,bi*8(%r4); \ + algr c1,%r7; \ + alcgr c2,%r6; \ + alcgr c3,zero + +// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4); +.globl bn_mul_comba8 +.type bn_mul_comba8,@function +.align 4 +bn_mul_comba8: + stmg %r6,%r8,48(%r15) + + lghi c1,0 + lghi c2,0 + lghi c3,0 + lghi zero,0 + + mul_add_c(0,0,c1,c2,c3); + stg c1,0*8(%r2) + lghi c1,0 + + mul_add_c(0,1,c2,c3,c1); + mul_add_c(1,0,c2,c3,c1); + stg c2,1*8(%r2) + lghi c2,0 + + mul_add_c(2,0,c3,c1,c2); + mul_add_c(1,1,c3,c1,c2); + mul_add_c(0,2,c3,c1,c2); + stg c3,2*8(%r2) + lghi c3,0 + + mul_add_c(0,3,c1,c2,c3); + mul_add_c(1,2,c1,c2,c3); + mul_add_c(2,1,c1,c2,c3); + mul_add_c(3,0,c1,c2,c3); + stg c1,3*8(%r2) + lghi c1,0 + + mul_add_c(4,0,c2,c3,c1); + mul_add_c(3,1,c2,c3,c1); + mul_add_c(2,2,c2,c3,c1); + mul_add_c(1,3,c2,c3,c1); + mul_add_c(0,4,c2,c3,c1); + stg c2,4*8(%r2) + lghi c2,0 + + mul_add_c(0,5,c3,c1,c2); + mul_add_c(1,4,c3,c1,c2); + mul_add_c(2,3,c3,c1,c2); + mul_add_c(3,2,c3,c1,c2); + mul_add_c(4,1,c3,c1,c2); + mul_add_c(5,0,c3,c1,c2); + stg c3,5*8(%r2) + lghi c3,0 + + mul_add_c(6,0,c1,c2,c3); + mul_add_c(5,1,c1,c2,c3); + mul_add_c(4,2,c1,c2,c3); + mul_add_c(3,3,c1,c2,c3); + mul_add_c(2,4,c1,c2,c3); + mul_add_c(1,5,c1,c2,c3); + mul_add_c(0,6,c1,c2,c3); + stg c1,6*8(%r2) + lghi c1,0 + + mul_add_c(0,7,c2,c3,c1); + mul_add_c(1,6,c2,c3,c1); + mul_add_c(2,5,c2,c3,c1); + mul_add_c(3,4,c2,c3,c1); + mul_add_c(4,3,c2,c3,c1); + mul_add_c(5,2,c2,c3,c1); + mul_add_c(6,1,c2,c3,c1); + mul_add_c(7,0,c2,c3,c1); + stg c2,7*8(%r2) + lghi c2,0 + + mul_add_c(7,1,c3,c1,c2); + mul_add_c(6,2,c3,c1,c2); + mul_add_c(5,3,c3,c1,c2); + mul_add_c(4,4,c3,c1,c2); + mul_add_c(3,5,c3,c1,c2); + mul_add_c(2,6,c3,c1,c2); + mul_add_c(1,7,c3,c1,c2); + stg c3,8*8(%r2) + lghi c3,0 + + mul_add_c(2,7,c1,c2,c3); + mul_add_c(3,6,c1,c2,c3); + mul_add_c(4,5,c1,c2,c3); + mul_add_c(5,4,c1,c2,c3); + mul_add_c(6,3,c1,c2,c3); + mul_add_c(7,2,c1,c2,c3); + stg c1,9*8(%r2) + lghi c1,0 + + mul_add_c(7,3,c2,c3,c1); + mul_add_c(6,4,c2,c3,c1); + mul_add_c(5,5,c2,c3,c1); + mul_add_c(4,6,c2,c3,c1); + mul_add_c(3,7,c2,c3,c1); + stg c2,10*8(%r2) + lghi c2,0 + + mul_add_c(4,7,c3,c1,c2); + mul_add_c(5,6,c3,c1,c2); + mul_add_c(6,5,c3,c1,c2); + mul_add_c(7,4,c3,c1,c2); + stg c3,11*8(%r2) + lghi c3,0 + + mul_add_c(7,5,c1,c2,c3); + mul_add_c(6,6,c1,c2,c3); + mul_add_c(5,7,c1,c2,c3); + stg c1,12*8(%r2) + lghi c1,0 + + + mul_add_c(6,7,c2,c3,c1); + mul_add_c(7,6,c2,c3,c1); + stg c2,13*8(%r2) + lghi c2,0 + + mul_add_c(7,7,c3,c1,c2); + stg c3,14*8(%r2) + stg c1,15*8(%r2) + + lmg %r6,%r8,48(%r15) + br %r14 +.size bn_mul_comba8,.-bn_mul_comba8 + +// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4); +.globl bn_mul_comba4 +.type bn_mul_comba4,@function +.align 4 +bn_mul_comba4: + stmg %r6,%r8,48(%r15) + + lghi c1,0 + lghi c2,0 + lghi c3,0 + lghi zero,0 + + mul_add_c(0,0,c1,c2,c3); + stg c1,0*8(%r3) + lghi c1,0 + + mul_add_c(0,1,c2,c3,c1); + mul_add_c(1,0,c2,c3,c1); + stg c2,1*8(%r2) + lghi c2,0 + + mul_add_c(2,0,c3,c1,c2); + mul_add_c(1,1,c3,c1,c2); + mul_add_c(0,2,c3,c1,c2); + stg c3,2*8(%r2) + lghi c3,0 + + mul_add_c(0,3,c1,c2,c3); + mul_add_c(1,2,c1,c2,c3); + mul_add_c(2,1,c1,c2,c3); + mul_add_c(3,0,c1,c2,c3); + stg c1,3*8(%r2) + lghi c1,0 + + mul_add_c(3,1,c2,c3,c1); + 
mul_add_c(2,2,c2,c3,c1); + mul_add_c(1,3,c2,c3,c1); + stg c2,4*8(%r2) + lghi c2,0 + + mul_add_c(2,3,c3,c1,c2); + mul_add_c(3,2,c3,c1,c2); + stg c3,5*8(%r2) + lghi c3,0 + + mul_add_c(3,3,c1,c2,c3); + stg c1,6*8(%r2) + stg c2,7*8(%r2) + + stmg %r6,%r8,48(%r15) + br %r14 +.size bn_mul_comba4,.-bn_mul_comba4 + +#define sqr_add_c(ai,c1,c2,c3) \ + lg %r7,ai*8(%r3); \ + mlgr %r6,%r7; \ + algr c1,%r7; \ + alcgr c2,%r6; \ + alcgr c3,zero + +#define sqr_add_c2(ai,aj,c1,c2,c3) \ + lg %r7,ai*8(%r3); \ + mlg %r6,aj*8(%r3); \ + algr c1,%r7; \ + alcgr c2,%r6; \ + alcgr c3,zero; \ + algr c1,%r7; \ + alcgr c2,%r6; \ + alcgr c3,zero + +// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3); +.globl bn_sqr_comba8 +.type bn_sqr_comba8,@function +.align 4 +bn_sqr_comba8: + stmg %r6,%r8,48(%r15) + + lghi c1,0 + lghi c2,0 + lghi c3,0 + lghi zero,0 + + sqr_add_c(0,c1,c2,c3); + stg c1,0*8(%r2) + lghi c1,0 + + sqr_add_c2(1,0,c2,c3,c1); + stg c2,1*8(%r2) + lghi c2,0 + + sqr_add_c(1,c3,c1,c2); + sqr_add_c2(2,0,c3,c1,c2); + stg c3,2*8(%r2) + lghi c3,0 + + sqr_add_c2(3,0,c1,c2,c3); + sqr_add_c2(2,1,c1,c2,c3); + stg c1,3*8(%r2) + lghi c1,0 + + sqr_add_c(2,c2,c3,c1); + sqr_add_c2(3,1,c2,c3,c1); + sqr_add_c2(4,0,c2,c3,c1); + stg c2,4*8(%r2) + lghi c2,0 + + sqr_add_c2(5,0,c3,c1,c2); + sqr_add_c2(4,1,c3,c1,c2); + sqr_add_c2(3,2,c3,c1,c2); + stg c3,5*8(%r2) + lghi c3,0 + + sqr_add_c(3,c1,c2,c3); + sqr_add_c2(4,2,c1,c2,c3); + sqr_add_c2(5,1,c1,c2,c3); + sqr_add_c2(6,0,c1,c2,c3); + stg c1,6*8(%r2) + lghi c1,0 + + sqr_add_c2(7,0,c2,c3,c1); + sqr_add_c2(6,1,c2,c3,c1); + sqr_add_c2(5,2,c2,c3,c1); + sqr_add_c2(4,3,c2,c3,c1); + stg c2,7*8(%r2) + lghi c2,0 + + sqr_add_c(4,c3,c1,c2); + sqr_add_c2(5,3,c3,c1,c2); + sqr_add_c2(6,2,c3,c1,c2); + sqr_add_c2(7,1,c3,c1,c2); + stg c3,8*8(%r2) + lghi c3,0 + + sqr_add_c2(7,2,c1,c2,c3); + sqr_add_c2(6,3,c1,c2,c3); + sqr_add_c2(5,4,c1,c2,c3); + stg c1,9*8(%r2) + lghi c1,0 + + sqr_add_c(5,c2,c3,c1); + sqr_add_c2(6,4,c2,c3,c1); + sqr_add_c2(7,3,c2,c3,c1); + stg c2,10*8(%r2) + lghi c2,0 + + sqr_add_c2(7,4,c3,c1,c2); + sqr_add_c2(6,5,c3,c1,c2); + stg c3,11*8(%r2) + lghi c3,0 + + sqr_add_c(6,c1,c2,c3); + sqr_add_c2(7,5,c1,c2,c3); + stg c1,12*8(%r2) + lghi c1,0 + + sqr_add_c2(7,6,c2,c3,c1); + stg c2,13*8(%r2) + lghi c2,0 + + sqr_add_c(7,c3,c1,c2); + stg c3,14*8(%r2) + stg c1,15*8(%r2) + + lmg %r6,%r8,48(%r15) + br %r14 +.size bn_sqr_comba8,.-bn_sqr_comba8 + +// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3); +.globl bn_sqr_comba4 +.type bn_sqr_comba4,@function +.align 4 +bn_sqr_comba4: + stmg %r6,%r8,48(%r15) + + lghi c1,0 + lghi c2,0 + lghi c3,0 + lghi zero,0 + + sqr_add_c(0,c1,c2,c3); + stg c1,0*8(%r2) + lghi c1,0 + + sqr_add_c2(1,0,c2,c3,c1); + stg c2,1*8(%r2) + lghi c2,0 + + sqr_add_c(1,c3,c1,c2); + sqr_add_c2(2,0,c3,c1,c2); + stg c3,2*8(%r2) + lghi c3,0 + + sqr_add_c2(3,0,c1,c2,c3); + sqr_add_c2(2,1,c1,c2,c3); + stg c1,3*8(%r2) + lghi c1,0 + + sqr_add_c(2,c2,c3,c1); + sqr_add_c2(3,1,c2,c3,c1); + stg c2,4*8(%r2) + lghi c2,0 + + sqr_add_c2(3,2,c3,c1,c2); + stg c3,5*8(%r2) + lghi c3,0 + + sqr_add_c(3,c1,c2,c3); + stg c1,6*8(%r2) + stg c2,7*8(%r2) + + lmg %r6,%r8,48(%r15) + br %r14 +.size bn_sqr_comba4,.-bn_sqr_comba4 diff --git a/openssl/crypto/bn/asm/sparcv8.S b/openssl/crypto/bn/asm/sparcv8.S new file mode 100644 index 000000000..88c5dc480 --- /dev/null +++ b/openssl/crypto/bn/asm/sparcv8.S @@ -0,0 +1,1458 @@ +.ident "sparcv8.s, Version 1.4" +.ident "SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>" + +/* + * ==================================================================== + * Written by 
Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL + * project. + * + * Rights for redistribution and usage in source and binary forms are + * granted according to the OpenSSL license. Warranty of any kind is + * disclaimed. + * ==================================================================== + */ + +/* + * This is my modest contributon to OpenSSL project (see + * http://www.openssl.org/ for more information about it) and is + * a drop-in SuperSPARC ISA replacement for crypto/bn/bn_asm.c + * module. For updates see http://fy.chalmers.se/~appro/hpe/. + * + * See bn_asm.sparc.v8plus.S for more details. + */ + +/* + * Revision history. + * + * 1.1 - new loop unrolling model(*); + * 1.2 - made gas friendly; + * 1.3 - fixed problem with /usr/ccs/lib/cpp; + * 1.4 - some retunes; + * + * (*) see bn_asm.sparc.v8plus.S for details + */ + +.section ".text",#alloc,#execinstr +.file "bn_asm.sparc.v8.S" + +.align 32 + +.global bn_mul_add_words +/* + * BN_ULONG bn_mul_add_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_add_words: + cmp %o2,0 + bg,a .L_bn_mul_add_words_proceed + ld [%o1],%g2 + retl + clr %o0 + +.L_bn_mul_add_words_proceed: + andcc %o2,-4,%g0 + bz .L_bn_mul_add_words_tail + clr %o5 + +.L_bn_mul_add_words_loop: + ld [%o0],%o4 + ld [%o1+4],%g3 + umul %o3,%g2,%g2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + st %o4,[%o0] + addx %g1,0,%o5 + + ld [%o0+4],%o4 + ld [%o1+8],%g2 + umul %o3,%g3,%g3 + dec 4,%o2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g3,%o4 + st %o4,[%o0+4] + addx %g1,0,%o5 + + ld [%o0+8],%o4 + ld [%o1+12],%g3 + umul %o3,%g2,%g2 + inc 16,%o1 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + st %o4,[%o0+8] + addx %g1,0,%o5 + + ld [%o0+12],%o4 + umul %o3,%g3,%g3 + inc 16,%o0 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g3,%o4 + st %o4,[%o0-4] + addx %g1,0,%o5 + andcc %o2,-4,%g0 + bnz,a .L_bn_mul_add_words_loop + ld [%o1],%g2 + + tst %o2 + bnz,a .L_bn_mul_add_words_tail + ld [%o1],%g2 +.L_bn_mul_add_words_return: + retl + mov %o5,%o0 + nop + +.L_bn_mul_add_words_tail: + ld [%o0],%o4 + umul %o3,%g2,%g2 + addcc %o4,%o5,%o4 + rd %y,%g1 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_add_words_return + st %o4,[%o0] + + ld [%o1+4],%g2 + ld [%o0+4],%o4 + umul %o3,%g2,%g2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_add_words_return + st %o4,[%o0+4] + + ld [%o1+8],%g2 + ld [%o0+8],%o4 + umul %o3,%g2,%g2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + st %o4,[%o0+8] + retl + addx %g1,0,%o0 + +.type bn_mul_add_words,#function +.size bn_mul_add_words,(.-bn_mul_add_words) + +.align 32 + +.global bn_mul_words +/* + * BN_ULONG bn_mul_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_words: + cmp %o2,0 + bg,a .L_bn_mul_words_proceeed + ld [%o1],%g2 + retl + clr %o0 + +.L_bn_mul_words_proceeed: + andcc %o2,-4,%g0 + bz .L_bn_mul_words_tail + clr %o5 + +.L_bn_mul_words_loop: + ld [%o1+4],%g3 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + addx %g1,0,%o5 + st %g2,[%o0] + + ld [%o1+8],%g2 + umul %o3,%g3,%g3 + addcc %g3,%o5,%g3 + rd %y,%g1 + dec 4,%o2 + addx %g1,0,%o5 + st %g3,[%o0+4] + + ld [%o1+12],%g3 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + inc 16,%o1 + st %g2,[%o0+8] + addx %g1,0,%o5 + + umul %o3,%g3,%g3 + addcc %g3,%o5,%g3 + rd %y,%g1 + inc 16,%o0 + addx %g1,0,%o5 + st %g3,[%o0-4] + andcc %o2,-4,%g0 + nop 
+ bnz,a .L_bn_mul_words_loop + ld [%o1],%g2 + + tst %o2 + bnz,a .L_bn_mul_words_tail + ld [%o1],%g2 +.L_bn_mul_words_return: + retl + mov %o5,%o0 + nop + +.L_bn_mul_words_tail: + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_words_return + st %g2,[%o0] + nop + + ld [%o1+4],%g2 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_words_return + st %g2,[%o0+4] + + ld [%o1+8],%g2 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + st %g2,[%o0+8] + retl + addx %g1,0,%o0 + +.type bn_mul_words,#function +.size bn_mul_words,(.-bn_mul_words) + +.align 32 +.global bn_sqr_words +/* + * void bn_sqr_words(r,a,n) + * BN_ULONG *r,*a; + * int n; + */ +bn_sqr_words: + cmp %o2,0 + bg,a .L_bn_sqr_words_proceeed + ld [%o1],%g2 + retl + clr %o0 + +.L_bn_sqr_words_proceeed: + andcc %o2,-4,%g0 + bz .L_bn_sqr_words_tail + clr %o5 + +.L_bn_sqr_words_loop: + ld [%o1+4],%g3 + umul %g2,%g2,%o4 + st %o4,[%o0] + rd %y,%o5 + st %o5,[%o0+4] + + ld [%o1+8],%g2 + umul %g3,%g3,%o4 + dec 4,%o2 + st %o4,[%o0+8] + rd %y,%o5 + st %o5,[%o0+12] + nop + + ld [%o1+12],%g3 + umul %g2,%g2,%o4 + st %o4,[%o0+16] + rd %y,%o5 + inc 16,%o1 + st %o5,[%o0+20] + + umul %g3,%g3,%o4 + inc 32,%o0 + st %o4,[%o0-8] + rd %y,%o5 + st %o5,[%o0-4] + andcc %o2,-4,%g2 + bnz,a .L_bn_sqr_words_loop + ld [%o1],%g2 + + tst %o2 + nop + bnz,a .L_bn_sqr_words_tail + ld [%o1],%g2 +.L_bn_sqr_words_return: + retl + clr %o0 + +.L_bn_sqr_words_tail: + umul %g2,%g2,%o4 + st %o4,[%o0] + deccc %o2 + rd %y,%o5 + bz .L_bn_sqr_words_return + st %o5,[%o0+4] + + ld [%o1+4],%g2 + umul %g2,%g2,%o4 + st %o4,[%o0+8] + deccc %o2 + rd %y,%o5 + nop + bz .L_bn_sqr_words_return + st %o5,[%o0+12] + + ld [%o1+8],%g2 + umul %g2,%g2,%o4 + st %o4,[%o0+16] + rd %y,%o5 + st %o5,[%o0+20] + retl + clr %o0 + +.type bn_sqr_words,#function +.size bn_sqr_words,(.-bn_sqr_words) + +.align 32 + +.global bn_div_words +/* + * BN_ULONG bn_div_words(h,l,d) + * BN_ULONG h,l,d; + */ +bn_div_words: + wr %o0,%y + udiv %o1,%o2,%o0 + retl + nop + +.type bn_div_words,#function +.size bn_div_words,(.-bn_div_words) + +.align 32 + +.global bn_add_words +/* + * BN_ULONG bn_add_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_add_words: + cmp %o3,0 + bg,a .L_bn_add_words_proceed + ld [%o1],%o4 + retl + clr %o0 + +.L_bn_add_words_proceed: + andcc %o3,-4,%g0 + bz .L_bn_add_words_tail + clr %g1 + ba .L_bn_add_words_warn_loop + addcc %g0,0,%g0 ! 
clear carry flag + +.L_bn_add_words_loop: + ld [%o1],%o4 +.L_bn_add_words_warn_loop: + ld [%o2],%o5 + ld [%o1+4],%g3 + ld [%o2+4],%g4 + dec 4,%o3 + addxcc %o5,%o4,%o5 + st %o5,[%o0] + + ld [%o1+8],%o4 + ld [%o2+8],%o5 + inc 16,%o1 + addxcc %g3,%g4,%g3 + st %g3,[%o0+4] + + ld [%o1-4],%g3 + ld [%o2+12],%g4 + inc 16,%o2 + addxcc %o5,%o4,%o5 + st %o5,[%o0+8] + + inc 16,%o0 + addxcc %g3,%g4,%g3 + st %g3,[%o0-4] + addx %g0,0,%g1 + andcc %o3,-4,%g0 + bnz,a .L_bn_add_words_loop + addcc %g1,-1,%g0 + + tst %o3 + bnz,a .L_bn_add_words_tail + ld [%o1],%o4 +.L_bn_add_words_return: + retl + mov %g1,%o0 + +.L_bn_add_words_tail: + addcc %g1,-1,%g0 + ld [%o2],%o5 + addxcc %o5,%o4,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_add_words_return + st %o5,[%o0] + + ld [%o1+4],%o4 + addcc %g1,-1,%g0 + ld [%o2+4],%o5 + addxcc %o5,%o4,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_add_words_return + st %o5,[%o0+4] + + ld [%o1+8],%o4 + addcc %g1,-1,%g0 + ld [%o2+8],%o5 + addxcc %o5,%o4,%o5 + st %o5,[%o0+8] + retl + addx %g0,0,%o0 + +.type bn_add_words,#function +.size bn_add_words,(.-bn_add_words) + +.align 32 + +.global bn_sub_words +/* + * BN_ULONG bn_sub_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_sub_words: + cmp %o3,0 + bg,a .L_bn_sub_words_proceed + ld [%o1],%o4 + retl + clr %o0 + +.L_bn_sub_words_proceed: + andcc %o3,-4,%g0 + bz .L_bn_sub_words_tail + clr %g1 + ba .L_bn_sub_words_warm_loop + addcc %g0,0,%g0 ! clear carry flag + +.L_bn_sub_words_loop: + ld [%o1],%o4 +.L_bn_sub_words_warm_loop: + ld [%o2],%o5 + ld [%o1+4],%g3 + ld [%o2+4],%g4 + dec 4,%o3 + subxcc %o4,%o5,%o5 + st %o5,[%o0] + + ld [%o1+8],%o4 + ld [%o2+8],%o5 + inc 16,%o1 + subxcc %g3,%g4,%g4 + st %g4,[%o0+4] + + ld [%o1-4],%g3 + ld [%o2+12],%g4 + inc 16,%o2 + subxcc %o4,%o5,%o5 + st %o5,[%o0+8] + + inc 16,%o0 + subxcc %g3,%g4,%g4 + st %g4,[%o0-4] + addx %g0,0,%g1 + andcc %o3,-4,%g0 + bnz,a .L_bn_sub_words_loop + addcc %g1,-1,%g0 + + tst %o3 + nop + bnz,a .L_bn_sub_words_tail + ld [%o1],%o4 +.L_bn_sub_words_return: + retl + mov %g1,%o0 + +.L_bn_sub_words_tail: + addcc %g1,-1,%g0 + ld [%o2],%o5 + subxcc %o4,%o5,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_sub_words_return + st %o5,[%o0] + nop + + ld [%o1+4],%o4 + addcc %g1,-1,%g0 + ld [%o2+4],%o5 + subxcc %o4,%o5,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_sub_words_return + st %o5,[%o0+4] + + ld [%o1+8],%o4 + addcc %g1,-1,%g0 + ld [%o2+8],%o5 + subxcc %o4,%o5,%o5 + st %o5,[%o0+8] + retl + addx %g0,0,%o0 + +.type bn_sub_words,#function +.size bn_sub_words,(.-bn_sub_words) + +#define FRAME_SIZE -96 + +/* + * Here is register usage map for *all* routines below. 
+ */ +#define t_1 %o0 +#define t_2 %o1 +#define c_1 %o2 +#define c_2 %o3 +#define c_3 %o4 + +#define ap(I) [%i1+4*I] +#define bp(I) [%i2+4*I] +#define rp(I) [%i0+4*I] + +#define a_0 %l0 +#define a_1 %l1 +#define a_2 %l2 +#define a_3 %l3 +#define a_4 %l4 +#define a_5 %l5 +#define a_6 %l6 +#define a_7 %l7 + +#define b_0 %i3 +#define b_1 %i4 +#define b_2 %i5 +#define b_3 %o5 +#define b_4 %g1 +#define b_5 %g2 +#define b_6 %g3 +#define b_7 %g4 + +.align 32 +.global bn_mul_comba8 +/* + * void bn_mul_comba8(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba8: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + ld bp(0),b_0 + umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3); + ld bp(1),b_1 + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1); + ld ap(1),a_1 + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc %g0,t_2,c_3 != + addx %g0,%g0,c_1 + ld ap(2),a_2 + umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + st c_2,rp(1) !r[1]=c2; + addx c_1,%g0,c_1 != + + umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx %g0,%g0,c_2 + ld bp(2),b_2 + umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + ld bp(3),b_3 + addx c_2,%g0,c_2 != + umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + st c_3,rp(2) !r[2]=c3; + + umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + ld ap(3),a_3 + umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + ld ap(4),a_4 + umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!= + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + ld bp(4),b_4 + umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + ld bp(5),b_5 + umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(4) !r[4]=c2; + + umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + ld ap(5),a_5 + umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + ld ap(6),a_6 + addx c_2,%g0,c_2 != + umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + st c_3,rp(5) !r[5]=c3; + + umul 
a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + ld bp(6),b_6 + addx c_3,%g0,c_3 != + umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + ld bp(7),b_7 + umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + st c_1,rp(6) !r[6]=c1; + addx c_3,%g0,c_3 != + + umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx %g0,%g0,c_1 + umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + ld ap(7),a_7 + umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + st c_2,rp(7) !r[7]=c2; + + umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 ! 
+ addx c_2,%g0,c_2 + st c_3,rp(8) !r[8]=c3; + + umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(9) !r[9]=c1; + + umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(10) !r[10]=c2; + + umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 != + umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(11) !r[11]=c3; + addx c_2,%g0,c_2 != + + umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + st c_1,rp(12) !r[12]=c1; + addx c_3,%g0,c_3 != + + umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx %g0,%g0,c_1 + umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(13) !r[13]=c2; + + umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + nop != + st c_3,rp(14) !r[14]=c3; + st c_1,rp(15) !r[15]=c1; + + ret + restore %g0,%g0,%o0 + +.type bn_mul_comba8,#function +.size bn_mul_comba8,(.-bn_mul_comba8) + +.align 32 + +.global bn_mul_comba4 +/* + * void bn_mul_comba4(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba4: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + ld bp(0),b_0 + umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3); + ld bp(1),b_1 + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1); + ld ap(1),a_1 + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc %g0,t_2,c_3 + addx %g0,%g0,c_1 + ld ap(2),a_2 + umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx 
c_1,%g0,c_1 != + st c_2,rp(1) !r[1]=c2; + + umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + ld bp(2),b_2 + umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + ld bp(3),b_3 + umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + st c_3,rp(2) !r[2]=c3; + + umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 != + umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + ld ap(3),a_3 + umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(4) !r[4]=c2; + + umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(5) !r[5]=c3; + addx c_2,%g0,c_2 != + + umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + st c_1,rp(6) !r[6]=c1; + st c_2,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_mul_comba4,#function +.size bn_mul_comba4,(.-bn_mul_comba4) + +.align 32 + +.global bn_sqr_comba8 +bn_sqr_comba8: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + ld ap(1),a_1 + umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3); + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + ld ap(2),a_2 + umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc %g0,t_2,c_3 + addx %g0,%g0,c_1 != + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 + st c_2,rp(1) !r[1]=c2; + addx c_1,%g0,c_1 != + + umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + ld ap(3),a_3 + umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + st c_3,rp(2) !r[2]=c3; + + umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 != + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + ld ap(4),a_4 + addx c_3,%g0,c_3 != + umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + ld ap(5),a_5 
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + st c_2,rp(4) !r[4]=c2; + addx c_1,%g0,c_1 != + + umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + ld ap(6),a_6 + umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + st c_3,rp(5) !r[5]=c3; + + umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + ld ap(7),a_7 + umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(6) !r[6]=c1; + + umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + st c_2,rp(7) !r[7]=c2; + + umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(8) !r[8]=c3; + addx c_2,%g0,c_2 != + + umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(9) !r[9]=c1; + + umul a_7,a_3,t_1 
!sqr_add_c2(a,7,3,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(10) !r[10]=c2; + + umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 != + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + st c_3,rp(11) !r[11]=c3; + addx c_2,%g0,c_2 != + + umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + st c_1,rp(12) !r[12]=c1; + + umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 != + addxcc c_3,t_2,c_3 + st c_2,rp(13) !r[13]=c2; + addx c_1,%g0,c_1 != + + umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + st c_3,rp(14) !r[14]=c3; + st c_1,rp(15) !r[15]=c1; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba8,#function +.size bn_sqr_comba8,(.-bn_sqr_comba8) + +.align 32 + +.global bn_sqr_comba4 +/* + * void bn_sqr_comba4(r,a) + * BN_ULONG *r,*a; + */ +bn_sqr_comba4: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3); + ld ap(1),a_1 != + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + ld ap(2),a_2 + umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc %g0,t_2,c_3 + addx %g0,%g0,c_1 != + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(1) !r[1]=c2; + + umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + ld ap(3),a_3 + umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(2) !r[2]=c3; + addx c_2,%g0,c_2 != + + umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(4) !r[4]=c2; + + umul a_2,a_3,t_1 !=!sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 != + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + st c_3,rp(5) !r[5]=c3; + addx c_2,%g0,c_2 != + + umul a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3); + addcc 
c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + st c_1,rp(6) !r[6]=c1; + st c_2,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba4,#function +.size bn_sqr_comba4,(.-bn_sqr_comba4) + +.align 32 diff --git a/openssl/crypto/bn/asm/sparcv8plus.S b/openssl/crypto/bn/asm/sparcv8plus.S new file mode 100644 index 000000000..8c56e2e7e --- /dev/null +++ b/openssl/crypto/bn/asm/sparcv8plus.S @@ -0,0 +1,1547 @@ +.ident "sparcv8plus.s, Version 1.4" +.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>" + +/* + * ==================================================================== + * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL + * project. + * + * Rights for redistribution and usage in source and binary forms are + * granted according to the OpenSSL license. Warranty of any kind is + * disclaimed. + * ==================================================================== + */ + +/* + * This is my modest contributon to OpenSSL project (see + * http://www.openssl.org/ for more information about it) and is + * a drop-in UltraSPARC ISA replacement for crypto/bn/bn_asm.c + * module. For updates see http://fy.chalmers.se/~appro/hpe/. + * + * Questions-n-answers. + * + * Q. How to compile? + * A. With SC4.x/SC5.x: + * + * cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o + * + * and with gcc: + * + * gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o + * + * or if above fails (it does if you have gas installed): + * + * gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o + * + * Quick-n-dirty way to fuse the module into the library. + * Provided that the library is already configured and built + * (in 0.9.2 case with no-asm option): + * + * # cd crypto/bn + * # cp /some/place/bn_asm.sparc.v8plus.S . + * # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o + * # make + * # cd ../.. + * # make; make test + * + * Quick-n-dirty way to get rid of it: + * + * # cd crypto/bn + * # touch bn_asm.c + * # make + * # cd ../.. + * # make; make test + * + * Q. V8plus achitecture? What kind of beast is that? + * A. Well, it's rather a programming model than an architecture... + * It's actually v9-compliant, i.e. *any* UltraSPARC, CPU under + * special conditions, namely when kernel doesn't preserve upper + * 32 bits of otherwise 64-bit registers during a context switch. + * + * Q. Why just UltraSPARC? What about SuperSPARC? + * A. Original release did target UltraSPARC only. Now SuperSPARC + * version is provided along. Both version share bn_*comba[48] + * implementations (see comment later in code for explanation). + * But what's so special about this UltraSPARC implementation? + * Why didn't I let compiler do the job? Trouble is that most of + * available compilers (well, SC5.0 is the only exception) don't + * attempt to take advantage of UltraSPARC's 64-bitness under + * 32-bit kernels even though it's perfectly possible (see next + * question). + * + * Q. 64-bit registers under 32-bit kernels? Didn't you just say it + * doesn't work? + * A. You can't adress *all* registers as 64-bit wide:-( The catch is + * that you actually may rely upon %o0-%o5 and %g1-%g4 being fully + * preserved if you're in a leaf function, i.e. such never calling + * any other functions. All functions in this module are leaf and + * 10 registers is a handful. And as a matter of fact none-"comba" + * routines don't require even that much and I could even afford to + * not allocate own stack frame for 'em:-) + * + * Q. What about 64-bit kernels? + * A. 
What about 'em? Just kidding:-) Pure 64-bit version is currently + * under evaluation and development... + * + * Q. What about shared libraries? + * A. What about 'em? Kidding again:-) Code does *not* contain any + * code position dependencies and it's safe to include it into + * shared library as is. + * + * Q. How much faster does it go? + * A. Do you have a good benchmark? In either case below is what I + * experience with crypto/bn/expspeed.c test program: + * + * v8plus module on U10/300MHz against bn_asm.c compiled with: + * + * cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12% + * cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35% + * egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45% + * + * v8 module on SS10/60MHz against bn_asm.c compiled with: + * + * cc-5.0 -xarch=v8 -xO5 -xdepend +7-10% + * cc-4.2 -xarch=v8 -xO5 -xdepend +10% + * egcs-1.1.2 -mv8 -O3 +35-45% + * + * As you can see it's damn hard to beat the new Sun C compiler + * and it's in first place GNU C users who will appreciate this + * assembler implementation:-) + */ + +/* + * Revision history. + * + * 1.0 - initial release; + * 1.1 - new loop unrolling model(*); + * - some more fine tuning; + * 1.2 - made gas friendly; + * - updates to documentation concerning v9; + * - new performance comparison matrix; + * 1.3 - fixed problem with /usr/ccs/lib/cpp; + * 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient) + * resulting in slight overall performance kick; + * - some retunes; + * - support for GNU as added; + * + * (*) Originally unrolled loop looked like this: + * for (;;) { + * op(p+0); if (--n==0) break; + * op(p+1); if (--n==0) break; + * op(p+2); if (--n==0) break; + * op(p+3); if (--n==0) break; + * p+=4; + * } + * I unroll according to following: + * while (n&~3) { + * op(p+0); op(p+1); op(p+2); op(p+3); + * p+=4; n=-4; + * } + * if (n) { + * op(p+0); if (--n==0) return; + * op(p+2); if (--n==0) return; + * op(p+3); return; + * } + */ + +/* + * GNU assembler can't stand stuw:-( + */ +#define stuw st + +.section ".text",#alloc,#execinstr +.file "bn_asm.sparc.v8plus.S" + +.align 32 + +.global bn_mul_add_words +/* + * BN_ULONG bn_mul_add_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_add_words: + sra %o2,%g0,%o2 ! signx %o2 + brgz,a %o2,.L_bn_mul_add_words_proceed + lduw [%o1],%g2 + retl + clr %o0 + nop + nop + nop + +.L_bn_mul_add_words_proceed: + srl %o3,%g0,%o3 ! clruw %o3 + andcc %o2,-4,%g0 + bz,pn %icc,.L_bn_mul_add_words_tail + clr %o5 + +.L_bn_mul_add_words_loop: ! wow! 32 aligned! 
+ lduw [%o0],%g1 + lduw [%o1+4],%g3 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + nop + add %o4,%g2,%o4 + stuw %o4,[%o0] + srlx %o4,32,%o5 + + lduw [%o0+4],%g1 + lduw [%o1+8],%g2 + mulx %o3,%g3,%g3 + add %g1,%o5,%o4 + dec 4,%o2 + add %o4,%g3,%o4 + stuw %o4,[%o0+4] + srlx %o4,32,%o5 + + lduw [%o0+8],%g1 + lduw [%o1+12],%g3 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + inc 16,%o1 + add %o4,%g2,%o4 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + + lduw [%o0+12],%g1 + mulx %o3,%g3,%g3 + add %g1,%o5,%o4 + inc 16,%o0 + add %o4,%g3,%o4 + andcc %o2,-4,%g0 + stuw %o4,[%o0-4] + srlx %o4,32,%o5 + bnz,a,pt %icc,.L_bn_mul_add_words_loop + lduw [%o1],%g2 + + brnz,a,pn %o2,.L_bn_mul_add_words_tail + lduw [%o1],%g2 +.L_bn_mul_add_words_return: + retl + mov %o5,%o0 + +.L_bn_mul_add_words_tail: + lduw [%o0],%g1 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + dec %o2 + add %o4,%g2,%o4 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_add_words_return + stuw %o4,[%o0] + + lduw [%o1+4],%g2 + lduw [%o0+4],%g1 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + dec %o2 + add %o4,%g2,%o4 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_add_words_return + stuw %o4,[%o0+4] + + lduw [%o1+8],%g2 + lduw [%o0+8],%g1 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + add %o4,%g2,%o4 + stuw %o4,[%o0+8] + retl + srlx %o4,32,%o0 + +.type bn_mul_add_words,#function +.size bn_mul_add_words,(.-bn_mul_add_words) + +.align 32 + +.global bn_mul_words +/* + * BN_ULONG bn_mul_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_words: + sra %o2,%g0,%o2 ! signx %o2 + brgz,a %o2,.L_bn_mul_words_proceeed + lduw [%o1],%g2 + retl + clr %o0 + nop + nop + nop + +.L_bn_mul_words_proceeed: + srl %o3,%g0,%o3 ! clruw %o3 + andcc %o2,-4,%g0 + bz,pn %icc,.L_bn_mul_words_tail + clr %o5 + +.L_bn_mul_words_loop: ! wow! 32 aligned! + lduw [%o1+4],%g3 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + nop + stuw %o4,[%o0] + srlx %o4,32,%o5 + + lduw [%o1+8],%g2 + mulx %o3,%g3,%g3 + add %g3,%o5,%o4 + dec 4,%o2 + stuw %o4,[%o0+4] + srlx %o4,32,%o5 + + lduw [%o1+12],%g3 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + inc 16,%o1 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + + mulx %o3,%g3,%g3 + add %g3,%o5,%o4 + inc 16,%o0 + stuw %o4,[%o0-4] + srlx %o4,32,%o5 + andcc %o2,-4,%g0 + bnz,a,pt %icc,.L_bn_mul_words_loop + lduw [%o1],%g2 + nop + nop + + brnz,a,pn %o2,.L_bn_mul_words_tail + lduw [%o1],%g2 +.L_bn_mul_words_return: + retl + mov %o5,%o0 + +.L_bn_mul_words_tail: + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + dec %o2 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_words_return + stuw %o4,[%o0] + + lduw [%o1+4],%g2 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + dec %o2 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_words_return + stuw %o4,[%o0+4] + + lduw [%o1+8],%g2 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + stuw %o4,[%o0+8] + retl + srlx %o4,32,%o0 + +.type bn_mul_words,#function +.size bn_mul_words,(.-bn_mul_words) + +.align 32 +.global bn_sqr_words +/* + * void bn_sqr_words(r,a,n) + * BN_ULONG *r,*a; + * int n; + */ +bn_sqr_words: + sra %o2,%g0,%o2 ! signx %o2 + brgz,a %o2,.L_bn_sqr_words_proceeed + lduw [%o1],%g2 + retl + clr %o0 + nop + nop + nop + +.L_bn_sqr_words_proceeed: + andcc %o2,-4,%g0 + nop + bz,pn %icc,.L_bn_sqr_words_tail + nop + +.L_bn_sqr_words_loop: ! wow! 32 aligned! 
+ lduw [%o1+4],%g3 + mulx %g2,%g2,%o4 + stuw %o4,[%o0] + srlx %o4,32,%o5 + stuw %o5,[%o0+4] + nop + + lduw [%o1+8],%g2 + mulx %g3,%g3,%o4 + dec 4,%o2 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + stuw %o5,[%o0+12] + + lduw [%o1+12],%g3 + mulx %g2,%g2,%o4 + srlx %o4,32,%o5 + stuw %o4,[%o0+16] + inc 16,%o1 + stuw %o5,[%o0+20] + + mulx %g3,%g3,%o4 + inc 32,%o0 + stuw %o4,[%o0-8] + srlx %o4,32,%o5 + andcc %o2,-4,%g2 + stuw %o5,[%o0-4] + bnz,a,pt %icc,.L_bn_sqr_words_loop + lduw [%o1],%g2 + nop + + brnz,a,pn %o2,.L_bn_sqr_words_tail + lduw [%o1],%g2 +.L_bn_sqr_words_return: + retl + clr %o0 + +.L_bn_sqr_words_tail: + mulx %g2,%g2,%o4 + dec %o2 + stuw %o4,[%o0] + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_sqr_words_return + stuw %o5,[%o0+4] + + lduw [%o1+4],%g2 + mulx %g2,%g2,%o4 + dec %o2 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_sqr_words_return + stuw %o5,[%o0+12] + + lduw [%o1+8],%g2 + mulx %g2,%g2,%o4 + srlx %o4,32,%o5 + stuw %o4,[%o0+16] + stuw %o5,[%o0+20] + retl + clr %o0 + +.type bn_sqr_words,#function +.size bn_sqr_words,(.-bn_sqr_words) + +.align 32 +.global bn_div_words +/* + * BN_ULONG bn_div_words(h,l,d) + * BN_ULONG h,l,d; + */ +bn_div_words: + sllx %o0,32,%o0 + or %o0,%o1,%o0 + udivx %o0,%o2,%o0 + retl + srl %o0,%g0,%o0 ! clruw %o0 + +.type bn_div_words,#function +.size bn_div_words,(.-bn_div_words) + +.align 32 + +.global bn_add_words +/* + * BN_ULONG bn_add_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_add_words: + sra %o3,%g0,%o3 ! signx %o3 + brgz,a %o3,.L_bn_add_words_proceed + lduw [%o1],%o4 + retl + clr %o0 + +.L_bn_add_words_proceed: + andcc %o3,-4,%g0 + bz,pn %icc,.L_bn_add_words_tail + addcc %g0,0,%g0 ! clear carry flag + +.L_bn_add_words_loop: ! wow! 32 aligned! + dec 4,%o3 + lduw [%o2],%o5 + lduw [%o1+4],%g1 + lduw [%o2+4],%g2 + lduw [%o1+8],%g3 + lduw [%o2+8],%g4 + addccc %o5,%o4,%o5 + stuw %o5,[%o0] + + lduw [%o1+12],%o4 + lduw [%o2+12],%o5 + inc 16,%o1 + addccc %g1,%g2,%g1 + stuw %g1,[%o0+4] + + inc 16,%o2 + addccc %g3,%g4,%g3 + stuw %g3,[%o0+8] + + inc 16,%o0 + addccc %o5,%o4,%o5 + stuw %o5,[%o0-4] + and %o3,-4,%g1 + brnz,a,pt %g1,.L_bn_add_words_loop + lduw [%o1],%o4 + + brnz,a,pn %o3,.L_bn_add_words_tail + lduw [%o1],%o4 +.L_bn_add_words_return: + clr %o0 + retl + movcs %icc,1,%o0 + nop + +.L_bn_add_words_tail: + lduw [%o2],%o5 + dec %o3 + addccc %o5,%o4,%o5 + brz,pt %o3,.L_bn_add_words_return + stuw %o5,[%o0] + + lduw [%o1+4],%o4 + lduw [%o2+4],%o5 + dec %o3 + addccc %o5,%o4,%o5 + brz,pt %o3,.L_bn_add_words_return + stuw %o5,[%o0+4] + + lduw [%o1+8],%o4 + lduw [%o2+8],%o5 + addccc %o5,%o4,%o5 + stuw %o5,[%o0+8] + clr %o0 + retl + movcs %icc,1,%o0 + +.type bn_add_words,#function +.size bn_add_words,(.-bn_add_words) + +.global bn_sub_words +/* + * BN_ULONG bn_sub_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_sub_words: + sra %o3,%g0,%o3 ! signx %o3 + brgz,a %o3,.L_bn_sub_words_proceed + lduw [%o1],%o4 + retl + clr %o0 + +.L_bn_sub_words_proceed: + andcc %o3,-4,%g0 + bz,pn %icc,.L_bn_sub_words_tail + addcc %g0,0,%g0 ! clear carry flag + +.L_bn_sub_words_loop: ! wow! 32 aligned! 
+ dec 4,%o3 + lduw [%o2],%o5 + lduw [%o1+4],%g1 + lduw [%o2+4],%g2 + lduw [%o1+8],%g3 + lduw [%o2+8],%g4 + subccc %o4,%o5,%o5 + stuw %o5,[%o0] + + lduw [%o1+12],%o4 + lduw [%o2+12],%o5 + inc 16,%o1 + subccc %g1,%g2,%g2 + stuw %g2,[%o0+4] + + inc 16,%o2 + subccc %g3,%g4,%g4 + stuw %g4,[%o0+8] + + inc 16,%o0 + subccc %o4,%o5,%o5 + stuw %o5,[%o0-4] + and %o3,-4,%g1 + brnz,a,pt %g1,.L_bn_sub_words_loop + lduw [%o1],%o4 + + brnz,a,pn %o3,.L_bn_sub_words_tail + lduw [%o1],%o4 +.L_bn_sub_words_return: + clr %o0 + retl + movcs %icc,1,%o0 + nop + +.L_bn_sub_words_tail: ! wow! 32 aligned! + lduw [%o2],%o5 + dec %o3 + subccc %o4,%o5,%o5 + brz,pt %o3,.L_bn_sub_words_return + stuw %o5,[%o0] + + lduw [%o1+4],%o4 + lduw [%o2+4],%o5 + dec %o3 + subccc %o4,%o5,%o5 + brz,pt %o3,.L_bn_sub_words_return + stuw %o5,[%o0+4] + + lduw [%o1+8],%o4 + lduw [%o2+8],%o5 + subccc %o4,%o5,%o5 + stuw %o5,[%o0+8] + clr %o0 + retl + movcs %icc,1,%o0 + +.type bn_sub_words,#function +.size bn_sub_words,(.-bn_sub_words) + +/* + * Code below depends on the fact that upper parts of the %l0-%l7 + * and %i0-%i7 are zeroed by kernel after context switch. In + * previous versions this comment stated that "the trouble is that + * it's not feasible to implement the mumbo-jumbo in less V9 + * instructions:-(" which apparently isn't true thanks to + * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement + * results not from the shorter code, but from elimination of + * multicycle none-pairable 'rd %y,%rd' instructions. + * + * Andy. + */ + +#define FRAME_SIZE -96 + +/* + * Here is register usage map for *all* routines below. + */ +#define t_1 %o0 +#define t_2 %o1 +#define c_12 %o2 +#define c_3 %o3 + +#define ap(I) [%i1+4*I] +#define bp(I) [%i2+4*I] +#define rp(I) [%i0+4*I] + +#define a_0 %l0 +#define a_1 %l1 +#define a_2 %l2 +#define a_3 %l3 +#define a_4 %l4 +#define a_5 %l5 +#define a_6 %l6 +#define a_7 %l7 + +#define b_0 %i3 +#define b_1 %i4 +#define b_2 %i5 +#define b_3 %o4 +#define b_4 %o5 +#define b_5 %o7 +#define b_6 %g1 +#define b_7 %g4 + +.align 32 +.global bn_mul_comba8 +/* + * void bn_mul_comba8(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba8: + save %sp,FRAME_SIZE,%sp + mov 1,t_2 + lduw ap(0),a_0 + sllx t_2,32,t_2 + lduw bp(0),b_0 != + lduw bp(1),b_1 + mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !=!r[0]=c1; + + lduw ap(1),a_1 + mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(2),a_2 + mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(2),b_2 != + mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(3),b_3 + mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) !r[2]=c3; + or c_12,c_3,c_12 != + + mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(4),a_4 + mulx a_3,b_0,t_1 
!=!mul_add_c(a[3],b[0],c1,c2,c3);!= + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(3) !r[3]=c1; + or c_12,c_3,c_12 + + mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(4),b_4 != + mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(5),b_5 + mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !r[4]=c2; + or c_12,c_3,c_12 != + + mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(5),a_5 + mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(6),a_6 + mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(5) !r[5]=c3; + or c_12,c_3,c_12 + + mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(6),b_6 != + mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(7),b_7 + mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(6) !r[6]=c1; + or c_12,c_3,c_12 != + + mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(7),a_7 + mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(7) !r[7]=c2; + or c_12,c_3,c_12 + + mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 
+ clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + srlx t_1,32,c_12 + stuw t_1,rp(8) !r[8]=c3; + or c_12,c_3,c_12 + + mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(9) !r[9]=c1; + or c_12,c_3,c_12 != + + mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(10) !r[10]=c2; + or c_12,c_3,c_12 != + + mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(11) !r[11]=c3; + or c_12,c_3,c_12 != + + mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(12) !r[12]=c1; + or c_12,c_3,c_12 != + + mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + st t_1,rp(13) !r[13]=c2; + or c_12,c_3,c_12 != + + mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 != + stuw t_1,rp(14) !r[14]=c3; + stuw c_12,rp(15) 
!r[15]=c1; + + ret + restore %g0,%g0,%o0 != + +.type bn_mul_comba8,#function +.size bn_mul_comba8,(.-bn_mul_comba8) + +.align 32 + +.global bn_mul_comba4 +/* + * void bn_mul_comba4(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba4: + save %sp,FRAME_SIZE,%sp + lduw ap(0),a_0 + mov 1,t_2 + lduw bp(0),b_0 + sllx t_2,32,t_2 != + lduw bp(1),b_1 + mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !=!r[0]=c1; + + lduw ap(1),a_1 + mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(2),a_2 + mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(2),b_2 != + mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(3),b_3 + mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) !r[2]=c3; + or c_12,c_3,c_12 != + + mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!= + addcc c_12,t_1,t_1 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(3) !=!r[3]=c1; + or c_12,c_3,c_12 + + mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_12,t_1,t_1 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !=!r[4]=c2; + or c_12,c_3,c_12 + + mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_12,t_1,t_1 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(5) !=!r[5]=c3; + or c_12,c_3,c_12 + + mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 != + stuw t_1,rp(6) !r[6]=c1; + stuw c_12,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_mul_comba4,#function +.size bn_mul_comba4,(.-bn_mul_comba4) + +.align 32 + +.global bn_sqr_comba8 +bn_sqr_comba8: + save %sp,FRAME_SIZE,%sp + mov 1,t_2 + lduw ap(0),a_0 + sllx t_2,32,t_2 + lduw ap(1),a_1 + mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !r[0]=c1; + + lduw ap(2),a_2 + mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) 
!r[2]=c3; + or c_12,c_3,c_12 + + mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(4),a_4 + mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + st t_1,rp(3) !r[3]=c1; + or c_12,c_3,c_12 + + mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(5),a_5 + mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !r[4]=c2; + or c_12,c_3,c_12 + + mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(6),a_6 + mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(5) !r[5]=c3; + or c_12,c_3,c_12 + + mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(7),a_7 + mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(6) !r[6]=c1; + or c_12,c_3,c_12 + + mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(7) !r[7]=c2; + or c_12,c_3,c_12 + + mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx 
t_1,32,c_12 + stuw t_1,rp(8) !r[8]=c3; + or c_12,c_3,c_12 + + mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(9) !r[9]=c1; + or c_12,c_3,c_12 + + mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(10) !r[10]=c2; + or c_12,c_3,c_12 + + mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(11) !r[11]=c3; + or c_12,c_3,c_12 + + mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(12) !r[12]=c1; + or c_12,c_3,c_12 + + mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(13) !r[13]=c2; + or c_12,c_3,c_12 + + mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 + stuw t_1,rp(14) !r[14]=c3; + stuw c_12,rp(15) !r[15]=c1; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba8,#function +.size bn_sqr_comba8,(.-bn_sqr_comba8) + +.align 32 + +.global bn_sqr_comba4 +/* + * void bn_sqr_comba4(r,a) + * BN_ULONG *r,*a; + */ +bn_sqr_comba4: + save %sp,FRAME_SIZE,%sp + mov 1,t_2 + lduw ap(0),a_0 + sllx t_2,32,t_2 + lduw ap(1),a_1 + mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !r[0]=c1; + + lduw ap(2),a_2 + mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) !r[2]=c3; + or c_12,c_3,c_12 + + mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc 
c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(3) !r[3]=c1; + or c_12,c_3,c_12 + + mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !r[4]=c2; + or c_12,c_3,c_12 + + mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(5) !r[5]=c3; + or c_12,c_3,c_12 + + mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 + stuw t_1,rp(6) !r[6]=c1; + stuw c_12,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba4,#function +.size bn_sqr_comba4,(.-bn_sqr_comba4) + +.align 32 diff --git a/openssl/crypto/bn/asm/sparcv9-mont.pl b/openssl/crypto/bn/asm/sparcv9-mont.pl new file mode 100644 index 000000000..b8fb1e8a2 --- /dev/null +++ b/openssl/crypto/bn/asm/sparcv9-mont.pl @@ -0,0 +1,606 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# December 2005 +# +# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons +# for undertaken effort are multiple. First of all, UltraSPARC is not +# the whole SPARCv9 universe and other VIS-free implementations deserve +# optimized code as much. Secondly, newly introduced UltraSPARC T1, +# a.k.a. Niagara, has shared FPU and concurrent FPU-intensive pathes, +# such as sparcv9a-mont, will simply sink it. Yes, T1 is equipped with +# several integrated RSA/DSA accelerator circuits accessible through +# kernel driver [only(*)], but having decent user-land software +# implementation is important too. Finally, reasons like desire to +# experiment with dedicated squaring procedure. Yes, this module +# implements one, because it was easiest to draft it in SPARCv9 +# instructions... + +# (*) Engine accessing the driver in question is on my TODO list. +# For reference, acceleator is estimated to give 6 to 10 times +# improvement on single-threaded RSA sign. It should be noted +# that 6-10x improvement coefficient does not actually mean +# something extraordinary in terms of absolute [single-threaded] +# performance, as SPARCv9 instruction set is by all means least +# suitable for high performance crypto among other 64 bit +# platforms. 6-10x factor simply places T1 in same performance +# domain as say AMD64 and IA-64. Improvement of RSA verify don't +# appear impressive at all, but it's the sign operation which is +# far more critical/interesting. + +# You might notice that inner loops are modulo-scheduled:-) This has +# essentially negligible impact on UltraSPARC performance, it's +# Fujitsu SPARC64 V users who should notice and hopefully appreciate +# the advantage... Currently this module surpasses sparcv9a-mont.pl +# by ~20% on UltraSPARC-III and later cores, but recall that sparcv9a +# module still have hidden potential [see TODO list there], which is +# estimated to be larger than 20%... 
+ +# int bn_mul_mont( +$rp="%i0"; # BN_ULONG *rp, +$ap="%i1"; # const BN_ULONG *ap, +$bp="%i2"; # const BN_ULONG *bp, +$np="%i3"; # const BN_ULONG *np, +$n0="%i4"; # const BN_ULONG *n0, +$num="%i5"; # int num); + +$bits=32; +for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } +if ($bits==64) { $bias=2047; $frame=192; } +else { $bias=0; $frame=128; } + +$car0="%o0"; +$car1="%o1"; +$car2="%o2"; # 1 bit +$acc0="%o3"; +$acc1="%o4"; +$mask="%g1"; # 32 bits, what a waste... +$tmp0="%g4"; +$tmp1="%g5"; + +$i="%l0"; +$j="%l1"; +$mul0="%l2"; +$mul1="%l3"; +$tp="%l4"; +$apj="%l5"; +$npj="%l6"; +$tpj="%l7"; + +$fname="bn_mul_mont_int"; + +$code=<<___; +.section ".text",#alloc,#execinstr + +.global $fname +.align 32 +$fname: + cmp %o5,4 ! 128 bits minimum + bge,pt %icc,.Lenter + sethi %hi(0xffffffff),$mask + retl + clr %o0 +.align 32 +.Lenter: + save %sp,-$frame,%sp + sll $num,2,$num ! num*=4 + or $mask,%lo(0xffffffff),$mask + ld [$n0],$n0 + cmp $ap,$bp + and $num,$mask,$num + ld [$bp],$mul0 ! bp[0] + nop + + add %sp,$bias,%o7 ! real top of stack + ld [$ap],$car0 ! ap[0] ! redundant in squaring context + sub %o7,$num,%o7 + ld [$ap+4],$apj ! ap[1] + and %o7,-1024,%o7 + ld [$np],$car1 ! np[0] + sub %o7,$bias,%sp ! alloca + ld [$np+4],$npj ! np[1] + be,pt `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont + mov 12,$j + + mulx $car0,$mul0,$car0 ! ap[0]*bp[0] + mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0] + and $car0,$mask,$acc0 + add %sp,$bias+$frame,$tp + ld [$ap+8],$apj !prologue! + + mulx $n0,$acc0,$mul1 ! "t[0]"*n0 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0 + mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + ld [$np+8],$npj !prologue! + srlx $car1,32,$car1 + mov $tmp0,$acc0 !prologue! + +.L1st: + mulx $apj,$mul0,$tmp0 + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + add $acc1,$car1,$car1 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + add $j,4,$j ! j++ + mov $tmp0,$acc0 + st $car1,[$tp] + cmp $j,$num + mov $tmp1,$acc1 + srlx $car1,32,$car1 + bl %icc,.L1st + add $tp,4,$tp ! tp++ +!.L1st + + mulx $apj,$mul0,$tmp0 !epilogue! + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 + and $car0,$mask,$acc0 + add $acc1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $tmp0,$car0,$car0 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car1 + + add $car0,$car1,$car1 + st $car1,[$tp+8] + srlx $car1,32,$car2 + + mov 4,$i ! i++ + ld [$bp+4],$mul0 ! bp[1] +.Louter: + add %sp,$bias+$frame,$tp + ld [$ap],$car0 ! ap[0] + ld [$ap+4],$apj ! ap[1] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! np[1] + ld [$tp],$tmp1 ! tp[0] + ld [$tp+4],$tpj ! tp[1] + mov 12,$j + + mulx $car0,$mul0,$car0 + mulx $apj,$mul0,$tmp0 !prologue! + add $tmp1,$car0,$car0 + ld [$ap+8],$apj !prologue! + and $car0,$mask,$acc0 + + mulx $n0,$acc0,$mul1 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 + mulx $npj,$mul1,$acc1 !prologue! + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + ld [$np+8],$npj !prologue! + srlx $car1,32,$car1 + mov $tmp0,$acc0 !prologue! + +.Linner: + mulx $apj,$mul0,$tmp0 + mulx $npj,$mul1,$tmp1 + add $tpj,$car0,$car0 + ld [$ap+$j],$apj ! ap[j] + add $acc0,$car0,$car0 + add $acc1,$car1,$car1 + ld [$np+$j],$npj ! np[j] + and $car0,$mask,$acc0 + ld [$tp+8],$tpj ! tp[j] + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + add $j,4,$j ! 
j++ + mov $tmp0,$acc0 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + mov $tmp1,$acc1 + cmp $j,$num + bl %icc,.Linner + add $tp,4,$tp ! tp++ +!.Linner + + mulx $apj,$mul0,$tmp0 !epilogue! + mulx $npj,$mul1,$tmp1 + add $tpj,$car0,$car0 + add $acc0,$car0,$car0 + ld [$tp+8],$tpj ! tp[j] + and $car0,$mask,$acc0 + add $acc1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + + add $tpj,$car0,$car0 + add $tmp0,$car0,$car0 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + add $acc0,$car1,$car1 + st $car1,[$tp+4] ! tp[j-1] + srlx $car0,32,$car0 + add $i,4,$i ! i++ + srlx $car1,32,$car1 + + add $car0,$car1,$car1 + cmp $i,$num + add $car2,$car1,$car1 + st $car1,[$tp+8] + + srlx $car1,32,$car2 + bl,a %icc,.Louter + ld [$bp+$i],$mul0 ! bp[i] +!.Louter + + add $tp,12,$tp + +.Ltail: + add $np,$num,$np + add $rp,$num,$rp + mov $tp,$ap + sub %g0,$num,%o7 ! k=-num + ba .Lsub + subcc %g0,%g0,%g0 ! clear %icc.c +.align 16 +.Lsub: + ld [$tp+%o7],%o0 + ld [$np+%o7],%o1 + subccc %o0,%o1,%o1 ! tp[j]-np[j] + add $rp,%o7,$i + add %o7,4,%o7 + brnz %o7,.Lsub + st %o1,[$i] + subc $car2,0,$car2 ! handle upmost overflow bit + and $tp,$car2,$ap + andn $rp,$car2,$np + or $ap,$np,$ap + sub %g0,$num,%o7 + +.Lcopy: + ld [$ap+%o7],%o0 ! copy or in-place refresh + st %g0,[$tp+%o7] ! zap tp + st %o0,[$rp+%o7] + add %o7,4,%o7 + brnz %o7,.Lcopy + nop + mov 1,%i0 + ret + restore +___ + +######## +######## .Lbn_sqr_mont gives up to 20% *overall* improvement over +######## code without following dedicated squaring procedure. +######## +$sbit="%i2"; # re-use $bp! + +$code.=<<___; +.align 32 +.Lbn_sqr_mont: + mulx $mul0,$mul0,$car0 ! ap[0]*ap[0] + mulx $apj,$mul0,$tmp0 !prologue! + and $car0,$mask,$acc0 + add %sp,$bias+$frame,$tp + ld [$ap+8],$apj !prologue! + + mulx $n0,$acc0,$mul1 ! "t[0]"*n0 + srlx $car0,32,$car0 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0 + mulx $npj,$mul1,$acc1 !prologue! + and $car0,1,$sbit + ld [$np+8],$npj !prologue! + srlx $car0,1,$car0 + add $acc0,$car1,$car1 + srlx $car1,32,$car1 + mov $tmp0,$acc0 !prologue! + +.Lsqr_1st: + mulx $apj,$mul0,$tmp0 + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 ! ap[j]*a0+c0 + add $acc1,$car1,$car1 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + mov $tmp1,$acc1 + srlx $acc0,32,$sbit + add $j,4,$j ! j++ + and $acc0,$mask,$acc0 + cmp $j,$num + add $acc0,$car1,$car1 + st $car1,[$tp] + mov $tmp0,$acc0 + srlx $car1,32,$car1 + bl %icc,.Lsqr_1st + add $tp,4,$tp ! tp++ +!.Lsqr_1st + + mulx $apj,$mul0,$tmp0 ! epilogue + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 ! ap[j]*a0+c0 + add $acc1,$car1,$car1 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $tmp0,$car0,$car0 ! ap[j]*a0+c0 + add $tmp1,$car1,$car1 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + st $car1,[$tp+8] + srlx $car1,32,$car2 + + ld [%sp+$bias+$frame],$tmp0 ! tp[0] + ld [%sp+$bias+$frame+4],$tmp1 ! tp[1] + ld [%sp+$bias+$frame+8],$tpj ! tp[2] + ld [$ap+4],$mul0 ! ap[1] + ld [$ap+8],$apj ! ap[2] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! 
np[1] + mulx $n0,$tmp0,$mul1 + + mulx $mul0,$mul0,$car0 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 + mulx $npj,$mul1,$acc1 + add $tmp0,$car1,$car1 + and $car0,$mask,$acc0 + ld [$np+8],$npj ! np[2] + srlx $car1,32,$car1 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + and $car0,1,$sbit + add $acc1,$car1,$car1 + srlx $car0,1,$car0 + mov 12,$j + st $car1,[%sp+$bias+$frame] ! tp[0]= + srlx $car1,32,$car1 + add %sp,$bias+$frame+4,$tp + +.Lsqr_2nd: + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $acc0,$car0,$car0 + add $tpj,$car1,$car1 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc1,$car1,$car1 + ld [$tp+8],$tpj ! tp[j] + add $acc0,$acc0,$acc0 + add $j,4,$j ! j++ + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + cmp $j,$num + add $acc0,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + bl %icc,.Lsqr_2nd + add $tp,4,$tp ! tp++ +!.Lsqr_2nd + + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $acc0,$car0,$car0 + add $tpj,$car1,$car1 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc1,$car1,$car1 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + add $car2,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car2 + + ld [%sp+$bias+$frame],$tmp1 ! tp[0] + ld [%sp+$bias+$frame+4],$tpj ! tp[1] + ld [$ap+8],$mul0 ! ap[2] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! np[1] + mulx $n0,$tmp1,$mul1 + and $mul1,$mask,$mul1 + mov 8,$i + + mulx $mul0,$mul0,$car0 + mulx $car1,$mul1,$car1 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add %sp,$bias+$frame,$tp + srlx $car1,32,$car1 + and $car0,1,$sbit + srlx $car0,1,$car0 + mov 4,$j + +.Lsqr_outer: +.Lsqr_inner1: + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $j,4,$j + ld [$tp+8],$tpj + cmp $j,$i + add $acc1,$car1,$car1 + ld [$np+$j],$npj + st $car1,[$tp] + srlx $car1,32,$car1 + bl %icc,.Lsqr_inner1 + add $tp,4,$tp +!.Lsqr_inner1 + + add $j,4,$j + ld [$ap+$j],$apj ! ap[j] + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + ld [$np+$j],$npj ! np[j] + add $acc0,$car1,$car1 + ld [$tp+8],$tpj ! tp[j] + add $acc1,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $j,4,$j + cmp $j,$num + be,pn %icc,.Lsqr_no_inner2 + add $tp,4,$tp + +.Lsqr_inner2: + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $acc0,$car0,$car0 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + ld [$tp+8],$tpj ! tp[j] + or $sbit,$acc0,$acc0 + add $j,4,$j ! j++ + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + cmp $j,$num + add $acc0,$car1,$car1 + add $acc1,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + bl %icc,.Lsqr_inner2 + add $tp,4,$tp ! tp++ + +.Lsqr_no_inner2: + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $acc0,$car0,$car0 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + add $acc1,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + add $car2,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car2 + + add $i,4,$i ! i++ + ld [%sp+$bias+$frame],$tmp1 ! tp[0] + ld [%sp+$bias+$frame+4],$tpj ! 
tp[1] + ld [$ap+$i],$mul0 ! ap[j] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! np[1] + mulx $n0,$tmp1,$mul1 + and $mul1,$mask,$mul1 + add $i,4,$tmp0 + + mulx $mul0,$mul0,$car0 + mulx $car1,$mul1,$car1 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add %sp,$bias+$frame,$tp + srlx $car1,32,$car1 + and $car0,1,$sbit + srlx $car0,1,$car0 + + cmp $tmp0,$num ! i<num-1 + bl %icc,.Lsqr_outer + mov 4,$j + +.Lsqr_last: + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $j,4,$j + ld [$tp+8],$tpj + cmp $j,$i + add $acc1,$car1,$car1 + ld [$np+$j],$npj + st $car1,[$tp] + srlx $car1,32,$car1 + bl %icc,.Lsqr_last + add $tp,4,$tp +!.Lsqr_last + + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $acc0,$car1,$car1 + add $acc1,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 ! recover $car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + add $car2,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car2 + + ba .Ltail + add $tp,8,$tp +.type $fname,#function +.size $fname,(.-$fname) +.asciz "Montgomery Multipltication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>" +.align 32 +___ +$code =~ s/\`([^\`]*)\`/eval($1)/gem; +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/sparcv9a-mont.pl b/openssl/crypto/bn/asm/sparcv9a-mont.pl new file mode 100644 index 000000000..a14205f2f --- /dev/null +++ b/openssl/crypto/bn/asm/sparcv9a-mont.pl @@ -0,0 +1,882 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005 +# +# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU? +# Because unlike integer multiplier, which simply stalls whole CPU, +# FPU is fully pipelined and can effectively emit 48 bit partial +# product every cycle. Why not blended SPARC v9? One can argue that +# making this module dependent on UltraSPARC VIS extension limits its +# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!) +# implementations from compatibility matrix. But the rest, whole Sun +# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support +# VIS extension instructions used in this module. This is considered +# good enough to not care about HAL SPARC64 users [if any] who have +# integer-only pure SPARCv9 module to "fall down" to. + +# USI&II cores currently exhibit uniform 2x improvement [over pre- +# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII +# performance improves few percents for shorter keys and worsens few +# percents for longer keys. This is because USIII integer multiplier +# is >3x faster than USI&II one, which is harder to match [but see +# TODO list below]. It should also be noted that SPARC64 V features +# out-of-order execution, which *might* mean that integer multiplier +# is pipelined, which in turn *might* be impossible to match... On +# additional note, SPARC64 V implements FP Multiply-Add instruction, +# which is perfectly usable in this context... 
In other words, as far +# as Fujitsu SPARC64 V goes, talk to the author:-) + +# The implementation implies following "non-natural" limitations on +# input arguments: +# - num may not be less than 4; +# - num has to be even; +# Failure to meet either condition has no fatal effects, simply +# doesn't give any performance gain. + +# TODO: +# - modulo-schedule inner loop for better performance (on in-order +# execution core such as UltraSPARC this shall result in further +# noticeable(!) improvement); +# - dedicated squaring procedure[?]; + +###################################################################### +# November 2006 +# +# Modulo-scheduled inner loops allow to interleave floating point and +# integer instructions and minimize Read-After-Write penalties. This +# results in *further* 20-50% perfromance improvement [depending on +# key length, more for longer keys] on USI&II cores and 30-80% - on +# USIII&IV. + +$fname="bn_mul_mont_fpu"; +$bits=32; +for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } + +if ($bits==64) { + $bias=2047; + $frame=192; +} else { + $bias=0; + $frame=128; # 96 rounded up to largest known cache-line +} +$locals=64; + +# In order to provide for 32-/64-bit ABI duality, I keep integers wider +# than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used +# exclusively for pointers, indexes and other small values... +# int bn_mul_mont( +$rp="%i0"; # BN_ULONG *rp, +$ap="%i1"; # const BN_ULONG *ap, +$bp="%i2"; # const BN_ULONG *bp, +$np="%i3"; # const BN_ULONG *np, +$n0="%i4"; # const BN_ULONG *n0, +$num="%i5"; # int num); + +$tp="%l0"; # t[num] +$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved +$ap_h="%l2"; # to these four vectors as double-precision FP values. +$np_l="%l3"; # This way a bunch of fxtods are eliminated in second +$np_h="%l4"; # loop and L1-cache aliasing is minimized... +$i="%l5"; +$j="%l6"; +$mask="%l7"; # 16-bit mask, 0xffff + +$n0="%g4"; # reassigned(!) to "64-bit" register +$carry="%i4"; # %i4 reused(!) for a carry bit + +# FP register naming chart +# +# ..HILO +# dcba +# -------- +# LOa +# LOb +# LOc +# LOd +# HIa +# HIb +# HIc +# HId +# ..a +# ..b +$ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6"; +$na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14"; +$alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19"; +$nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23"; + +$dota="%f24"; $dotb="%f26"; + +$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38"; +$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46"; +$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54"; +$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62"; + +$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load + +$code=<<___; +.section ".text",#alloc,#execinstr + +.global $fname +.align 32 +$fname: + save %sp,-$frame-$locals,%sp + + cmp $num,4 + bl,a,pn %icc,.Lret + clr %i0 + andcc $num,1,%g0 ! $num has to be even... + bnz,a,pn %icc,.Lret + clr %i0 ! signal "unsupported input value" + + srl $num,1,$num + sethi %hi(0xffff),$mask + ld [%i4+0],$n0 ! $n0 reassigned, remember? + or $mask,%lo(0xffff),$mask + ld [%i4+4],%o0 + sllx %o0,32,%o0 + or %o0,$n0,$n0 ! $n0=n0[1].n0[0] + + sll $num,3,$num ! num*=8 + + add %sp,$bias,%o0 ! real top of stack + sll $num,2,%o1 + add %o1,$num,%o1 ! %o1=num*5 + sub %o0,%o1,%o0 + and %o0,-2048,%o0 ! optimize TLB utilization + sub %o0,$bias,%sp ! alloca(5*num*8) + + rd %asi,%o7 ! save %asi + add %sp,$bias+$frame+$locals,$tp + add $tp,$num,$ap_l + add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends ! 
+ add $ap_l,$num,$ap_h + add $ap_h,$num,$np_l + add $np_l,$num,$np_h + + wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads + + add $rp,$num,$rp ! readjust input pointers to point + add $ap,$num,$ap ! at the ends too... + add $bp,$num,$bp + add $np,$num,$np + + stx %o7,[%sp+$bias+$frame+48] ! save %asi + + sub %g0,$num,$i ! i=-num + sub %g0,$num,$j ! j=-num + + add $ap,$j,%o3 + add $bp,$i,%o4 + + ld [%o3+4],%g1 ! bp[0] + ld [%o3+0],%o0 + ld [%o4+4],%g5 ! ap[0] + sllx %g1,32,%g1 + ld [%o4+0],%o1 + sllx %g5,32,%g5 + or %g1,%o0,%o0 + or %g5,%o1,%o1 + + add $np,$j,%o5 + + mulx %o1,%o0,%o0 ! ap[0]*bp[0] + mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0 + stx %o0,[%sp+$bias+$frame+0] + + ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o3+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + ! transfer b[i] to FPU as 4x16-bit values + ldda [%o4+2]%asi,$ba + fxtod $alo,$alo + ldda [%o4+0]%asi,$bb + fxtod $ahi,$ahi + ldda [%o4+6]%asi,$bc + fxtod $nlo,$nlo + ldda [%o4+4]%asi,$bd + fxtod $nhi,$nhi + + ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values + ldda [%sp+$bias+$frame+6]%asi,$na + fxtod $ba,$ba + ldda [%sp+$bias+$frame+4]%asi,$nb + fxtod $bb,$bb + ldda [%sp+$bias+$frame+2]%asi,$nc + fxtod $bc,$bc + ldda [%sp+$bias+$frame+0]%asi,$nd + fxtod $bd,$bd + + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fxtod $na,$na + std $ahi,[$ap_h+$j] + fxtod $nb,$nb + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fxtod $nc,$nc + std $nhi,[$np_h+$j] + fxtod $nd,$nd + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + fmuld $alo,$bd,$alod + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + fmuld $ahi,$ba,$ahia + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + fmuld $ahi,$bb,$ahib + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + add $j,8,$j + std $nlob,[%sp+$bias+$frame+8] + add $ap,$j,%o4 + std $nloc,[%sp+$bias+$frame+16] + add $np,$j,%o5 + std $nlod,[%sp+$bias+$frame+24] + + ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o4+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + fxtod $alo,$alo + fxtod $ahi,$ahi + fxtod $nlo,$nlo + fxtod $nhi,$nhi + + ldx [%sp+$bias+$frame+0],%o0 + fmuld $alo,$ba,$aloa + ldx [%sp+$bias+$frame+8],%o1 + fmuld $nlo,$na,$nloa + ldx [%sp+$bias+$frame+16],%o2 + fmuld $alo,$bb,$alob + ldx [%sp+$bias+$frame+24],%o3 + fmuld $nlo,$nb,$nlob + + srlx %o0,16,%o7 + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fmuld $alo,$bc,$aloc + add %o7,%o1,%o1 + std $ahi,[$ap_h+$j] + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + srlx %o1,16,%o7 + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fmuld $alo,$bd,$alod + add %o7,%o2,%o2 + std $nhi,[$np_h+$j] + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + srlx %o2,16,%o7 + fmuld $ahi,$ba,$ahia + add %o7,%o3,%o3 ! 
%o3.%o2[0..15].%o1[0..15].%o0[0..15] + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + !and %o0,$mask,%o0 + !and %o1,$mask,%o1 + !and %o2,$mask,%o2 + !sllx %o1,16,%o1 + !sllx %o2,32,%o2 + !sllx %o3,48,%o7 + !or %o1,%o0,%o0 + !or %o2,%o0,%o0 + !or %o7,%o0,%o0 ! 64-bit result + srlx %o3,16,%g1 ! 34-bit carry + fmuld $ahi,$bb,$ahib + + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $dota,$nloa,$nloa + faddd $dotb,$nlob,$nlob + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + addcc $j,8,$j + std $nloc,[%sp+$bias+$frame+16] + bz,pn %icc,.L1stskip + std $nlod,[%sp+$bias+$frame+24] + +.align 32 ! incidentally already aligned ! +.L1st: + add $ap,$j,%o4 + add $np,$j,%o5 + ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o4+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + fxtod $alo,$alo + fxtod $ahi,$ahi + fxtod $nlo,$nlo + fxtod $nhi,$nhi + + ldx [%sp+$bias+$frame+0],%o0 + fmuld $alo,$ba,$aloa + ldx [%sp+$bias+$frame+8],%o1 + fmuld $nlo,$na,$nloa + ldx [%sp+$bias+$frame+16],%o2 + fmuld $alo,$bb,$alob + ldx [%sp+$bias+$frame+24],%o3 + fmuld $nlo,$nb,$nlob + + srlx %o0,16,%o7 + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fmuld $alo,$bc,$aloc + add %o7,%o1,%o1 + std $ahi,[$ap_h+$j] + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + srlx %o1,16,%o7 + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fmuld $alo,$bd,$alod + add %o7,%o2,%o2 + std $nhi,[$np_h+$j] + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + srlx %o2,16,%o7 + fmuld $ahi,$ba,$ahia + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + and %o1,$mask,%o1 + and %o2,$mask,%o2 + fmuld $ahi,$bb,$ahib + sllx %o1,16,%o1 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + sllx %o2,32,%o2 + fmuld $ahi,$bc,$ahic + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + or %o2,%o0,%o0 + fmuld $ahi,$bd,$ahid + or %o7,%o0,%o0 ! 64-bit result + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + addcc %g1,%o0,%o0 + faddd $dota,$nloa,$nloa + srlx %o3,16,%g1 ! 34-bit carry + faddd $dotb,$nlob,$nlob + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1]= + + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + std $nloc,[%sp+$bias+$frame+16] + std $nlod,[%sp+$bias+$frame+24] + + addcc $j,8,$j + bnz,pt %icc,.L1st + add $tp,8,$tp + +.L1stskip: + fdtox $dota,$dota + fdtox $dotb,$dotb + + ldx [%sp+$bias+$frame+0],%o0 + ldx [%sp+$bias+$frame+8],%o1 + ldx [%sp+$bias+$frame+16],%o2 + ldx [%sp+$bias+$frame+24],%o3 + + srlx %o0,16,%o7 + std $dota,[%sp+$bias+$frame+32] + add %o7,%o1,%o1 + std $dotb,[%sp+$bias+$frame+40] + srlx %o1,16,%o7 + add %o7,%o2,%o2 + srlx %o2,16,%o7 + add %o7,%o3,%o3 ! 
%o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + and %o1,$mask,%o1 + and %o2,$mask,%o2 + sllx %o1,16,%o1 + sllx %o2,32,%o2 + sllx %o3,48,%o7 + or %o1,%o0,%o0 + or %o2,%o0,%o0 + or %o7,%o0,%o0 ! 64-bit result + ldx [%sp+$bias+$frame+32],%o4 + addcc %g1,%o0,%o0 + ldx [%sp+$bias+$frame+40],%o5 + srlx %o3,16,%g1 ! 34-bit carry + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1]= + add $tp,8,$tp + + srlx %o4,16,%o7 + add %o7,%o5,%o5 + and %o4,$mask,%o4 + sllx %o5,16,%o7 + or %o7,%o4,%o4 + addcc %g1,%o4,%o4 + srlx %o5,48,%g1 + bcs,a %xcc,.+8 + add %g1,1,%g1 + + mov %g1,$carry + stx %o4,[$tp] ! tp[num-1]= + + ba .Louter + add $i,8,$i +.align 32 +.Louter: + sub %g0,$num,$j ! j=-num + add %sp,$bias+$frame+$locals,$tp + + add $ap,$j,%o3 + add $bp,$i,%o4 + + ld [%o3+4],%g1 ! bp[i] + ld [%o3+0],%o0 + ld [%o4+4],%g5 ! ap[0] + sllx %g1,32,%g1 + ld [%o4+0],%o1 + sllx %g5,32,%g5 + or %g1,%o0,%o0 + or %g5,%o1,%o1 + + ldx [$tp],%o2 ! tp[0] + mulx %o1,%o0,%o0 + addcc %o2,%o0,%o0 + mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0 + stx %o0,[%sp+$bias+$frame+0] + + ! transfer b[i] to FPU as 4x16-bit values + ldda [%o4+2]%asi,$ba + ldda [%o4+0]%asi,$bb + ldda [%o4+6]%asi,$bc + ldda [%o4+4]%asi,$bd + + ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values + ldda [%sp+$bias+$frame+6]%asi,$na + fxtod $ba,$ba + ldda [%sp+$bias+$frame+4]%asi,$nb + fxtod $bb,$bb + ldda [%sp+$bias+$frame+2]%asi,$nc + fxtod $bc,$bc + ldda [%sp+$bias+$frame+0]%asi,$nd + fxtod $bd,$bd + ldd [$ap_l+$j],$alo ! load a[j] in double format + fxtod $na,$na + ldd [$ap_h+$j],$ahi + fxtod $nb,$nb + ldd [$np_l+$j],$nlo ! load n[j] in double format + fxtod $nc,$nc + ldd [$np_h+$j],$nhi + fxtod $nd,$nd + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + fmuld $alo,$bd,$alod + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + fmuld $ahi,$ba,$ahia + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + fmuld $ahi,$bb,$ahib + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + std $nloc,[%sp+$bias+$frame+16] + add $j,8,$j + std $nlod,[%sp+$bias+$frame+24] + + ldd [$ap_l+$j],$alo ! load a[j] in double format + ldd [$ap_h+$j],$ahi + ldd [$np_l+$j],$nlo ! load n[j] in double format + ldd [$np_h+$j],$nhi + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + ldx [%sp+$bias+$frame+0],%o0 + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + ldx [%sp+$bias+$frame+8],%o1 + fmuld $alo,$bd,$alod + ldx [%sp+$bias+$frame+16],%o2 + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + ldx [%sp+$bias+$frame+24],%o3 + fmuld $ahi,$ba,$ahia + + srlx %o0,16,%o7 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + add %o7,%o1,%o1 + fmuld $ahi,$bb,$ahib + srlx %o1,16,%o7 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + add %o7,%o2,%o2 + fmuld $ahi,$bc,$ahic + srlx %o2,16,%o7 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + ! why? 
+ and %o0,$mask,%o0 + fmuld $ahi,$bd,$ahid + and %o1,$mask,%o1 + and %o2,$mask,%o2 + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + sllx %o1,16,%o1 + faddd $dota,$nloa,$nloa + sllx %o2,32,%o2 + faddd $dotb,$nlob,$nlob + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahic,$nhic,$dota ! $nhic + or %o2,%o0,%o0 + faddd $ahid,$nhid,$dotb ! $nhid + or %o7,%o0,%o0 ! 64-bit result + ldx [$tp],%o7 + faddd $nloc,$nhia,$nloc + addcc %o7,%o0,%o0 + ! end-of-why? + faddd $nlod,$nhib,$nlod + srlx %o3,16,%g1 ! 34-bit carry + fdtox $nloa,$nloa + bcs,a %xcc,.+8 + add %g1,1,%g1 + + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + addcc $j,8,$j + std $nloc,[%sp+$bias+$frame+16] + bz,pn %icc,.Linnerskip + std $nlod,[%sp+$bias+$frame+24] + + ba .Linner + nop +.align 32 +.Linner: + ldd [$ap_l+$j],$alo ! load a[j] in double format + ldd [$ap_h+$j],$ahi + ldd [$np_l+$j],$nlo ! load n[j] in double format + ldd [$np_h+$j],$nhi + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + ldx [%sp+$bias+$frame+0],%o0 + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + ldx [%sp+$bias+$frame+8],%o1 + fmuld $alo,$bd,$alod + ldx [%sp+$bias+$frame+16],%o2 + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + ldx [%sp+$bias+$frame+24],%o3 + fmuld $ahi,$ba,$ahia + + srlx %o0,16,%o7 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + add %o7,%o1,%o1 + fmuld $ahi,$bb,$ahib + srlx %o1,16,%o7 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + add %o7,%o2,%o2 + fmuld $ahi,$bc,$ahic + srlx %o2,16,%o7 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + fmuld $ahi,$bd,$ahid + and %o1,$mask,%o1 + and %o2,$mask,%o2 + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + sllx %o1,16,%o1 + faddd $dota,$nloa,$nloa + sllx %o2,32,%o2 + faddd $dotb,$nlob,$nlob + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahic,$nhic,$dota ! $nhic + or %o2,%o0,%o0 + faddd $ahid,$nhid,$dotb ! $nhid + or %o7,%o0,%o0 ! 64-bit result + faddd $nloc,$nhia,$nloc + addcc %g1,%o0,%o0 + ldx [$tp+8],%o7 ! tp[j] + faddd $nlod,$nhib,$nlod + srlx %o3,16,%g1 ! 34-bit carry + fdtox $nloa,$nloa + bcs,a %xcc,.+8 + add %g1,1,%g1 + fdtox $nlob,$nlob + addcc %o7,%o0,%o0 + fdtox $nloc,$nloc + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1] + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + std $nloc,[%sp+$bias+$frame+16] + addcc $j,8,$j + std $nlod,[%sp+$bias+$frame+24] + bnz,pt %icc,.Linner + add $tp,8,$tp + +.Linnerskip: + fdtox $dota,$dota + fdtox $dotb,$dotb + + ldx [%sp+$bias+$frame+0],%o0 + ldx [%sp+$bias+$frame+8],%o1 + ldx [%sp+$bias+$frame+16],%o2 + ldx [%sp+$bias+$frame+24],%o3 + + srlx %o0,16,%o7 + std $dota,[%sp+$bias+$frame+32] + add %o7,%o1,%o1 + std $dotb,[%sp+$bias+$frame+40] + srlx %o1,16,%o7 + add %o7,%o2,%o2 + srlx %o2,16,%o7 + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + and %o1,$mask,%o1 + and %o2,$mask,%o2 + sllx %o1,16,%o1 + sllx %o2,32,%o2 + sllx %o3,48,%o7 + or %o1,%o0,%o0 + or %o2,%o0,%o0 + ldx [%sp+$bias+$frame+32],%o4 + or %o7,%o0,%o0 ! 64-bit result + ldx [%sp+$bias+$frame+40],%o5 + addcc %g1,%o0,%o0 + ldx [$tp+8],%o7 ! tp[j] + srlx %o3,16,%g1 ! 34-bit carry + bcs,a %xcc,.+8 + add %g1,1,%g1 + + addcc %o7,%o0,%o0 + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! 
tp[j-1] + add $tp,8,$tp + + srlx %o4,16,%o7 + add %o7,%o5,%o5 + and %o4,$mask,%o4 + sllx %o5,16,%o7 + or %o7,%o4,%o4 + addcc %g1,%o4,%o4 + srlx %o5,48,%g1 + bcs,a %xcc,.+8 + add %g1,1,%g1 + + addcc $carry,%o4,%o4 + stx %o4,[$tp] ! tp[num-1] + mov %g1,$carry + bcs,a %xcc,.+8 + add $carry,1,$carry + + addcc $i,8,$i + bnz %icc,.Louter + nop + + add $tp,8,$tp ! adjust tp to point at the end + orn %g0,%g0,%g4 + sub %g0,$num,%o7 ! n=-num + ba .Lsub + subcc %g0,%g0,%g0 ! clear %icc.c + +.align 32 +.Lsub: + ldx [$tp+%o7],%o0 + add $np,%o7,%g1 + ld [%g1+0],%o2 + ld [%g1+4],%o3 + srlx %o0,32,%o1 + subccc %o0,%o2,%o2 + add $rp,%o7,%g1 + subccc %o1,%o3,%o3 + st %o2,[%g1+0] + add %o7,8,%o7 + brnz,pt %o7,.Lsub + st %o3,[%g1+4] + subc $carry,0,%g4 + sub %g0,$num,%o7 ! n=-num + ba .Lcopy + nop + +.align 32 +.Lcopy: + ldx [$tp+%o7],%o0 + add $rp,%o7,%g1 + ld [%g1+0],%o2 + ld [%g1+4],%o3 + stx %g0,[$tp+%o7] + and %o0,%g4,%o0 + srlx %o0,32,%o1 + andn %o2,%g4,%o2 + andn %o3,%g4,%o3 + or %o2,%o0,%o0 + or %o3,%o1,%o1 + st %o0,[%g1+0] + add %o7,8,%o7 + brnz,pt %o7,.Lcopy + st %o1,[%g1+4] + sub %g0,$num,%o7 ! n=-num + +.Lzap: + stx %g0,[$ap_l+%o7] + stx %g0,[$ap_h+%o7] + stx %g0,[$np_l+%o7] + stx %g0,[$np_h+%o7] + add %o7,8,%o7 + brnz,pt %o7,.Lzap + nop + + ldx [%sp+$bias+$frame+48],%o7 + wr %g0,%o7,%asi ! restore %asi + + mov 1,%i0 +.Lret: + ret + restore +.type $fname,#function +.size $fname,(.-$fname) +.asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>" +.align 32 +___ + +$code =~ s/\`([^\`]*)\`/eval($1)/gem; + +# Below substitution makes it possible to compile without demanding +# VIS extentions on command line, e.g. -xarch=v9 vs. -xarch=v9a. I +# dare to do this, because VIS capability is detected at run-time now +# and this routine is not called on CPU not capable to execute it. Do +# note that fzeros is not the only VIS dependency! Another dependency +# is implicit and is just _a_ numerical value loaded to %asi register, +# which assembler can't recognize as VIS specific... +$code =~ s/fzeros\s+%f([0-9]+)/ + sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1) + /gem; + +print $code; +# flush +close STDOUT; diff --git a/openssl/crypto/bn/asm/via-mont.pl b/openssl/crypto/bn/asm/via-mont.pl new file mode 100644 index 000000000..c046a514c --- /dev/null +++ b/openssl/crypto/bn/asm/via-mont.pl @@ -0,0 +1,242 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# Wrapper around 'rep montmul', VIA-specific instruction accessing +# PadLock Montgomery Multiplier. The wrapper is designed as drop-in +# replacement for OpenSSL bn_mul_mont [first implemented in 0.9.9]. +# +# Below are interleaved outputs from 'openssl speed rsa dsa' for 4 +# different software configurations on 1.5GHz VIA Esther processor. +# Lines marked with "software integer" denote performance of hand- +# coded integer-only assembler found in OpenSSL 0.9.7. "Software SSE2" +# refers to hand-coded SSE2 Montgomery multiplication procedure found +# OpenSSL 0.9.9. 
"Hardware VIA SDK" refers to padlock_pmm routine from +# Padlock SDK 2.0.1 available for download from VIA, which naturally +# utilizes the magic 'repz montmul' instruction. And finally "hardware +# this" refers to *this* implementation which also uses 'repz montmul' +# +# sign verify sign/s verify/s +# rsa 512 bits 0.001720s 0.000140s 581.4 7149.7 software integer +# rsa 512 bits 0.000690s 0.000086s 1450.3 11606.0 software SSE2 +# rsa 512 bits 0.006136s 0.000201s 163.0 4974.5 hardware VIA SDK +# rsa 512 bits 0.000712s 0.000050s 1404.9 19858.5 hardware this +# +# rsa 1024 bits 0.008518s 0.000413s 117.4 2420.8 software integer +# rsa 1024 bits 0.004275s 0.000277s 233.9 3609.7 software SSE2 +# rsa 1024 bits 0.012136s 0.000260s 82.4 3844.5 hardware VIA SDK +# rsa 1024 bits 0.002522s 0.000116s 396.5 8650.9 hardware this +# +# rsa 2048 bits 0.050101s 0.001371s 20.0 729.6 software integer +# rsa 2048 bits 0.030273s 0.001008s 33.0 991.9 software SSE2 +# rsa 2048 bits 0.030833s 0.000976s 32.4 1025.1 hardware VIA SDK +# rsa 2048 bits 0.011879s 0.000342s 84.2 2921.7 hardware this +# +# rsa 4096 bits 0.327097s 0.004859s 3.1 205.8 software integer +# rsa 4096 bits 0.229318s 0.003859s 4.4 259.2 software SSE2 +# rsa 4096 bits 0.233953s 0.003274s 4.3 305.4 hardware VIA SDK +# rsa 4096 bits 0.070493s 0.001166s 14.2 857.6 hardware this +# +# dsa 512 bits 0.001342s 0.001651s 745.2 605.7 software integer +# dsa 512 bits 0.000844s 0.000987s 1185.3 1013.1 software SSE2 +# dsa 512 bits 0.001902s 0.002247s 525.6 444.9 hardware VIA SDK +# dsa 512 bits 0.000458s 0.000524s 2182.2 1909.1 hardware this +# +# dsa 1024 bits 0.003964s 0.004926s 252.3 203.0 software integer +# dsa 1024 bits 0.002686s 0.003166s 372.3 315.8 software SSE2 +# dsa 1024 bits 0.002397s 0.002823s 417.1 354.3 hardware VIA SDK +# dsa 1024 bits 0.000978s 0.001170s 1022.2 855.0 hardware this +# +# dsa 2048 bits 0.013280s 0.016518s 75.3 60.5 software integer +# dsa 2048 bits 0.009911s 0.011522s 100.9 86.8 software SSE2 +# dsa 2048 bits 0.009542s 0.011763s 104.8 85.0 hardware VIA SDK +# dsa 2048 bits 0.002884s 0.003352s 346.8 298.3 hardware this +# +# To give you some other reference point here is output for 2.4GHz P4 +# running hand-coded SSE2 bn_mul_mont found in 0.9.9, i.e. "software +# SSE2" in above terms. +# +# rsa 512 bits 0.000407s 0.000047s 2454.2 21137.0 +# rsa 1024 bits 0.002426s 0.000141s 412.1 7100.0 +# rsa 2048 bits 0.015046s 0.000491s 66.5 2034.9 +# rsa 4096 bits 0.109770s 0.002379s 9.1 420.3 +# dsa 512 bits 0.000438s 0.000525s 2281.1 1904.1 +# dsa 1024 bits 0.001346s 0.001595s 742.7 627.0 +# dsa 2048 bits 0.004745s 0.005582s 210.7 179.1 +# +# Conclusions: +# - VIA SDK leaves a *lot* of room for improvement (which this +# implementation successfully fills:-); +# - 'rep montmul' gives up to >3x performance improvement depending on +# key length; +# - in terms of absolute performance it delivers approximately as much +# as modern out-of-order 32-bit cores [again, for longer keys]. 
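The wrapper engages the PadLock unit only for operand sizes the hardware accepts and otherwise returns 0 so the caller falls back to the generic bn_mul_mont path. A minimal C sketch of that size gate, with an illustrative helper name (not part of the module) and num counted in 32-bit words exactly as in the code below:

    /* Illustrative only: mirrors the num checks performed below. */
    static int padlock_num_ok(int num)      /* num in 32-bit words */
    {
        if (num % 4 != 0)                   /* modulus not a multiple of 128 bits */
            return 0;
        if (num < 8)                        /* shorter than 256 bits */
            return 0;
        if (num > 1024)                     /* longer than 32K bits */
            return 0;
        return 1;                           /* acceptable for 'rep montmul' */
    }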
+ +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],"via-mont.pl"); + +# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num); +$func="bn_mul_mont_padlock"; + +$pad=16*1; # amount of reserved bytes on top of every vector + +# stack layout +$mZeroPrime=&DWP(0,"esp"); # these are specified by VIA +$A=&DWP(4,"esp"); +$B=&DWP(8,"esp"); +$T=&DWP(12,"esp"); +$M=&DWP(16,"esp"); +$scratch=&DWP(20,"esp"); +$rp=&DWP(24,"esp"); # these are mine +$sp=&DWP(28,"esp"); +# &DWP(32,"esp") # 32 byte scratch area +# &DWP(64+(4*$num+$pad)*0,"esp") # padded tp[num] +# &DWP(64+(4*$num+$pad)*1,"esp") # padded copy of ap[num] +# &DWP(64+(4*$num+$pad)*2,"esp") # padded copy of bp[num] +# &DWP(64+(4*$num+$pad)*3,"esp") # padded copy of np[num] +# Note that SDK suggests to unconditionally allocate 2K per vector. This +# has quite an impact on performance. It naturally depends on key length, +# but to give an example 1024 bit private RSA key operations suffer >30% +# penalty. I allocate only as much as actually required... + +&function_begin($func); + &xor ("eax","eax"); + &mov ("ecx",&wparam(5)); # num + # meet VIA's limitations for num [note that the specification + # expresses them in bits, while we work with amount of 32-bit words] + &test ("ecx",3); + &jnz (&label("leave")); # num % 4 != 0 + &cmp ("ecx",8); + &jb (&label("leave")); # num < 8 + &cmp ("ecx",1024); + &ja (&label("leave")); # num > 1024 + + &pushf (); + &cld (); + + &mov ("edi",&wparam(0)); # rp + &mov ("eax",&wparam(1)); # ap + &mov ("ebx",&wparam(2)); # bp + &mov ("edx",&wparam(3)); # np + &mov ("esi",&wparam(4)); # n0 + &mov ("esi",&DWP(0,"esi")); # *n0 + + &lea ("ecx",&DWP($pad,"","ecx",4)); # ecx becomes vector size in bytes + &lea ("ebp",&DWP(64,"","ecx",4)); # allocate 4 vectors + 64 bytes + &neg ("ebp"); + &add ("ebp","esp"); + &and ("ebp",-64); # align to cache-line + &xchg ("ebp","esp"); # alloca + + &mov ($rp,"edi"); # save rp + &mov ($sp,"ebp"); # save esp + + &mov ($mZeroPrime,"esi"); + &lea ("esi",&DWP(64,"esp")); # tp + &mov ($T,"esi"); + &lea ("edi",&DWP(32,"esp")); # scratch area + &mov ($scratch,"edi"); + &mov ("esi","eax"); + + &lea ("ebp",&DWP(-$pad,"ecx")); + &shr ("ebp",2); # restore original num value in ebp + + &xor ("eax","eax"); + + &mov ("ecx","ebp"); + &lea ("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch + &data_byte(0xf3,0xab); # rep stosl, bzero + + &mov ("ecx","ebp"); + &lea ("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy + &mov ($A,"edi"); + &data_byte(0xf3,0xa5); # rep movsl, memcpy + &mov ("ecx",$pad/4); + &data_byte(0xf3,0xab); # rep stosl, bzero pad + # edi points at the end of padded ap copy... + + &mov ("ecx","ebp"); + &mov ("esi","ebx"); + &mov ($B,"edi"); + &data_byte(0xf3,0xa5); # rep movsl, memcpy + &mov ("ecx",$pad/4); + &data_byte(0xf3,0xab); # rep stosl, bzero pad + # edi points at the end of padded bp copy... + + &mov ("ecx","ebp"); + &mov ("esi","edx"); + &mov ($M,"edi"); + &data_byte(0xf3,0xa5); # rep movsl, memcpy + &mov ("ecx",$pad/4); + &data_byte(0xf3,0xab); # rep stosl, bzero pad + # edi points at the end of padded np copy... + + # let magic happen... + &mov ("ecx","ebp"); + &mov ("esi","esp"); + &shl ("ecx",5); # convert word counter to bit counter + &align (4); + &data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul + + &mov ("ecx","ebp"); + &lea ("esi",&DWP(64,"esp")); # tp + # edi still points at the end of padded np copy... 
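	# The code below performs the usual Montgomery final subtraction:
	# the "sub" loop stores tp[i]-np[i] with borrow into rp[i]; the final
	# borrow together with the extra overflow word in tp[num] then selects
	# whether tp[] or the freshly subtracted copy in rp[] is the correct
	# result, and the "copy" loop writes that choice back to rp[] while
	# zapping tp[] on the way.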
+ &neg ("ebp"); + &lea ("ebp",&DWP(-$pad,"edi","ebp",4)); # so just "rewind" + &mov ("edi",$rp); # restore rp + &xor ("edx","edx"); # i=0 and clear CF + +&set_label("sub",8); + &mov ("eax",&DWP(0,"esi","edx",4)); + &sbb ("eax",&DWP(0,"ebp","edx",4)); + &mov (&DWP(0,"edi","edx",4),"eax"); # rp[i]=tp[i]-np[i] + &lea ("edx",&DWP(1,"edx")); # i++ + &loop (&label("sub")); # doesn't affect CF! + + &mov ("eax",&DWP(0,"esi","edx",4)); # upmost overflow bit + &sbb ("eax",0); + &and ("esi","eax"); + ¬ ("eax"); + &mov ("ebp","edi"); + &and ("ebp","eax"); + &or ("esi","ebp"); # tp=carry?tp:rp + + &mov ("ecx","edx"); # num + &xor ("edx","edx"); # i=0 + +&set_label("copy",8); + &mov ("eax",&DWP(0,"esi","edx",4)); + &mov (&DWP(64,"esp","edx",4),"ecx"); # zap tp + &mov (&DWP(0,"edi","edx",4),"eax"); + &lea ("edx",&DWP(1,"edx")); # i++ + &loop (&label("copy")); + + &mov ("ebp",$sp); + &xor ("eax","eax"); + + &mov ("ecx",64/4); + &mov ("edi","esp"); # zap frame including scratch area + &data_byte(0xf3,0xab); # rep stosl, bzero + + # zap copies of ap, bp and np + &lea ("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap + &lea ("ecx",&DWP(3*$pad/4,"edx","edx",2)); + &data_byte(0xf3,0xab); # rep stosl, bzero + + &mov ("esp","ebp"); + &inc ("eax"); # signal "done" + &popf (); +&set_label("leave"); +&function_end($func); + +&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>"); + +&asm_finish(); diff --git a/openssl/crypto/bn/asm/vms.mar b/openssl/crypto/bn/asm/vms.mar new file mode 100644 index 000000000..aefab15cd --- /dev/null +++ b/openssl/crypto/bn/asm/vms.mar @@ -0,0 +1,6440 @@ + .title vax_bn_mul_add_words unsigned multiply & add, 32*32+32+32=>64 +; +; w.j.m. 15-jan-1999 +; +; it's magic ... +; +; ULONG bn_mul_add_words(ULONG r[],ULONG a[],int n,ULONG w) { +; ULONG c = 0; +; int i; +; for(i = 0; i < n; i++) <c,r[i]> := r[i] + c + a[i] * w ; +; return c; +; } + +r=4 ;(AP) +a=8 ;(AP) +n=12 ;(AP) n by value (input) +w=16 ;(AP) w by value (input) + + + .psect code,nowrt + +.entry bn_mul_add_words,^m<r2,r3,r4,r5,r6> + + moval @r(ap),r2 + moval @a(ap),r3 + movl n(ap),r4 ; assumed >0 by C code + movl w(ap),r5 + clrl r6 ; c + +0$: + emul r5,(r3),(r2),r0 ; w, a[], r[] considered signed + + ; fixup for "negative" r[] + tstl (r2) + bgeq 10$ + incl r1 +10$: + + ; add in c + addl2 r6,r0 + adwc #0,r1 + + ; combined fixup for "negative" w, a[] + tstl r5 + bgeq 20$ + addl2 (r3),r1 +20$: + tstl (r3) + bgeq 30$ + addl2 r5,r1 +30$: + + movl r0,(r2)+ ; store lo result in r[] & advance + addl #4,r3 ; advance a[] + movl r1,r6 ; store hi result => c + + sobgtr r4,0$ + + movl r6,r0 ; return c + ret + + .title vax_bn_mul_words unsigned multiply & add, 32*32+32=>64 +; +; w.j.m. 15-jan-1999 +; +; it's magic ... 
+; +; ULONG bn_mul_words(ULONG r[],ULONG a[],int n,ULONG w) { +; ULONG c = 0; +; int i; +; for(i = 0; i < num; i++) <c,r[i]> := a[i] * w + c ; +; return(c); +; } + +r=4 ;(AP) +a=8 ;(AP) +n=12 ;(AP) n by value (input) +w=16 ;(AP) w by value (input) + + + .psect code,nowrt + +.entry bn_mul_words,^m<r2,r3,r4,r5,r6> + + moval @r(ap),r2 ; r2 -> r[] + moval @a(ap),r3 ; r3 -> a[] + movl n(ap),r4 ; r4 = loop count (assumed >0 by C code) + movl w(ap),r5 ; r5 = w + clrl r6 ; r6 = c + +0$: + ; <r1,r0> := w * a[] + c + emul r5,(r3),r6,r0 ; w, a[], c considered signed + + ; fixup for "negative" c + tstl r6 ; c + bgeq 10$ + incl r1 +10$: + + ; combined fixup for "negative" w, a[] + tstl r5 ; w + bgeq 20$ + addl2 (r3),r1 ; a[] +20$: + tstl (r3) ; a[] + bgeq 30$ + addl2 r5,r1 ; w +30$: + + movl r0,(r2)+ ; store lo result in r[] & advance + addl #4,r3 ; advance a[] + movl r1,r6 ; store hi result => c + + sobgtr r4,0$ + + movl r6,r0 ; return c + ret + + .title vax_bn_sqr_words unsigned square, 32*32=>64 +; +; w.j.m. 15-jan-1999 +; +; it's magic ... +; +; void bn_sqr_words(ULONG r[],ULONG a[],int n) { +; int i; +; for(i = 0; i < n; i++) <r[2*i+1],r[2*i]> := a[i] * a[i] ; +; } + +r=4 ;(AP) +a=8 ;(AP) +n=12 ;(AP) n by value (input) + + + .psect code,nowrt + +.entry bn_sqr_words,^m<r2,r3,r4,r5> + + moval @r(ap),r2 ; r2 -> r[] + moval @a(ap),r3 ; r3 -> a[] + movl n(ap),r4 ; r4 = n (assumed >0 by C code) + +0$: + movl (r3)+,r5 ; r5 = a[] & advance + + ; <r1,r0> := a[] * a[] + emul r5,r5,#0,r0 ; a[] considered signed + + ; fixup for "negative" a[] + tstl r5 ; a[] + bgeq 30$ + addl2 r5,r1 ; a[] + addl2 r5,r1 ; a[] +30$: + + movl r0,(r2)+ ; store lo result in r[] & advance + movl r1,(r2)+ ; store hi result in r[] & advance + + sobgtr r4,0$ + + movl #1,r0 ; return SS$_NORMAL + ret + + .title vax_bn_div_words unsigned divide +; +; Richard Levitte 20-Nov-2000 +; +; ULONG bn_div_words(ULONG h, ULONG l, ULONG d) +; { +; return ((ULONG)((((ULLONG)h)<<32)|l) / (ULLONG)d); +; } +; +; Using EDIV would be very easy, if it didn't do signed calculations. +; Any time any of the input numbers are signed, there are problems, +; usually with integer overflow, at which point it returns useless +; data (the quotient gets the value of l, and the remainder becomes 0). +; +; If it was just for the dividend, it would be very easy, just divide +; it by 2 (unsigned), do the division, multiply the resulting quotient +; and remainder by 2, add the bit that was dropped when dividing by 2 +; to the remainder, and do some adjustment so the remainder doesn't +; end up larger than the divisor. For some cases when the divisor is +; negative (from EDIV's point of view, i.e. when the highest bit is set), +; dividing the dividend by 2 isn't enough, and since some operations +; might generate integer overflows even when the dividend is divided by +; 4 (when the high part of the shifted down dividend ends up being exactly +; half of the divisor, the result is the quotient 0x80000000, which is +; negative...) it needs to be divided by 8. Furthermore, the divisor needs +; to be divided by 2 (unsigned) as well, to avoid more problems with the sign. +; In this case, a little extra fiddling with the remainder is required. +; +; So, the simplest way to handle this is always to divide the dividend +; by 8, and to divide the divisor by 2 if it's highest bit is set. +; After EDIV has been used, the quotient gets multiplied by 8 if the +; original divisor was positive, otherwise 4. The remainder, oddly +; enough, is *always* multiplied by 8. 
+; NOTE: in the case mentioned above, where the high part of the shifted +; down dividend ends up being exactly half the shifted down divisor, we +; end up with a 33 bit quotient. That's no problem however, it usually +; means we have ended up with a too large remainder as well, and the +; problem is fixed by the last part of the algorithm (next paragraph). +; +; The routine ends with comparing the resulting remainder with the +; original divisor and if the remainder is larger, subtract the +; original divisor from it, and increase the quotient by 1. This is +; done until the remainder is smaller than the divisor. +; +; The complete algorithm looks like this: +; +; d' = d +; l' = l & 7 +; [h,l] = [h,l] >> 3 +; [q,r] = floor([h,l] / d) # This is the EDIV operation +; if (q < 0) q = -q # I doubt this is necessary any more +; +; r' = r >> 29 +; if (d' >= 0) +; q' = q >> 29 +; q = q << 3 +; else +; q' = q >> 30 +; q = q << 2 +; r = (r << 3) + l' +; +; if (d' < 0) +; { +; [r',r] = [r',r] - q +; while ([r',r] < 0) +; { +; [r',r] = [r',r] + d +; [q',q] = [q',q] - 1 +; } +; } +; +; while ([r',r] >= d') +; { +; [r',r] = [r',r] - d' +; [q',q] = [q',q] + 1 +; } +; +; return q + +h=4 ;(AP) h by value (input) +l=8 ;(AP) l by value (input) +d=12 ;(AP) d by value (input) + +;r2 = l, q +;r3 = h, r +;r4 = d +;r5 = l' +;r6 = r' +;r7 = d' +;r8 = q' + + .psect code,nowrt + +.entry bn_div_words,^m<r2,r3,r4,r5,r6,r7,r8> + movl l(ap),r2 + movl h(ap),r3 + movl d(ap),r4 + + bicl3 #^XFFFFFFF8,r2,r5 ; l' = l & 7 + bicl3 #^X00000007,r2,r2 + + bicl3 #^XFFFFFFF8,r3,r6 + bicl3 #^X00000007,r3,r3 + + addl r6,r2 + + rotl #-3,r2,r2 ; l = l >> 3 + rotl #-3,r3,r3 ; h = h >> 3 + + movl r4,r7 ; d' = d + + movl #0,r6 ; r' = 0 + movl #0,r8 ; q' = 0 + + tstl r4 + beql 666$ ; Uh-oh, the divisor is 0... + bgtr 1$ + rotl #-1,r4,r4 ; If d is negative, shift it right. + bicl2 #^X80000000,r4 ; Since d is then a large number, the + ; lowest bit is insignificant + ; (contradict that, and I'll fix the problem!) 
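; Summary of what follows: EDIV produces a quotient and remainder for the
; shifted-down operands; the quotient is then scaled back up by 8 (or by 4
; when the divisor had to be halved), the remainder is scaled back up by 8
; with the saved low bits l' added back in, and the loops at 45$ and 5$
; adjust [q',q] and [r',r] against the original divisor until the remainder
; is in range.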
+1$: + ediv r4,r2,r2,r3 ; Do the actual division + + tstl r2 + bgeq 3$ + mnegl r2,r2 ; if q < 0, negate it +3$: + tstl r7 + blss 4$ + rotl #3,r2,r2 ; q = q << 3 + bicl3 #^XFFFFFFF8,r2,r8 ; q' gets the high bits from q + bicl3 #^X00000007,r2,r2 + bsb 41$ +4$: ; else + rotl #2,r2,r2 ; q = q << 2 + bicl3 #^XFFFFFFFC,r2,r8 ; q' gets the high bits from q + bicl3 #^X00000003,r2,r2 +41$: + rotl #3,r3,r3 ; r = r << 3 + bicl3 #^XFFFFFFF8,r3,r6 ; r' gets the high bits from r + bicl3 #^X00000007,r3,r3 + addl r5,r3 ; r = r + l' + + tstl r7 + bgeq 5$ + bitl #1,r7 + beql 5$ ; if d' < 0 && d' & 1 + subl r2,r3 ; [r',r] = [r',r] - [q',q] + sbwc r8,r6 +45$: + bgeq 5$ ; while r < 0 + decl r2 ; [q',q] = [q',q] - 1 + sbwc #0,r8 + addl r7,r3 ; [r',r] = [r',r] + d' + adwc #0,r6 + brb 45$ + +; The return points are placed in the middle to keep a short distance from +; all the branch points +42$: +; movl r3,r1 + movl r2,r0 + ret +666$: + movl #^XFFFFFFFF,r0 + ret + +5$: + tstl r6 + bneq 6$ + cmpl r3,r7 + blssu 42$ ; while [r',r] >= d' +6$: + subl r7,r3 ; [r',r] = [r',r] - d' + sbwc #0,r6 + incl r2 ; [q',q] = [q',q] + 1 + adwc #0,r8 + brb 5$ + + .title vax_bn_add_words unsigned add of two arrays +; +; Richard Levitte 20-Nov-2000 +; +; ULONG bn_add_words(ULONG r[], ULONG a[], ULONG b[], int n) { +; ULONG c = 0; +; int i; +; for (i = 0; i < n; i++) <c,r[i]> = a[i] + b[i] + c; +; return(c); +; } + +r=4 ;(AP) r by reference (output) +a=8 ;(AP) a by reference (input) +b=12 ;(AP) b by reference (input) +n=16 ;(AP) n by value (input) + + + .psect code,nowrt + +.entry bn_add_words,^m<r2,r3,r4,r5,r6> + + moval @r(ap),r2 + moval @a(ap),r3 + moval @b(ap),r4 + movl n(ap),r5 ; assumed >0 by C code + clrl r0 ; c + + tstl r5 ; carry = 0 + bleq 666$ + +0$: + movl (r3)+,r6 ; carry untouched + adwc (r4)+,r6 ; carry used and touched + movl r6,(r2)+ ; carry untouched + sobgtr r5,0$ ; carry untouched + + adwc #0,r0 +666$: + ret + + .title vax_bn_sub_words unsigned add of two arrays +; +; Richard Levitte 20-Nov-2000 +; +; ULONG bn_sub_words(ULONG r[], ULONG a[], ULONG b[], int n) { +; ULONG c = 0; +; int i; +; for (i = 0; i < n; i++) <c,r[i]> = a[i] - b[i] - c; +; return(c); +; } + +r=4 ;(AP) r by reference (output) +a=8 ;(AP) a by reference (input) +b=12 ;(AP) b by reference (input) +n=16 ;(AP) n by value (input) + + + .psect code,nowrt + +.entry bn_sub_words,^m<r2,r3,r4,r5,r6> + + moval @r(ap),r2 + moval @a(ap),r3 + moval @b(ap),r4 + movl n(ap),r5 ; assumed >0 by C code + clrl r0 ; c + + tstl r5 ; carry = 0 + bleq 666$ + +0$: + movl (r3)+,r6 ; carry untouched + sbwc (r4)+,r6 ; carry used and touched + movl r6,(r2)+ ; carry untouched + sobgtr r5,0$ ; carry untouched + + adwc #0,r0 +666$: + ret + + +;r=4 ;(AP) +;a=8 ;(AP) +;b=12 ;(AP) +;n=16 ;(AP) n by value (input) + + .psect code,nowrt + +.entry BN_MUL_COMBA8,^m<r2,r3,r4,r5,r6,r7,r8,r9,r10,r11> + movab -924(sp),sp + clrq r8 + + clrl r10 + + movl 8(ap),r6 + movzwl 2(r6),r3 + movl 12(ap),r7 + bicl3 #-65536,(r7),r2 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-12(fp) + bicl3 #-65536,r3,-16(fp) + mull3 r0,-12(fp),-4(fp) + mull2 r2,-12(fp) + mull3 r2,-16(fp),-8(fp) + mull2 r0,-16(fp) + addl3 -4(fp),-8(fp),r0 + bicl3 #0,r0,-4(fp) + cmpl -4(fp),-8(fp) + bgequ noname.45 + addl2 #65536,-16(fp) +noname.45: + movzwl -2(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-16(fp) + bicl3 #-65536,-4(fp),r0 + ashl #16,r0,-8(fp) + addl3 -8(fp),-12(fp),r0 + bicl3 #0,r0,-12(fp) + cmpl -12(fp),-8(fp) + bgequ noname.46 + incl -16(fp) +noname.46: + movl -12(fp),r1 + movl -16(fp),r2 + addl2 r1,r9 + bicl2 
#0,r9 + cmpl r9,r1 + bgequ noname.47 + incl r2 +noname.47: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.48 + incl r10 +noname.48: + + movl 4(ap),r11 + movl r9,(r11) + + clrl r9 + + movzwl 2(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-28(fp) + bicl3 #-65536,r2,-32(fp) + mull3 r0,-28(fp),-20(fp) + mull2 r3,-28(fp) + mull3 r3,-32(fp),-24(fp) + mull2 r0,-32(fp) + addl3 -20(fp),-24(fp),r0 + bicl3 #0,r0,-20(fp) + cmpl -20(fp),-24(fp) + bgequ noname.49 + addl2 #65536,-32(fp) +noname.49: + movzwl -18(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-32(fp) + bicl3 #-65536,-20(fp),r0 + ashl #16,r0,-24(fp) + addl3 -24(fp),-28(fp),r0 + bicl3 #0,r0,-28(fp) + cmpl -28(fp),-24(fp) + bgequ noname.50 + incl -32(fp) +noname.50: + movl -28(fp),r1 + movl -32(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.51 + incl r2 +noname.51: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.52 + incl r9 +noname.52: + + movzwl 6(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-44(fp) + bicl3 #-65536,r2,-48(fp) + mull3 r0,-44(fp),-36(fp) + mull2 r3,-44(fp) + mull3 r3,-48(fp),-40(fp) + mull2 r0,-48(fp) + addl3 -36(fp),-40(fp),r0 + bicl3 #0,r0,-36(fp) + cmpl -36(fp),-40(fp) + bgequ noname.53 + addl2 #65536,-48(fp) +noname.53: + movzwl -34(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-48(fp) + bicl3 #-65536,-36(fp),r0 + ashl #16,r0,-40(fp) + addl3 -40(fp),-44(fp),r0 + bicl3 #0,r0,-44(fp) + cmpl -44(fp),-40(fp) + bgequ noname.54 + incl -48(fp) +noname.54: + movl -44(fp),r1 + movl -48(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.55 + incl r2 +noname.55: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.56 + incl r9 +noname.56: + + movl r8,4(r11) + + clrl r8 + + movzwl 10(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-60(fp) + bicl3 #-65536,r2,-64(fp) + mull3 r0,-60(fp),-52(fp) + mull2 r3,-60(fp) + mull3 r3,-64(fp),-56(fp) + mull2 r0,-64(fp) + addl3 -52(fp),-56(fp),r0 + bicl3 #0,r0,-52(fp) + cmpl -52(fp),-56(fp) + bgequ noname.57 + addl2 #65536,-64(fp) +noname.57: + movzwl -50(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-64(fp) + bicl3 #-65536,-52(fp),r0 + ashl #16,r0,-56(fp) + addl3 -56(fp),-60(fp),r0 + bicl3 #0,r0,-60(fp) + cmpl -60(fp),-56(fp) + bgequ noname.58 + incl -64(fp) +noname.58: + movl -60(fp),r1 + movl -64(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.59 + incl r2 +noname.59: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.60 + incl r8 +noname.60: + + movzwl 6(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-76(fp) + bicl3 #-65536,r2,-80(fp) + mull3 r0,-76(fp),-68(fp) + mull2 r3,-76(fp) + mull3 r3,-80(fp),-72(fp) + mull2 r0,-80(fp) + addl3 -68(fp),-72(fp),r0 + bicl3 #0,r0,-68(fp) + cmpl -68(fp),-72(fp) + bgequ noname.61 + addl2 #65536,-80(fp) +noname.61: + movzwl -66(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-80(fp) + bicl3 #-65536,-68(fp),r0 + ashl #16,r0,-72(fp) + addl3 -72(fp),-76(fp),r0 + bicl3 #0,r0,-76(fp) + cmpl -76(fp),-72(fp) + bgequ noname.62 + incl -80(fp) +noname.62: + movl -76(fp),r1 + movl -80(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.63 + incl r2 +noname.63: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.64 + incl r8 +noname.64: + + movzwl 2(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-92(fp) + bicl3 #-65536,r2,-96(fp) + mull3 r0,-92(fp),-84(fp) + mull2 r3,-92(fp) + mull3 r3,-96(fp),-88(fp) + 
mull2 r0,-96(fp) + addl3 -84(fp),-88(fp),r0 + bicl3 #0,r0,-84(fp) + cmpl -84(fp),-88(fp) + bgequ noname.65 + addl2 #65536,-96(fp) +noname.65: + movzwl -82(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-96(fp) + bicl3 #-65536,-84(fp),r0 + ashl #16,r0,-88(fp) + addl3 -88(fp),-92(fp),r0 + bicl3 #0,r0,-92(fp) + cmpl -92(fp),-88(fp) + bgequ noname.66 + incl -96(fp) +noname.66: + movl -92(fp),r1 + movl -96(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.67 + incl r2 +noname.67: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.68 + incl r8 +noname.68: + + movl r10,8(r11) + + clrl r10 + + movzwl 2(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-108(fp) + bicl3 #-65536,r2,-112(fp) + mull3 r0,-108(fp),-100(fp) + mull2 r3,-108(fp) + mull3 r3,-112(fp),-104(fp) + mull2 r0,-112(fp) + addl3 -100(fp),-104(fp),r0 + bicl3 #0,r0,-100(fp) + cmpl -100(fp),-104(fp) + bgequ noname.69 + addl2 #65536,-112(fp) +noname.69: + movzwl -98(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-112(fp) + bicl3 #-65536,-100(fp),r0 + ashl #16,r0,-104(fp) + addl3 -104(fp),-108(fp),r0 + bicl3 #0,r0,-108(fp) + cmpl -108(fp),-104(fp) + bgequ noname.70 + incl -112(fp) +noname.70: + movl -108(fp),r1 + movl -112(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.71 + incl r2 +noname.71: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.72 + incl r10 +noname.72: + + movzwl 6(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-124(fp) + bicl3 #-65536,r2,-128(fp) + mull3 r0,-124(fp),-116(fp) + mull2 r3,-124(fp) + mull3 r3,-128(fp),-120(fp) + mull2 r0,-128(fp) + addl3 -116(fp),-120(fp),r0 + bicl3 #0,r0,-116(fp) + cmpl -116(fp),-120(fp) + bgequ noname.73 + addl2 #65536,-128(fp) +noname.73: + movzwl -114(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-128(fp) + bicl3 #-65536,-116(fp),r0 + ashl #16,r0,-120(fp) + addl3 -120(fp),-124(fp),r0 + bicl3 #0,r0,-124(fp) + cmpl -124(fp),-120(fp) + bgequ noname.74 + incl -128(fp) +noname.74: + movl -124(fp),r1 + movl -128(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.75 + incl r2 +noname.75: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.76 + incl r10 +noname.76: + + movzwl 10(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-140(fp) + bicl3 #-65536,r2,-144(fp) + mull3 r0,-140(fp),-132(fp) + mull2 r3,-140(fp) + mull3 r3,-144(fp),-136(fp) + mull2 r0,-144(fp) + addl3 -132(fp),-136(fp),r0 + bicl3 #0,r0,-132(fp) + cmpl -132(fp),-136(fp) + bgequ noname.77 + addl2 #65536,-144(fp) +noname.77: + movzwl -130(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-144(fp) + bicl3 #-65536,-132(fp),r0 + ashl #16,r0,-136(fp) + addl3 -136(fp),-140(fp),r0 + bicl3 #0,r0,-140(fp) + cmpl -140(fp),-136(fp) + bgequ noname.78 + incl -144(fp) +noname.78: + movl -140(fp),r1 + movl -144(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.79 + incl r2 +noname.79: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.80 + incl r10 +noname.80: + + movzwl 14(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-156(fp) + bicl3 #-65536,r2,-160(fp) + mull3 r0,-156(fp),-148(fp) + mull2 r3,-156(fp) + mull3 r3,-160(fp),-152(fp) + mull2 r0,-160(fp) + addl3 -148(fp),-152(fp),r0 + bicl3 #0,r0,-148(fp) + cmpl -148(fp),-152(fp) + bgequ noname.81 + addl2 #65536,-160(fp) +noname.81: + movzwl -146(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-160(fp) + bicl3 #-65536,-148(fp),r0 + ashl #16,r0,-152(fp) + addl3 -152(fp),-156(fp),r0 + bicl3 #0,r0,-156(fp) + 
cmpl -156(fp),-152(fp) + bgequ noname.82 + incl -160(fp) +noname.82: + movl -156(fp),r1 + movl -160(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.83 + incl r2 +noname.83: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.84 + incl r10 +noname.84: + + movl r9,12(r11) + + clrl r9 + + movzwl 18(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r6),-172(fp) + bicl3 #-65536,r2,-176(fp) + mull3 r0,-172(fp),-164(fp) + mull2 r3,-172(fp) + mull3 r3,-176(fp),-168(fp) + mull2 r0,-176(fp) + addl3 -164(fp),-168(fp),r0 + bicl3 #0,r0,-164(fp) + cmpl -164(fp),-168(fp) + bgequ noname.85 + addl2 #65536,-176(fp) +noname.85: + movzwl -162(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-176(fp) + bicl3 #-65536,-164(fp),r0 + ashl #16,r0,-168(fp) + addl3 -168(fp),-172(fp),r0 + bicl3 #0,r0,-172(fp) + cmpl -172(fp),-168(fp) + bgequ noname.86 + incl -176(fp) +noname.86: + movl -172(fp),r1 + movl -176(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.87 + incl r2 +noname.87: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.88 + incl r9 +noname.88: + + movzwl 14(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-188(fp) + bicl3 #-65536,r2,-192(fp) + mull3 r0,-188(fp),-180(fp) + mull2 r3,-188(fp) + mull3 r3,-192(fp),-184(fp) + mull2 r0,-192(fp) + addl3 -180(fp),-184(fp),r0 + bicl3 #0,r0,-180(fp) + cmpl -180(fp),-184(fp) + bgequ noname.89 + addl2 #65536,-192(fp) +noname.89: + movzwl -178(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-192(fp) + bicl3 #-65536,-180(fp),r0 + ashl #16,r0,-184(fp) + addl3 -184(fp),-188(fp),r0 + bicl3 #0,r0,-188(fp) + cmpl -188(fp),-184(fp) + bgequ noname.90 + incl -192(fp) +noname.90: + movl -188(fp),r1 + movl -192(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.91 + incl r2 +noname.91: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.92 + incl r9 +noname.92: + + movzwl 10(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-204(fp) + bicl3 #-65536,r2,-208(fp) + mull3 r0,-204(fp),-196(fp) + mull2 r3,-204(fp) + mull3 r3,-208(fp),-200(fp) + mull2 r0,-208(fp) + addl3 -196(fp),-200(fp),r0 + bicl3 #0,r0,-196(fp) + cmpl -196(fp),-200(fp) + bgequ noname.93 + addl2 #65536,-208(fp) +noname.93: + movzwl -194(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-208(fp) + bicl3 #-65536,-196(fp),r0 + ashl #16,r0,-200(fp) + addl3 -200(fp),-204(fp),r0 + bicl3 #0,r0,-204(fp) + cmpl -204(fp),-200(fp) + bgequ noname.94 + incl -208(fp) +noname.94: + movl -204(fp),r1 + movl -208(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.95 + incl r2 +noname.95: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.96 + incl r9 +noname.96: + + movzwl 6(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-220(fp) + bicl3 #-65536,r2,-224(fp) + mull3 r0,-220(fp),-212(fp) + mull2 r3,-220(fp) + mull3 r3,-224(fp),-216(fp) + mull2 r0,-224(fp) + addl3 -212(fp),-216(fp),r0 + bicl3 #0,r0,-212(fp) + cmpl -212(fp),-216(fp) + bgequ noname.97 + addl2 #65536,-224(fp) +noname.97: + movzwl -210(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-224(fp) + bicl3 #-65536,-212(fp),r0 + ashl #16,r0,-216(fp) + addl3 -216(fp),-220(fp),r0 + bicl3 #0,r0,-220(fp) + cmpl -220(fp),-216(fp) + bgequ noname.98 + incl -224(fp) +noname.98: + movl -220(fp),r1 + movl -224(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.99 + incl r2 +noname.99: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.100 + incl r9 +noname.100: + + movzwl 
2(r6),r2 + bicl3 #-65536,16(r7),r3 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-236(fp) + bicl3 #-65536,r2,-240(fp) + mull3 r0,-236(fp),-228(fp) + mull2 r3,-236(fp) + mull3 r3,-240(fp),-232(fp) + mull2 r0,-240(fp) + addl3 -228(fp),-232(fp),r0 + bicl3 #0,r0,-228(fp) + cmpl -228(fp),-232(fp) + bgequ noname.101 + addl2 #65536,-240(fp) +noname.101: + movzwl -226(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-240(fp) + bicl3 #-65536,-228(fp),r0 + ashl #16,r0,-232(fp) + addl3 -232(fp),-236(fp),r0 + bicl3 #0,r0,-236(fp) + cmpl -236(fp),-232(fp) + bgequ noname.102 + incl -240(fp) +noname.102: + movl -236(fp),r1 + movl -240(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.103 + incl r2 +noname.103: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.104 + incl r9 +noname.104: + + movl r8,16(r11) + + clrl r8 + + movzwl 2(r6),r2 + bicl3 #-65536,20(r7),r3 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-252(fp) + bicl3 #-65536,r2,-256(fp) + mull3 r0,-252(fp),-244(fp) + mull2 r3,-252(fp) + mull3 r3,-256(fp),-248(fp) + mull2 r0,-256(fp) + addl3 -244(fp),-248(fp),r0 + bicl3 #0,r0,-244(fp) + cmpl -244(fp),-248(fp) + bgequ noname.105 + addl2 #65536,-256(fp) +noname.105: + movzwl -242(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-256(fp) + bicl3 #-65536,-244(fp),r0 + ashl #16,r0,-248(fp) + addl3 -248(fp),-252(fp),r0 + bicl3 #0,r0,-252(fp) + cmpl -252(fp),-248(fp) + bgequ noname.106 + incl -256(fp) +noname.106: + movl -252(fp),r1 + movl -256(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.107 + incl r2 +noname.107: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.108 + incl r8 +noname.108: + + movzwl 6(r6),r2 + bicl3 #-65536,16(r7),r3 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-268(fp) + bicl3 #-65536,r2,-272(fp) + mull3 r0,-268(fp),-260(fp) + mull2 r3,-268(fp) + mull3 r3,-272(fp),-264(fp) + mull2 r0,-272(fp) + addl3 -260(fp),-264(fp),r0 + bicl3 #0,r0,-260(fp) + cmpl -260(fp),-264(fp) + bgequ noname.109 + addl2 #65536,-272(fp) +noname.109: + movzwl -258(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-272(fp) + bicl3 #-65536,-260(fp),r0 + ashl #16,r0,-264(fp) + addl3 -264(fp),-268(fp),r0 + bicl3 #0,r0,-268(fp) + cmpl -268(fp),-264(fp) + bgequ noname.110 + incl -272(fp) +noname.110: + movl -268(fp),r1 + movl -272(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.111 + incl r2 +noname.111: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.112 + incl r8 +noname.112: + + movzwl 10(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-284(fp) + bicl3 #-65536,r2,-288(fp) + mull3 r0,-284(fp),-276(fp) + mull2 r3,-284(fp) + mull3 r3,-288(fp),-280(fp) + mull2 r0,-288(fp) + addl3 -276(fp),-280(fp),r0 + bicl3 #0,r0,-276(fp) + cmpl -276(fp),-280(fp) + bgequ noname.113 + addl2 #65536,-288(fp) +noname.113: + movzwl -274(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-288(fp) + bicl3 #-65536,-276(fp),r0 + ashl #16,r0,-280(fp) + addl3 -280(fp),-284(fp),r0 + bicl3 #0,r0,-284(fp) + cmpl -284(fp),-280(fp) + bgequ noname.114 + incl -288(fp) +noname.114: + movl -284(fp),r1 + movl -288(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.115 + incl r2 +noname.115: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.116 + incl r8 +noname.116: + + movzwl 14(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-300(fp) + bicl3 #-65536,r2,-304(fp) + mull3 r0,-300(fp),-292(fp) + mull2 r3,-300(fp) + mull3 r3,-304(fp),-296(fp) + mull2 r0,-304(fp) + addl3 
-292(fp),-296(fp),r0 + bicl3 #0,r0,-292(fp) + cmpl -292(fp),-296(fp) + bgequ noname.117 + addl2 #65536,-304(fp) +noname.117: + movzwl -290(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-304(fp) + bicl3 #-65536,-292(fp),r0 + ashl #16,r0,-296(fp) + addl3 -296(fp),-300(fp),r0 + bicl3 #0,r0,-300(fp) + cmpl -300(fp),-296(fp) + bgequ noname.118 + incl -304(fp) +noname.118: + movl -300(fp),r1 + movl -304(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.119 + incl r2 +noname.119: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.120 + incl r8 +noname.120: + + movzwl 18(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r6),-316(fp) + bicl3 #-65536,r2,-320(fp) + mull3 r0,-316(fp),-308(fp) + mull2 r3,-316(fp) + mull3 r3,-320(fp),-312(fp) + mull2 r0,-320(fp) + addl3 -308(fp),-312(fp),r0 + bicl3 #0,r0,-308(fp) + cmpl -308(fp),-312(fp) + bgequ noname.121 + addl2 #65536,-320(fp) +noname.121: + movzwl -306(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-320(fp) + bicl3 #-65536,-308(fp),r0 + ashl #16,r0,-312(fp) + addl3 -312(fp),-316(fp),r0 + bicl3 #0,r0,-316(fp) + cmpl -316(fp),-312(fp) + bgequ noname.122 + incl -320(fp) +noname.122: + movl -316(fp),r1 + movl -320(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.123 + incl r2 + +noname.123: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.124 + incl r8 +noname.124: + + movzwl 22(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,20(r6),-332(fp) + bicl3 #-65536,r2,-336(fp) + mull3 r0,-332(fp),-324(fp) + mull2 r3,-332(fp) + mull3 r3,-336(fp),-328(fp) + mull2 r0,-336(fp) + addl3 -324(fp),-328(fp),r0 + bicl3 #0,r0,-324(fp) + cmpl -324(fp),-328(fp) + bgequ noname.125 + addl2 #65536,-336(fp) +noname.125: + movzwl -322(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-336(fp) + bicl3 #-65536,-324(fp),r0 + ashl #16,r0,-328(fp) + addl3 -328(fp),-332(fp),r0 + bicl3 #0,r0,-332(fp) + cmpl -332(fp),-328(fp) + bgequ noname.126 + incl -336(fp) +noname.126: + movl -332(fp),r1 + movl -336(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.127 + incl r2 +noname.127: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.128 + incl r8 +noname.128: + + movl r10,20(r11) + + clrl r10 + + movzwl 26(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,24(r6),-348(fp) + bicl3 #-65536,r2,-352(fp) + mull3 r0,-348(fp),-340(fp) + mull2 r3,-348(fp) + mull3 r3,-352(fp),-344(fp) + mull2 r0,-352(fp) + addl3 -340(fp),-344(fp),r0 + bicl3 #0,r0,-340(fp) + cmpl -340(fp),-344(fp) + bgequ noname.129 + addl2 #65536,-352(fp) +noname.129: + movzwl -338(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-352(fp) + bicl3 #-65536,-340(fp),r0 + ashl #16,r0,-344(fp) + addl3 -344(fp),-348(fp),r0 + bicl3 #0,r0,-348(fp) + cmpl -348(fp),-344(fp) + bgequ noname.130 + incl -352(fp) +noname.130: + movl -348(fp),r1 + movl -352(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.131 + incl r2 +noname.131: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.132 + incl r10 +noname.132: + + movzwl 22(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,20(r6),-364(fp) + bicl3 #-65536,r2,-368(fp) + mull3 r0,-364(fp),-356(fp) + mull2 r3,-364(fp) + mull3 r3,-368(fp),-360(fp) + mull2 r0,-368(fp) + addl3 -356(fp),-360(fp),r0 + bicl3 #0,r0,-356(fp) + cmpl -356(fp),-360(fp) + bgequ noname.133 + addl2 #65536,-368(fp) +noname.133: + movzwl -354(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-368(fp) + bicl3 #-65536,-356(fp),r0 + ashl #16,r0,-360(fp) + addl3 
-360(fp),-364(fp),r0 + bicl3 #0,r0,-364(fp) + cmpl -364(fp),-360(fp) + bgequ noname.134 + incl -368(fp) +noname.134: + movl -364(fp),r1 + movl -368(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.135 + incl r2 +noname.135: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.136 + incl r10 +noname.136: + + movzwl 18(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r6),-380(fp) + bicl3 #-65536,r2,-384(fp) + mull3 r0,-380(fp),-372(fp) + mull2 r3,-380(fp) + mull3 r3,-384(fp),-376(fp) + mull2 r0,-384(fp) + addl3 -372(fp),-376(fp),r0 + bicl3 #0,r0,-372(fp) + cmpl -372(fp),-376(fp) + bgequ noname.137 + addl2 #65536,-384(fp) +noname.137: + movzwl -370(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-384(fp) + bicl3 #-65536,-372(fp),r0 + ashl #16,r0,-376(fp) + addl3 -376(fp),-380(fp),r0 + bicl3 #0,r0,-380(fp) + cmpl -380(fp),-376(fp) + bgequ noname.138 + incl -384(fp) +noname.138: + movl -380(fp),r1 + movl -384(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.139 + incl r2 +noname.139: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.140 + incl r10 +noname.140: + + movzwl 14(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-396(fp) + bicl3 #-65536,r2,-400(fp) + mull3 r0,-396(fp),-388(fp) + mull2 r3,-396(fp) + mull3 r3,-400(fp),-392(fp) + mull2 r0,-400(fp) + addl3 -388(fp),-392(fp),r0 + bicl3 #0,r0,-388(fp) + cmpl -388(fp),-392(fp) + bgequ noname.141 + addl2 #65536,-400(fp) +noname.141: + movzwl -386(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-400(fp) + bicl3 #-65536,-388(fp),r0 + ashl #16,r0,-392(fp) + addl3 -392(fp),-396(fp),r0 + bicl3 #0,r0,-396(fp) + cmpl -396(fp),-392(fp) + bgequ noname.142 + incl -400(fp) +noname.142: + movl -396(fp),r1 + movl -400(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.143 + incl r2 +noname.143: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.144 + incl r10 +noname.144: + + movzwl 10(r6),r2 + bicl3 #-65536,16(r7),r3 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-412(fp) + bicl3 #-65536,r2,-416(fp) + mull3 r0,-412(fp),-404(fp) + mull2 r3,-412(fp) + mull3 r3,-416(fp),-408(fp) + mull2 r0,-416(fp) + addl3 -404(fp),-408(fp),r0 + bicl3 #0,r0,-404(fp) + cmpl -404(fp),-408(fp) + bgequ noname.145 + addl2 #65536,-416(fp) +noname.145: + movzwl -402(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-416(fp) + bicl3 #-65536,-404(fp),r0 + ashl #16,r0,-408(fp) + addl3 -408(fp),-412(fp),r0 + bicl3 #0,r0,-412(fp) + cmpl -412(fp),-408(fp) + bgequ noname.146 + incl -416(fp) +noname.146: + movl -412(fp),r1 + movl -416(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.147 + incl r2 +noname.147: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.148 + incl r10 +noname.148: + + movzwl 6(r6),r2 + bicl3 #-65536,20(r7),r3 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-428(fp) + bicl3 #-65536,r2,-432(fp) + mull3 r0,-428(fp),-420(fp) + mull2 r3,-428(fp) + mull3 r3,-432(fp),-424(fp) + mull2 r0,-432(fp) + addl3 -420(fp),-424(fp),r0 + bicl3 #0,r0,-420(fp) + cmpl -420(fp),-424(fp) + bgequ noname.149 + addl2 #65536,-432(fp) +noname.149: + movzwl -418(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-432(fp) + bicl3 #-65536,-420(fp),r0 + ashl #16,r0,-424(fp) + addl3 -424(fp),-428(fp),r0 + bicl3 #0,r0,-428(fp) + cmpl -428(fp),-424(fp) + bgequ noname.150 + incl -432(fp) +noname.150: + movl -428(fp),r1 + movl -432(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.151 + incl r2 +noname.151: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ 
noname.152 + incl r10 +noname.152: + + movzwl 2(r6),r2 + bicl3 #-65536,24(r7),r3 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-444(fp) + bicl3 #-65536,r2,-448(fp) + mull3 r0,-444(fp),-436(fp) + mull2 r3,-444(fp) + mull3 r3,-448(fp),-440(fp) + mull2 r0,-448(fp) + addl3 -436(fp),-440(fp),r0 + bicl3 #0,r0,-436(fp) + cmpl -436(fp),-440(fp) + bgequ noname.153 + addl2 #65536,-448(fp) +noname.153: + movzwl -434(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-448(fp) + bicl3 #-65536,-436(fp),r0 + ashl #16,r0,-440(fp) + addl3 -440(fp),-444(fp),r0 + bicl3 #0,r0,-444(fp) + cmpl -444(fp),-440(fp) + bgequ noname.154 + incl -448(fp) +noname.154: + movl -444(fp),r1 + movl -448(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.155 + incl r2 +noname.155: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.156 + incl r10 +noname.156: + + movl r9,24(r11) + + clrl r9 + + movzwl 2(r6),r2 + bicl3 #-65536,28(r7),r3 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,(r6),-460(fp) + bicl3 #-65536,r2,-464(fp) + mull3 r0,-460(fp),-452(fp) + mull2 r3,-460(fp) + mull3 r3,-464(fp),-456(fp) + mull2 r0,-464(fp) + addl3 -452(fp),-456(fp),r0 + bicl3 #0,r0,-452(fp) + cmpl -452(fp),-456(fp) + bgequ noname.157 + addl2 #65536,-464(fp) +noname.157: + movzwl -450(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-464(fp) + bicl3 #-65536,-452(fp),r0 + ashl #16,r0,-456(fp) + addl3 -456(fp),-460(fp),r0 + bicl3 #0,r0,-460(fp) + cmpl -460(fp),-456(fp) + bgequ noname.158 + incl -464(fp) +noname.158: + movl -460(fp),r1 + movl -464(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.159 + incl r2 +noname.159: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.160 + incl r9 +noname.160: + + movzwl 6(r6),r2 + bicl3 #-65536,24(r7),r3 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-476(fp) + bicl3 #-65536,r2,-480(fp) + mull3 r0,-476(fp),-468(fp) + mull2 r3,-476(fp) + mull3 r3,-480(fp),-472(fp) + mull2 r0,-480(fp) + addl3 -468(fp),-472(fp),r0 + bicl3 #0,r0,-468(fp) + cmpl -468(fp),-472(fp) + bgequ noname.161 + addl2 #65536,-480(fp) +noname.161: + movzwl -466(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-480(fp) + bicl3 #-65536,-468(fp),r0 + ashl #16,r0,-472(fp) + addl3 -472(fp),-476(fp),r0 + bicl3 #0,r0,-476(fp) + cmpl -476(fp),-472(fp) + bgequ noname.162 + incl -480(fp) +noname.162: + movl -476(fp),r1 + movl -480(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.163 + incl r2 +noname.163: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.164 + incl r9 +noname.164: + + movzwl 10(r6),r2 + bicl3 #-65536,20(r7),r3 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-492(fp) + bicl3 #-65536,r2,-496(fp) + mull3 r0,-492(fp),-484(fp) + mull2 r3,-492(fp) + mull3 r3,-496(fp),-488(fp) + mull2 r0,-496(fp) + addl3 -484(fp),-488(fp),r0 + bicl3 #0,r0,-484(fp) + cmpl -484(fp),-488(fp) + bgequ noname.165 + addl2 #65536,-496(fp) +noname.165: + movzwl -482(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-496(fp) + bicl3 #-65536,-484(fp),r0 + ashl #16,r0,-488(fp) + addl3 -488(fp),-492(fp),r0 + bicl3 #0,r0,-492(fp) + cmpl -492(fp),-488(fp) + bgequ noname.166 + incl -496(fp) +noname.166: + movl -492(fp),r1 + movl -496(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.167 + incl r2 +noname.167: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.168 + incl r9 +noname.168: + + movzwl 14(r6),r2 + bicl3 #-65536,16(r7),r3 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-508(fp) + bicl3 #-65536,r2,-512(fp) + mull3 r0,-508(fp),-500(fp) + mull2 r3,-508(fp) + mull3 r3,-512(fp),-504(fp) 
+ mull2 r0,-512(fp) + addl3 -500(fp),-504(fp),r0 + bicl3 #0,r0,-500(fp) + cmpl -500(fp),-504(fp) + bgequ noname.169 + addl2 #65536,-512(fp) +noname.169: + movzwl -498(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-512(fp) + bicl3 #-65536,-500(fp),r0 + ashl #16,r0,-504(fp) + addl3 -504(fp),-508(fp),r0 + bicl3 #0,r0,-508(fp) + cmpl -508(fp),-504(fp) + bgequ noname.170 + incl -512(fp) +noname.170: + movl -508(fp),r1 + movl -512(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.171 + incl r2 +noname.171: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.172 + incl r9 +noname.172: + + movzwl 18(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r6),-524(fp) + bicl3 #-65536,r2,-528(fp) + mull3 r0,-524(fp),-516(fp) + mull2 r3,-524(fp) + mull3 r3,-528(fp),-520(fp) + mull2 r0,-528(fp) + addl3 -516(fp),-520(fp),r0 + bicl3 #0,r0,-516(fp) + cmpl -516(fp),-520(fp) + bgequ noname.173 + addl2 #65536,-528(fp) +noname.173: + movzwl -514(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-528(fp) + bicl3 #-65536,-516(fp),r0 + ashl #16,r0,-520(fp) + addl3 -520(fp),-524(fp),r0 + bicl3 #0,r0,-524(fp) + cmpl -524(fp),-520(fp) + bgequ noname.174 + incl -528(fp) +noname.174: + movl -524(fp),r1 + movl -528(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.175 + incl r2 +noname.175: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.176 + incl r9 +noname.176: + + movzwl 22(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,20(r6),-540(fp) + bicl3 #-65536,r2,-544(fp) + mull3 r0,-540(fp),-532(fp) + mull2 r3,-540(fp) + mull3 r3,-544(fp),-536(fp) + mull2 r0,-544(fp) + addl3 -532(fp),-536(fp),r0 + bicl3 #0,r0,-532(fp) + cmpl -532(fp),-536(fp) + bgequ noname.177 + addl2 #65536,-544(fp) +noname.177: + movzwl -530(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-544(fp) + bicl3 #-65536,-532(fp),r0 + ashl #16,r0,-536(fp) + addl3 -536(fp),-540(fp),r0 + bicl3 #0,r0,-540(fp) + cmpl -540(fp),-536(fp) + bgequ noname.178 + incl -544(fp) +noname.178: + movl -540(fp),r1 + movl -544(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.179 + incl r2 +noname.179: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.180 + incl r9 +noname.180: + + movzwl 26(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,24(r6),-556(fp) + bicl3 #-65536,r2,-560(fp) + mull3 r0,-556(fp),-548(fp) + mull2 r3,-556(fp) + mull3 r3,-560(fp),-552(fp) + mull2 r0,-560(fp) + addl3 -548(fp),-552(fp),r0 + bicl3 #0,r0,-548(fp) + cmpl -548(fp),-552(fp) + bgequ noname.181 + addl2 #65536,-560(fp) +noname.181: + movzwl -546(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-560(fp) + bicl3 #-65536,-548(fp),r0 + ashl #16,r0,-552(fp) + addl3 -552(fp),-556(fp),r0 + bicl3 #0,r0,-556(fp) + cmpl -556(fp),-552(fp) + bgequ noname.182 + incl -560(fp) +noname.182: + movl -556(fp),r1 + movl -560(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.183 + incl r2 +noname.183: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.184 + incl r9 +noname.184: + + movzwl 30(r6),r2 + bicl3 #-65536,(r7),r3 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,28(r6),-572(fp) + bicl3 #-65536,r2,-576(fp) + mull3 r0,-572(fp),-564(fp) + mull2 r3,-572(fp) + mull3 r3,-576(fp),-568(fp) + mull2 r0,-576(fp) + addl3 -564(fp),-568(fp),r0 + bicl3 #0,r0,-564(fp) + cmpl -564(fp),-568(fp) + bgequ noname.185 + addl2 #65536,-576(fp) +noname.185: + movzwl -562(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-576(fp) + bicl3 #-65536,-564(fp),r0 + ashl #16,r0,-568(fp) + addl3 
-568(fp),-572(fp),r0 + bicl3 #0,r0,-572(fp) + cmpl -572(fp),-568(fp) + bgequ noname.186 + incl -576(fp) +noname.186: + movl -572(fp),r1 + movl -576(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.187 + incl r2 +noname.187: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.188 + incl r9 +noname.188: + + movl r8,28(r11) + + clrl r8 + + movzwl 30(r6),r2 + bicl3 #-65536,4(r7),r3 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,28(r6),-588(fp) + bicl3 #-65536,r2,-592(fp) + mull3 r0,-588(fp),-580(fp) + mull2 r3,-588(fp) + mull3 r3,-592(fp),-584(fp) + mull2 r0,-592(fp) + addl3 -580(fp),-584(fp),r0 + bicl3 #0,r0,-580(fp) + cmpl -580(fp),-584(fp) + bgequ noname.189 + addl2 #65536,-592(fp) +noname.189: + movzwl -578(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-592(fp) + bicl3 #-65536,-580(fp),r0 + ashl #16,r0,-584(fp) + addl3 -584(fp),-588(fp),r0 + bicl3 #0,r0,-588(fp) + cmpl -588(fp),-584(fp) + bgequ noname.190 + incl -592(fp) +noname.190: + movl -588(fp),r1 + movl -592(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.191 + incl r2 +noname.191: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.192 + incl r8 +noname.192: + + movzwl 26(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,24(r6),-604(fp) + bicl3 #-65536,r2,-608(fp) + mull3 r0,-604(fp),-596(fp) + mull2 r3,-604(fp) + mull3 r3,-608(fp),-600(fp) + mull2 r0,-608(fp) + addl3 -596(fp),-600(fp),r0 + bicl3 #0,r0,-596(fp) + cmpl -596(fp),-600(fp) + bgequ noname.193 + addl2 #65536,-608(fp) +noname.193: + movzwl -594(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-608(fp) + bicl3 #-65536,-596(fp),r0 + ashl #16,r0,-600(fp) + addl3 -600(fp),-604(fp),r0 + bicl3 #0,r0,-604(fp) + cmpl -604(fp),-600(fp) + bgequ noname.194 + incl -608(fp) +noname.194: + movl -604(fp),r1 + movl -608(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.195 + incl r2 +noname.195: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.196 + incl r8 +noname.196: + + movzwl 22(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,20(r6),-620(fp) + bicl3 #-65536,r2,-624(fp) + mull3 r0,-620(fp),-612(fp) + mull2 r3,-620(fp) + mull3 r3,-624(fp),-616(fp) + mull2 r0,-624(fp) + addl3 -612(fp),-616(fp),r0 + bicl3 #0,r0,-612(fp) + cmpl -612(fp),-616(fp) + bgequ noname.197 + addl2 #65536,-624(fp) +noname.197: + movzwl -610(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-624(fp) + bicl3 #-65536,-612(fp),r0 + ashl #16,r0,-616(fp) + addl3 -616(fp),-620(fp),r0 + bicl3 #0,r0,-620(fp) + cmpl -620(fp),-616(fp) + bgequ noname.198 + incl -624(fp) +noname.198: + movl -620(fp),r1 + movl -624(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.199 + incl r2 +noname.199: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.200 + incl r8 +noname.200: + + movzwl 18(r6),r2 + bicl3 #-65536,16(r7),r3 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r6),-636(fp) + bicl3 #-65536,r2,-640(fp) + mull3 r0,-636(fp),-628(fp) + mull2 r3,-636(fp) + mull3 r3,-640(fp),-632(fp) + mull2 r0,-640(fp) + addl3 -628(fp),-632(fp),r0 + bicl3 #0,r0,-628(fp) + cmpl -628(fp),-632(fp) + bgequ noname.201 + addl2 #65536,-640(fp) +noname.201: + movzwl -626(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-640(fp) + bicl3 #-65536,-628(fp),r0 + ashl #16,r0,-632(fp) + addl3 -632(fp),-636(fp),r0 + bicl3 #0,r0,-636(fp) + cmpl -636(fp),-632(fp) + bgequ noname.202 + incl -640(fp) +noname.202: + movl -636(fp),r1 + movl -640(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.203 + incl r2 +noname.203: + addl2 
r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.204 + incl r8 +noname.204: + + movzwl 14(r6),r2 + bicl3 #-65536,20(r7),r3 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-652(fp) + bicl3 #-65536,r2,-656(fp) + mull3 r0,-652(fp),-644(fp) + mull2 r3,-652(fp) + mull3 r3,-656(fp),-648(fp) + mull2 r0,-656(fp) + addl3 -644(fp),-648(fp),r0 + bicl3 #0,r0,-644(fp) + cmpl -644(fp),-648(fp) + bgequ noname.205 + addl2 #65536,-656(fp) +noname.205: + movzwl -642(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-656(fp) + bicl3 #-65536,-644(fp),r0 + ashl #16,r0,-648(fp) + addl3 -648(fp),-652(fp),r0 + bicl3 #0,r0,-652(fp) + cmpl -652(fp),-648(fp) + bgequ noname.206 + incl -656(fp) +noname.206: + movl -652(fp),r1 + movl -656(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.207 + incl r2 +noname.207: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.208 + incl r8 +noname.208: + + movzwl 10(r6),r2 + bicl3 #-65536,24(r7),r3 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-668(fp) + bicl3 #-65536,r2,-672(fp) + mull3 r0,-668(fp),-660(fp) + mull2 r3,-668(fp) + mull3 r3,-672(fp),-664(fp) + mull2 r0,-672(fp) + addl3 -660(fp),-664(fp),r0 + bicl3 #0,r0,-660(fp) + cmpl -660(fp),-664(fp) + bgequ noname.209 + addl2 #65536,-672(fp) +noname.209: + movzwl -658(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-672(fp) + bicl3 #-65536,-660(fp),r0 + ashl #16,r0,-664(fp) + addl3 -664(fp),-668(fp),r0 + bicl3 #0,r0,-668(fp) + cmpl -668(fp),-664(fp) + bgequ noname.210 + incl -672(fp) +noname.210: + movl -668(fp),r1 + movl -672(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.211 + incl r2 +noname.211: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.212 + incl r8 +noname.212: + + movzwl 6(r6),r2 + bicl3 #-65536,28(r7),r3 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-684(fp) + bicl3 #-65536,r2,-688(fp) + mull3 r0,-684(fp),-676(fp) + mull2 r3,-684(fp) + mull3 r3,-688(fp),-680(fp) + mull2 r0,-688(fp) + addl3 -676(fp),-680(fp),r0 + bicl3 #0,r0,-676(fp) + cmpl -676(fp),-680(fp) + bgequ noname.213 + addl2 #65536,-688(fp) +noname.213: + movzwl -674(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-688(fp) + bicl3 #-65536,-676(fp),r0 + ashl #16,r0,-680(fp) + addl3 -680(fp),-684(fp),r0 + bicl3 #0,r0,-684(fp) + cmpl -684(fp),-680(fp) + bgequ noname.214 + incl -688(fp) +noname.214: + movl -684(fp),r1 + movl -688(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.215 + incl r2 +noname.215: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.216 + incl r8 +noname.216: + + movl r10,32(r11) + + clrl r10 + + movzwl 10(r6),r2 + bicl3 #-65536,28(r7),r3 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r6),-700(fp) + bicl3 #-65536,r2,-704(fp) + mull3 r0,-700(fp),-692(fp) + mull2 r3,-700(fp) + mull3 r3,-704(fp),-696(fp) + mull2 r0,-704(fp) + addl3 -692(fp),-696(fp),r0 + bicl3 #0,r0,-692(fp) + cmpl -692(fp),-696(fp) + bgequ noname.217 + addl2 #65536,-704(fp) +noname.217: + movzwl -690(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-704(fp) + bicl3 #-65536,-692(fp),r0 + ashl #16,r0,-696(fp) + addl3 -696(fp),-700(fp),r0 + bicl3 #0,r0,-700(fp) + cmpl -700(fp),-696(fp) + bgequ noname.218 + incl -704(fp) +noname.218: + movl -700(fp),r1 + movl -704(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.219 + incl r2 +noname.219: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.220 + incl r10 +noname.220: + + movzwl 14(r6),r2 + bicl3 #-65536,24(r7),r3 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-716(fp) + bicl3 #-65536,r2,-720(fp) + mull3 r0,-716(fp),-708(fp) + 
mull2 r3,-716(fp) + mull3 r3,-720(fp),-712(fp) + mull2 r0,-720(fp) + addl3 -708(fp),-712(fp),r0 + bicl3 #0,r0,-708(fp) + cmpl -708(fp),-712(fp) + bgequ noname.221 + addl2 #65536,-720(fp) +noname.221: + movzwl -706(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-720(fp) + bicl3 #-65536,-708(fp),r0 + ashl #16,r0,-712(fp) + addl3 -712(fp),-716(fp),r0 + bicl3 #0,r0,-716(fp) + cmpl -716(fp),-712(fp) + bgequ noname.222 + incl -720(fp) +noname.222: + movl -716(fp),r1 + movl -720(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.223 + incl r2 +noname.223: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.224 + incl r10 +noname.224: + + movzwl 18(r6),r2 + bicl3 #-65536,20(r7),r3 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r6),-732(fp) + bicl3 #-65536,r2,-736(fp) + mull3 r0,-732(fp),-724(fp) + mull2 r3,-732(fp) + mull3 r3,-736(fp),-728(fp) + mull2 r0,-736(fp) + addl3 -724(fp),-728(fp),r0 + bicl3 #0,r0,-724(fp) + cmpl -724(fp),-728(fp) + bgequ noname.225 + addl2 #65536,-736(fp) +noname.225: + movzwl -722(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-736(fp) + bicl3 #-65536,-724(fp),r0 + ashl #16,r0,-728(fp) + addl3 -728(fp),-732(fp),r0 + bicl3 #0,r0,-732(fp) + cmpl -732(fp),-728(fp) + bgequ noname.226 + incl -736(fp) +noname.226: + movl -732(fp),r1 + movl -736(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.227 + incl r2 +noname.227: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.228 + incl r10 +noname.228: + + movzwl 22(r6),r2 + bicl3 #-65536,16(r7),r3 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,20(r6),-748(fp) + bicl3 #-65536,r2,-752(fp) + mull3 r0,-748(fp),-740(fp) + mull2 r3,-748(fp) + mull3 r3,-752(fp),-744(fp) + mull2 r0,-752(fp) + addl3 -740(fp),-744(fp),r0 + bicl3 #0,r0,-740(fp) + cmpl -740(fp),-744(fp) + bgequ noname.229 + addl2 #65536,-752(fp) +noname.229: + movzwl -738(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-752(fp) + bicl3 #-65536,-740(fp),r0 + ashl #16,r0,-744(fp) + addl3 -744(fp),-748(fp),r0 + bicl3 #0,r0,-748(fp) + cmpl -748(fp),-744(fp) + bgequ noname.230 + incl -752(fp) +noname.230: + movl -748(fp),r1 + movl -752(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.231 + incl r2 +noname.231: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.232 + incl r10 +noname.232: + + movzwl 26(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,24(r6),-764(fp) + bicl3 #-65536,r2,-768(fp) + mull3 r0,-764(fp),-756(fp) + mull2 r3,-764(fp) + mull3 r3,-768(fp),-760(fp) + mull2 r0,-768(fp) + addl3 -756(fp),-760(fp),r0 + bicl3 #0,r0,-756(fp) + cmpl -756(fp),-760(fp) + bgequ noname.233 + addl2 #65536,-768(fp) +noname.233: + movzwl -754(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-768(fp) + bicl3 #-65536,-756(fp),r0 + ashl #16,r0,-760(fp) + addl3 -760(fp),-764(fp),r0 + bicl3 #0,r0,-764(fp) + cmpl -764(fp),-760(fp) + bgequ noname.234 + incl -768(fp) +noname.234: + movl -764(fp),r1 + movl -768(fp),r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.235 + incl r2 +noname.235: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.236 + incl r10 +noname.236: + + bicl3 #-65536,28(r6),r3 + movzwl 30(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,8(r7),r2 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-772(fp) + mull2 r2,r5 + mull3 r2,r4,-776(fp) + mull2 r0,r4 + addl3 -772(fp),-776(fp),r0 + bicl3 #0,r0,-772(fp) + cmpl -772(fp),-776(fp) + bgequ noname.237 + addl2 #65536,r4 +noname.237: + movzwl -770(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-772(fp),r0 + ashl #16,r0,-776(fp) + 
addl2 -776(fp),r5 + bicl2 #0,r5 + cmpl r5,-776(fp) + bgequ noname.238 + incl r4 +noname.238: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.239 + incl r2 +noname.239: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.240 + incl r10 +noname.240: + + movl r9,36(r11) + + clrl r9 + + bicl3 #-65536,28(r6),r3 + movzwl 30(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r7),r2 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-780(fp) + mull2 r2,r5 + mull3 r2,r4,-784(fp) + mull2 r0,r4 + addl3 -780(fp),-784(fp),r0 + bicl3 #0,r0,-780(fp) + cmpl -780(fp),-784(fp) + bgequ noname.241 + addl2 #65536,r4 +noname.241: + movzwl -778(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-780(fp),r0 + ashl #16,r0,-784(fp) + addl2 -784(fp),r5 + bicl2 #0,r5 + cmpl r5,-784(fp) + bgequ noname.242 + incl r4 +noname.242: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.243 + incl r2 +noname.243: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.244 + incl r9 +noname.244: + + bicl3 #-65536,24(r6),r3 + movzwl 26(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r7),r2 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-788(fp) + mull2 r2,r5 + mull3 r2,r4,-792(fp) + mull2 r0,r4 + addl3 -788(fp),-792(fp),r0 + bicl3 #0,r0,-788(fp) + cmpl -788(fp),-792(fp) + bgequ noname.245 + addl2 #65536,r4 +noname.245: + movzwl -786(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-788(fp),r0 + ashl #16,r0,-792(fp) + addl2 -792(fp),r5 + bicl2 #0,r5 + cmpl r5,-792(fp) + bgequ noname.246 + incl r4 +noname.246: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.247 + incl r2 +noname.247: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.248 + incl r9 +noname.248: + + bicl3 #-65536,20(r6),r3 + movzwl 22(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r7),r2 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-796(fp) + mull2 r2,r5 + mull3 r2,r4,-800(fp) + mull2 r0,r4 + addl3 -796(fp),-800(fp),r0 + bicl3 #0,r0,-796(fp) + cmpl -796(fp),-800(fp) + bgequ noname.249 + addl2 #65536,r4 +noname.249: + movzwl -794(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-796(fp),r0 + ashl #16,r0,-800(fp) + addl2 -800(fp),r5 + bicl2 #0,r5 + cmpl r5,-800(fp) + bgequ noname.250 + incl r4 +noname.250: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.251 + incl r2 +noname.251: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.252 + incl r9 +noname.252: + + bicl3 #-65536,16(r6),r3 + movzwl 18(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,24(r7),r2 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-804(fp) + mull2 r2,r5 + mull3 r2,r4,-808(fp) + mull2 r0,r4 + addl3 -804(fp),-808(fp),r0 + bicl3 #0,r0,-804(fp) + cmpl -804(fp),-808(fp) + bgequ noname.253 + addl2 #65536,r4 +noname.253: + movzwl -802(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-804(fp),r0 + ashl #16,r0,-808(fp) + addl2 -808(fp),r5 + bicl2 #0,r5 + cmpl r5,-808(fp) + bgequ noname.254 + incl r4 +noname.254: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.255 + incl r2 +noname.255: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.256 + incl r9 +noname.256: + + bicl3 #-65536,12(r6),r3 + movzwl 14(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,28(r7),r2 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-812(fp) + mull2 r2,r5 + mull3 r2,r4,-816(fp) + mull2 
r0,r4 + addl3 -812(fp),-816(fp),r0 + bicl3 #0,r0,-812(fp) + cmpl -812(fp),-816(fp) + bgequ noname.257 + addl2 #65536,r4 +noname.257: + movzwl -810(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-812(fp),r0 + ashl #16,r0,-816(fp) + addl2 -816(fp),r5 + bicl2 #0,r5 + cmpl r5,-816(fp) + bgequ noname.258 + incl r4 +noname.258: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.259 + incl r2 +noname.259: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.260 + incl r9 +noname.260: + + movl r8,40(r11) + + clrl r8 + + bicl3 #-65536,16(r6),r3 + movzwl 18(r6),r2 + bicl3 #-65536,28(r7),r1 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + movl r3,r4 + bicl3 #-65536,r2,-828(fp) + mull3 r0,r4,-820(fp) + mull2 r1,r4 + mull3 r1,-828(fp),-824(fp) + mull2 r0,-828(fp) + addl3 -820(fp),-824(fp),r0 + bicl3 #0,r0,-820(fp) + cmpl -820(fp),-824(fp) + bgequ noname.261 + addl2 #65536,-828(fp) +noname.261: + movzwl -818(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-828(fp) + bicl3 #-65536,-820(fp),r0 + ashl #16,r0,-824(fp) + addl2 -824(fp),r4 + bicl2 #0,r4 + cmpl r4,-824(fp) + bgequ noname.262 + incl -828(fp) +noname.262: + movl r4,r1 + movl -828(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.263 + incl r2 +noname.263: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.264 + incl r8 +noname.264: + + movzwl 22(r6),r2 + bicl3 #-65536,24(r7),r3 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,20(r6),-840(fp) + bicl3 #-65536,r2,-844(fp) + mull3 r0,-840(fp),-832(fp) + mull2 r3,-840(fp) + mull3 r3,-844(fp),-836(fp) + mull2 r0,-844(fp) + addl3 -832(fp),-836(fp),r0 + bicl3 #0,r0,-832(fp) + cmpl -832(fp),-836(fp) + bgequ noname.265 + addl2 #65536,-844(fp) +noname.265: + movzwl -830(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-844(fp) + bicl3 #-65536,-832(fp),r0 + ashl #16,r0,-836(fp) + addl3 -836(fp),-840(fp),r0 + bicl3 #0,r0,-840(fp) + cmpl -840(fp),-836(fp) + bgequ noname.266 + incl -844(fp) +noname.266: + movl -840(fp),r1 + movl -844(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.267 + incl r2 +noname.267: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.268 + incl r8 +noname.268: + + bicl3 #-65536,24(r6),r3 + movzwl 26(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r7),r2 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-848(fp) + mull2 r2,r5 + mull3 r2,r4,-852(fp) + mull2 r0,r4 + addl3 -848(fp),-852(fp),r0 + bicl3 #0,r0,-848(fp) + cmpl -848(fp),-852(fp) + bgequ noname.269 + addl2 #65536,r4 +noname.269: + movzwl -846(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-848(fp),r0 + ashl #16,r0,-852(fp) + addl2 -852(fp),r5 + bicl2 #0,r5 + cmpl r5,-852(fp) + bgequ noname.270 + incl r4 +noname.270: + movl r5,r1 + movl r4,r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.271 + incl r2 +noname.271: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.272 + incl r8 +noname.272: + + bicl3 #-65536,28(r6),r3 + movzwl 30(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r7),r2 + movzwl 18(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-856(fp) + mull2 r2,r5 + mull3 r2,r4,-860(fp) + mull2 r0,r4 + addl3 -856(fp),-860(fp),r0 + bicl3 #0,r0,-856(fp) + cmpl -856(fp),-860(fp) + bgequ noname.273 + addl2 #65536,r4 +noname.273: + movzwl -854(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-856(fp),r0 + ashl #16,r0,-860(fp) + addl2 -860(fp),r5 + bicl2 #0,r5 + cmpl r5,-860(fp) + bgequ noname.274 + incl r4 +noname.274: + movl r5,r1 + movl r4,r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ 
noname.275 + incl r2 +noname.275: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.276 + incl r8 +noname.276: + + movl r10,44(r11) + + clrl r10 + + bicl3 #-65536,28(r6),r3 + movzwl 30(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r7),r2 + movzwl 22(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-864(fp) + mull2 r2,r5 + mull3 r2,r4,-868(fp) + mull2 r0,r4 + addl3 -864(fp),-868(fp),r0 + bicl3 #0,r0,-864(fp) + cmpl -864(fp),-868(fp) + bgequ noname.277 + addl2 #65536,r4 +noname.277: + movzwl -862(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-864(fp),r0 + ashl #16,r0,-868(fp) + addl2 -868(fp),r5 + bicl2 #0,r5 + cmpl r5,-868(fp) + bgequ noname.278 + incl r4 +noname.278: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.279 + incl r2 +noname.279: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.280 + incl r10 +noname.280: + + bicl3 #-65536,24(r6),r3 + movzwl 26(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,24(r7),r2 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-872(fp) + mull2 r2,r5 + mull3 r2,r4,-876(fp) + mull2 r0,r4 + addl3 -872(fp),-876(fp),r0 + bicl3 #0,r0,-872(fp) + cmpl -872(fp),-876(fp) + bgequ noname.281 + addl2 #65536,r4 +noname.281: + movzwl -870(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-872(fp),r0 + ashl #16,r0,-876(fp) + addl2 -876(fp),r5 + bicl2 #0,r5 + cmpl r5,-876(fp) + bgequ noname.282 + incl r4 +noname.282: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.283 + incl r2 +noname.283: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.284 + incl r10 +noname.284: + + bicl3 #-65536,20(r6),r3 + movzwl 22(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,28(r7),r2 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-880(fp) + mull2 r2,r5 + mull3 r2,r4,-884(fp) + mull2 r0,r4 + addl3 -880(fp),-884(fp),r0 + bicl3 #0,r0,-880(fp) + cmpl -880(fp),-884(fp) + bgequ noname.285 + addl2 #65536,r4 +noname.285: + movzwl -878(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-880(fp),r0 + ashl #16,r0,-884(fp) + addl2 -884(fp),r5 + bicl2 #0,r5 + cmpl r5,-884(fp) + bgequ noname.286 + incl r4 +noname.286: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.287 + incl r2 +noname.287: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.288 + incl r10 +noname.288: + + movl r9,48(r11) + + clrl r9 + + bicl3 #-65536,24(r6),r3 + movzwl 26(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,28(r7),r2 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-888(fp) + mull2 r2,r5 + mull3 r2,r4,-892(fp) + mull2 r0,r4 + addl3 -888(fp),-892(fp),r0 + bicl3 #0,r0,-888(fp) + cmpl -888(fp),-892(fp) + bgequ noname.289 + addl2 #65536,r4 +noname.289: + movzwl -886(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-888(fp),r0 + ashl #16,r0,-892(fp) + addl2 -892(fp),r5 + bicl2 #0,r5 + cmpl r5,-892(fp) + bgequ noname.290 + incl r4 +noname.290: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.291 + incl r2 +noname.291: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.292 + incl r9 +noname.292: + + movzwl 30(r6),r2 + bicl3 #-65536,24(r7),r3 + movzwl 26(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,28(r6),-904(fp) + bicl3 #-65536,r2,-908(fp) + mull3 r0,-904(fp),-896(fp) + mull2 r3,-904(fp) + mull3 r3,-908(fp),-900(fp) + mull2 r0,-908(fp) + addl3 -896(fp),-900(fp),r0 + bicl3 #0,r0,-896(fp) + cmpl -896(fp),-900(fp) + bgequ noname.293 + addl2 #65536,-908(fp) 
+noname.293: + movzwl -894(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-908(fp) + bicl3 #-65536,-896(fp),r0 + ashl #16,r0,-900(fp) + addl3 -900(fp),-904(fp),r0 + bicl3 #0,r0,-904(fp) + cmpl -904(fp),-900(fp) + bgequ noname.294 + incl -908(fp) +noname.294: + movl -904(fp),r1 + movl -908(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.295 + incl r2 +noname.295: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.296 + incl r9 +noname.296: + + movl r8,52(r11) + + clrl r8 + + movzwl 30(r6),r2 + bicl3 #-65536,28(r7),r3 + movzwl 30(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,28(r6),-920(fp) + bicl3 #-65536,r2,-924(fp) + mull3 r0,-920(fp),-912(fp) + mull2 r3,-920(fp) + mull3 r3,-924(fp),-916(fp) + mull2 r0,-924(fp) + addl3 -912(fp),-916(fp),r0 + bicl3 #0,r0,-912(fp) + cmpl -912(fp),-916(fp) + bgequ noname.297 + addl2 #65536,-924(fp) +noname.297: + movzwl -910(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-924(fp) + bicl3 #-65536,-912(fp),r0 + ashl #16,r0,-916(fp) + addl3 -916(fp),-920(fp),r0 + bicl3 #0,r0,-920(fp) + cmpl -920(fp),-916(fp) + bgequ noname.298 + incl -924(fp) +noname.298: + movl -920(fp),r1 + movl -924(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.299 + incl r2 +noname.299: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.300 + incl r8 +noname.300: + + movl r10,56(r11) + + movl r9,60(r11) + + ret + + + +;r=4 ;(AP) +;a=8 ;(AP) +;b=12 ;(AP) +;n=16 ;(AP) n by value (input) + + .psect code,nowrt + +.entry BN_MUL_COMBA4,^m<r2,r3,r4,r5,r6,r7,r8,r9,r10,r11> + movab -156(sp),sp + + clrq r9 + + clrl r8 + + movl 8(ap),r6 + bicl3 #-65536,(r6),r3 + movzwl 2(r6),r2 + bicl2 #-65536,r2 + movl 12(ap),r7 + bicl3 #-65536,(r7),r1 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r2,r4 + mull3 r0,r5,-4(fp) + mull2 r1,r5 + mull3 r1,r4,-8(fp) + mull2 r0,r4 + addl3 -4(fp),-8(fp),r0 + bicl3 #0,r0,-4(fp) + cmpl -4(fp),-8(fp) + bgequ noname.303 + addl2 #65536,r4 +noname.303: + movzwl -2(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-4(fp),r0 + ashl #16,r0,-8(fp) + addl2 -8(fp),r5 + bicl2 #0,r5 + cmpl r5,-8(fp) + bgequ noname.304 + incl r4 +noname.304: + movl r5,r1 + movl r4,r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.305 + incl r2 +noname.305: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.306 + incl r8 +noname.306: + + movl 4(ap),r11 + movl r10,(r11) + + clrl r10 + + bicl3 #-65536,(r6),r3 + movzwl 2(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r7),r2 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-12(fp) + mull2 r2,r5 + mull3 r2,r4,-16(fp) + mull2 r0,r4 + addl3 -12(fp),-16(fp),r0 + bicl3 #0,r0,-12(fp) + cmpl -12(fp),-16(fp) + bgequ noname.307 + addl2 #65536,r4 +noname.307: + movzwl -10(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-12(fp),r0 + ashl #16,r0,-16(fp) + addl2 -16(fp),r5 + bicl2 #0,r5 + cmpl r5,-16(fp) + bgequ noname.308 + incl r4 +noname.308: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.309 + incl r2 +noname.309: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.310 + incl r10 +noname.310: + + bicl3 #-65536,4(r6),r3 + movzwl 6(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,(r7),r2 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-20(fp) + mull2 r2,r5 + mull3 r2,r4,-24(fp) + mull2 r0,r4 + addl3 -20(fp),-24(fp),r0 + bicl3 #0,r0,-20(fp) + cmpl -20(fp),-24(fp) + bgequ noname.311 + addl2 #65536,r4 +noname.311: + movzwl -18(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-20(fp),r0 + ashl #16,r0,-24(fp) + addl2 
-24(fp),r5 + bicl2 #0,r5 + cmpl r5,-24(fp) + bgequ noname.312 + incl r4 +noname.312: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.313 + incl r2 +noname.313: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.314 + incl r10 +noname.314: + + movl r9,4(r11) + + clrl r9 + + bicl3 #-65536,8(r6),r3 + movzwl 10(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,(r7),r2 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-28(fp) + mull2 r2,r5 + mull3 r2,r4,-32(fp) + mull2 r0,r4 + addl3 -28(fp),-32(fp),r0 + bicl3 #0,r0,-28(fp) + cmpl -28(fp),-32(fp) + bgequ noname.315 + addl2 #65536,r4 +noname.315: + movzwl -26(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-28(fp),r0 + ashl #16,r0,-32(fp) + addl2 -32(fp),r5 + bicl2 #0,r5 + cmpl r5,-32(fp) + bgequ noname.316 + incl r4 +noname.316: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.317 + incl r2 +noname.317: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.318 + incl r9 +noname.318: + + bicl3 #-65536,4(r6),r3 + movzwl 6(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r7),r2 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-36(fp) + mull2 r2,r5 + mull3 r2,r4,-40(fp) + mull2 r0,r4 + addl3 -36(fp),-40(fp),r0 + bicl3 #0,r0,-36(fp) + cmpl -36(fp),-40(fp) + bgequ noname.319 + addl2 #65536,r4 +noname.319: + movzwl -34(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-36(fp),r0 + ashl #16,r0,-40(fp) + addl2 -40(fp),r5 + bicl2 #0,r5 + cmpl r5,-40(fp) + bgequ noname.320 + incl r4 +noname.320: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.321 + incl r2 +noname.321: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.322 + incl r9 +noname.322: + + bicl3 #-65536,(r6),r3 + movzwl 2(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,8(r7),r2 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-44(fp) + mull2 r2,r5 + mull3 r2,r4,-48(fp) + mull2 r0,r4 + addl3 -44(fp),-48(fp),r0 + bicl3 #0,r0,-44(fp) + cmpl -44(fp),-48(fp) + bgequ noname.323 + addl2 #65536,r4 +noname.323: + movzwl -42(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-44(fp),r0 + ashl #16,r0,-48(fp) + addl2 -48(fp),r5 + bicl2 #0,r5 + cmpl r5,-48(fp) + bgequ noname.324 + incl r4 +noname.324: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.325 + incl r2 +noname.325: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.326 + incl r9 +noname.326: + + movl r8,8(r11) + + clrl r8 + + bicl3 #-65536,(r6),r3 + movzwl 2(r6),r2 + bicl3 #-65536,12(r7),r1 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + movl r3,r4 + bicl3 #-65536,r2,-60(fp) + mull3 r0,r4,-52(fp) + mull2 r1,r4 + mull3 r1,-60(fp),-56(fp) + mull2 r0,-60(fp) + addl3 -52(fp),-56(fp),r0 + bicl3 #0,r0,-52(fp) + cmpl -52(fp),-56(fp) + bgequ noname.327 + addl2 #65536,-60(fp) +noname.327: + movzwl -50(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-60(fp) + bicl3 #-65536,-52(fp),r0 + ashl #16,r0,-56(fp) + addl2 -56(fp),r4 + bicl2 #0,r4 + cmpl r4,-56(fp) + bgequ noname.328 + incl -60(fp) +noname.328: + movl r4,r1 + movl -60(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.329 + incl r2 +noname.329: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.330 + incl r8 +noname.330: + + movzwl 6(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r6),-72(fp) + bicl3 #-65536,r2,-76(fp) + mull3 r0,-72(fp),-64(fp) + mull2 r3,-72(fp) + mull3 r3,-76(fp),-68(fp) + mull2 r0,-76(fp) + addl3 
-64(fp),-68(fp),r0 + bicl3 #0,r0,-64(fp) + cmpl -64(fp),-68(fp) + bgequ noname.331 + addl2 #65536,-76(fp) +noname.331: + movzwl -62(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-76(fp) + bicl3 #-65536,-64(fp),r0 + ashl #16,r0,-68(fp) + addl3 -68(fp),-72(fp),r0 + bicl3 #0,r0,-72(fp) + cmpl -72(fp),-68(fp) + bgequ noname.332 + incl -76(fp) +noname.332: + movl -72(fp),r1 + movl -76(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.333 + incl r2 +noname.333: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.334 + incl r8 +noname.334: + + bicl3 #-65536,8(r6),r3 + movzwl 10(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r7),r2 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-80(fp) + mull2 r2,r5 + mull3 r2,r4,-84(fp) + mull2 r0,r4 + addl3 -80(fp),-84(fp),r0 + bicl3 #0,r0,-80(fp) + cmpl -80(fp),-84(fp) + bgequ noname.335 + addl2 #65536,r4 +noname.335: + movzwl -78(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-80(fp),r0 + ashl #16,r0,-84(fp) + addl2 -84(fp),r5 + bicl2 #0,r5 + cmpl r5,-84(fp) + bgequ noname.336 + incl r4 +noname.336: + movl r5,r1 + movl r4,r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.337 + incl r2 +noname.337: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.338 + incl r8 +noname.338: + + bicl3 #-65536,12(r6),r3 + movzwl 14(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,(r7),r2 + movzwl 2(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-88(fp) + mull2 r2,r5 + mull3 r2,r4,-92(fp) + mull2 r0,r4 + addl3 -88(fp),-92(fp),r0 + bicl3 #0,r0,-88(fp) + cmpl -88(fp),-92(fp) + bgequ noname.339 + addl2 #65536,r4 +noname.339: + movzwl -86(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-88(fp),r0 + ashl #16,r0,-92(fp) + addl2 -92(fp),r5 + bicl2 #0,r5 + cmpl r5,-92(fp) + bgequ noname.340 + incl r4 +noname.340: + movl r5,r1 + movl r4,r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.341 + incl r2 +noname.341: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.342 + incl r8 +noname.342: + + movl r10,12(r11) + + clrl r10 + + bicl3 #-65536,12(r6),r3 + movzwl 14(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r7),r2 + movzwl 6(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-96(fp) + mull2 r2,r5 + mull3 r2,r4,-100(fp) + mull2 r0,r4 + addl3 -96(fp),-100(fp),r0 + bicl3 #0,r0,-96(fp) + cmpl -96(fp),-100(fp) + bgequ noname.343 + addl2 #65536,r4 +noname.343: + movzwl -94(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-96(fp),r0 + ashl #16,r0,-100(fp) + addl2 -100(fp),r5 + bicl2 #0,r5 + cmpl r5,-100(fp) + bgequ noname.344 + incl r4 +noname.344: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.345 + incl r2 +noname.345: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.346 + incl r10 +noname.346: + + bicl3 #-65536,8(r6),r3 + movzwl 10(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,8(r7),r2 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-104(fp) + mull2 r2,r5 + mull3 r2,r4,-108(fp) + mull2 r0,r4 + addl3 -104(fp),-108(fp),r0 + bicl3 #0,r0,-104(fp) + cmpl -104(fp),-108(fp) + bgequ noname.347 + addl2 #65536,r4 +noname.347: + movzwl -102(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-104(fp),r0 + ashl #16,r0,-108(fp) + addl2 -108(fp),r5 + bicl2 #0,r5 + cmpl r5,-108(fp) + bgequ noname.348 + incl r4 +noname.348: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.349 + incl r2 +noname.349: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.350 + incl r10 +noname.350: + + 
bicl3 #-65536,4(r6),r3 + movzwl 6(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r7),r2 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-112(fp) + mull2 r2,r5 + mull3 r2,r4,-116(fp) + mull2 r0,r4 + addl3 -112(fp),-116(fp),r0 + bicl3 #0,r0,-112(fp) + cmpl -112(fp),-116(fp) + bgequ noname.351 + addl2 #65536,r4 +noname.351: + movzwl -110(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-112(fp),r0 + ashl #16,r0,-116(fp) + addl2 -116(fp),r5 + bicl2 #0,r5 + cmpl r5,-116(fp) + bgequ noname.352 + incl r4 +noname.352: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.353 + incl r2 +noname.353: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.354 + incl r10 +noname.354: + + movl r9,16(r11) + + clrl r9 + + bicl3 #-65536,8(r6),r3 + movzwl 10(r6),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r7),r2 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-120(fp) + mull2 r2,r5 + mull3 r2,r4,-124(fp) + mull2 r0,r4 + addl3 -120(fp),-124(fp),r0 + bicl3 #0,r0,-120(fp) + cmpl -120(fp),-124(fp) + bgequ noname.355 + addl2 #65536,r4 +noname.355: + movzwl -118(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-120(fp),r0 + ashl #16,r0,-124(fp) + addl2 -124(fp),r5 + bicl2 #0,r5 + cmpl r5,-124(fp) + bgequ noname.356 + incl r4 +noname.356: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.357 + incl r2 +noname.357: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.358 + incl r9 +noname.358: + + movzwl 14(r6),r2 + bicl3 #-65536,8(r7),r3 + movzwl 10(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-136(fp) + bicl3 #-65536,r2,-140(fp) + mull3 r0,-136(fp),-128(fp) + mull2 r3,-136(fp) + mull3 r3,-140(fp),-132(fp) + mull2 r0,-140(fp) + addl3 -128(fp),-132(fp),r0 + bicl3 #0,r0,-128(fp) + cmpl -128(fp),-132(fp) + bgequ noname.359 + addl2 #65536,-140(fp) +noname.359: + movzwl -126(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-140(fp) + bicl3 #-65536,-128(fp),r0 + ashl #16,r0,-132(fp) + addl3 -132(fp),-136(fp),r0 + bicl3 #0,r0,-136(fp) + cmpl -136(fp),-132(fp) + bgequ noname.360 + incl -140(fp) +noname.360: + movl -136(fp),r1 + movl -140(fp),r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.361 + incl r2 +noname.361: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.362 + incl r9 +noname.362: + + movl r8,20(r11) + + clrl r8 + + movzwl 14(r6),r2 + bicl3 #-65536,12(r7),r3 + movzwl 14(r7),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r6),-152(fp) + bicl3 #-65536,r2,-156(fp) + mull3 r0,-152(fp),-144(fp) + mull2 r3,-152(fp) + mull3 r3,-156(fp),-148(fp) + mull2 r0,-156(fp) + addl3 -144(fp),-148(fp),r0 + bicl3 #0,r0,-144(fp) + cmpl -144(fp),-148(fp) + bgequ noname.363 + addl2 #65536,-156(fp) +noname.363: + movzwl -142(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-156(fp) + bicl3 #-65536,-144(fp),r0 + ashl #16,r0,-148(fp) + addl3 -148(fp),-152(fp),r0 + bicl3 #0,r0,-152(fp) + cmpl -152(fp),-148(fp) + bgequ noname.364 + incl -156(fp) +noname.364: + movl -152(fp),r1 + movl -156(fp),r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.365 + incl r2 +noname.365: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.366 + incl r8 +noname.366: + + movl r10,24(r11) + + movl r9,28(r11) + + ret + + + +;r=4 ;(AP) +;a=8 ;(AP) +;b=12 ;(AP) +;n=16 ;(AP) n by value (input) + + .psect code,nowrt + +.entry BN_SQR_COMBA8,^m<r2,r3,r4,r5,r6,r7,r8,r9> + movab -444(sp),sp + + clrq r8 + + clrl r7 + + movl 8(ap),r4 + movl (r4),r3 + bicl3 #-65536,r3,-4(fp) + extzv #16,#16,r3,r0 + bicl3 #-65536,r0,r3 + movl 
-4(fp),r0 + mull3 r0,r3,-8(fp) + mull3 r0,r0,-4(fp) + mull2 r3,r3 + bicl3 #32767,-8(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r3 + bicl3 #-65536,-8(fp),r0 + ashl #17,r0,-8(fp) + addl3 -4(fp),-8(fp),r0 + bicl3 #0,r0,-4(fp) + cmpl -4(fp),-8(fp) + bgequ noname.369 + incl r3 +noname.369: + movl -4(fp),r1 + movl r3,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.370 + incl r2 +noname.370: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.371 + incl r7 +noname.371: + + movl r9,@4(ap) + + clrl r9 + + movzwl 6(r4),r2 + bicl3 #-65536,(r4),r3 + movzwl 2(r4),r0 + bicl2 #-65536,r0 + bicl3 #-65536,4(r4),-20(fp) + bicl3 #-65536,r2,-24(fp) + mull3 r0,-20(fp),-12(fp) + mull2 r3,-20(fp) + mull3 r3,-24(fp),-16(fp) + mull2 r0,-24(fp) + addl3 -12(fp),-16(fp),r0 + bicl3 #0,r0,-12(fp) + cmpl -12(fp),-16(fp) + bgequ noname.372 + addl2 #65536,-24(fp) +noname.372: + movzwl -10(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-24(fp) + bicl3 #-65536,-12(fp),r0 + ashl #16,r0,-16(fp) + addl3 -16(fp),-20(fp),r0 + bicl3 #0,r0,-20(fp) + cmpl -20(fp),-16(fp) + bgequ noname.373 + incl -24(fp) +noname.373: + movl -20(fp),r3 + movl -24(fp),r2 + bbc #31,r2,noname.374 + incl r9 +noname.374: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.375 + incl r2 +noname.375: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.376 + incl r2 + bicl3 #0,r2,r0 + bneq noname.376 + incl r9 +noname.376: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.377 + incl r9 +noname.377: + + movl 4(ap),r0 + movl r8,4(r0) + + clrl r8 + + movl 8(ap),r4 + movl 4(r4),r3 + bicl3 #-65536,r3,-28(fp) + extzv #16,#16,r3,r0 + bicl3 #-65536,r0,r3 + movl -28(fp),r0 + mull3 r0,r3,-32(fp) + mull3 r0,r0,-28(fp) + mull2 r3,r3 + bicl3 #32767,-32(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r3 + bicl3 #-65536,-32(fp),r0 + ashl #17,r0,-32(fp) + addl3 -28(fp),-32(fp),r0 + bicl3 #0,r0,-28(fp) + cmpl -28(fp),-32(fp) + bgequ noname.378 + incl r3 +noname.378: + movl -28(fp),r1 + movl r3,r2 + addl2 r1,r7 + bicl2 #0,r7 + cmpl r7,r1 + bgequ noname.379 + incl r2 +noname.379: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.380 + incl r8 +noname.380: + + movzwl 10(r4),r2 + bicl3 #-65536,(r4),r3 + movzwl 2(r4),r0 + bicl2 #-65536,r0 + bicl3 #-65536,8(r4),-44(fp) + bicl3 #-65536,r2,-48(fp) + mull3 r0,-44(fp),-36(fp) + mull2 r3,-44(fp) + mull3 r3,-48(fp),-40(fp) + mull2 r0,-48(fp) + addl3 -36(fp),-40(fp),r0 + bicl3 #0,r0,-36(fp) + cmpl -36(fp),-40(fp) + bgequ noname.381 + addl2 #65536,-48(fp) +noname.381: + movzwl -34(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-48(fp) + bicl3 #-65536,-36(fp),r0 + ashl #16,r0,-40(fp) + addl3 -40(fp),-44(fp),r0 + bicl3 #0,r0,-44(fp) + cmpl -44(fp),-40(fp) + bgequ noname.382 + incl -48(fp) +noname.382: + movl -44(fp),r3 + movl -48(fp),r2 + bbc #31,r2,noname.383 + incl r8 +noname.383: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.384 + incl r2 +noname.384: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.385 + incl r2 + bicl3 #0,r2,r0 + bneq noname.385 + incl r8 +noname.385: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.386 + incl r8 +noname.386: + + movl 4(ap),r0 + movl r7,8(r0) + + clrl r7 + + movl 8(ap),r0 + movzwl 14(r0),r2 + bicl3 #-65536,(r0),r3 + movzwl 2(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r0),-60(fp) + bicl3 #-65536,r2,-64(fp) + mull3 r1,-60(fp),-52(fp) + mull2 r3,-60(fp) + mull3 r3,-64(fp),-56(fp) + mull2 r1,-64(fp) + addl3 -52(fp),-56(fp),r0 + bicl3 #0,r0,-52(fp) + cmpl -52(fp),-56(fp) + bgequ noname.387 + addl2 #65536,-64(fp) +noname.387: + movzwl 
-50(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-64(fp) + bicl3 #-65536,-52(fp),r0 + ashl #16,r0,-56(fp) + addl3 -56(fp),-60(fp),r0 + bicl3 #0,r0,-60(fp) + cmpl -60(fp),-56(fp) + bgequ noname.388 + incl -64(fp) +noname.388: + movl -60(fp),r3 + movl -64(fp),r2 + bbc #31,r2,noname.389 + incl r7 +noname.389: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.390 + incl r2 +noname.390: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.391 + incl r2 + bicl3 #0,r2,r0 + bneq noname.391 + incl r7 +noname.391: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.392 + incl r7 +noname.392: + + movl 8(ap),r0 + movzwl 10(r0),r2 + bicl3 #-65536,4(r0),r3 + movzwl 6(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,8(r0),-76(fp) + bicl3 #-65536,r2,-80(fp) + mull3 r1,-76(fp),-68(fp) + mull2 r3,-76(fp) + mull3 r3,-80(fp),-72(fp) + mull2 r1,-80(fp) + addl3 -68(fp),-72(fp),r0 + bicl3 #0,r0,-68(fp) + cmpl -68(fp),-72(fp) + bgequ noname.393 + addl2 #65536,-80(fp) +noname.393: + movzwl -66(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-80(fp) + bicl3 #-65536,-68(fp),r0 + ashl #16,r0,-72(fp) + addl3 -72(fp),-76(fp),r0 + bicl3 #0,r0,-76(fp) + cmpl -76(fp),-72(fp) + bgequ noname.394 + incl -80(fp) +noname.394: + movl -76(fp),r3 + movl -80(fp),r2 + bbc #31,r2,noname.395 + incl r7 +noname.395: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.396 + incl r2 +noname.396: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.397 + incl r2 + bicl3 #0,r2,r0 + bneq noname.397 + incl r7 +noname.397: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.398 + incl r7 +noname.398: + + movl 4(ap),r0 + movl r9,12(r0) + + clrl r9 + + movl 8(ap),r2 + movl 8(r2),r4 + bicl3 #-65536,r4,-84(fp) + extzv #16,#16,r4,r0 + bicl3 #-65536,r0,r4 + movl -84(fp),r0 + mull3 r0,r4,-88(fp) + mull3 r0,r0,-84(fp) + mull2 r4,r4 + bicl3 #32767,-88(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r4 + bicl3 #-65536,-88(fp),r0 + ashl #17,r0,-88(fp) + addl3 -84(fp),-88(fp),r0 + bicl3 #0,r0,-84(fp) + cmpl -84(fp),-88(fp) + bgequ noname.399 + incl r4 +noname.399: + movl -84(fp),r1 + movl r4,r3 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.400 + incl r3 +noname.400: + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.401 + incl r9 +noname.401: + + movzwl 14(r2),r3 + bicl3 #-65536,4(r2),r1 + movzwl 6(r2),r0 + bicl2 #-65536,r0 + bicl3 #-65536,12(r2),-100(fp) + bicl3 #-65536,r3,-104(fp) + mull3 r0,-100(fp),-92(fp) + mull2 r1,-100(fp) + mull3 r1,-104(fp),-96(fp) + mull2 r0,-104(fp) + addl3 -92(fp),-96(fp),r0 + bicl3 #0,r0,-92(fp) + cmpl -92(fp),-96(fp) + bgequ noname.402 + addl2 #65536,-104(fp) +noname.402: + movzwl -90(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-104(fp) + bicl3 #-65536,-92(fp),r0 + ashl #16,r0,-96(fp) + addl3 -96(fp),-100(fp),r0 + bicl3 #0,r0,-100(fp) + cmpl -100(fp),-96(fp) + bgequ noname.403 + incl -104(fp) +noname.403: + movl -100(fp),r3 + movl -104(fp),r2 + bbc #31,r2,noname.404 + incl r9 +noname.404: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.405 + incl r2 +noname.405: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.406 + incl r2 + bicl3 #0,r2,r0 + bneq noname.406 + incl r9 +noname.406: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.407 + incl r9 +noname.407: + + movl 8(ap),r0 + movzwl 18(r0),r2 + bicl3 #-65536,(r0),r3 + movzwl 2(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r0),-116(fp) + bicl3 #-65536,r2,-120(fp) + mull3 r1,-116(fp),-108(fp) + mull2 r3,-116(fp) + mull3 r3,-120(fp),-112(fp) + mull2 r1,-120(fp) + addl3 -108(fp),-112(fp),r0 + bicl3 
#0,r0,-108(fp) + cmpl -108(fp),-112(fp) + bgequ noname.408 + addl2 #65536,-120(fp) +noname.408: + movzwl -106(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-120(fp) + bicl3 #-65536,-108(fp),r0 + ashl #16,r0,-112(fp) + addl3 -112(fp),-116(fp),r0 + bicl3 #0,r0,-116(fp) + cmpl -116(fp),-112(fp) + bgequ noname.409 + incl -120(fp) +noname.409: + movl -116(fp),r3 + movl -120(fp),r2 + bbc #31,r2,noname.410 + incl r9 +noname.410: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.411 + incl r2 +noname.411: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.412 + incl r2 + bicl3 #0,r2,r0 + bneq noname.412 + incl r9 +noname.412: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.413 + incl r9 +noname.413: + + movl 4(ap),r0 + movl r8,16(r0) + + clrl r8 + + movl 8(ap),r0 + movzwl 22(r0),r2 + bicl3 #-65536,(r0),r3 + movzwl 2(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r0),-132(fp) + bicl3 #-65536,r2,-136(fp) + mull3 r1,-132(fp),-124(fp) + mull2 r3,-132(fp) + mull3 r3,-136(fp),-128(fp) + mull2 r1,-136(fp) + addl3 -124(fp),-128(fp),r0 + bicl3 #0,r0,-124(fp) + cmpl -124(fp),-128(fp) + bgequ noname.414 + addl2 #65536,-136(fp) +noname.414: + movzwl -122(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-136(fp) + bicl3 #-65536,-124(fp),r0 + ashl #16,r0,-128(fp) + addl3 -128(fp),-132(fp),r0 + bicl3 #0,r0,-132(fp) + cmpl -132(fp),-128(fp) + bgequ noname.415 + incl -136(fp) +noname.415: + movl -132(fp),r3 + movl -136(fp),r2 + bbc #31,r2,noname.416 + incl r8 +noname.416: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.417 + incl r2 +noname.417: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.418 + incl r2 + bicl3 #0,r2,r0 + bneq noname.418 + incl r8 +noname.418: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.419 + incl r8 +noname.419: + + movl 8(ap),r0 + movzwl 18(r0),r2 + bicl3 #-65536,4(r0),r3 + movzwl 6(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r0),-148(fp) + bicl3 #-65536,r2,-152(fp) + mull3 r1,-148(fp),-140(fp) + mull2 r3,-148(fp) + mull3 r3,-152(fp),-144(fp) + mull2 r1,-152(fp) + addl3 -140(fp),-144(fp),r0 + bicl3 #0,r0,-140(fp) + cmpl -140(fp),-144(fp) + bgequ noname.420 + addl2 #65536,-152(fp) +noname.420: + movzwl -138(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-152(fp) + bicl3 #-65536,-140(fp),r0 + ashl #16,r0,-144(fp) + addl3 -144(fp),-148(fp),r0 + bicl3 #0,r0,-148(fp) + cmpl -148(fp),-144(fp) + bgequ noname.421 + incl -152(fp) +noname.421: + movl -148(fp),r3 + movl -152(fp),r2 + bbc #31,r2,noname.422 + incl r8 +noname.422: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.423 + incl r2 +noname.423: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.424 + incl r2 + bicl3 #0,r2,r0 + bneq noname.424 + incl r8 +noname.424: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.425 + incl r8 +noname.425: + + movl 8(ap),r0 + movzwl 14(r0),r2 + bicl3 #-65536,8(r0),r3 + movzwl 10(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r0),-164(fp) + bicl3 #-65536,r2,-168(fp) + mull3 r1,-164(fp),-156(fp) + mull2 r3,-164(fp) + mull3 r3,-168(fp),-160(fp) + mull2 r1,-168(fp) + addl3 -156(fp),-160(fp),r0 + bicl3 #0,r0,-156(fp) + cmpl -156(fp),-160(fp) + bgequ noname.426 + addl2 #65536,-168(fp) +noname.426: + movzwl -154(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-168(fp) + bicl3 #-65536,-156(fp),r0 + ashl #16,r0,-160(fp) + addl3 -160(fp),-164(fp),r0 + bicl3 #0,r0,-164(fp) + cmpl -164(fp),-160(fp) + bgequ noname.427 + incl -168(fp) +noname.427: + movl -164(fp),r3 + movl -168(fp),r2 + bbc #31,r2,noname.428 + incl r8 +noname.428: + addl2 r2,r2 + 
bicl2 #0,r2 + bbc #31,r3,noname.429 + incl r2 +noname.429: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.430 + incl r2 + bicl3 #0,r2,r0 + bneq noname.430 + incl r8 +noname.430: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.431 + incl r8 +noname.431: + + movl 4(ap),r0 + movl r7,20(r0) + + clrl r7 + + movl 8(ap),r2 + movl 12(r2),r4 + bicl3 #-65536,r4,-172(fp) + extzv #16,#16,r4,r0 + bicl3 #-65536,r0,r4 + movl -172(fp),r0 + mull3 r0,r4,-176(fp) + mull3 r0,r0,-172(fp) + mull2 r4,r4 + bicl3 #32767,-176(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r4 + bicl3 #-65536,-176(fp),r0 + ashl #17,r0,-176(fp) + addl3 -172(fp),-176(fp),r0 + bicl3 #0,r0,-172(fp) + cmpl -172(fp),-176(fp) + bgequ noname.432 + incl r4 +noname.432: + movl -172(fp),r1 + movl r4,r3 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.433 + incl r3 +noname.433: + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.434 + incl r7 +noname.434: + + movzwl 18(r2),r3 + bicl3 #-65536,8(r2),r1 + movzwl 10(r2),r0 + bicl2 #-65536,r0 + bicl3 #-65536,16(r2),-188(fp) + bicl3 #-65536,r3,-192(fp) + mull3 r0,-188(fp),-180(fp) + mull2 r1,-188(fp) + mull3 r1,-192(fp),-184(fp) + mull2 r0,-192(fp) + addl3 -180(fp),-184(fp),r0 + bicl3 #0,r0,-180(fp) + cmpl -180(fp),-184(fp) + bgequ noname.435 + addl2 #65536,-192(fp) +noname.435: + movzwl -178(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-192(fp) + bicl3 #-65536,-180(fp),r0 + ashl #16,r0,-184(fp) + addl3 -184(fp),-188(fp),r0 + bicl3 #0,r0,-188(fp) + cmpl -188(fp),-184(fp) + bgequ noname.436 + incl -192(fp) +noname.436: + movl -188(fp),r3 + movl -192(fp),r2 + bbc #31,r2,noname.437 + incl r7 +noname.437: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.438 + incl r2 +noname.438: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.439 + incl r2 + bicl3 #0,r2,r0 + bneq noname.439 + incl r7 +noname.439: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.440 + incl r7 +noname.440: + + movl 8(ap),r0 + movzwl 22(r0),r2 + bicl3 #-65536,4(r0),r3 + movzwl 6(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r0),-204(fp) + bicl3 #-65536,r2,-208(fp) + mull3 r1,-204(fp),-196(fp) + mull2 r3,-204(fp) + mull3 r3,-208(fp),-200(fp) + mull2 r1,-208(fp) + addl3 -196(fp),-200(fp),r0 + bicl3 #0,r0,-196(fp) + cmpl -196(fp),-200(fp) + bgequ noname.441 + addl2 #65536,-208(fp) +noname.441: + movzwl -194(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-208(fp) + bicl3 #-65536,-196(fp),r0 + ashl #16,r0,-200(fp) + addl3 -200(fp),-204(fp),r0 + bicl3 #0,r0,-204(fp) + cmpl -204(fp),-200(fp) + bgequ noname.442 + incl -208(fp) +noname.442: + movl -204(fp),r3 + movl -208(fp),r2 + bbc #31,r2,noname.443 + incl r7 +noname.443: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.444 + incl r2 +noname.444: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.445 + incl r2 + bicl3 #0,r2,r0 + bneq noname.445 + incl r7 +noname.445: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.446 + incl r7 +noname.446: + + movl 8(ap),r0 + movzwl 26(r0),r2 + bicl3 #-65536,(r0),r3 + movzwl 2(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,24(r0),-220(fp) + bicl3 #-65536,r2,-224(fp) + mull3 r1,-220(fp),-212(fp) + mull2 r3,-220(fp) + mull3 r3,-224(fp),-216(fp) + mull2 r1,-224(fp) + addl3 -212(fp),-216(fp),r0 + bicl3 #0,r0,-212(fp) + cmpl -212(fp),-216(fp) + bgequ noname.447 + addl2 #65536,-224(fp) +noname.447: + movzwl -210(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-224(fp) + bicl3 #-65536,-212(fp),r0 + ashl #16,r0,-216(fp) + addl3 -216(fp),-220(fp),r0 + bicl3 #0,r0,-220(fp) + 
cmpl -220(fp),-216(fp) + bgequ noname.448 + incl -224(fp) +noname.448: + movl -220(fp),r3 + movl -224(fp),r2 + bbc #31,r2,noname.449 + incl r7 +noname.449: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.450 + incl r2 +noname.450: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.451 + incl r2 + bicl3 #0,r2,r0 + bneq noname.451 + incl r7 +noname.451: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.452 + incl r7 +noname.452: + + movl 4(ap),r0 + movl r9,24(r0) + + clrl r9 + + movl 8(ap),r0 + movzwl 30(r0),r2 + bicl3 #-65536,(r0),r3 + movzwl 2(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,28(r0),-236(fp) + bicl3 #-65536,r2,-240(fp) + mull3 r1,-236(fp),-228(fp) + mull2 r3,-236(fp) + mull3 r3,-240(fp),-232(fp) + mull2 r1,-240(fp) + addl3 -228(fp),-232(fp),r0 + bicl3 #0,r0,-228(fp) + cmpl -228(fp),-232(fp) + bgequ noname.453 + addl2 #65536,-240(fp) +noname.453: + movzwl -226(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-240(fp) + bicl3 #-65536,-228(fp),r0 + ashl #16,r0,-232(fp) + addl3 -232(fp),-236(fp),r0 + bicl3 #0,r0,-236(fp) + cmpl -236(fp),-232(fp) + bgequ noname.454 + incl -240(fp) +noname.454: + movl -236(fp),r3 + movl -240(fp),r2 + bbc #31,r2,noname.455 + incl r9 +noname.455: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.456 + incl r2 +noname.456: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.457 + incl r2 + bicl3 #0,r2,r0 + bneq noname.457 + incl r9 +noname.457: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.458 + incl r9 +noname.458: + + movl 8(ap),r0 + movzwl 26(r0),r2 + bicl3 #-65536,4(r0),r3 + movzwl 6(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,24(r0),-252(fp) + bicl3 #-65536,r2,-256(fp) + mull3 r1,-252(fp),-244(fp) + mull2 r3,-252(fp) + mull3 r3,-256(fp),-248(fp) + mull2 r1,-256(fp) + addl3 -244(fp),-248(fp),r0 + bicl3 #0,r0,-244(fp) + cmpl -244(fp),-248(fp) + bgequ noname.459 + addl2 #65536,-256(fp) +noname.459: + movzwl -242(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-256(fp) + bicl3 #-65536,-244(fp),r0 + ashl #16,r0,-248(fp) + addl3 -248(fp),-252(fp),r0 + bicl3 #0,r0,-252(fp) + cmpl -252(fp),-248(fp) + bgequ noname.460 + incl -256(fp) +noname.460: + movl -252(fp),r3 + movl -256(fp),r2 + bbc #31,r2,noname.461 + incl r9 +noname.461: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.462 + incl r2 +noname.462: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.463 + incl r2 + bicl3 #0,r2,r0 + bneq noname.463 + incl r9 +noname.463: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.464 + incl r9 +noname.464: + + movl 8(ap),r0 + movzwl 22(r0),r2 + bicl3 #-65536,8(r0),r3 + movzwl 10(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r0),-268(fp) + bicl3 #-65536,r2,-272(fp) + mull3 r1,-268(fp),-260(fp) + mull2 r3,-268(fp) + mull3 r3,-272(fp),-264(fp) + mull2 r1,-272(fp) + addl3 -260(fp),-264(fp),r0 + bicl3 #0,r0,-260(fp) + cmpl -260(fp),-264(fp) + bgequ noname.465 + addl2 #65536,-272(fp) +noname.465: + movzwl -258(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-272(fp) + bicl3 #-65536,-260(fp),r0 + ashl #16,r0,-264(fp) + addl3 -264(fp),-268(fp),r0 + bicl3 #0,r0,-268(fp) + cmpl -268(fp),-264(fp) + bgequ noname.466 + incl -272(fp) +noname.466: + movl -268(fp),r3 + movl -272(fp),r2 + bbc #31,r2,noname.467 + incl r9 +noname.467: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.468 + incl r2 +noname.468: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.469 + incl r2 + bicl3 #0,r2,r0 + bneq noname.469 + incl r9 +noname.469: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + 
bgequ noname.470 + incl r9 +noname.470: + + movl 8(ap),r0 + movzwl 18(r0),r2 + bicl3 #-65536,12(r0),r3 + movzwl 14(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r0),-284(fp) + bicl3 #-65536,r2,-288(fp) + mull3 r1,-284(fp),-276(fp) + mull2 r3,-284(fp) + mull3 r3,-288(fp),-280(fp) + mull2 r1,-288(fp) + addl3 -276(fp),-280(fp),r0 + bicl3 #0,r0,-276(fp) + cmpl -276(fp),-280(fp) + bgequ noname.471 + addl2 #65536,-288(fp) +noname.471: + movzwl -274(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-288(fp) + bicl3 #-65536,-276(fp),r0 + ashl #16,r0,-280(fp) + addl3 -280(fp),-284(fp),r0 + bicl3 #0,r0,-284(fp) + cmpl -284(fp),-280(fp) + bgequ noname.472 + incl -288(fp) +noname.472: + movl -284(fp),r3 + movl -288(fp),r2 + bbc #31,r2,noname.473 + incl r9 +noname.473: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.474 + incl r2 +noname.474: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.475 + incl r2 + bicl3 #0,r2,r0 + bneq noname.475 + incl r9 +noname.475: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.476 + incl r9 +noname.476: + + movl 4(ap),r0 + movl r8,28(r0) + + clrl r8 + + movl 8(ap),r3 + movl 16(r3),r4 + bicl3 #-65536,r4,r5 + extzv #16,#16,r4,r0 + bicl3 #-65536,r0,r4 + mull3 r5,r4,-292(fp) + mull2 r5,r5 + mull2 r4,r4 + bicl3 #32767,-292(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r4 + bicl3 #-65536,-292(fp),r0 + ashl #17,r0,-292(fp) + addl2 -292(fp),r5 + bicl2 #0,r5 + cmpl r5,-292(fp) + bgequ noname.477 + incl r4 +noname.477: + movl r5,r1 + movl r4,r2 + addl2 r1,r7 + bicl2 #0,r7 + cmpl r7,r1 + bgequ noname.478 + incl r2 +noname.478: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.479 + incl r8 +noname.479: + + bicl3 #-65536,20(r3),r4 + movzwl 22(r3),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r3),r2 + movzwl 14(r3),r0 + bicl2 #-65536,r0 + movl r4,r6 + movl r1,r5 + mull3 r0,r6,-296(fp) + mull2 r2,r6 + mull3 r2,r5,-300(fp) + mull2 r0,r5 + addl3 -296(fp),-300(fp),r0 + bicl3 #0,r0,-296(fp) + cmpl -296(fp),-300(fp) + bgequ noname.480 + addl2 #65536,r5 +noname.480: + movzwl -294(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r5 + bicl3 #-65536,-296(fp),r0 + ashl #16,r0,-300(fp) + addl2 -300(fp),r6 + bicl2 #0,r6 + cmpl r6,-300(fp) + bgequ noname.481 + incl r5 +noname.481: + movl r6,r3 + movl r5,r2 + bbc #31,r2,noname.482 + incl r8 +noname.482: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.483 + incl r2 +noname.483: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.484 + incl r2 + bicl3 #0,r2,r0 + bneq noname.484 + incl r8 +noname.484: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.485 + incl r8 +noname.485: + + movl 8(ap),r0 + bicl3 #-65536,24(r0),r3 + movzwl 26(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,8(r0),r2 + movzwl 10(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-304(fp) + mull2 r2,r5 + mull3 r2,r4,-308(fp) + mull2 r0,r4 + addl3 -304(fp),-308(fp),r0 + bicl3 #0,r0,-304(fp) + cmpl -304(fp),-308(fp) + bgequ noname.486 + addl2 #65536,r4 +noname.486: + movzwl -302(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-304(fp),r0 + ashl #16,r0,-308(fp) + addl2 -308(fp),r5 + bicl2 #0,r5 + cmpl r5,-308(fp) + bgequ noname.487 + incl r4 +noname.487: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.488 + incl r8 +noname.488: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.489 + incl r2 +noname.489: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.490 + incl r2 + bicl3 #0,r2,r0 + bneq noname.490 + incl r8 +noname.490: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.491 + incl 
r8 +noname.491: + + movl 8(ap),r0 + bicl3 #-65536,28(r0),r3 + movzwl 30(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r0),r2 + movzwl 6(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-312(fp) + mull2 r2,r5 + mull3 r2,r4,-316(fp) + mull2 r0,r4 + addl3 -312(fp),-316(fp),r0 + bicl3 #0,r0,-312(fp) + cmpl -312(fp),-316(fp) + bgequ noname.492 + addl2 #65536,r4 +noname.492: + movzwl -310(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-312(fp),r0 + ashl #16,r0,-316(fp) + addl2 -316(fp),r5 + bicl2 #0,r5 + cmpl r5,-316(fp) + bgequ noname.493 + incl r4 +noname.493: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.494 + incl r8 +noname.494: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.495 + incl r2 +noname.495: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.496 + incl r2 + bicl3 #0,r2,r0 + bneq noname.496 + incl r8 +noname.496: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.497 + incl r8 +noname.497: + + movl 4(ap),r0 + movl r7,32(r0) + + clrl r7 + + movl 8(ap),r0 + bicl3 #-65536,28(r0),r3 + movzwl 30(r0),r2 + bicl3 #-65536,8(r0),r1 + movzwl 10(r0),r0 + bicl2 #-65536,r0 + movl r3,r4 + bicl3 #-65536,r2,-328(fp) + mull3 r0,r4,-320(fp) + mull2 r1,r4 + mull3 r1,-328(fp),-324(fp) + mull2 r0,-328(fp) + addl3 -320(fp),-324(fp),r0 + bicl3 #0,r0,-320(fp) + cmpl -320(fp),-324(fp) + bgequ noname.498 + addl2 #65536,-328(fp) +noname.498: + movzwl -318(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-328(fp) + bicl3 #-65536,-320(fp),r0 + ashl #16,r0,-324(fp) + addl2 -324(fp),r4 + bicl2 #0,r4 + cmpl r4,-324(fp) + bgequ noname.499 + incl -328(fp) +noname.499: + movl r4,r3 + movl -328(fp),r2 + bbc #31,r2,noname.500 + incl r7 +noname.500: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.501 + incl r2 +noname.501: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.502 + incl r2 + bicl3 #0,r2,r0 + bneq noname.502 + incl r7 +noname.502: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.503 + incl r7 +noname.503: + + movl 8(ap),r0 + movzwl 26(r0),r2 + bicl3 #-65536,12(r0),r3 + movzwl 14(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,24(r0),-340(fp) + bicl3 #-65536,r2,-344(fp) + mull3 r1,-340(fp),-332(fp) + mull2 r3,-340(fp) + mull3 r3,-344(fp),-336(fp) + mull2 r1,-344(fp) + addl3 -332(fp),-336(fp),r0 + bicl3 #0,r0,-332(fp) + cmpl -332(fp),-336(fp) + bgequ noname.504 + addl2 #65536,-344(fp) +noname.504: + movzwl -330(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-344(fp) + bicl3 #-65536,-332(fp),r0 + ashl #16,r0,-336(fp) + addl3 -336(fp),-340(fp),r0 + bicl3 #0,r0,-340(fp) + cmpl -340(fp),-336(fp) + bgequ noname.505 + incl -344(fp) +noname.505: + movl -340(fp),r3 + movl -344(fp),r2 + bbc #31,r2,noname.506 + incl r7 +noname.506: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.507 + incl r2 +noname.507: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.508 + incl r2 + bicl3 #0,r2,r0 + bneq noname.508 + incl r7 +noname.508: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.509 + incl r7 +noname.509: + + movl 8(ap),r0 + movzwl 22(r0),r2 + bicl3 #-65536,16(r0),r3 + movzwl 18(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r0),-356(fp) + bicl3 #-65536,r2,-360(fp) + mull3 r1,-356(fp),-348(fp) + mull2 r3,-356(fp) + mull3 r3,-360(fp),-352(fp) + mull2 r1,-360(fp) + addl3 -348(fp),-352(fp),r0 + bicl3 #0,r0,-348(fp) + cmpl -348(fp),-352(fp) + bgequ noname.510 + addl2 #65536,-360(fp) +noname.510: + movzwl -346(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-360(fp) + bicl3 #-65536,-348(fp),r0 + ashl #16,r0,-352(fp) + addl3 
-352(fp),-356(fp),r0 + bicl3 #0,r0,-356(fp) + cmpl -356(fp),-352(fp) + bgequ noname.511 + incl -360(fp) +noname.511: + movl -356(fp),r3 + movl -360(fp),r2 + bbc #31,r2,noname.512 + incl r7 +noname.512: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.513 + incl r2 +noname.513: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.514 + incl r2 + bicl3 #0,r2,r0 + bneq noname.514 + incl r7 +noname.514: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.515 + incl r7 +noname.515: + + movl 4(ap),r0 + movl r9,36(r0) + + clrl r9 + + movl 8(ap),r3 + movl 20(r3),r4 + bicl3 #-65536,r4,-364(fp) + extzv #16,#16,r4,r0 + bicl3 #-65536,r0,r4 + movl -364(fp),r0 + mull3 r0,r4,-368(fp) + mull3 r0,r0,-364(fp) + mull2 r4,r4 + bicl3 #32767,-368(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r4 + bicl3 #-65536,-368(fp),r0 + ashl #17,r0,-368(fp) + addl3 -364(fp),-368(fp),r0 + bicl3 #0,r0,-364(fp) + cmpl -364(fp),-368(fp) + bgequ noname.516 + incl r4 +noname.516: + movl -364(fp),r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.517 + incl r2 +noname.517: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.518 + incl r9 +noname.518: + + bicl3 #-65536,24(r3),r4 + movzwl 26(r3),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r3),r2 + movzwl 18(r3),r0 + bicl2 #-65536,r0 + movl r4,r6 + movl r1,r5 + mull3 r0,r6,-372(fp) + mull2 r2,r6 + mull3 r2,r5,-376(fp) + mull2 r0,r5 + addl3 -372(fp),-376(fp),r0 + bicl3 #0,r0,-372(fp) + cmpl -372(fp),-376(fp) + bgequ noname.519 + addl2 #65536,r5 +noname.519: + movzwl -370(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r5 + bicl3 #-65536,-372(fp),r0 + ashl #16,r0,-376(fp) + addl2 -376(fp),r6 + bicl2 #0,r6 + cmpl r6,-376(fp) + bgequ noname.520 + incl r5 +noname.520: + movl r6,r3 + movl r5,r2 + bbc #31,r2,noname.521 + incl r9 +noname.521: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.522 + incl r2 +noname.522: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.523 + incl r2 + bicl3 #0,r2,r0 + bneq noname.523 + incl r9 +noname.523: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.524 + incl r9 +noname.524: + + movl 8(ap),r0 + bicl3 #-65536,28(r0),r3 + movzwl 30(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,12(r0),r2 + movzwl 14(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-380(fp) + mull2 r2,r5 + mull3 r2,r4,-384(fp) + mull2 r0,r4 + addl3 -380(fp),-384(fp),r0 + bicl3 #0,r0,-380(fp) + cmpl -380(fp),-384(fp) + bgequ noname.525 + addl2 #65536,r4 +noname.525: + movzwl -378(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-380(fp),r0 + ashl #16,r0,-384(fp) + addl2 -384(fp),r5 + bicl2 #0,r5 + cmpl r5,-384(fp) + bgequ noname.526 + incl r4 +noname.526: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.527 + incl r9 +noname.527: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.528 + incl r2 +noname.528: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.529 + incl r2 + bicl3 #0,r2,r0 + bneq noname.529 + incl r9 +noname.529: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.530 + incl r9 +noname.530: + movl 4(ap),r0 + movl r8,40(r0) + + clrl r8 + + movl 8(ap),r0 + bicl3 #-65536,28(r0),r3 + movzwl 30(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,16(r0),r2 + movzwl 18(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-388(fp) + mull2 r2,r5 + mull3 r2,r4,-392(fp) + mull2 r0,r4 + addl3 -388(fp),-392(fp),r0 + bicl3 #0,r0,-388(fp) + cmpl -388(fp),-392(fp) + bgequ noname.531 + addl2 #65536,r4 +noname.531: + movzwl -386(fp),r0 + bicl2 #-65536,r0 + addl2 
r0,r4 + bicl3 #-65536,-388(fp),r0 + ashl #16,r0,-392(fp) + addl2 -392(fp),r5 + bicl2 #0,r5 + cmpl r5,-392(fp) + bgequ noname.532 + incl r4 +noname.532: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.533 + incl r8 +noname.533: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.534 + incl r2 +noname.534: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.535 + incl r2 + bicl3 #0,r2,r0 + bneq noname.535 + incl r8 +noname.535: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.536 + incl r8 +noname.536: + + movl 8(ap),r0 + bicl3 #-65536,24(r0),r3 + movzwl 26(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,20(r0),r2 + movzwl 22(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-396(fp) + mull2 r2,r5 + mull3 r2,r4,-400(fp) + mull2 r0,r4 + addl3 -396(fp),-400(fp),r0 + bicl3 #0,r0,-396(fp) + cmpl -396(fp),-400(fp) + bgequ noname.537 + addl2 #65536,r4 +noname.537: + movzwl -394(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-396(fp),r0 + ashl #16,r0,-400(fp) + addl2 -400(fp),r5 + bicl2 #0,r5 + cmpl r5,-400(fp) + bgequ noname.538 + incl r4 +noname.538: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.539 + incl r8 +noname.539: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.540 + incl r2 +noname.540: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r7 + bicl2 #0,r7 + cmpl r7,r3 + bgequ noname.541 + incl r2 + bicl3 #0,r2,r0 + bneq noname.541 + incl r8 +noname.541: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.542 + incl r8 +noname.542: + + movl 4(ap),r0 + movl r7,44(r0) + + clrl r7 + + movl 8(ap),r3 + movl 24(r3),r4 + bicl3 #-65536,r4,r5 + extzv #16,#16,r4,r0 + bicl3 #-65536,r0,r4 + mull3 r5,r4,-404(fp) + mull2 r5,r5 + mull2 r4,r4 + bicl3 #32767,-404(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r4 + bicl3 #-65536,-404(fp),r0 + ashl #17,r0,-404(fp) + addl2 -404(fp),r5 + bicl2 #0,r5 + cmpl r5,-404(fp) + bgequ noname.543 + incl r4 +noname.543: + movl r5,r1 + movl r4,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.544 + incl r2 +noname.544: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.545 + incl r7 +noname.545: + + movzwl 30(r3),r2 + bicl3 #-65536,20(r3),r1 + movzwl 22(r3),r0 + bicl2 #-65536,r0 + bicl3 #-65536,28(r3),-416(fp) + bicl3 #-65536,r2,-420(fp) + mull3 r0,-416(fp),-408(fp) + mull2 r1,-416(fp) + mull3 r1,-420(fp),-412(fp) + mull2 r0,-420(fp) + addl3 -408(fp),-412(fp),r0 + bicl3 #0,r0,-408(fp) + cmpl -408(fp),-412(fp) + bgequ noname.546 + addl2 #65536,-420(fp) +noname.546: + movzwl -406(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-420(fp) + bicl3 #-65536,-408(fp),r0 + ashl #16,r0,-412(fp) + addl3 -412(fp),-416(fp),r0 + bicl3 #0,r0,-416(fp) + cmpl -416(fp),-412(fp) + bgequ noname.547 + incl -420(fp) +noname.547: + movl -416(fp),r3 + movl -420(fp),r2 + bbc #31,r2,noname.548 + incl r7 +noname.548: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.549 + incl r2 +noname.549: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.550 + incl r2 + bicl3 #0,r2,r0 + bneq noname.550 + incl r7 +noname.550: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.551 + incl r7 +noname.551: + + movl 4(ap),r0 + movl r9,48(r0) + + clrl r9 + + movl 8(ap),r0 + movzwl 30(r0),r2 + bicl3 #-65536,24(r0),r3 + movzwl 26(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,28(r0),-432(fp) + bicl3 #-65536,r2,-436(fp) + mull3 r1,-432(fp),-424(fp) + mull2 r3,-432(fp) + mull3 r3,-436(fp),-428(fp) + mull2 r1,-436(fp) + addl3 -424(fp),-428(fp),r0 + bicl3 #0,r0,-424(fp) + cmpl -424(fp),-428(fp) + bgequ noname.552 + addl2 #65536,-436(fp) +noname.552: + 
movzwl -422(fp),r0 + bicl2 #-65536,r0 + addl2 r0,-436(fp) + bicl3 #-65536,-424(fp),r0 + ashl #16,r0,-428(fp) + addl3 -428(fp),-432(fp),r0 + bicl3 #0,r0,-432(fp) + cmpl -432(fp),-428(fp) + bgequ noname.553 + incl -436(fp) +noname.553: + movl -432(fp),r3 + movl -436(fp),r2 + bbc #31,r2,noname.554 + incl r9 +noname.554: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.555 + incl r2 +noname.555: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.556 + incl r2 + bicl3 #0,r2,r0 + bneq noname.556 + incl r9 +noname.556: + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.557 + incl r9 +noname.557: + + movl 4(ap),r4 + movl r8,52(r4) + + clrl r8 + + movl 8(ap),r0 + movl 28(r0),r3 + bicl3 #-65536,r3,-440(fp) + extzv #16,#16,r3,r0 + bicl3 #-65536,r0,r3 + movl -440(fp),r0 + mull3 r0,r3,-444(fp) + mull3 r0,r0,-440(fp) + mull2 r3,r3 + bicl3 #32767,-444(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r3 + bicl3 #-65536,-444(fp),r0 + ashl #17,r0,-444(fp) + addl3 -440(fp),-444(fp),r0 + bicl3 #0,r0,-440(fp) + cmpl -440(fp),-444(fp) + bgequ noname.558 + incl r3 +noname.558: + movl -440(fp),r1 + movl r3,r2 + addl2 r1,r7 + bicl2 #0,r7 + cmpl r7,r1 + bgequ noname.559 + incl r2 +noname.559: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.560 + incl r8 +noname.560: + + movl r7,56(r4) + + movl r9,60(r4) + + ret + + + +;r=4 ;(AP) +;a=8 ;(AP) +;b=12 ;(AP) +;n=16 ;(AP) n by value (input) + + .psect code,nowrt + +.entry BN_SQR_COMBA4,^m<r2,r3,r4,r5,r6,r7,r8,r9,r10> + subl2 #44,sp + + clrq r8 + + clrl r10 + + movl 8(ap),r5 + movl (r5),r3 + bicl3 #-65536,r3,r4 + extzv #16,#16,r3,r0 + bicl3 #-65536,r0,r3 + mull3 r4,r3,-4(fp) + mull2 r4,r4 + mull2 r3,r3 + bicl3 #32767,-4(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r3 + bicl3 #-65536,-4(fp),r0 + ashl #17,r0,-4(fp) + addl2 -4(fp),r4 + bicl2 #0,r4 + cmpl r4,-4(fp) + bgequ noname.563 + incl r3 +noname.563: + movl r4,r1 + movl r3,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.564 + incl r2 +noname.564: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.565 + incl r10 +noname.565: + + movl r9,@4(ap) + + clrl r9 + + bicl3 #-65536,4(r5),r3 + movzwl 6(r5),r1 + bicl2 #-65536,r1 + bicl3 #-65536,(r5),r2 + movzwl 2(r5),r0 + bicl2 #-65536,r0 + movl r3,r6 + movl r1,r4 + mull3 r0,r6,-8(fp) + mull2 r2,r6 + mull2 r4,r2 + mull2 r0,r4 + addl3 -8(fp),r2,r0 + bicl3 #0,r0,-8(fp) + cmpl -8(fp),r2 + bgequ noname.566 + addl2 #65536,r4 +noname.566: + movzwl -6(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-8(fp),r0 + ashl #16,r0,r1 + addl2 r1,r6 + bicl2 #0,r6 + cmpl r6,r1 + bgequ noname.567 + incl r4 +noname.567: + movl r6,r3 + movl r4,r2 + bbc #31,r2,noname.568 + incl r9 +noname.568: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.569 + incl r2 +noname.569: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.570 + incl r2 + bicl3 #0,r2,r0 + bneq noname.570 + incl r9 +noname.570: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.571 + incl r9 +noname.571: + + movl 4(ap),r0 + movl r8,4(r0) + + clrl r8 + + movl 8(ap),r4 + movl 4(r4),r3 + bicl3 #-65536,r3,r5 + extzv #16,#16,r3,r0 + bicl3 #-65536,r0,r3 + mull3 r5,r3,r1 + mull2 r5,r5 + mull2 r3,r3 + bicl3 #32767,r1,r0 + extzv #15,#17,r0,r0 + addl2 r0,r3 + bicl2 #-65536,r1 + ashl #17,r1,r1 + addl2 r1,r5 + bicl2 #0,r5 + cmpl r5,r1 + bgequ noname.572 + incl r3 +noname.572: + movl r5,r1 + movl r3,r2 + addl2 r1,r10 + bicl2 #0,r10 + cmpl r10,r1 + bgequ noname.573 + incl r2 +noname.573: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.574 + incl r8 
+noname.574: + + bicl3 #-65536,8(r4),r3 + movzwl 10(r4),r1 + bicl2 #-65536,r1 + bicl3 #-65536,(r4),r2 + movzwl 2(r4),r0 + bicl2 #-65536,r0 + movl r3,r6 + movl r1,r5 + mull3 r0,r6,r7 + mull2 r2,r6 + mull2 r5,r2 + mull2 r0,r5 + addl2 r2,r7 + bicl2 #0,r7 + cmpl r7,r2 + bgequ noname.575 + addl2 #65536,r5 +noname.575: + extzv #16,#16,r7,r0 + bicl2 #-65536,r0 + addl2 r0,r5 + bicl3 #-65536,r7,r0 + ashl #16,r0,r1 + addl2 r1,r6 + bicl2 #0,r6 + cmpl r6,r1 + bgequ noname.576 + incl r5 +noname.576: + movl r6,r3 + movl r5,r2 + bbc #31,r2,noname.577 + incl r8 +noname.577: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.578 + incl r2 +noname.578: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r10 + bicl2 #0,r10 + cmpl r10,r3 + bgequ noname.579 + incl r2 + bicl3 #0,r2,r0 + bneq noname.579 + incl r8 +noname.579: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.580 + incl r8 +noname.580: + + movl 4(ap),r0 + movl r10,8(r0) + + clrl r10 + + movl 8(ap),r0 + bicl3 #-65536,12(r0),r3 + movzwl 14(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,(r0),r2 + movzwl 2(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,r6 + mull2 r2,r5 + mull3 r2,r4,-12(fp) + mull2 r0,r4 + addl2 -12(fp),r6 + bicl2 #0,r6 + cmpl r6,-12(fp) + bgequ noname.581 + addl2 #65536,r4 +noname.581: + extzv #16,#16,r6,r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,r6,r0 + ashl #16,r0,-12(fp) + addl2 -12(fp),r5 + bicl2 #0,r5 + cmpl r5,-12(fp) + bgequ noname.582 + incl r4 +noname.582: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.583 + incl r10 +noname.583: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.584 + incl r2 +noname.584: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.585 + incl r2 + bicl3 #0,r2,r0 + bneq noname.585 + incl r10 +noname.585: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.586 + incl r10 +noname.586: + + movl 8(ap),r0 + bicl3 #-65536,8(r0),r3 + movzwl 10(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r0),r2 + movzwl 6(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-16(fp) + mull2 r2,r5 + mull3 r2,r4,-20(fp) + mull2 r0,r4 + addl3 -16(fp),-20(fp),r0 + bicl3 #0,r0,-16(fp) + cmpl -16(fp),-20(fp) + bgequ noname.587 + addl2 #65536,r4 +noname.587: + movzwl -14(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-16(fp),r0 + ashl #16,r0,-20(fp) + addl2 -20(fp),r5 + bicl2 #0,r5 + cmpl r5,-20(fp) + bgequ noname.588 + incl r4 +noname.588: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.589 + incl r10 +noname.589: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.590 + incl r2 +noname.590: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r9 + bicl2 #0,r9 + cmpl r9,r3 + bgequ noname.591 + incl r2 + bicl3 #0,r2,r0 + bneq noname.591 + incl r10 +noname.591: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.592 + incl r10 +noname.592: + movl 4(ap),r0 + movl r9,12(r0) + + clrl r9 + + movl 8(ap),r3 + movl 8(r3),r4 + bicl3 #-65536,r4,r5 + extzv #16,#16,r4,r0 + bicl3 #-65536,r0,r4 + mull3 r5,r4,-24(fp) + mull2 r5,r5 + mull2 r4,r4 + bicl3 #32767,-24(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r4 + bicl3 #-65536,-24(fp),r0 + ashl #17,r0,-24(fp) + addl2 -24(fp),r5 + bicl2 #0,r5 + cmpl r5,-24(fp) + bgequ noname.593 + incl r4 +noname.593: + movl r5,r1 + movl r4,r2 + addl2 r1,r8 + bicl2 #0,r8 + cmpl r8,r1 + bgequ noname.594 + incl r2 +noname.594: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.595 + incl r9 +noname.595: + + bicl3 #-65536,12(r3),r4 + movzwl 14(r3),r1 + bicl2 #-65536,r1 + bicl3 #-65536,4(r3),r2 + movzwl 6(r3),r0 + bicl2 #-65536,r0 + movl r4,r6 + movl r1,r5 + mull3 
r0,r6,-28(fp) + mull2 r2,r6 + mull3 r2,r5,-32(fp) + mull2 r0,r5 + addl3 -28(fp),-32(fp),r0 + bicl3 #0,r0,-28(fp) + cmpl -28(fp),-32(fp) + bgequ noname.596 + addl2 #65536,r5 +noname.596: + movzwl -26(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r5 + bicl3 #-65536,-28(fp),r0 + ashl #16,r0,-32(fp) + addl2 -32(fp),r6 + bicl2 #0,r6 + cmpl r6,-32(fp) + bgequ noname.597 + incl r5 +noname.597: + movl r6,r3 + movl r5,r2 + bbc #31,r2,noname.598 + incl r9 +noname.598: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.599 + incl r2 +noname.599: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r8 + bicl2 #0,r8 + cmpl r8,r3 + bgequ noname.600 + incl r2 + bicl3 #0,r2,r0 + bneq noname.600 + incl r9 +noname.600: + addl2 r2,r10 + bicl2 #0,r10 + cmpl r10,r2 + bgequ noname.601 + incl r9 +noname.601: + + movl 4(ap),r0 + movl r8,16(r0) + + clrl r8 + + movl 8(ap),r0 + bicl3 #-65536,12(r0),r3 + movzwl 14(r0),r1 + bicl2 #-65536,r1 + bicl3 #-65536,8(r0),r2 + movzwl 10(r0),r0 + bicl2 #-65536,r0 + movl r3,r5 + movl r1,r4 + mull3 r0,r5,-36(fp) + mull2 r2,r5 + mull3 r2,r4,-40(fp) + mull2 r0,r4 + addl3 -36(fp),-40(fp),r0 + bicl3 #0,r0,-36(fp) + cmpl -36(fp),-40(fp) + bgequ noname.602 + addl2 #65536,r4 +noname.602: + movzwl -34(fp),r0 + bicl2 #-65536,r0 + addl2 r0,r4 + bicl3 #-65536,-36(fp),r0 + ashl #16,r0,-40(fp) + addl2 -40(fp),r5 + bicl2 #0,r5 + cmpl r5,-40(fp) + bgequ noname.603 + incl r4 +noname.603: + movl r5,r3 + movl r4,r2 + bbc #31,r2,noname.604 + incl r8 +noname.604: + addl2 r2,r2 + bicl2 #0,r2 + bbc #31,r3,noname.605 + incl r2 +noname.605: + addl2 r3,r3 + bicl2 #0,r3 + addl2 r3,r10 + bicl2 #0,r10 + cmpl r10,r3 + bgequ noname.606 + incl r2 + bicl3 #0,r2,r0 + bneq noname.606 + incl r8 +noname.606: + addl2 r2,r9 + bicl2 #0,r9 + cmpl r9,r2 + bgequ noname.607 + incl r8 +noname.607: + + movl 4(ap),r4 + movl r10,20(r4) + + clrl r10 + + movl 8(ap),r0 + movl 12(r0),r3 + bicl3 #-65536,r3,r5 + extzv #16,#16,r3,r0 + bicl3 #-65536,r0,r3 + mull3 r5,r3,-44(fp) + mull2 r5,r5 + mull2 r3,r3 + bicl3 #32767,-44(fp),r0 + extzv #15,#17,r0,r0 + addl2 r0,r3 + bicl3 #-65536,-44(fp),r0 + ashl #17,r0,-44(fp) + addl2 -44(fp),r5 + bicl2 #0,r5 + cmpl r5,-44(fp) + bgequ noname.608 + incl r3 +noname.608: + movl r5,r1 + movl r3,r2 + addl2 r1,r9 + bicl2 #0,r9 + cmpl r9,r1 + bgequ noname.609 + incl r2 +noname.609: + addl2 r2,r8 + bicl2 #0,r8 + cmpl r8,r2 + bgequ noname.610 + incl r10 +noname.610: + + movl r9,24(r4) + + movl r8,28(r4) + + ret + +; For now, the code below doesn't work, so I end this prematurely. +.end diff --git a/openssl/crypto/bn/asm/x86-mont.pl b/openssl/crypto/bn/asm/x86-mont.pl new file mode 100644 index 000000000..5cd3cd2ed --- /dev/null +++ b/openssl/crypto/bn/asm/x86-mont.pl @@ -0,0 +1,591 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005 +# +# This is a "teaser" code, as it can be improved in several ways... +# First of all non-SSE2 path should be implemented (yes, for now it +# performs Montgomery multiplication/convolution only on SSE2-capable +# CPUs such as P4, others fall down to original code). 
Then inner loop +# can be unrolled and modulo-scheduled to improve ILP and possibly +# moved to 128-bit XMM register bank (though it would require input +# rearrangement and/or increase bus bandwidth utilization). Dedicated +# squaring procedure should give further performance improvement... +# Yet, for being draft, the code improves rsa512 *sign* benchmark by +# 110%(!), rsa1024 one - by 70% and rsa4096 - by 20%:-) + +# December 2006 +# +# Modulo-scheduling SSE2 loops results in further 15-20% improvement. +# Integer-only code [being equipped with dedicated squaring procedure] +# gives ~40% on rsa512 sign benchmark... + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +$sse2=0; +for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } + +&external_label("OPENSSL_ia32cap_P") if ($sse2); + +&function_begin("bn_mul_mont"); + +$i="edx"; +$j="ecx"; +$ap="esi"; $tp="esi"; # overlapping variables!!! +$rp="edi"; $bp="edi"; # overlapping variables!!! +$np="ebp"; +$num="ebx"; + +$_num=&DWP(4*0,"esp"); # stack top layout +$_rp=&DWP(4*1,"esp"); +$_ap=&DWP(4*2,"esp"); +$_bp=&DWP(4*3,"esp"); +$_np=&DWP(4*4,"esp"); +$_n0=&DWP(4*5,"esp"); $_n0q=&QWP(4*5,"esp"); +$_sp=&DWP(4*6,"esp"); +$_bpend=&DWP(4*7,"esp"); +$frame=32; # size of above frame rounded up to 16n + + &xor ("eax","eax"); + &mov ("edi",&wparam(5)); # int num + &cmp ("edi",4); + &jl (&label("just_leave")); + + &lea ("esi",&wparam(0)); # put aside pointer to argument block + &lea ("edx",&wparam(1)); # load ap + &mov ("ebp","esp"); # saved stack pointer! + &add ("edi",2); # extra two words on top of tp + &neg ("edi"); + &lea ("esp",&DWP(-$frame,"esp","edi",4)); # alloca($frame+4*(num+2)) + &neg ("edi"); + + # minimize cache contention by arraning 2K window between stack + # pointer and ap argument [np is also position sensitive vector, + # but it's assumed to be near ap, as it's allocated at ~same + # time]. + &mov ("eax","esp"); + &sub ("eax","edx"); + &and ("eax",2047); + &sub ("esp","eax"); # this aligns sp and ap modulo 2048 + + &xor ("edx","esp"); + &and ("edx",2048); + &xor ("edx",2048); + &sub ("esp","edx"); # this splits them apart modulo 4096 + + &and ("esp",-64); # align to cache line + + ################################# load argument block... + &mov ("eax",&DWP(0*4,"esi"));# BN_ULONG *rp + &mov ("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap + &mov ("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp + &mov ("edx",&DWP(3*4,"esi"));# const BN_ULONG *np + &mov ("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0 + #&mov ("edi",&DWP(5*4,"esi"));# int num + + &mov ("esi",&DWP(0,"esi")); # pull n0[0] + &mov ($_rp,"eax"); # ... save a copy of argument block + &mov ($_ap,"ebx"); + &mov ($_bp,"ecx"); + &mov ($_np,"edx"); + &mov ($_n0,"esi"); + &lea ($num,&DWP(-3,"edi")); # num=num-1 to assist modulo-scheduling + #&mov ($_num,$num); # redundant as $num is not reused + &mov ($_sp,"ebp"); # saved stack pointer! 
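	# (Editorial sketch, not part of the original patch: in C-like terms the
	#  cache-contention adjustment a few lines above does roughly
	#      sp -= (sp - ap) & 2047;            /* sp == ap      (mod 2048) */
	#      sp -= ((sp ^ ap) & 2048) ^ 2048;   /* sp, ap 2048 apart (mod 4096) */
	#      sp &= -64;                         /* 64-byte cache-line alignment */
	#  so that the temporary vector on the stack and the ap/np inputs should
	#  not collide in cache sets indexed by the low address bits.)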
+ +if($sse2) { +$acc0="mm0"; # mmx register bank layout +$acc1="mm1"; +$car0="mm2"; +$car1="mm3"; +$mul0="mm4"; +$mul1="mm5"; +$temp="mm6"; +$mask="mm7"; + + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt (&DWP(0,"eax"),26); + &jnc (&label("non_sse2")); + + &mov ("eax",-1); + &movd ($mask,"eax"); # mask 32 lower bits + + &mov ($ap,$_ap); # load input pointers + &mov ($bp,$_bp); + &mov ($np,$_np); + + &xor ($i,$i); # i=0 + &xor ($j,$j); # j=0 + + &movd ($mul0,&DWP(0,$bp)); # bp[0] + &movd ($mul1,&DWP(0,$ap)); # ap[0] + &movd ($car1,&DWP(0,$np)); # np[0] + + &pmuludq($mul1,$mul0); # ap[0]*bp[0] + &movq ($car0,$mul1); + &movq ($acc0,$mul1); # I wish movd worked for + &pand ($acc0,$mask); # inter-register transfers + + &pmuludq($mul1,$_n0q); # *=n0 + + &pmuludq($car1,$mul1); # "t[0]"*np[0]*n0 + &paddq ($car1,$acc0); + + &movd ($acc1,&DWP(4,$np)); # np[1] + &movd ($acc0,&DWP(4,$ap)); # ap[1] + + &psrlq ($car0,32); + &psrlq ($car1,32); + + &inc ($j); # j++ +&set_label("1st",16); + &pmuludq($acc0,$mul0); # ap[j]*bp[0] + &pmuludq($acc1,$mul1); # np[j]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1] + &paddq ($car1,$acc0); # +=ap[j]*bp[0]; + &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1] + &psrlq ($car0,32); + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]= + &psrlq ($car1,32); + + &lea ($j,&DWP(1,$j)); + &cmp ($j,$num); + &jl (&label("1st")); + + &pmuludq($acc0,$mul0); # ap[num-1]*bp[0] + &pmuludq($acc1,$mul1); # np[num-1]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &paddq ($car1,$acc0); # +=ap[num-1]*bp[0]; + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]= + + &psrlq ($car0,32); + &psrlq ($car1,32); + + &paddq ($car1,$car0); + &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1] + + &inc ($i); # i++ +&set_label("outer"); + &xor ($j,$j); # j=0 + + &movd ($mul0,&DWP(0,$bp,$i,4)); # bp[i] + &movd ($mul1,&DWP(0,$ap)); # ap[0] + &movd ($temp,&DWP($frame,"esp")); # tp[0] + &movd ($car1,&DWP(0,$np)); # np[0] + &pmuludq($mul1,$mul0); # ap[0]*bp[i] + + &paddq ($mul1,$temp); # +=tp[0] + &movq ($acc0,$mul1); + &movq ($car0,$mul1); + &pand ($acc0,$mask); + + &pmuludq($mul1,$_n0q); # *=n0 + + &pmuludq($car1,$mul1); + &paddq ($car1,$acc0); + + &movd ($temp,&DWP($frame+4,"esp")); # tp[1] + &movd ($acc1,&DWP(4,$np)); # np[1] + &movd ($acc0,&DWP(4,$ap)); # ap[1] + + &psrlq ($car0,32); + &psrlq ($car1,32); + &paddq ($car0,$temp); # +=tp[1] + + &inc ($j); # j++ + &dec ($num); +&set_label("inner"); + &pmuludq($acc0,$mul0); # ap[j]*bp[i] + &pmuludq($acc1,$mul1); # np[j]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &movd ($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1] + &pand ($acc0,$mask); + &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1] + &paddq ($car1,$acc0); # +=ap[j]*bp[i]+tp[j] + &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1] + &psrlq ($car0,32); + &movd (&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]= + &psrlq ($car1,32); + &paddq ($car0,$temp); # +=tp[j+1] + + &dec ($num); + &lea ($j,&DWP(1,$j)); # j++ + &jnz (&label("inner")); + + &mov ($num,$j); + &pmuludq($acc0,$mul0); # ap[num-1]*bp[i] + &pmuludq($acc1,$mul1); # np[num-1]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &paddq ($car1,$acc0); # +=ap[num-1]*bp[i]+tp[num-1] + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]= + &psrlq ($car0,32); + &psrlq ($car1,32); + + &movd 
($temp,&DWP($frame+4,"esp",$num,4)); # += tp[num] + &paddq ($car1,$car0); + &paddq ($car1,$temp); + &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1] + + &lea ($i,&DWP(1,$i)); # i++ + &cmp ($i,$num); + &jle (&label("outer")); + + &emms (); # done with mmx bank + &jmp (&label("common_tail")); + +&set_label("non_sse2",16); +} + +if (0) { + &mov ("esp",$_sp); + &xor ("eax","eax"); # signal "not fast enough [yet]" + &jmp (&label("just_leave")); + # While the below code provides competitive performance for + # all key lengthes on modern Intel cores, it's still more + # than 10% slower for 4096-bit key elsewhere:-( "Competitive" + # means compared to the original integer-only assembler. + # 512-bit RSA sign is better by ~40%, but that's about all + # one can say about all CPUs... +} else { +$inp="esi"; # integer path uses these registers differently +$word="edi"; +$carry="ebp"; + + &mov ($inp,$_ap); + &lea ($carry,&DWP(1,$num)); + &mov ($word,$_bp); + &xor ($j,$j); # j=0 + &mov ("edx",$inp); + &and ($carry,1); # see if num is even + &sub ("edx",$word); # see if ap==bp + &lea ("eax",&DWP(4,$word,$num,4)); # &bp[num] + &or ($carry,"edx"); + &mov ($word,&DWP(0,$word)); # bp[0] + &jz (&label("bn_sqr_mont")); + &mov ($_bpend,"eax"); + &mov ("eax",&DWP(0,$inp)); + &xor ("edx","edx"); + +&set_label("mull",16); + &mov ($carry,"edx"); + &mul ($word); # ap[j]*bp[0] + &add ($carry,"eax"); + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1] + &cmp ($j,$num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("mull")); + + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*bp[0] + &mov ($word,$_n0); + &add ("eax",$carry); + &mov ($inp,$_np); + &adc ("edx",0); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &mov (&DWP($frame,"esp",$num,4),"eax"); # tp[num-1]= + &xor ($j,$j); + &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]= + &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]= + + &mov ("eax",&DWP(0,$inp)); # np[0] + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ("eax",&DWP(4,$inp)); # np[1] + &adc ("edx",0); + &inc ($j); + + &jmp (&label("2ndmadd")); + +&set_label("1stmadd",16); + &mov ($carry,"edx"); + &mul ($word); # ap[j]*bp[i] + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("1stmadd")); + + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*bp[i] + &add ("eax",&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &mov ($word,$_n0); + &adc ("edx",0); + &mov ($inp,$_np); + &add ($carry,"eax"); + &adc ("edx",0); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &xor ($j,$j); + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &mov (&DWP($frame,"esp",$num,4),$carry); # tp[num-1]= + &adc ($j,0); + &mov ("eax",&DWP(0,$inp)); # np[0] + &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]= + &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ("eax",&DWP(4,$inp)); # np[1] + &adc ("edx",0); + &mov ($j,1); + +&set_label("2ndmadd",16); + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+1] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j-1]= + 
&jl (&label("2ndmadd")); + + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &adc ("edx",0); + &add ($carry,"eax"); + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]= + + &xor ("eax","eax"); + &mov ($j,$_bp); # &bp[i] + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1] + &lea ($j,&DWP(4,$j)); + &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]= + &cmp ($j,$_bpend); + &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]= + &je (&label("common_tail")); + + &mov ($word,&DWP(0,$j)); # bp[i+1] + &mov ($inp,$_ap); + &mov ($_bp,$j); # &bp[++i] + &xor ($j,$j); + &xor ("edx","edx"); + &mov ("eax",&DWP(0,$inp)); + &jmp (&label("1stmadd")); + +&set_label("bn_sqr_mont",16); +$sbit=$num; + &mov ($_num,$num); + &mov ($_bp,$j); # i=0 + + &mov ("eax",$word); # ap[0] + &mul ($word); # ap[0]*ap[0] + &mov (&DWP($frame,"esp"),"eax"); # tp[0]= + &mov ($sbit,"edx"); + &shr ("edx",1); + &and ($sbit,1); + &inc ($j); +&set_label("sqr",16); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j] + &mov ($carry,"edx"); + &mul ($word); # ap[j]*ap[0] + &add ("eax",$carry); + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &lea ($carry,&DWP(0,$sbit,"eax",2)); + &shr ("eax",31); + &cmp ($j,$_num); + &mov ($sbit,"eax"); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("sqr")); + + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[num-1] + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*ap[0] + &add ("eax",$carry); + &mov ($word,$_n0); + &adc ("edx",0); + &mov ($inp,$_np); + &lea ($carry,&DWP(0,$sbit,"eax",2)); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + &shr ("eax",31); + &mov (&DWP($frame,"esp",$j,4),$carry); # tp[num-1]= + + &lea ($carry,&DWP(0,"eax","edx",2)); + &mov ("eax",&DWP(0,$inp)); # np[0] + &shr ("edx",31); + &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num]= + &mov (&DWP($frame+8,"esp",$j,4),"edx"); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ($num,$j); + &adc ("edx",0); + &mov ("eax",&DWP(4,$inp)); # np[1] + &mov ($j,1); + +&set_label("3rdmadd",16); + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(4,$inp,$j,4)); # np[j+1] + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j-1]= + + &mov ($carry,"edx"); + &mul ($word); # np[j+1]*m + &add ($carry,&DWP($frame+4,"esp",$j,4)); # +=tp[j+1] + &lea ($j,&DWP(2,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+2] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j]= + &jl (&label("3rdmadd")); + + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &adc ("edx",0); + &add ($carry,"eax"); + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]= + + &mov ($j,$_bp); # i + &xor ("eax","eax"); + &mov ($inp,$_ap); + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1] + &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]= + &cmp ($j,$num); + &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]= + &je (&label("common_tail")); + + &mov ($word,&DWP(4,$inp,$j,4)); # ap[i] + &lea ($j,&DWP(1,$j)); + &mov ("eax",$word); + &mov ($_bp,$j); # ++i + &mul ($word); # ap[i]*ap[i] + &add ("eax",&DWP($frame,"esp",$j,4)); # +=tp[i] + &adc ("edx",0); + &mov 
(&DWP($frame,"esp",$j,4),"eax"); # tp[i]= + &xor ($carry,$carry); + &cmp ($j,$num); + &lea ($j,&DWP(1,$j)); + &je (&label("sqrlast")); + + &mov ($sbit,"edx"); # zaps $num + &shr ("edx",1); + &and ($sbit,1); +&set_label("sqradd",16); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j] + &mov ($carry,"edx"); + &mul ($word); # ap[j]*ap[i] + &add ("eax",$carry); + &lea ($carry,&DWP(0,"eax","eax")); + &adc ("edx",0); + &shr ("eax",31); + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("eax",0); + &add ($carry,$sbit); + &adc ("eax",0); + &cmp ($j,$_num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &mov ($sbit,"eax"); + &jle (&label("sqradd")); + + &mov ($carry,"edx"); + &lea ("edx",&DWP(0,$sbit,"edx",2)); + &shr ($carry,31); +&set_label("sqrlast"); + &mov ($word,$_n0); + &mov ($inp,$_np); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &add ("edx",&DWP($frame,"esp",$j,4)); # +=tp[num] + &mov ("eax",&DWP(0,$inp)); # np[0] + &adc ($carry,0); + &mov (&DWP($frame,"esp",$j,4),"edx"); # tp[num]= + &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &lea ($num,&DWP(-1,$j)); + &adc ("edx",0); + &mov ($j,1); + &mov ("eax",&DWP(4,$inp)); # np[1] + + &jmp (&label("3rdmadd")); +} + +&set_label("common_tail",16); + &mov ($np,$_np); # load modulus pointer + &mov ($rp,$_rp); # load result pointer + &lea ($tp,&DWP($frame,"esp")); # [$ap and $bp are zapped] + + &mov ("eax",&DWP(0,$tp)); # tp[0] + &mov ($j,$num); # j=num-1 + &xor ($i,$i); # i=0 and clear CF! + +&set_label("sub",16); + &sbb ("eax",&DWP(0,$np,$i,4)); + &mov (&DWP(0,$rp,$i,4),"eax"); # rp[i]=tp[i]-np[i] + &dec ($j); # doesn't affect CF! + &mov ("eax",&DWP(4,$tp,$i,4)); # tp[i+1] + &lea ($i,&DWP(1,$i)); # i++ + &jge (&label("sub")); + + &sbb ("eax",0); # handle upmost overflow bit + &and ($tp,"eax"); + ¬ ("eax"); + &mov ($np,$rp); + &and ($np,"eax"); + &or ($tp,$np); # tp=carry?tp:rp + +&set_label("copy",16); # copy or in-place refresh + &mov ("eax",&DWP(0,$tp,$num,4)); + &mov (&DWP(0,$rp,$num,4),"eax"); # rp[i]=tp[i] + &mov (&DWP($frame,"esp",$num,4),$j); # zap temporary vector + &dec ($num); + &jge (&label("copy")); + + &mov ("esp",$_sp); # pull saved stack pointer + &mov ("eax",1); +&set_label("just_leave"); +&function_end("bn_mul_mont"); + +&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>"); + +&asm_finish(); diff --git a/openssl/crypto/bn/asm/x86.pl b/openssl/crypto/bn/asm/x86.pl new file mode 100644 index 000000000..1bc4f1bb2 --- /dev/null +++ b/openssl/crypto/bn/asm/x86.pl @@ -0,0 +1,28 @@ +#!/usr/local/bin/perl + +push(@INC,"perlasm","../../perlasm"); +require "x86asm.pl"; + +require("x86/mul_add.pl"); +require("x86/mul.pl"); +require("x86/sqr.pl"); +require("x86/div.pl"); +require("x86/add.pl"); +require("x86/sub.pl"); +require("x86/comba.pl"); + +&asm_init($ARGV[0],$0); + +&bn_mul_add_words("bn_mul_add_words"); +&bn_mul_words("bn_mul_words"); +&bn_sqr_words("bn_sqr_words"); +&bn_div_words("bn_div_words"); +&bn_add_words("bn_add_words"); +&bn_sub_words("bn_sub_words"); +&bn_mul_comba("bn_mul_comba8",8); +&bn_mul_comba("bn_mul_comba4",4); +&bn_sqr_comba("bn_sqr_comba8",8); +&bn_sqr_comba("bn_sqr_comba4",4); + +&asm_finish(); + diff --git a/openssl/crypto/bn/asm/x86/add.pl b/openssl/crypto/bn/asm/x86/add.pl new file mode 100644 index 000000000..0b5cf583e --- /dev/null +++ b/openssl/crypto/bn/asm/x86/add.pl @@ -0,0 +1,76 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_add_words + { + local($name)=@_; 
+ + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *a + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/comba.pl b/openssl/crypto/bn/asm/x86/comba.pl new file mode 100644 index 000000000..229125362 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/comba.pl @@ -0,0 +1,277 @@ +#!/usr/local/bin/perl +# x86 assember + +sub mul_add_c + { + local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("mul a[$ai]*b[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + &mul("edx"); + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # laod next a + &mov("eax",&wparam(0)) if $pos > 0; # load r[] + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0; # laod next b + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1; # laod next b + ### + &adc($c2,0); + # is pos > 1, it means it is the last loop + &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # laod next a + } + +sub sqr_add_c + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb); + ### + &adc($c2,0); + # is pos > 1, it means it is the last loop + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + } + +sub sqr_add_c2 + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. 
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$a,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add("eax","eax"); + ### + &adc("edx","edx"); + ### + &adc($c2,0); + &add($c0,"eax"); + &adc($c1,"edx"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + &adc($c2,0); + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb); + ### + } + +sub bn_mul_comba + { + local($name,$num)=@_; + local($a,$b,$c0,$c1,$c2); + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($tot,$end); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $b="edi"; + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + &push("esi"); + &mov($a,&wparam(1)); + &push("edi"); + &mov($b,&wparam(2)); + &push("ebp"); + &push("ebx"); + + &xor($c0,$c0); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + &xor($c1,$c1); + &mov("edx",&DWP(0,$b,"",0)); # load the first second + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("################## Calculate word $i"); + + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($j+1) == $end) + { + $v=1; + $v=2 if (($i+1) == $tot); + } + else + { $v=0; } + if (($j+1) != $end) + { + $na=($ai-1); + $nb=($bi+1); + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } +#printf STDERR "[$ai,$bi] -> [$na,$nb]\n"; + &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb); + if ($v) + { + &comment("saved r[$i]"); + # &mov("eax",&wparam(0)); + # &mov(&DWP($i*4,"eax","",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &comment("save r[$i]"); + # &mov("eax",&wparam(0)); + &mov(&DWP($i*4,"eax","",0),$c0); + + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + +sub bn_sqr_comba + { + local($name,$num)=@_; + local($r,$a,$c0,$c1,$c2)=@_; + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($b,$tot,$end,$half); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $r="edi"; + + &push("esi"); + &push("edi"); + &push("ebp"); + &push("ebx"); + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &xor($c0,$c0); + &xor($c1,$c1); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("############### Calculate word $i"); + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($ai-1) < ($bi+1)) + { + $v=1; + $v=2 if ($i+1) == $tot; + } + else + { $v=0; } + if (!$v) + { + $na=$ai-1; + $nb=$bi+1; + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } + if ($ai == $bi) + { + &sqr_add_c($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + else + { + &sqr_add_c2($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + if ($v) + { + &comment("saved r[$i]"); + #&mov(&DWP($i*4,$r,"",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + last; + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &mov(&DWP($i*4,$r,"",0),$c0); + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/div.pl 
b/openssl/crypto/bn/asm/x86/div.pl new file mode 100644 index 000000000..0e90152ca --- /dev/null +++ b/openssl/crypto/bn/asm/x86/div.pl @@ -0,0 +1,15 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_div_words + { + local($name)=@_; + + &function_begin($name,""); + &mov("edx",&wparam(0)); # + &mov("eax",&wparam(1)); # + &mov("ebx",&wparam(2)); # + &div("ebx"); + &function_end($name); + } +1; diff --git a/openssl/crypto/bn/asm/x86/f b/openssl/crypto/bn/asm/x86/f new file mode 100644 index 000000000..22e411222 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/f @@ -0,0 +1,3 @@ +#!/usr/local/bin/perl +# x86 assember + diff --git a/openssl/crypto/bn/asm/x86/mul.pl b/openssl/crypto/bn/asm/x86/mul.pl new file mode 100644 index 000000000..674cb9b05 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/mul.pl @@ -0,0 +1,77 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_mul_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ecx"; + $r="edi"; + $c="esi"; + $num="ebp"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + &mov($w,&wparam(3)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("mw_finish")); + + &set_label("mw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jz(&label("mw_finish")); + &jmp(&label("mw_loop")); + + &set_label("mw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jnz(&label("mw_finish2")); + &jmp(&label("mw_end")); + + &set_label("mw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t); + &mov($c,"edx"); # c= H(t); + &dec($num) if ($i != 7-1); + &jz(&label("mw_end")) if ($i != 7-1); + } + &set_label("mw_end",0); + &mov("eax",$c); + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/mul_add.pl b/openssl/crypto/bn/asm/x86/mul_add.pl new file mode 100644 index 000000000..61830d3a9 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/mul_add.pl @@ -0,0 +1,87 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_mul_add_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ebp"; + $r="edi"; + $c="esi"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + + &mov("ecx",&wparam(2)); # + &mov($a,&wparam(1)); # + + &and("ecx",0xfffffff8); # num / 8 + &mov($w,&wparam(3)); # + + &push("ecx"); # Up the stack for a tmp variable + + &jz(&label("maw_finish")); + + &set_label("maw_loop",0); + + &mov(&swtmp(0),"ecx"); # + + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+= *r + &mov($c,&DWP($i,$r,"",0)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &add("eax",$c); # L(t)+=c + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &mov("ecx",&swtmp(0)); # + &add($a,32); + &add($r,32); + &sub("ecx",8); + &jnz(&label("maw_loop")); + + &set_label("maw_finish",0); + &mov("ecx",&wparam(2)); # get num + &and("ecx",7); + 
&jnz(&label("maw_finish2")); # helps branch prediction + &jmp(&label("maw_end")); + + &set_label("maw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + &mov($c,&DWP($i*4,$r,"",0)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &add("eax",$c); + &adc("edx",0); # H(t)+=carry + &dec("ecx") if ($i != 7-1); + &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + &jz(&label("maw_end")) if ($i != 7-1); + } + &set_label("maw_end",0); + &mov("eax",$c); + + &pop("ecx"); # clear variable from + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/sqr.pl b/openssl/crypto/bn/asm/x86/sqr.pl new file mode 100644 index 000000000..1f90993cf --- /dev/null +++ b/openssl/crypto/bn/asm/x86/sqr.pl @@ -0,0 +1,60 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_sqr_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $r="esi"; + $a="edi"; + $num="ebx"; + + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("sw_finish")); + + &set_label("sw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + &mov("eax",&DWP($i,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*2,$r,"",0),"eax"); # + &mov(&DWP($i*2+4,$r,"",0),"edx");# + } + + &comment(""); + &add($a,32); + &add($r,64); + &sub($num,8); + &jnz(&label("sw_loop")); + + &set_label("sw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jz(&label("sw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*8,$r,"",0),"eax"); # + &dec($num) if ($i != 7-1); + &mov(&DWP($i*8+4,$r,"",0),"edx"); + &jz(&label("sw_end")) if ($i != 7-1); + } + &set_label("sw_end",0); + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/sub.pl b/openssl/crypto/bn/asm/x86/sub.pl new file mode 100644 index 000000000..837b0e1b0 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/sub.pl @@ -0,0 +1,76 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_sub_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *a + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86_64-gcc.c 
b/openssl/crypto/bn/asm/x86_64-gcc.c new file mode 100644 index 000000000..f13f52dd8 --- /dev/null +++ b/openssl/crypto/bn/asm/x86_64-gcc.c @@ -0,0 +1,597 @@ +#ifdef __SUNPRO_C +# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */ +#else +/* + * x86_64 BIGNUM accelerator version 0.1, December 2002. + * + * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL + * project. + * + * Rights for redistribution and usage in source and binary forms are + * granted according to the OpenSSL license. Warranty of any kind is + * disclaimed. + * + * Q. Version 0.1? It doesn't sound like Andy, he used to assign real + * versions, like 1.0... + * A. Well, that's because this code is basically a quick-n-dirty + * proof-of-concept hack. As you can see it's implemented with + * inline assembler, which means that you're bound to GCC and that + * there might be enough room for further improvement. + * + * Q. Why inline assembler? + * A. x86_64 features own ABI which I'm not familiar with. This is + * why I decided to let the compiler take care of subroutine + * prologue/epilogue as well as register allocation. For reference. + * Win64 implements different ABI for AMD64, different from Linux. + * + * Q. How much faster does it get? + * A. 'apps/openssl speed rsa dsa' output with no-asm: + * + * sign verify sign/s verify/s + * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2 + * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0 + * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8 + * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6 + * sign verify sign/s verify/s + * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3 + * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2 + * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0 + * + * 'apps/openssl speed rsa dsa' output with this module: + * + * sign verify sign/s verify/s + * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9 + * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7 + * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0 + * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8 + * sign verify sign/s verify/s + * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3 + * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4 + * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6 + * + * For the reference. IA-32 assembler implementation performs + * very much like 64-bit code compiled with no-asm on the same + * machine. 
+ */ + +#define BN_ULONG unsigned long + +/* + * "m"(a), "+m"(r) is the way to favor DirectPath µ-code; + * "g"(0) let the compiler to decide where does it + * want to keep the value of zero; + */ +#define mul_add(r,a,word,carry) do { \ + register BN_ULONG high,low; \ + asm ("mulq %3" \ + : "=a"(low),"=d"(high) \ + : "a"(word),"m"(a) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(carry),"+d"(high)\ + : "a"(low),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+m"(r),"+d"(high) \ + : "r"(carry),"g"(0) \ + : "cc"); \ + carry=high; \ + } while (0) + +#define mul(r,a,word,carry) do { \ + register BN_ULONG high,low; \ + asm ("mulq %3" \ + : "=a"(low),"=d"(high) \ + : "a"(word),"g"(a) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(carry),"+d"(high)\ + : "a"(low),"g"(0) \ + : "cc"); \ + (r)=carry, carry=high; \ + } while (0) + +#define sqr(r0,r1,a) \ + asm ("mulq %2" \ + : "=a"(r0),"=d"(r1) \ + : "a"(a) \ + : "cc"); + +BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) + { + BN_ULONG c1=0; + + if (num <= 0) return(c1); + + while (num&~3) + { + mul_add(rp[0],ap[0],w,c1); + mul_add(rp[1],ap[1],w,c1); + mul_add(rp[2],ap[2],w,c1); + mul_add(rp[3],ap[3],w,c1); + ap+=4; rp+=4; num-=4; + } + if (num) + { + mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1; + mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1; + mul_add(rp[2],ap[2],w,c1); return c1; + } + + return(c1); + } + +BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) + { + BN_ULONG c1=0; + + if (num <= 0) return(c1); + + while (num&~3) + { + mul(rp[0],ap[0],w,c1); + mul(rp[1],ap[1],w,c1); + mul(rp[2],ap[2],w,c1); + mul(rp[3],ap[3],w,c1); + ap+=4; rp+=4; num-=4; + } + if (num) + { + mul(rp[0],ap[0],w,c1); if (--num == 0) return c1; + mul(rp[1],ap[1],w,c1); if (--num == 0) return c1; + mul(rp[2],ap[2],w,c1); + } + return(c1); + } + +void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n) + { + if (n <= 0) return; + + while (n&~3) + { + sqr(r[0],r[1],a[0]); + sqr(r[2],r[3],a[1]); + sqr(r[4],r[5],a[2]); + sqr(r[6],r[7],a[3]); + a+=4; r+=8; n-=4; + } + if (n) + { + sqr(r[0],r[1],a[0]); if (--n == 0) return; + sqr(r[2],r[3],a[1]); if (--n == 0) return; + sqr(r[4],r[5],a[2]); + } + } + +BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) +{ BN_ULONG ret,waste; + + asm ("divq %4" + : "=a"(ret),"=d"(waste) + : "a"(l),"d"(h),"g"(d) + : "cc"); + + return ret; +} + +BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n) +{ BN_ULONG ret=0,i=0; + + if (n <= 0) return 0; + + asm ( + " subq %2,%2 \n" + ".align 16 \n" + "1: movq (%4,%2,8),%0 \n" + " adcq (%5,%2,8),%0 \n" + " movq %0,(%3,%2,8) \n" + " leaq 1(%2),%2 \n" + " loop 1b \n" + " sbbq %0,%0 \n" + : "=&a"(ret),"+c"(n),"=&r"(i) + : "r"(rp),"r"(ap),"r"(bp) + : "cc" + ); + + return ret&1; +} + +#ifndef SIMICS +BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n) +{ BN_ULONG ret=0,i=0; + + if (n <= 0) return 0; + + asm ( + " subq %2,%2 \n" + ".align 16 \n" + "1: movq (%4,%2,8),%0 \n" + " sbbq (%5,%2,8),%0 \n" + " movq %0,(%3,%2,8) \n" + " leaq 1(%2),%2 \n" + " loop 1b \n" + " sbbq %0,%0 \n" + : "=&a"(ret),"+c"(n),"=&r"(i) + : "r"(rp),"r"(ap),"r"(bp) + : "cc" + ); + + return ret&1; +} +#else +/* Simics 1.4<7 has buggy sbbq:-( */ +#define BN_MASK2 0xffffffffffffffffL +BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) + { + BN_ULONG t1,t2; + int c=0; + + if (n <= 0) return((BN_ULONG)0); + + for (;;) + { + t1=a[0]; t2=b[0]; + r[0]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if 
(--n <= 0) break; + + t1=a[1]; t2=b[1]; + r[1]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + t1=a[2]; t2=b[2]; + r[2]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + t1=a[3]; t2=b[3]; + r[3]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + a+=4; + b+=4; + r+=4; + } + return(c); + } +#endif + +/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */ +/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */ +/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */ +/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */ + +#if 0 +/* original macros are kept for reference purposes */ +#define mul_add_c(a,b,c0,c1,c2) { \ + BN_ULONG ta=(a),tb=(b); \ + t1 = ta * tb; \ + t2 = BN_UMULT_HIGH(ta,tb); \ + c0 += t1; t2 += (c0<t1)?1:0; \ + c1 += t2; c2 += (c1<t2)?1:0; \ + } + +#define mul_add_c2(a,b,c0,c1,c2) { \ + BN_ULONG ta=(a),tb=(b),t0; \ + t1 = BN_UMULT_HIGH(ta,tb); \ + t0 = ta * tb; \ + t2 = t1+t1; c2 += (t2<t1)?1:0; \ + t1 = t0+t0; t2 += (t1<t0)?1:0; \ + c0 += t1; t2 += (c0<t1)?1:0; \ + c1 += t2; c2 += (c1<t2)?1:0; \ + } +#else +#define mul_add_c(a,b,c0,c1,c2) do { \ + asm ("mulq %3" \ + : "=a"(t1),"=d"(t2) \ + : "a"(a),"m"(b) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c0),"+d"(t2) \ + : "a"(t1),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c1),"+r"(c2) \ + : "d"(t2),"g"(0) \ + : "cc"); \ + } while (0) + +#define sqr_add_c(a,i,c0,c1,c2) do { \ + asm ("mulq %2" \ + : "=a"(t1),"=d"(t2) \ + : "a"(a[i]) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c0),"+d"(t2) \ + : "a"(t1),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c1),"+r"(c2) \ + : "d"(t2),"g"(0) \ + : "cc"); \ + } while (0) + +#define mul_add_c2(a,b,c0,c1,c2) do { \ + asm ("mulq %3" \ + : "=a"(t1),"=d"(t2) \ + : "a"(a),"m"(b) \ + : "cc"); \ + asm ("addq %0,%0; adcq %2,%1" \ + : "+d"(t2),"+r"(c2) \ + : "g"(0) \ + : "cc"); \ + asm ("addq %0,%0; adcq %2,%1" \ + : "+a"(t1),"+d"(t2) \ + : "g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c0),"+d"(t2) \ + : "a"(t1),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c1),"+r"(c2) \ + : "d"(t2),"g"(0) \ + : "cc"); \ + } while (0) +#endif + +#define sqr_add_c2(a,i,j,c0,c1,c2) \ + mul_add_c2((a)[i],(a)[j],c0,c1,c2) + +void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + mul_add_c(a[0],b[0],c1,c2,c3); + r[0]=c1; + c1=0; + mul_add_c(a[0],b[1],c2,c3,c1); + mul_add_c(a[1],b[0],c2,c3,c1); + r[1]=c2; + c2=0; + mul_add_c(a[2],b[0],c3,c1,c2); + mul_add_c(a[1],b[1],c3,c1,c2); + mul_add_c(a[0],b[2],c3,c1,c2); + r[2]=c3; + c3=0; + mul_add_c(a[0],b[3],c1,c2,c3); + mul_add_c(a[1],b[2],c1,c2,c3); + mul_add_c(a[2],b[1],c1,c2,c3); + mul_add_c(a[3],b[0],c1,c2,c3); + r[3]=c1; + c1=0; + mul_add_c(a[4],b[0],c2,c3,c1); + mul_add_c(a[3],b[1],c2,c3,c1); + mul_add_c(a[2],b[2],c2,c3,c1); + mul_add_c(a[1],b[3],c2,c3,c1); + mul_add_c(a[0],b[4],c2,c3,c1); + r[4]=c2; + c2=0; + mul_add_c(a[0],b[5],c3,c1,c2); + mul_add_c(a[1],b[4],c3,c1,c2); + mul_add_c(a[2],b[3],c3,c1,c2); + mul_add_c(a[3],b[2],c3,c1,c2); + mul_add_c(a[4],b[1],c3,c1,c2); + mul_add_c(a[5],b[0],c3,c1,c2); + r[5]=c3; + c3=0; + mul_add_c(a[6],b[0],c1,c2,c3); + mul_add_c(a[5],b[1],c1,c2,c3); + mul_add_c(a[4],b[2],c1,c2,c3); + mul_add_c(a[3],b[3],c1,c2,c3); + mul_add_c(a[2],b[4],c1,c2,c3); + mul_add_c(a[1],b[5],c1,c2,c3); + 
mul_add_c(a[0],b[6],c1,c2,c3); + r[6]=c1; + c1=0; + mul_add_c(a[0],b[7],c2,c3,c1); + mul_add_c(a[1],b[6],c2,c3,c1); + mul_add_c(a[2],b[5],c2,c3,c1); + mul_add_c(a[3],b[4],c2,c3,c1); + mul_add_c(a[4],b[3],c2,c3,c1); + mul_add_c(a[5],b[2],c2,c3,c1); + mul_add_c(a[6],b[1],c2,c3,c1); + mul_add_c(a[7],b[0],c2,c3,c1); + r[7]=c2; + c2=0; + mul_add_c(a[7],b[1],c3,c1,c2); + mul_add_c(a[6],b[2],c3,c1,c2); + mul_add_c(a[5],b[3],c3,c1,c2); + mul_add_c(a[4],b[4],c3,c1,c2); + mul_add_c(a[3],b[5],c3,c1,c2); + mul_add_c(a[2],b[6],c3,c1,c2); + mul_add_c(a[1],b[7],c3,c1,c2); + r[8]=c3; + c3=0; + mul_add_c(a[2],b[7],c1,c2,c3); + mul_add_c(a[3],b[6],c1,c2,c3); + mul_add_c(a[4],b[5],c1,c2,c3); + mul_add_c(a[5],b[4],c1,c2,c3); + mul_add_c(a[6],b[3],c1,c2,c3); + mul_add_c(a[7],b[2],c1,c2,c3); + r[9]=c1; + c1=0; + mul_add_c(a[7],b[3],c2,c3,c1); + mul_add_c(a[6],b[4],c2,c3,c1); + mul_add_c(a[5],b[5],c2,c3,c1); + mul_add_c(a[4],b[6],c2,c3,c1); + mul_add_c(a[3],b[7],c2,c3,c1); + r[10]=c2; + c2=0; + mul_add_c(a[4],b[7],c3,c1,c2); + mul_add_c(a[5],b[6],c3,c1,c2); + mul_add_c(a[6],b[5],c3,c1,c2); + mul_add_c(a[7],b[4],c3,c1,c2); + r[11]=c3; + c3=0; + mul_add_c(a[7],b[5],c1,c2,c3); + mul_add_c(a[6],b[6],c1,c2,c3); + mul_add_c(a[5],b[7],c1,c2,c3); + r[12]=c1; + c1=0; + mul_add_c(a[6],b[7],c2,c3,c1); + mul_add_c(a[7],b[6],c2,c3,c1); + r[13]=c2; + c2=0; + mul_add_c(a[7],b[7],c3,c1,c2); + r[14]=c3; + r[15]=c1; + } + +void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + mul_add_c(a[0],b[0],c1,c2,c3); + r[0]=c1; + c1=0; + mul_add_c(a[0],b[1],c2,c3,c1); + mul_add_c(a[1],b[0],c2,c3,c1); + r[1]=c2; + c2=0; + mul_add_c(a[2],b[0],c3,c1,c2); + mul_add_c(a[1],b[1],c3,c1,c2); + mul_add_c(a[0],b[2],c3,c1,c2); + r[2]=c3; + c3=0; + mul_add_c(a[0],b[3],c1,c2,c3); + mul_add_c(a[1],b[2],c1,c2,c3); + mul_add_c(a[2],b[1],c1,c2,c3); + mul_add_c(a[3],b[0],c1,c2,c3); + r[3]=c1; + c1=0; + mul_add_c(a[3],b[1],c2,c3,c1); + mul_add_c(a[2],b[2],c2,c3,c1); + mul_add_c(a[1],b[3],c2,c3,c1); + r[4]=c2; + c2=0; + mul_add_c(a[2],b[3],c3,c1,c2); + mul_add_c(a[3],b[2],c3,c1,c2); + r[5]=c3; + c3=0; + mul_add_c(a[3],b[3],c1,c2,c3); + r[6]=c1; + r[7]=c2; + } + +void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + sqr_add_c(a,0,c1,c2,c3); + r[0]=c1; + c1=0; + sqr_add_c2(a,1,0,c2,c3,c1); + r[1]=c2; + c2=0; + sqr_add_c(a,1,c3,c1,c2); + sqr_add_c2(a,2,0,c3,c1,c2); + r[2]=c3; + c3=0; + sqr_add_c2(a,3,0,c1,c2,c3); + sqr_add_c2(a,2,1,c1,c2,c3); + r[3]=c1; + c1=0; + sqr_add_c(a,2,c2,c3,c1); + sqr_add_c2(a,3,1,c2,c3,c1); + sqr_add_c2(a,4,0,c2,c3,c1); + r[4]=c2; + c2=0; + sqr_add_c2(a,5,0,c3,c1,c2); + sqr_add_c2(a,4,1,c3,c1,c2); + sqr_add_c2(a,3,2,c3,c1,c2); + r[5]=c3; + c3=0; + sqr_add_c(a,3,c1,c2,c3); + sqr_add_c2(a,4,2,c1,c2,c3); + sqr_add_c2(a,5,1,c1,c2,c3); + sqr_add_c2(a,6,0,c1,c2,c3); + r[6]=c1; + c1=0; + sqr_add_c2(a,7,0,c2,c3,c1); + sqr_add_c2(a,6,1,c2,c3,c1); + sqr_add_c2(a,5,2,c2,c3,c1); + sqr_add_c2(a,4,3,c2,c3,c1); + r[7]=c2; + c2=0; + sqr_add_c(a,4,c3,c1,c2); + sqr_add_c2(a,5,3,c3,c1,c2); + sqr_add_c2(a,6,2,c3,c1,c2); + sqr_add_c2(a,7,1,c3,c1,c2); + r[8]=c3; + c3=0; + sqr_add_c2(a,7,2,c1,c2,c3); + sqr_add_c2(a,6,3,c1,c2,c3); + sqr_add_c2(a,5,4,c1,c2,c3); + r[9]=c1; + c1=0; + sqr_add_c(a,5,c2,c3,c1); + sqr_add_c2(a,6,4,c2,c3,c1); + sqr_add_c2(a,7,3,c2,c3,c1); + r[10]=c2; + c2=0; + sqr_add_c2(a,7,4,c3,c1,c2); + sqr_add_c2(a,6,5,c3,c1,c2); + r[11]=c3; + c3=0; + sqr_add_c(a,6,c1,c2,c3); + sqr_add_c2(a,7,5,c1,c2,c3); + 
r[12]=c1; + c1=0; + sqr_add_c2(a,7,6,c2,c3,c1); + r[13]=c2; + c2=0; + sqr_add_c(a,7,c3,c1,c2); + r[14]=c3; + r[15]=c1; + } + +void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + sqr_add_c(a,0,c1,c2,c3); + r[0]=c1; + c1=0; + sqr_add_c2(a,1,0,c2,c3,c1); + r[1]=c2; + c2=0; + sqr_add_c(a,1,c3,c1,c2); + sqr_add_c2(a,2,0,c3,c1,c2); + r[2]=c3; + c3=0; + sqr_add_c2(a,3,0,c1,c2,c3); + sqr_add_c2(a,2,1,c1,c2,c3); + r[3]=c1; + c1=0; + sqr_add_c(a,2,c2,c3,c1); + sqr_add_c2(a,3,1,c2,c3,c1); + r[4]=c2; + c2=0; + sqr_add_c2(a,3,2,c3,c1,c2); + r[5]=c3; + c3=0; + sqr_add_c(a,3,c1,c2,c3); + r[6]=c1; + r[7]=c2; + } +#endif diff --git a/openssl/crypto/bn/asm/x86_64-mont.pl b/openssl/crypto/bn/asm/x86_64-mont.pl new file mode 100644 index 000000000..c43b69592 --- /dev/null +++ b/openssl/crypto/bn/asm/x86_64-mont.pl @@ -0,0 +1,214 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005. +# +# Montgomery multiplication routine for x86_64. While it gives modest +# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more +# than twice, >2x, as fast. Most common rsa1024 sign is improved by +# respectful 50%. It remains to be seen if loop unrolling and +# dedicated squaring routine can provide further improvement... + +$output=shift; + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or +die "can't locate x86_64-xlate.pl"; + +open STDOUT,"| $^X $xlate $output"; + +# int bn_mul_mont( +$rp="%rdi"; # BN_ULONG *rp, +$ap="%rsi"; # const BN_ULONG *ap, +$bp="%rdx"; # const BN_ULONG *bp, +$np="%rcx"; # const BN_ULONG *np, +$n0="%r8"; # const BN_ULONG *n0, +$num="%r9"; # int num); +$lo0="%r10"; +$hi0="%r11"; +$bp="%r12"; # reassign $bp +$hi1="%r13"; +$i="%r14"; +$j="%r15"; +$m0="%rbx"; +$m1="%rbp"; + +$code=<<___; +.text + +.globl bn_mul_mont +.type bn_mul_mont,\@function,6 +.align 16 +bn_mul_mont: + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + + mov ${num}d,${num}d + lea 2($num),%rax + mov %rsp,%rbp + neg %rax + lea (%rsp,%rax,8),%rsp # tp=alloca(8*(num+2)) + and \$-1024,%rsp # minimize TLB usage + + mov %rbp,8(%rsp,$num,8) # tp[num+1]=%rsp + mov %rdx,$bp # $bp reassigned, remember? 
+ + mov ($n0),$n0 # pull n0[0] value + + xor $i,$i # i=0 + xor $j,$j # j=0 + + mov ($bp),$m0 # m0=bp[0] + mov ($ap),%rax + mulq $m0 # ap[0]*bp[0] + mov %rax,$lo0 + mov %rdx,$hi0 + + imulq $n0,%rax # "tp[0]"*n0 + mov %rax,$m1 + + mulq ($np) # np[0]*m1 + add $lo0,%rax # discarded + adc \$0,%rdx + mov %rdx,$hi1 + + lea 1($j),$j # j++ +.L1st: + mov ($ap,$j,8),%rax + mulq $m0 # ap[j]*bp[0] + add $hi0,%rax + adc \$0,%rdx + mov %rax,$lo0 + mov ($np,$j,8),%rax + mov %rdx,$hi0 + + mulq $m1 # np[j]*m1 + add $hi1,%rax + lea 1($j),$j # j++ + adc \$0,%rdx + add $lo0,%rax # np[j]*m1+ap[j]*bp[0] + adc \$0,%rdx + mov %rax,-16(%rsp,$j,8) # tp[j-1] + cmp $num,$j + mov %rdx,$hi1 + jl .L1st + + xor %rdx,%rdx + add $hi0,$hi1 + adc \$0,%rdx + mov $hi1,-8(%rsp,$num,8) + mov %rdx,(%rsp,$num,8) # store upmost overflow bit + + lea 1($i),$i # i++ +.align 4 +.Louter: + xor $j,$j # j=0 + + mov ($bp,$i,8),$m0 # m0=bp[i] + mov ($ap),%rax # ap[0] + mulq $m0 # ap[0]*bp[i] + add (%rsp),%rax # ap[0]*bp[i]+tp[0] + adc \$0,%rdx + mov %rax,$lo0 + mov %rdx,$hi0 + + imulq $n0,%rax # tp[0]*n0 + mov %rax,$m1 + + mulq ($np,$j,8) # np[0]*m1 + add $lo0,%rax # discarded + mov 8(%rsp),$lo0 # tp[1] + adc \$0,%rdx + mov %rdx,$hi1 + + lea 1($j),$j # j++ +.align 4 +.Linner: + mov ($ap,$j,8),%rax + mulq $m0 # ap[j]*bp[i] + add $hi0,%rax + adc \$0,%rdx + add %rax,$lo0 # ap[j]*bp[i]+tp[j] + mov ($np,$j,8),%rax + adc \$0,%rdx + mov %rdx,$hi0 + + mulq $m1 # np[j]*m1 + add $hi1,%rax + lea 1($j),$j # j++ + adc \$0,%rdx + add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j] + adc \$0,%rdx + mov (%rsp,$j,8),$lo0 + cmp $num,$j + mov %rax,-16(%rsp,$j,8) # tp[j-1] + mov %rdx,$hi1 + jl .Linner + + xor %rdx,%rdx + add $hi0,$hi1 + adc \$0,%rdx + add $lo0,$hi1 # pull upmost overflow bit + adc \$0,%rdx + mov $hi1,-8(%rsp,$num,8) + mov %rdx,(%rsp,$num,8) # store upmost overflow bit + + lea 1($i),$i # i++ + cmp $num,$i + jl .Louter + + lea (%rsp),$ap # borrow ap for tp + lea -1($num),$j # j=num-1 + + mov ($ap),%rax # tp[0] + xor $i,$i # i=0 and clear CF! + jmp .Lsub +.align 16 +.Lsub: sbb ($np,$i,8),%rax + mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] + dec $j # doesn't affect CF! + mov 8($ap,$i,8),%rax # tp[i+1] + lea 1($i),$i # i++ + jge .Lsub + + sbb \$0,%rax # handle upmost overflow bit + and %rax,$ap + not %rax + mov $rp,$np + and %rax,$np + lea -1($num),$j + or $np,$ap # ap=borrow?tp:rp +.align 16 +.Lcopy: # copy or in-place refresh + mov ($ap,$j,8),%rax + mov %rax,($rp,$j,8) # rp[i]=tp[i] + mov $i,(%rsp,$j,8) # zap temporary vector + dec $j + jge .Lcopy + + mov 8(%rsp,$num,8),%rsp # restore %rsp + mov \$1,%rax + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + ret +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>" +___ + +print $code; +close STDOUT; |
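A note on the accumulator used by the comba routines in the preceding inline-assembler C hunk: bn_mul_comba8/4 and bn_sqr_comba8/4 add every 64x64->128-bit partial product into a three-word counter (c2,c1,c0), and the mulq/addq/adcq macros are hand-scheduled versions of the reference macros kept under "#if 0". Below is a minimal portable sketch of that accumulator, assuming a 64-bit BN_ULONG and a compiler that provides unsigned __int128 (gcc/clang); the name mul_add_3 and the bn_word typedef are illustrative, not taken from the diff.

/*
 * (c2,c1,c0) += a*b for a three-word accumulator.
 * Sketch only; stands in for the mul_add_c macro above.
 */
#include <stdint.h>

typedef uint64_t bn_word;                    /* stands in for BN_ULONG */

static void mul_add_3(bn_word a, bn_word b,
                      bn_word *c0, bn_word *c1, bn_word *c2)
{
    unsigned __int128 t = (unsigned __int128)a * b;
    bn_word lo = (bn_word)t;                 /* low 64 bits of a*b  */
    bn_word hi = (bn_word)(t >> 64);         /* high 64 bits of a*b */

    *c0 += lo;
    hi  += (*c0 < lo);                       /* carry out of c0; hi <= 2^64-2, so this cannot wrap */
    *c1 += hi;
    *c2 += (*c1 < hi);                       /* carry out of c1 */
}

mul_add_c2 adds the same product twice (equivalently, the accumulator above called twice), which is what sqr_add_c2 uses for the symmetric a[i]*a[j] terms of a square; adding twice keeps every carry, whereas pre-doubling the 128-bit product has to push an extra carry straight into c2, as the reference macro does. After each column r[k] is stored, the three counters rotate roles, which is why the argument order cycles through (c1,c2,c3), (c2,c3,c1), (c3,c1,c2) in the comba bodies.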
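x86_64-mont.pl above generates bn_mul_mont, a word-serial Montgomery multiplication: for every word bp[i] it adds ap*bp[i] into a running temporary, derives m1 = tp[0]*n0 mod 2^64 so that adding np*m1 clears the low word, shifts the temporary down one word, and finally subtracts np once if the result is still too large. The C sketch below describes that algorithm, not the generated assembly: it assumes 64-bit words, unsigned __int128, inputs already reduced below np, num >= 1, and a caller-supplied temporary of num+1 words (the real routine allocates its scratch space on the stack, takes n0 as a pointer, and returns an int); all names are illustrative.

/*
 * rp = ap*bp*R^-1 mod np, with R = 2^(64*num).
 * n0 is the value -np[0]^-1 mod 2^64.  Sketch only.
 */
#include <stdint.h>
#include <string.h>

typedef uint64_t bn_word;

static void mont_mul_sketch(bn_word *rp, const bn_word *ap, const bn_word *bp,
                            const bn_word *np, bn_word n0,
                            int num, bn_word *tp /* num+1 words */)
{
    memset(tp, 0, ((size_t)num + 1) * sizeof(bn_word));

    for (int i = 0; i < num; i++) {
        /* tp += ap * bp[i] */
        unsigned __int128 acc = 0;
        for (int j = 0; j < num; j++) {
            acc += (unsigned __int128)ap[j] * bp[i] + tp[j];
            tp[j] = (bn_word)acc;
            acc >>= 64;
        }
        unsigned __int128 top = acc + tp[num];

        /* m1 is chosen so that tp + np*m1 is divisible by 2^64 */
        bn_word m1 = tp[0] * n0;

        /* tp = (tp + np*m1) >> 64 */
        acc = ((unsigned __int128)np[0] * m1 + tp[0]) >> 64;  /* low word is 0 by construction */
        for (int j = 1; j < num; j++) {
            acc += (unsigned __int128)np[j] * m1 + tp[j];
            tp[j - 1] = (bn_word)acc;
            acc >>= 64;
        }
        top += acc;
        tp[num - 1] = (bn_word)top;
        tp[num]     = (bn_word)(top >> 64);      /* 0 or 1: the "upmost overflow bit" */
    }

    /* tp < 2*np here, so a single conditional subtraction finishes the job */
    bn_word borrow = 0;
    for (int j = 0; j < num; j++) {
        unsigned __int128 d = (unsigned __int128)tp[j] - np[j] - borrow;
        rp[j] = (bn_word)d;
        borrow = (bn_word)((d >> 64) & 1);       /* 1 iff the subtraction borrowed */
    }
    if (tp[num] < borrow)                        /* tp was already below np */
        memcpy(rp, tp, (size_t)num * sizeof(bn_word));
}

The two inner loops correspond to .L1st and .Linner in the assembly, which interleave the ap[j]*bp[i] and np[j]*m1 multiplications for each j so that one mulq's latency hides behind the other; the extra word tp[num] is the overflow bit the assembly stores at (%rsp,$num,8).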
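The tail of bn_mul_mont avoids a data-dependent compare-and-branch when deciding whether the subtraction done in .Lsub should stand: it folds the final borrow and the overflow word into a single mask and uses the and/not/or sequence to choose which of the two buffers .Lcopy reads from. Below is a self-contained sketch of the same mask-select idea, applied to the words themselves rather than to the source pointer as the assembly does; the function and parameter names are illustrative.

#include <stdint.h>
#include <stddef.h>

/*
 * Copy src over dst only when use_src is nonzero, without branching
 * on use_src: mask is 0 or all-ones.  Sketch, not code from the diff.
 */
static void masked_copy(uint64_t *dst, const uint64_t *src,
                        size_t n, uint64_t use_src)
{
    uint64_t mask = (uint64_t)0 - (uint64_t)(use_src != 0);

    for (size_t j = 0; j < n; j++)
        dst[j] = (src[j] & mask) | (dst[j] & ~mask);
}

In bn_mul_mont the selected buffer is then copied into rp while .Lcopy also overwrites the stack temporary, so the intermediate product does not linger on the stack after the call returns.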