Diffstat (limited to 'openssl/crypto/bn')
-rw-r--r--  openssl/crypto/bn/Makefile                     34
-rw-r--r--  openssl/crypto/bn/asm/armv4-gf2m.pl           278
-rw-r--r--  openssl/crypto/bn/asm/armv4-mont.pl            23
-rw-r--r--  openssl/crypto/bn/asm/ia64-mont.pl            851
-rw-r--r--  openssl/crypto/bn/asm/mips-mont.pl            426
-rw-r--r--  openssl/crypto/bn/asm/mips.pl                2585
-rw-r--r--  openssl/crypto/bn/asm/modexp512-x86_64.pl    1496
-rw-r--r--  openssl/crypto/bn/asm/parisc-mont.pl          993
-rw-r--r--  openssl/crypto/bn/asm/ppc-mont.pl             107
-rw-r--r--  openssl/crypto/bn/asm/ppc.pl                   43
-rw-r--r--  openssl/crypto/bn/asm/ppc64-mont.pl           338
-rw-r--r--  openssl/crypto/bn/asm/s390x-gf2m.pl           221
-rw-r--r--  openssl/crypto/bn/asm/s390x-mont.pl           102
-rw-r--r--  openssl/crypto/bn/asm/x86-gf2m.pl             313
-rw-r--r--  openssl/crypto/bn/asm/x86_64-gf2m.pl          389
-rw-r--r--  openssl/crypto/bn/asm/x86_64-mont.pl         1486
-rw-r--r--  openssl/crypto/bn/asm/x86_64-mont5.pl        1070
-rw-r--r--  openssl/crypto/bn/bn.h                         15
-rw-r--r--  openssl/crypto/bn/bn_div.c                    272
-rw-r--r--  openssl/crypto/bn/bn_exp.c                    240
-rw-r--r--  openssl/crypto/bn/bn_gf2m.c                   111
-rw-r--r--  openssl/crypto/bn/bn_lcl.h                     23
-rw-r--r--  openssl/crypto/bn/bn_lib.c                     19
-rw-r--r--  openssl/crypto/bn/bn_mont.c                   116
-rw-r--r--  openssl/crypto/bn/bn_nist.c                   338
-rw-r--r--  openssl/crypto/bn/bn_print.c                   19
-rw-r--r--  openssl/crypto/bn/bn_shift.c                   27
-rw-r--r--  openssl/crypto/bn/bn_x931p.c                  272
-rw-r--r--  openssl/crypto/bn/bntest.c                      8
29 files changed, 11476 insertions, 739 deletions
diff --git a/openssl/crypto/bn/Makefile b/openssl/crypto/bn/Makefile
index aabc4f56b..672773454 100644
--- a/openssl/crypto/bn/Makefile
+++ b/openssl/crypto/bn/Makefile
@@ -26,13 +26,13 @@ LIBSRC= bn_add.c bn_div.c bn_exp.c bn_lib.c bn_ctx.c bn_mul.c bn_mod.c \
bn_print.c bn_rand.c bn_shift.c bn_word.c bn_blind.c \
bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_err.c bn_sqr.c bn_asm.c \
bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \
- bn_depr.c bn_const.c
+ bn_depr.c bn_const.c bn_x931p.c
LIBOBJ= bn_add.o bn_div.o bn_exp.o bn_lib.o bn_ctx.o bn_mul.o bn_mod.o \
bn_print.o bn_rand.o bn_shift.o bn_word.o bn_blind.o \
bn_kron.o bn_sqrt.o bn_gcd.o bn_prime.o bn_err.o bn_sqr.o $(BN_ASM) \
bn_recp.o bn_mont.o bn_mpi.o bn_exp2.o bn_gf2m.o bn_nist.o \
- bn_depr.o bn_const.o
+ bn_depr.o bn_const.o bn_x931p.o
SRC= $(LIBSRC)
@@ -66,6 +66,8 @@ co-586.s: asm/co-586.pl ../perlasm/x86asm.pl
$(PERL) asm/co-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
x86-mont.s: asm/x86-mont.pl ../perlasm/x86asm.pl
$(PERL) asm/x86-mont.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
+x86-gf2m.s: asm/x86-gf2m.pl ../perlasm/x86asm.pl
+ $(PERL) asm/x86-gf2m.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
sparcv8.o: asm/sparcv8.S
$(CC) $(CFLAGS) -c asm/sparcv8.S
@@ -82,16 +84,31 @@ bn-mips3.o: asm/mips3.s
as -$$ABI -O -o $@ asm/mips3.s; \
else $(CC) -c $(CFLAGS) -o $@ asm/mips3.s; fi
+bn-mips.s: asm/mips.pl
+ $(PERL) asm/mips.pl $(PERLASM_SCHEME) $@
+mips-mont.s: asm/mips-mont.pl
+ $(PERL) asm/mips-mont.pl $(PERLASM_SCHEME) $@
+
bn-s390x.o: asm/s390x.S
$(CC) $(CFLAGS) -c -o $@ asm/s390x.S
+s390x-gf2m.s: asm/s390x-gf2m.pl
+ $(PERL) asm/s390x-gf2m.pl $(PERLASM_SCHEME) $@
x86_64-gcc.o: asm/x86_64-gcc.c
$(CC) $(CFLAGS) -c -o $@ asm/x86_64-gcc.c
x86_64-mont.s: asm/x86_64-mont.pl
$(PERL) asm/x86_64-mont.pl $(PERLASM_SCHEME) > $@
+x86_64-mont5.s: asm/x86_64-mont5.pl
+ $(PERL) asm/x86_64-mont5.pl $(PERLASM_SCHEME) > $@
+x86_64-gf2m.s: asm/x86_64-gf2m.pl
+ $(PERL) asm/x86_64-gf2m.pl $(PERLASM_SCHEME) > $@
+modexp512-x86_64.s: asm/modexp512-x86_64.pl
+ $(PERL) asm/modexp512-x86_64.pl $(PERLASM_SCHEME) > $@
bn-ia64.s: asm/ia64.S
$(CC) $(CFLAGS) -E asm/ia64.S > $@
+ia64-mont.s: asm/ia64-mont.pl
+ $(PERL) asm/ia64-mont.pl $@ $(CFLAGS)
# GNU assembler fails to compile PA-RISC2 modules, insist on calling
# vendor assembler...
@@ -99,16 +116,22 @@ pa-risc2W.o: asm/pa-risc2W.s
/usr/ccs/bin/as -o pa-risc2W.o asm/pa-risc2W.s
pa-risc2.o: asm/pa-risc2.s
/usr/ccs/bin/as -o pa-risc2.o asm/pa-risc2.s
+parisc-mont.s: asm/parisc-mont.pl
+ $(PERL) asm/parisc-mont.pl $(PERLASM_SCHEME) $@
# ppc - AIX, Linux, MacOS X...
bn-ppc.s: asm/ppc.pl; $(PERL) asm/ppc.pl $(PERLASM_SCHEME) $@
ppc-mont.s: asm/ppc-mont.pl;$(PERL) asm/ppc-mont.pl $(PERLASM_SCHEME) $@
+ppc64-mont.s: asm/ppc64-mont.pl;$(PERL) asm/ppc64-mont.pl $(PERLASM_SCHEME) $@
alpha-mont.s: asm/alpha-mont.pl
$(PERL) $< | $(CC) -E - | tee $@ > /dev/null
# GNU make "catch all"
-%-mont.s: asm/%-mont.pl; $(PERL) $< $(CFLAGS) > $@
+%-mont.s: asm/%-mont.pl; $(PERL) $< $(PERLASM_SCHEME) $@
+%-gf2m.S: asm/%-gf2m.pl; $(PERL) $< $(PERLASM_SCHEME) $@
+
+armv4-gf2m.o: armv4-gf2m.S
files:
$(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
@@ -345,3 +368,8 @@ bn_word.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h
bn_word.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
bn_word.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
bn_word.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_word.c
+bn_x931p.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h
+bn_x931p.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h
+bn_x931p.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
+bn_x931p.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
+bn_x931p.o: ../../include/openssl/symhacks.h bn_x931p.c
diff --git a/openssl/crypto/bn/asm/armv4-gf2m.pl b/openssl/crypto/bn/asm/armv4-gf2m.pl
new file mode 100644
index 000000000..c52e0b75b
--- /dev/null
+++ b/openssl/crypto/bn/asm/armv4-gf2m.pl
@@ -0,0 +1,278 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication
+# used in bn_gf2m.c. It's kind of low-hanging mechanical port from
+# C for the time being... Except that it has two code paths: pure
+# integer code suitable for any ARMv4 and later CPU and NEON code
+# suitable for ARMv7. Pure integer 1x1 multiplication subroutine runs
+# in ~45 cycles on dual-issue core such as Cortex A8, which is ~50%
+# faster than compiler-generated code. For ECDH and ECDSA verify (but
+# not for ECDSA sign) it means 25%-45% improvement depending on key
+# length, more for longer keys. Even though NEON 1x1 multiplication
+# runs in even fewer cycles, ~30, improvement is measurable only on
+# longer keys. One has to optimize code elsewhere to get NEON glow...
+
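
To make the table-driven trick described above concrete, here is a minimal C sketch of a 32x32->64-bit carry-less (GF(2)[x]) multiplication built around the same 3-bit-window tab[8] idea. The name clmul32 and the 64-bit temporaries are illustrative, not OpenSSL symbols; the ARM routine below keeps everything in 32-bit registers, which is why it masks the top two bits of $a and folds them back in separately.

    #include <stdint.h>

    /* illustrative sketch of the tab[8] approach, not the OpenSSL routine */
    static uint64_t clmul32(uint32_t a, uint32_t b)
    {
        uint64_t tab[8], r = 0;
        int i;

        tab[0] = 0;                     /* tab[k] = a times the 3-bit polynomial k */
        tab[1] = a;
        tab[2] = (uint64_t)a << 1;
        tab[3] = tab[1] ^ tab[2];
        tab[4] = (uint64_t)a << 2;
        tab[5] = tab[1] ^ tab[4];
        tab[6] = tab[2] ^ tab[4];
        tab[7] = tab[3] ^ tab[4];

        for (i = 0; i < 32; i += 3)     /* scan b three bits at a time */
            r ^= tab[(b >> i) & 7] << i;

        return r;
    }
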
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }
+
+$code=<<___;
+#include "arm_arch.h"
+
+.text
+.code 32
+
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type mul_1x1_neon,%function
+.align 5
+mul_1x1_neon:
+ vshl.u64 `&Dlo("q1")`,d16,#8 @ q1-q3 are slided $a
+ vmull.p8 `&Q("d0")`,d16,d17 @ a·bb
+ vshl.u64 `&Dlo("q2")`,d16,#16
+ vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb
+ vshl.u64 `&Dlo("q3")`,d16,#24
+ vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb
+ vshr.u64 `&Dlo("q1")`,#8
+ vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb
+ vshl.u64 `&Dhi("q1")`,#24
+ veor d0,`&Dlo("q1")`
+ vshr.u64 `&Dlo("q2")`,#16
+ veor d0,`&Dhi("q1")`
+ vshl.u64 `&Dhi("q2")`,#16
+ veor d0,`&Dlo("q2")`
+ vshr.u64 `&Dlo("q3")`,#24
+ veor d0,`&Dhi("q2")`
+ vshl.u64 `&Dhi("q3")`,#8
+ veor d0,`&Dlo("q3")`
+ veor d0,`&Dhi("q3")`
+ bx lr
+.size mul_1x1_neon,.-mul_1x1_neon
+#endif
+___
+################
+# private interface to mul_1x1_ialu
+#
+$a="r1";
+$b="r0";
+
+($a0,$a1,$a2,$a12,$a4,$a14)=
+($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
+
+$mask="r12";
+
+$code.=<<___;
+.type mul_1x1_ialu,%function
+.align 5
+mul_1x1_ialu:
+ mov $a0,#0
+ bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
+ str $a0,[sp,#0] @ tab[0]=0
+ add $a2,$a1,$a1 @ a2=a1<<1
+ str $a1,[sp,#4] @ tab[1]=a1
+ eor $a12,$a1,$a2 @ a1^a2
+ str $a2,[sp,#8] @ tab[2]=a2
+ mov $a4,$a1,lsl#2 @ a4=a1<<2
+ str $a12,[sp,#12] @ tab[3]=a1^a2
+ eor $a14,$a1,$a4 @ a1^a4
+ str $a4,[sp,#16] @ tab[4]=a4
+ eor $a0,$a2,$a4 @ a2^a4
+ str $a14,[sp,#20] @ tab[5]=a1^a4
+ eor $a12,$a12,$a4 @ a1^a2^a4
+ str $a0,[sp,#24] @ tab[6]=a2^a4
+ and $i0,$mask,$b,lsl#2
+ str $a12,[sp,#28] @ tab[7]=a1^a2^a4
+
+ and $i1,$mask,$b,lsr#1
+ ldr $lo,[sp,$i0] @ tab[b & 0x7]
+ and $i0,$mask,$b,lsr#4
+ ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
+ and $i1,$mask,$b,lsr#7
+ ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
+ eor $lo,$lo,$t1,lsl#3 @ stall
+ mov $hi,$t1,lsr#29
+ ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
+
+ and $i0,$mask,$b,lsr#10
+ eor $lo,$lo,$t0,lsl#6
+ eor $hi,$hi,$t0,lsr#26
+ ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
+
+ and $i1,$mask,$b,lsr#13
+ eor $lo,$lo,$t1,lsl#9
+ eor $hi,$hi,$t1,lsr#23
+ ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
+
+ and $i0,$mask,$b,lsr#16
+ eor $lo,$lo,$t0,lsl#12
+ eor $hi,$hi,$t0,lsr#20
+ ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
+
+ and $i1,$mask,$b,lsr#19
+ eor $lo,$lo,$t1,lsl#15
+ eor $hi,$hi,$t1,lsr#17
+ ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
+
+ and $i0,$mask,$b,lsr#22
+ eor $lo,$lo,$t0,lsl#18
+ eor $hi,$hi,$t0,lsr#14
+ ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
+
+ and $i1,$mask,$b,lsr#25
+ eor $lo,$lo,$t1,lsl#21
+ eor $hi,$hi,$t1,lsr#11
+ ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
+
+ tst $a,#1<<30
+ and $i0,$mask,$b,lsr#28
+ eor $lo,$lo,$t0,lsl#24
+ eor $hi,$hi,$t0,lsr#8
+ ldr $t0,[sp,$i0] @ tab[b >> 30 ]
+
+ eorne $lo,$lo,$b,lsl#30
+ eorne $hi,$hi,$b,lsr#2
+ tst $a,#1<<31
+ eor $lo,$lo,$t1,lsl#27
+ eor $hi,$hi,$t1,lsr#5
+ eorne $lo,$lo,$b,lsl#31
+ eorne $hi,$hi,$b,lsr#1
+ eor $lo,$lo,$t0,lsl#30
+ eor $hi,$hi,$t0,lsr#2
+
+ mov pc,lr
+.size mul_1x1_ialu,.-mul_1x1_ialu
+___
+################
+# void bn_GF2m_mul_2x2(BN_ULONG *r,
+# BN_ULONG a1,BN_ULONG a0,
+# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
+
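
As a hedged reference for the combination performed below: three 1x1 carry-less products are merged Karatsuba-style, with XOR standing in for both addition and subtraction over GF(2). The names gf2m_mul_2x2 and clmul32 are illustrative (clmul32 stands for any 32x32->64 carry-less multiply, such as the sketch shown earlier), and 32-bit words are assumed.

    /* illustrative sketch of the 2x2 Karatsuba combination over GF(2) */
    static void gf2m_mul_2x2(uint32_t r[4],
                             uint32_t a1, uint32_t a0,
                             uint32_t b1, uint32_t b0)
    {
        uint64_t hi  = clmul32(a1, b1);                     /* a1*b1 */
        uint64_t lo  = clmul32(a0, b0);                     /* a0*b0 */
        uint64_t mid = clmul32(a1 ^ a0, b1 ^ b0) ^ hi ^ lo; /* middle term */

        r[0] = (uint32_t)lo;                                /* least significant */
        r[1] = (uint32_t)(lo >> 32) ^ (uint32_t)mid;
        r[2] = (uint32_t)(mid >> 32) ^ (uint32_t)hi;
        r[3] = (uint32_t)(hi >> 32);                        /* most significant */
    }
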
+($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23));
+
+$code.=<<___;
+.global bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,%function
+.align 5
+bn_GF2m_mul_2x2:
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+.Lpic: ldr r12,[pc,r12]
+ tst r12,#1
+ beq .Lialu
+
+ veor $A1,$A1
+ vmov.32 $B1,r3,r3 @ two copies of b1
+ vmov.32 ${A1}[0],r1 @ a1
+
+ veor $A0,$A0
+ vld1.32 ${B0}[],[sp,:32] @ two copies of b0
+ vmov.32 ${A0}[0],r2 @ a0
+ mov r12,lr
+
+ vmov d16,$A1
+ vmov d17,$B1
+ bl mul_1x1_neon @ a1·b1
+ vmov $A1B1,d0
+
+ vmov d16,$A0
+ vmov d17,$B0
+ bl mul_1x1_neon @ a0·b0
+ vmov $A0B0,d0
+
+ veor d16,$A0,$A1
+ veor d17,$B0,$B1
+ veor $A0,$A0B0,$A1B1
+ bl mul_1x1_neon @ (a0+a1)·(b0+b1)
+
+ veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ vshl.u64 d1,d0,#32
+ vshr.u64 d0,d0,#32
+ veor $A0B0,d1
+ veor $A1B1,d0
+ vst1.32 {${A0B0}[0]},[r0,:32]!
+ vst1.32 {${A0B0}[1]},[r0,:32]!
+ vst1.32 {${A1B1}[0]},[r0,:32]!
+ vst1.32 {${A1B1}[1]},[r0,:32]
+ bx r12
+.align 4
+.Lialu:
+#endif
+___
+$ret="r10"; # reassigned 1st argument
+$code.=<<___;
+ stmdb sp!,{r4-r10,lr}
+ mov $ret,r0 @ reassign 1st argument
+ mov $b,r3 @ $b=b1
+ ldr r3,[sp,#32] @ load b0
+ mov $mask,#7<<2
+ sub sp,sp,#32 @ allocate tab[8]
+
+ bl mul_1x1_ialu @ a1·b1
+ str $lo,[$ret,#8]
+ str $hi,[$ret,#12]
+
+ eor $b,$b,r3 @ flip b0 and b1
+ eor $a,$a,r2 @ flip a0 and a1
+ eor r3,r3,$b
+ eor r2,r2,$a
+ eor $b,$b,r3
+ eor $a,$a,r2
+ bl mul_1x1_ialu @ a0·b0
+ str $lo,[$ret]
+ str $hi,[$ret,#4]
+
+ eor $a,$a,r2
+ eor $b,$b,r3
+ bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
+___
+@r=map("r$_",(6..9));
+$code.=<<___;
+ ldmia $ret,{@r[0]-@r[3]}
+ eor $lo,$lo,$hi
+ eor $hi,$hi,@r[1]
+ eor $lo,$lo,@r[0]
+ eor $hi,$hi,@r[2]
+ eor $lo,$lo,@r[3]
+ eor $hi,$hi,@r[3]
+ str $hi,[$ret,#8]
+ eor $lo,$lo,$hi
+ add sp,sp,#32 @ destroy tab[8]
+ str $lo,[$ret,#4]
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r10,pc}
+#else
+ ldmia sp!,{r4-r10,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-(.Lpic+8)
+#endif
+.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 5
+
+.comm OPENSSL_armcap_P,4,4
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
diff --git a/openssl/crypto/bn/asm/armv4-mont.pl b/openssl/crypto/bn/asm/armv4-mont.pl
index 14e0d2d1d..f78a8b5f0 100644
--- a/openssl/crypto/bn/asm/armv4-mont.pl
+++ b/openssl/crypto/bn/asm/armv4-mont.pl
@@ -23,6 +23,9 @@
# than 1/2KB. Windows CE port would be trivial, as it's exclusively
# about decorations, ABI and instruction syntax are identical.
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
$num="r0"; # starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
@@ -89,9 +92,9 @@ bn_mul_mont:
.L1st:
ldr $aj,[$ap],#4 @ ap[j],ap++
mov $alo,$ahi
+ ldr $nj,[$np],#4 @ np[j],np++
mov $ahi,#0
umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0]
- ldr $nj,[$np],#4 @ np[j],np++
mov $nhi,#0
umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
adds $nlo,$nlo,$alo
@@ -101,21 +104,21 @@ bn_mul_mont:
bne .L1st
adds $nlo,$nlo,$ahi
+ ldr $tp,[$_bp] @ restore bp
mov $nhi,#0
+ ldr $n0,[$_n0] @ restore n0
adc $nhi,$nhi,#0
- ldr $tp,[$_bp] @ restore bp
str $nlo,[$num] @ tp[num-1]=
- ldr $n0,[$_n0] @ restore n0
str $nhi,[$num,#4] @ tp[num]=
.Louter:
sub $tj,$num,sp @ "original" $num-1 value
sub $ap,$ap,$tj @ "rewind" ap to &ap[1]
- sub $np,$np,$tj @ "rewind" np to &np[1]
ldr $bi,[$tp,#4]! @ *(++bp)
+ sub $np,$np,$tj @ "rewind" np to &np[1]
ldr $aj,[$ap,#-4] @ ap[0]
- ldr $nj,[$np,#-4] @ np[0]
ldr $alo,[sp] @ tp[0]
+ ldr $nj,[$np,#-4] @ np[0]
ldr $tj,[sp,#4] @ tp[1]
mov $ahi,#0
@@ -129,13 +132,13 @@ bn_mul_mont:
.Linner:
ldr $aj,[$ap],#4 @ ap[j],ap++
adds $alo,$ahi,$tj @ +=tp[j]
+ ldr $nj,[$np],#4 @ np[j],np++
mov $ahi,#0
umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i]
- ldr $nj,[$np],#4 @ np[j],np++
mov $nhi,#0
umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
- ldr $tj,[$tp,#8] @ tp[j+1]
adc $ahi,$ahi,#0
+ ldr $tj,[$tp,#8] @ tp[j+1]
adds $nlo,$nlo,$alo
str $nlo,[$tp],#4 @ tp[j-1]=,tp++
adc $nlo,$nhi,#0
@@ -144,13 +147,13 @@ bn_mul_mont:
adds $nlo,$nlo,$ahi
mov $nhi,#0
+ ldr $tp,[$_bp] @ restore bp
adc $nhi,$nhi,#0
+ ldr $n0,[$_n0] @ restore n0
adds $nlo,$nlo,$tj
- adc $nhi,$nhi,#0
- ldr $tp,[$_bp] @ restore bp
ldr $tj,[$_bpend] @ restore &bp[num]
+ adc $nhi,$nhi,#0
str $nlo,[$num] @ tp[num-1]=
- ldr $n0,[$_n0] @ restore n0
str $nhi,[$num,#4] @ tp[num]=
cmp $tp,$tj
diff --git a/openssl/crypto/bn/asm/ia64-mont.pl b/openssl/crypto/bn/asm/ia64-mont.pl
new file mode 100644
index 000000000..e25865842
--- /dev/null
+++ b/openssl/crypto/bn/asm/ia64-mont.pl
@@ -0,0 +1,851 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2010
+#
+# "Teaser" Montgomery multiplication module for IA-64. There are
+# several possibilities for improvement:
+#
+# - modulo-scheduling outer loop would eliminate quite a number of
+# stalls after ldf8, xma and getf.sig outside inner loop and
+# improve shorter key performance;
+# - shorter vector support [with input vectors being fetched only
+# once] should be added;
+# - 2x unroll with help of n0[1] would make the code scalable on
+# "wider" IA-64, "wider" than Itanium 2 that is, which is not of
+# acute interest, because upcoming Tukwila's individual cores are
+# reportedly based on Itanium 2 design;
+# - dedicated squaring procedure(?);
+#
+# January 2010
+#
+# Shorter vector support is implemented by zero-padding ap and np
+# vectors up to 8 elements, or 512 bits. This means that 256-bit
+# inputs will be processed only 2 times faster than 512-bit inputs,
+# not 4 [as one would expect, because algorithm complexity is n^2].
+# The reason for padding is that inputs shorter than 512 bits won't
+# be processed faster anyway, because minimal critical path of the
+# core loop happens to match 512-bit timing. Either way, it resulted
+# in >100% improvement of 512-bit RSA sign benchmark and 50% - of
+# 1024-bit one [in comparison to original version of *this* module].
+#
+# So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with*
+# this module is:
+# sign verify sign/s verify/s
+# rsa 512 bits 0.000290s 0.000024s 3452.8 42031.4
+# rsa 1024 bits 0.000793s 0.000058s 1261.7 17172.0
+# rsa 2048 bits 0.005908s 0.000148s 169.3 6754.0
+# rsa 4096 bits 0.033456s 0.000469s 29.9 2133.6
+# dsa 512 bits 0.000253s 0.000198s 3949.9 5057.0
+# dsa 1024 bits 0.000585s 0.000607s 1708.4 1647.4
+# dsa 2048 bits 0.001453s 0.001703s 688.1 587.4
+#
+# ... and *without* (but still with ia64.S):
+#
+# rsa 512 bits 0.000670s 0.000041s 1491.8 24145.5
+# rsa 1024 bits 0.001988s 0.000080s 502.9 12499.3
+# rsa 2048 bits 0.008702s 0.000189s 114.9 5293.9
+# rsa 4096 bits 0.043860s 0.000533s 22.8 1875.9
+# dsa 512 bits 0.000441s 0.000427s 2265.3 2340.6
+# dsa 1024 bits 0.000823s 0.000867s 1215.6 1153.2
+# dsa 2048 bits 0.001894s 0.002179s 528.1 458.9
+#
+# As it can be seen, RSA sign performance improves by 130-30%,
+# hereafter less for longer keys, while verify - by 74-13%.
+# DSA performance improves by 115-30%.
+
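
As a reference point for the assembly that follows, here is a minimal word-serial (CIOS-style) Montgomery multiplication in C: the textbook algorithm that bn_mul_mont computes, not the IA-64 scheduling. The name mont_mul, the 32-bit limbs and passing n0 by value rather than by pointer are illustrative choices; the bn_mul_mont_8 path below additionally zero-pads shorter inputs to eight limbs, as explained above.

    #include <stdint.h>
    #include <string.h>

    /* r = a*b*R^-1 mod n, R = 2^(32*num); assumes ap,bp < np and
     * n0 = -np[0]^-1 mod 2^32, as in the real bn_mul_mont interface */
    static void mont_mul(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                         const uint32_t *np, uint32_t n0, int num)
    {
        uint32_t tp[num + 2];           /* C99 VLA: accumulator plus carry words */
        uint32_t carry, m, borrow;
        uint64_t t;
        int i, j;

        memset(tp, 0, sizeof(tp));
        for (i = 0; i < num; i++) {
            carry = 0;                  /* tp += ap[] * bp[i] */
            for (j = 0; j < num; j++) {
                t = (uint64_t)ap[j] * bp[i] + tp[j] + carry;
                tp[j] = (uint32_t)t;
                carry = (uint32_t)(t >> 32);
            }
            t = (uint64_t)tp[num] + carry;
            tp[num] = (uint32_t)t;
            tp[num + 1] = (uint32_t)(t >> 32);

            m = tp[0] * n0;             /* adding m*np[] clears tp[0] ... */
            t = (uint64_t)m * np[0] + tp[0];
            carry = (uint32_t)(t >> 32);
            for (j = 1; j < num; j++) { /* ... so tp is shifted down one word */
                t = (uint64_t)m * np[j] + tp[j] + carry;
                tp[j - 1] = (uint32_t)t;
                carry = (uint32_t)(t >> 32);
            }
            t = (uint64_t)tp[num] + carry;
            tp[num - 1] = (uint32_t)t;
            tp[num] = tp[num + 1] + (uint32_t)(t >> 32);
        }

        borrow = 0;                     /* rp = tp - np unless that goes negative */
        for (j = 0; j < num; j++) {
            t = (uint64_t)tp[j] - np[j] - borrow;
            rp[j] = (uint32_t)t;
            borrow = (uint32_t)(t >> 32) & 1;
        }
        if (tp[num] < borrow)           /* tp < np: keep the unreduced value */
            memcpy(rp, tp, num * sizeof(uint32_t));
    }
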
+if ($^O eq "hpux") {
+ $ADDP="addp4";
+ for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
+} else { $ADDP="add"; }
+
+$code=<<___;
+.explicit
+.text
+
+// int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap,
+// const BN_ULONG *bp,const BN_ULONG *np,
+// const BN_ULONG *n0p,int num);
+.align 64
+.global bn_mul_mont#
+.proc bn_mul_mont#
+bn_mul_mont:
+ .prologue
+ .body
+{ .mmi; cmp4.le p6,p7=2,r37;;
+(p6) cmp4.lt.unc p8,p9=8,r37
+ mov ret0=r0 };;
+{ .bbb;
+(p9) br.cond.dptk.many bn_mul_mont_8
+(p8) br.cond.dpnt.many bn_mul_mont_general
+(p7) br.ret.spnt.many b0 };;
+.endp bn_mul_mont#
+
+prevfs=r2; prevpr=r3; prevlc=r10; prevsp=r11;
+
+rptr=r8; aptr=r9; bptr=r14; nptr=r15;
+tptr=r16; // &tp[0]
+tp_1=r17; // &tp[-1]
+num=r18; len=r19; lc=r20;
+topbit=r21; // carry bit from tmp[num]
+
+n0=f6;
+m0=f7;
+bi=f8;
+
+.align 64
+.local bn_mul_mont_general#
+.proc bn_mul_mont_general#
+bn_mul_mont_general:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ $ADDP aptr=0,in1
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; .vframe prevsp
+ mov prevsp=sp
+ $ADDP bptr=0,in2
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+ .body
+ .rotf alo[6],nlo[4],ahi[8],nhi[6]
+ .rotr a[3],n[3],t[2]
+
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 alo[4]=[aptr],16 // ap[0]
+ $ADDP r30=8,in1 };;
+{ .mmi; ldf8 alo[3]=[r30],16 // ap[1]
+ ldf8 alo[2]=[aptr],16 // ap[2]
+ $ADDP in4=0,in4 };;
+{ .mmi; ldf8 alo[1]=[r30] // ap[3]
+ ldf8 n0=[in4] // n0
+ $ADDP rptr=0,in0 }
+{ .mmi; $ADDP nptr=0,in3
+ mov r31=16
+ zxt4 num=in5 };;
+{ .mmi; ldf8 nlo[2]=[nptr],8 // np[0]
+ shladd len=num,3,r0
+ shladd r31=num,3,r31 };;
+{ .mmi; ldf8 nlo[1]=[nptr],8 // np[1]
+ add lc=-5,num
+ sub r31=sp,r31 };;
+{ .mfb; and sp=-16,r31 // alloca
+ xmpy.hu ahi[2]=alo[4],bi // ap[0]*bp[0]
+ nop.b 0 }
+{ .mfb; nop.m 0
+ xmpy.lu alo[4]=alo[4],bi
+ brp.loop.imp .L1st_ctop,.L1st_cend-16
+ };;
+{ .mfi; nop.m 0
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[0]
+ add tp_1=8,sp }
+{ .mfi; nop.m 0
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20001f<<16
+ // ------^----- (p40) at first (p23)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; nop.m 0
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0
+ mov ar.lc=lc }
+{ .mfi; nop.m 0
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+.align 32
+.L1st_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23) }
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)(p16)
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p42) cmp.leu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p23) st8 [tp_1]=n[2],8
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mmb; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ br.ctop.sptk .L1st_ctop };;
+.L1st_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ add num=-1,num };; // num--
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ sub aptr=aptr,len };; // rewind
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+ sub nptr=nptr,len };;
+{ .mmi; .pred.rel "mutex",p39,p41
+(p39) add topbit=r0,r0
+(p41) add topbit=r0,r0,1
+ nop.i 0 }
+{ .mmi; st8 [tp_1]=n[0]
+ add tptr=16,sp
+ add tp_1=8,sp };;
+
+.Louter:
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 ahi[3]=[tptr] // tp[0]
+ add r30=8,aptr };;
+{ .mmi; ldf8 alo[4]=[aptr],16 // ap[0]
+ ldf8 alo[3]=[r30],16 // ap[1]
+ add r31=8,nptr };;
+{ .mfb; ldf8 alo[2]=[aptr],16 // ap[2]
+ xma.hu ahi[2]=alo[4],bi,ahi[3] // ap[0]*bp[i]+tp[0]
+ brp.loop.imp .Linner_ctop,.Linner_cend-16
+ }
+{ .mfb; ldf8 alo[1]=[r30] // ap[3]
+ xma.lu alo[4]=alo[4],bi,ahi[3]
+ clrrrb.pr };;
+{ .mfi; ldf8 nlo[2]=[nptr],16 // np[0]
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[i]
+ nop.i 0 }
+{ .mfi; ldf8 nlo[1]=[r31] // np[1]
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20101f<<16
+ // ------^----- (p40) at first (p23)
+ // --------^--- (p30) at first (p22)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; st8 [tptr]=r0 // tp[0] is already accounted
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0
+ mov ar.lc=lc }
+{ .mfi;
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+// This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in
+// 7*(n+7) ticks on Itanium (the one codenamed Merced). Factor of 7
+// in latter case accounts for two-tick pipeline stall, which means
+// that its performance would be ~20% lower than optimal one. No
+// attempt was made to address this, because original Itanium is
+// hardly represented out in the wild...
+.align 32
+.Linner_ctop:
+.pred.rel "mutex",p40,p42
+.pred.rel "mutex",p30,p32
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23)
+{ .mfi; (p16) nop.m 0
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p16) nop.f 0
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p21) ld8 t[0]=[tptr],8
+ (p16) nop.f 0
+ (p42) cmp.leu p41,p39=n[2],a[2] };; // (p23)
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p30) add a[1]=a[1],t[1] } // (p22)
+{ .mfi; (p16) nop.m 0
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p32) add a[1]=a[1],t[1],1 };; // (p22)
+{ .mmi; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ (p30) cmp.ltu p31,p29=a[1],t[1] } // (p22)
+{ .mmb; (p23) st8 [tp_1]=n[2],8
+ (p32) cmp.leu p31,p29=a[1],t[1] // (p22)
+ br.ctop.sptk .Linner_ctop };;
+.Linner_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ nop.i 0 };;
+
+{ .mmi; .pred.rel "mutex",p31,p33
+(p31) add a[0]=a[0],topbit
+(p33) add a[0]=a[0],topbit,1
+ mov topbit=r0 };;
+{ .mfi; .pred.rel "mutex",p31,p33
+(p31) cmp.ltu p32,p30=a[0],topbit
+(p33) cmp.leu p32,p30=a[0],topbit
+ }
+{ .mfi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ };;
+{ .mmi; .pred.rel "mutex",p44,p46
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+(p32) add topbit=r0,r0,1 }
+
+{ .mmi; st8 [tp_1]=n[0],8
+ cmp4.ne p6,p0=1,num
+ sub aptr=aptr,len };; // rewind
+{ .mmi; sub nptr=nptr,len
+(p41) add topbit=r0,r0,1
+ add tptr=16,sp }
+{ .mmb; add tp_1=8,sp
+ add num=-1,num // num--
+(p6) br.cond.sptk.many .Louter };;
+
+{ .mbb; add lc=4,lc
+ brp.loop.imp .Lsub_ctop,.Lsub_cend-16
+ clrrrb.pr };;
+{ .mii; nop.m 0
+ mov pr.rot=0x10001<<16
+ // ------^---- (p33) at first (p17)
+ mov ar.lc=lc }
+{ .mii; nop.m 0
+ mov ar.ec=3
+ nop.i 0 };;
+
+.Lsub_ctop:
+.pred.rel "mutex",p33,p35
+{ .mfi; (p16) ld8 t[0]=[tptr],8 // t=*(tp++)
+ (p16) nop.f 0
+ (p33) sub n[1]=t[1],n[1] } // (p17)
+{ .mfi; (p16) ld8 n[0]=[nptr],8 // n=*(np++)
+ (p16) nop.f 0
+ (p35) sub n[1]=t[1],n[1],1 };; // (p17)
+{ .mib; (p18) st8 [rptr]=n[2],8 // *(rp++)=r
+ (p33) cmp.gtu p34,p32=n[1],t[1] // (p17)
+ (p18) nop.b 0 }
+{ .mib; (p18) nop.m 0
+ (p35) cmp.geu p34,p32=n[1],t[1] // (p17)
+ br.ctop.sptk .Lsub_ctop };;
+.Lsub_cend:
+
+{ .mmb; .pred.rel "mutex",p34,p36
+(p34) sub topbit=topbit,r0 // (p19)
+(p36) sub topbit=topbit,r0,1
+ brp.loop.imp .Lcopy_ctop,.Lcopy_cend-16
+ }
+{ .mmb; sub rptr=rptr,len // rewind
+ sub tptr=tptr,len
+ clrrrb.pr };;
+{ .mmi; and aptr=tptr,topbit
+ andcm bptr=rptr,topbit
+ mov pr.rot=1<<16 };;
+{ .mii; or nptr=aptr,bptr
+ mov ar.lc=lc
+ mov ar.ec=3 };;
+
+.Lcopy_ctop:
+{ .mmb; (p16) ld8 n[0]=[nptr],8
+ (p18) st8 [tptr]=r0,8
+ (p16) nop.b 0 }
+{ .mmb; (p16) nop.m 0
+ (p18) st8 [rptr]=n[2],8
+ br.ctop.sptk .Lcopy_ctop };;
+.Lcopy_cend:
+
+{ .mmi; mov ret0=1 // signal "handled"
+ rum 1<<5 // clear um.mfh
+ mov ar.lc=prevlc }
+{ .mib; .restore sp
+ mov sp=prevsp
+ mov pr=prevpr,0x1ffff
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_general#
+
+a1=r16; a2=r17; a3=r18; a4=r19; a5=r20; a6=r21; a7=r22; a8=r23;
+n1=r24; n2=r25; n3=r26; n4=r27; n5=r28; n6=r29; n7=r30; n8=r31;
+t0=r15;
+
+ai0=f8; ai1=f9; ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15;
+ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23;
+
+.align 64
+.skip 48 // aligns loop body
+.local bn_mul_mont_8#
+.proc bn_mul_mont_8#
+bn_mul_mont_8:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ .vframe prevsp
+ mov prevsp=sp
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; add r17=-6*16,sp
+ add sp=-7*16,sp
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+{ .mmi; .save.gf 0,0x10
+ stf.spill [sp]=f16,-16
+ .save.gf 0,0x20
+ stf.spill [r17]=f17,32
+ add r16=-5*16,prevsp};;
+{ .mmi; .save.gf 0,0x40
+ stf.spill [r16]=f18,32
+ .save.gf 0,0x80
+ stf.spill [r17]=f19,32
+ $ADDP aptr=0,in1 };;
+{ .mmi; .save.gf 0,0x100
+ stf.spill [r16]=f20,32
+ .save.gf 0,0x200
+ stf.spill [r17]=f21,32
+ $ADDP r29=8,in1 };;
+{ .mmi; .save.gf 0,0x400
+ stf.spill [r16]=f22
+ .save.gf 0,0x800
+ stf.spill [r17]=f23
+ $ADDP rptr=0,in0 };;
+
+ .body
+ .rotf bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10]
+ .rotr t[8]
+
+// load input vectors padding them to 8 elements
+{ .mmi; ldf8 ai0=[aptr],16 // ap[0]
+ ldf8 ai1=[r29],16 // ap[1]
+ $ADDP bptr=0,in2 }
+{ .mmi; $ADDP r30=8,in2
+ $ADDP nptr=0,in3
+ $ADDP r31=8,in3 };;
+{ .mmi; ldf8 bj[7]=[bptr],16 // bp[0]
+ ldf8 bj[6]=[r30],16 // bp[1]
+ cmp4.le p4,p5=3,in5 }
+{ .mmi; ldf8 ni0=[nptr],16 // np[0]
+ ldf8 ni1=[r31],16 // np[1]
+ cmp4.le p6,p7=4,in5 };;
+
+{ .mfi; (p4)ldf8 ai2=[aptr],16 // ap[2]
+ (p5)fcvt.fxu ai2=f0
+ cmp4.le p8,p9=5,in5 }
+{ .mfi; (p6)ldf8 ai3=[r29],16 // ap[3]
+ (p7)fcvt.fxu ai3=f0
+ cmp4.le p10,p11=6,in5 }
+{ .mfi; (p4)ldf8 bj[5]=[bptr],16 // bp[2]
+ (p5)fcvt.fxu bj[5]=f0
+ cmp4.le p12,p13=7,in5 }
+{ .mfi; (p6)ldf8 bj[4]=[r30],16 // bp[3]
+ (p7)fcvt.fxu bj[4]=f0
+ cmp4.le p14,p15=8,in5 }
+{ .mfi; (p4)ldf8 ni2=[nptr],16 // np[2]
+ (p5)fcvt.fxu ni2=f0
+ addp4 r28=-1,in5 }
+{ .mfi; (p6)ldf8 ni3=[r31],16 // np[3]
+ (p7)fcvt.fxu ni3=f0
+ $ADDP in4=0,in4 };;
+
+{ .mfi; ldf8 n0=[in4]
+ fcvt.fxu tf[1]=f0
+ nop.i 0 }
+
+{ .mfi; (p8)ldf8 ai4=[aptr],16 // ap[4]
+ (p9)fcvt.fxu ai4=f0
+ mov t[0]=r0 }
+{ .mfi; (p10)ldf8 ai5=[r29],16 // ap[5]
+ (p11)fcvt.fxu ai5=f0
+ mov t[1]=r0 }
+{ .mfi; (p8)ldf8 bj[3]=[bptr],16 // bp[4]
+ (p9)fcvt.fxu bj[3]=f0
+ mov t[2]=r0 }
+{ .mfi; (p10)ldf8 bj[2]=[r30],16 // bp[5]
+ (p11)fcvt.fxu bj[2]=f0
+ mov t[3]=r0 }
+{ .mfi; (p8)ldf8 ni4=[nptr],16 // np[4]
+ (p9)fcvt.fxu ni4=f0
+ mov t[4]=r0 }
+{ .mfi; (p10)ldf8 ni5=[r31],16 // np[5]
+ (p11)fcvt.fxu ni5=f0
+ mov t[5]=r0 };;
+
+{ .mfi; (p12)ldf8 ai6=[aptr],16 // ap[6]
+ (p13)fcvt.fxu ai6=f0
+ mov t[6]=r0 }
+{ .mfi; (p14)ldf8 ai7=[r29],16 // ap[7]
+ (p15)fcvt.fxu ai7=f0
+ mov t[7]=r0 }
+{ .mfi; (p12)ldf8 bj[1]=[bptr],16 // bp[6]
+ (p13)fcvt.fxu bj[1]=f0
+ mov ar.lc=r28 }
+{ .mfi; (p14)ldf8 bj[0]=[r30],16 // bp[7]
+ (p15)fcvt.fxu bj[0]=f0
+ mov ar.ec=1 }
+{ .mfi; (p12)ldf8 ni6=[nptr],16 // np[6]
+ (p13)fcvt.fxu ni6=f0
+ mov pr.rot=1<<16 }
+{ .mfb; (p14)ldf8 ni7=[r31],16 // np[7]
+ (p15)fcvt.fxu ni7=f0
+ brp.loop.imp .Louter_8_ctop,.Louter_8_cend-16
+ };;
+
+// The loop is scheduled for 32*n ticks on Itanium 2. Actual attempt
+// to measure with help of Interval Time Counter indicated that the
+// factor is a tad higher: 33 or 34, if not 35. Exact measurement and
+// addressing the issue is problematic, because I don't have access
+// to platform-specific instruction-level profiler. On Itanium it
+// should run in 56*n ticks, because of higher xma latency...
+.Louter_8_ctop:
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 0:
+ (p16) xma.hu ahi[0]=ai0,bj[7],tf[1] // ap[0]*b[i]+t[0]
+ (p40) add a3=a3,n3 } // (p17) a3+=n3
+{ .mfi; (p42) add a3=a3,n3,1
+ (p16) xma.lu alo[0]=ai0,bj[7],tf[1]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 4:
+ (p16) xma.hu ahi[1]=ai1,bj[7],ahi[0] // ap[1]*b[i]
+ (p41) add a4=a4,n4 } // (p17) a4+=n4
+{ .mfi; (p43) add a4=a4,n4,1
+ (p16) xma.lu alo[1]=ai1,bj[7],ahi[0]
+ (p16) nop.i 0 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p16) nop.m 0 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 8:
+ (p16) xma.hu ahi[2]=ai2,bj[7],ahi[1] // ap[2]*b[i]
+ (p40) add a5=a5,n5 } // (p17) a5+=n5
+{ .mfi; (p42) add a5=a5,n5,1
+ (p16) xma.lu alo[2]=ai2,bj[7],ahi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a1=alo[1] // 9:
+ (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mfi; (p16) nop.m 0 // 10:
+ (p16) xma.hu nhi[0]=ni0,mj[0],alo[0] // np[0]*m0
+ (p40) cmp.ltu p43,p41=a5,n5 }
+{ .mfi; (p42) cmp.leu p43,p41=a5,n5
+ (p16) xma.lu nlo[0]=ni0,mj[0],alo[0]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p17) getf.sig n8=nhi[8] // 12:
+ (p16) xma.hu ahi[3]=ai3,bj[7],ahi[2] // ap[3]*b[i]
+ (p41) add a6=a6,n6 } // (p17) a6+=n6
+{ .mfi; (p43) add a6=a6,n6,1
+ (p16) xma.lu alo[3]=ai3,bj[7],ahi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a2=alo[2] // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mfi; (p16) nop.m 0 // 14:
+ (p16) xma.hu nhi[1]=ni1,mj[0],nhi[0] // np[1]*m0
+ (p41) cmp.ltu p42,p40=a6,n6 }
+{ .mfi; (p43) cmp.leu p42,p40=a6,n6
+ (p16) xma.lu nlo[1]=ni1,mj[0],nhi[0]
+ (p16) nop.i 0 };;
+{ .mii; (p16) nop.m 0 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 16:
+ (p16) xma.hu ahi[4]=ai4,bj[7],ahi[3] // ap[4]*b[i]
+ (p40) add a7=a7,n7 } // (p17) a7+=n7
+{ .mfi; (p42) add a7=a7,n7,1
+ (p16) xma.lu alo[4]=ai4,bj[7],ahi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a3=alo[3] // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mfi; (p16) nop.m 0 // 18:
+ (p16) xma.hu nhi[2]=ni2,mj[0],nhi[1] // np[2]*m0
+ (p40) cmp.ltu p43,p41=a7,n7 }
+{ .mfi; (p42) cmp.leu p43,p41=a7,n7
+ (p16) xma.lu nlo[2]=ni2,mj[0],nhi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n1=nlo[1] // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 20:
+ (p16) xma.hu ahi[5]=ai5,bj[7],ahi[4] // ap[5]*b[i]
+ (p41) add a8=a8,n8 } // (p17) a8+=n8
+{ .mfi; (p43) add a8=a8,n8,1
+ (p16) xma.lu alo[5]=ai5,bj[7],ahi[4]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a4=alo[4] // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 };;
+{ .mfi; (p16) nop.m 0 // 22:
+ (p16) xma.hu nhi[3]=ni3,mj[0],nhi[2] // np[3]*m0
+ (p41) cmp.ltu p42,p40=a8,n8 }
+{ .mfi; (p43) cmp.leu p42,p40=a8,n8
+ (p16) xma.lu nlo[3]=ni3,mj[0],nhi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n2=nlo[2] // 23:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 };;
+{ .mfi; (p16) nop.m 0 // 24:
+ (p16) xma.hu ahi[6]=ai6,bj[7],ahi[5] // ap[6]*b[i]
+ (p16) add a1=a1,n1 } // (p16) a1+=n1
+{ .mfi; (p16) nop.m 0
+ (p16) xma.lu alo[6]=ai6,bj[7],ahi[5]
+ (p17) mov t[0]=r0 };;
+{ .mii; (p16) getf.sig a5=alo[5] // 25:
+ (p16) add t0=t[7],a1 // (p16) t[7]+=a1
+ (p42) add t[0]=t[0],r0,1 };;
+{ .mfi; (p16) setf.sig tf[0]=t0 // 26:
+ (p16) xma.hu nhi[4]=ni4,mj[0],nhi[3] // np[4]*m0
+ (p50) add t[0]=t[0],r0,1 }
+{ .mfi; (p16) cmp.ltu.unc p42,p40=a1,n1
+ (p16) xma.lu nlo[4]=ni4,mj[0],nhi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n3=nlo[3] // 27:
+ (p16) cmp.ltu.unc p50,p48=t0,a1
+ (p16) nop.i 0 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 28:
+ (p16) xma.hu ahi[7]=ai7,bj[7],ahi[6] // ap[7]*b[i]
+ (p40) add a2=a2,n2 } // (p16) a2+=n2
+{ .mfi; (p42) add a2=a2,n2,1
+ (p16) xma.lu alo[7]=ai7,bj[7],ahi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a6=alo[6] // 29:
+ (p48) add t[6]=t[6],a2 // (p16) t[6]+=a2
+ (p50) add t[6]=t[6],a2,1 };;
+{ .mfi; (p16) nop.m 0 // 30:
+ (p16) xma.hu nhi[5]=ni5,mj[0],nhi[4] // np[5]*m0
+ (p40) cmp.ltu p41,p39=a2,n2 }
+{ .mfi; (p42) cmp.leu p41,p39=a2,n2
+ (p16) xma.lu nlo[5]=ni5,mj[0],nhi[4]
+ (p16) nop.i 0 };;
+{ .mfi; (p16) getf.sig n4=nlo[4] // 31:
+ (p16) nop.f 0
+ (p48) cmp.ltu p49,p47=t[6],a2 }
+{ .mfb; (p50) cmp.leu p49,p47=t[6],a2
+ (p16) nop.f 0
+ br.ctop.sptk.many .Louter_8_ctop };;
+.Louter_8_cend:
+
+// above loop has to execute one more time, without (p16), which is
+// replaced with merged move of np[8] to GPR bank
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mmi; (p0) getf.sig n1=ni0 // 0:
+ (p40) add a3=a3,n3 // (p17) a3+=n3
+ (p42) add a3=a3,n3,1 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mmi; (p0) getf.sig n2=ni1 // 4:
+ (p41) add a4=a4,n4 // (p17) a4+=n4
+ (p43) add a4=a4,n4,1 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p0) nop.f 0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p0) getf.sig n3=ni2 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) getf.sig n4=ni3 // 8:
+ (p40) add a5=a5,n5 // (p17) a5+=n5
+ (p42) add a5=a5,n5,1 };;
+{ .mii; (p0) nop.m 0 // 9:
+ (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mii; (p0) nop.m 0 // 10:
+ (p40) cmp.ltu p43,p41=a5,n5
+ (p42) cmp.leu p43,p41=a5,n5 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p17) getf.sig n8=nhi[8] // 12:
+ (p41) add a6=a6,n6 // (p17) a6+=n6
+ (p43) add a6=a6,n6,1 };;
+{ .mii; (p0) getf.sig n5=ni4 // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mii; (p0) nop.m 0 // 14:
+ (p41) cmp.ltu p42,p40=a6,n6
+ (p43) cmp.leu p42,p40=a6,n6 };;
+{ .mii; (p0) getf.sig n6=ni5 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) nop.m 0 // 16:
+ (p40) add a7=a7,n7 // (p17) a7+=n7
+ (p42) add a7=a7,n7,1 };;
+{ .mii; (p0) nop.m 0 // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mii; (p0) nop.m 0 // 18:
+ (p40) cmp.ltu p43,p41=a7,n7
+ (p42) cmp.leu p43,p41=a7,n7 };;
+{ .mii; (p0) getf.sig n7=ni6 // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p0) nop.m 0 // 20:
+ (p41) add a8=a8,n8 // (p17) a8+=n8
+ (p43) add a8=a8,n8,1 };;
+{ .mmi; (p0) nop.m 0 // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 }
+{ .mmi; (p17) mov t[0]=r0
+ (p41) cmp.ltu p42,p40=a8,n8
+ (p43) cmp.leu p42,p40=a8,n8 };;
+{ .mmi; (p0) getf.sig n8=ni7 // 22:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 }
+{ .mmi; (p42) add t[0]=t[0],r0,1
+ (p0) add r16=-7*16,prevsp
+ (p0) add r17=-6*16,prevsp };;
+
+// subtract np[8] from carrybit|tmp[8]
+// carrybit|tmp[8] layout upon exit from above loop is:
+// t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant)
+{ .mmi; (p50)add t[0]=t[0],r0,1
+ add r18=-5*16,prevsp
+ sub n1=t0,n1 };;
+{ .mmi; cmp.gtu p34,p32=n1,t0;;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n2=t[7],n2
+ (p34)sub n2=t[7],n2,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n2,t[7]
+ (p34)cmp.geu p35,p33=n2,t[7];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n3=t[6],n3 }
+{ .mmi; (p35)sub n3=t[6],n3,1;;
+ (p33)cmp.gtu p34,p32=n3,t[6]
+ (p35)cmp.geu p34,p32=n3,t[6] };;
+ .pred.rel "mutex",p32,p34
+{ .mii; (p32)sub n4=t[5],n4
+ (p34)sub n4=t[5],n4,1;;
+ (p32)cmp.gtu p35,p33=n4,t[5] }
+{ .mmi; (p34)cmp.geu p35,p33=n4,t[5];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n5=t[4],n5
+ (p35)sub n5=t[4],n5,1 };;
+{ .mii; (p33)cmp.gtu p34,p32=n5,t[4]
+ (p35)cmp.geu p34,p32=n5,t[4];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n6=t[3],n6 }
+{ .mmi; (p34)sub n6=t[3],n6,1;;
+ (p32)cmp.gtu p35,p33=n6,t[3]
+ (p34)cmp.geu p35,p33=n6,t[3] };;
+ .pred.rel "mutex",p33,p35
+{ .mii; (p33)sub n7=t[2],n7
+ (p35)sub n7=t[2],n7,1;;
+ (p33)cmp.gtu p34,p32=n7,t[2] }
+{ .mmi; (p35)cmp.geu p34,p32=n7,t[2];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n8=t[1],n8
+ (p34)sub n8=t[1],n8,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n8,t[1]
+ (p34)cmp.geu p35,p33=n8,t[1];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub a8=t[0],r0 }
+{ .mmi; (p35)sub a8=t[0],r0,1;;
+ (p33)cmp.gtu p34,p32=a8,t[0]
+ (p35)cmp.geu p34,p32=a8,t[0] };;
+
+// save the result, either tmp[num] or tmp[num]-np[num]
+ .pred.rel "mutex",p32,p34
+{ .mmi; (p32)st8 [rptr]=n1,8
+ (p34)st8 [rptr]=t0,8
+ add r19=-4*16,prevsp};;
+{ .mmb; (p32)st8 [rptr]=n2,8
+ (p34)st8 [rptr]=t[7],8
+ (p5)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n3,8
+ (p34)st8 [rptr]=t[6],8
+ (p7)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n4,8
+ (p34)st8 [rptr]=t[5],8
+ (p9)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n5,8
+ (p34)st8 [rptr]=t[4],8
+ (p11)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n6,8
+ (p34)st8 [rptr]=t[3],8
+ (p13)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n7,8
+ (p34)st8 [rptr]=t[2],8
+ (p15)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n8,8
+ (p34)st8 [rptr]=t[1],8
+ nop.b 0 };;
+.Ldone: // epilogue
+{ .mmi; ldf.fill f16=[r16],64
+ ldf.fill f17=[r17],64
+ nop.i 0 }
+{ .mmi; ldf.fill f18=[r18],64
+ ldf.fill f19=[r19],64
+ mov pr=prevpr,0x1ffff };;
+{ .mmi; ldf.fill f20=[r16]
+ ldf.fill f21=[r17]
+ mov ar.lc=prevlc }
+{ .mmi; ldf.fill f22=[r18]
+ ldf.fill f23=[r19]
+ mov ret0=1 } // signal "handled"
+{ .mib; rum 1<<5
+ .restore sp
+ mov sp=prevsp
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_8#
+
+.type copyright#,\@object
+copyright:
+stringz "Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$output=shift and open STDOUT,">$output";
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/mips-mont.pl b/openssl/crypto/bn/asm/mips-mont.pl
new file mode 100644
index 000000000..b944a12b8
--- /dev/null
+++ b/openssl/crypto/bn/asm/mips-mont.pl
@@ -0,0 +1,426 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# This module doesn't present direct interest for OpenSSL, because it
+# doesn't provide better performance for longer keys, at least not on
+# in-order-execution cores. While 512-bit RSA sign operations can be
+# 65% faster in 64-bit mode, 1024-bit ones are only 15% faster, and
+# 4096-bit ones are up to 15% slower. In 32-bit mode it varies from
+# 16% improvement for 512-bit RSA sign to -33% for 4096-bit RSA
+# verify:-( All comparisons are against bn_mul_mont-free assembler.
+# The module might be of interest to embedded system developers, as
+# the code is smaller than 1KB, yet offers >3x improvement on MIPS64
+# and 75-30% [less for longer keys] on MIPS32 over compiler-generated
+# code.
+
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange code in ABI neutral
+# manner. Therefore let's stick to NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. Following coding rules facilitate
+# interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+ $PTR_ADD="dadd"; # incidentally works even on n32
+ $PTR_SUB="dsub"; # incidentally works even on n32
+ $REG_S="sd";
+ $REG_L="ld";
+ $SZREG=8;
+} else {
+ $PTR_ADD="add";
+ $PTR_SUB="sub";
+ $REG_S="sw";
+ $REG_L="lw";
+ $SZREG=4;
+}
+$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0x00fff000 : 0x00ff0000;
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $BNSZ=8;
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $BNSZ=4;
+}
+
+# int bn_mul_mont(
+$rp=$a0; # BN_ULONG *rp,
+$ap=$a1; # const BN_ULONG *ap,
+$bp=$a2; # const BN_ULONG *bp,
+$np=$a3; # const BN_ULONG *np,
+$n0=$a4; # const BN_ULONG *n0,
+$num=$a5; # int num);
+
+$lo0=$a6;
+$hi0=$a7;
+$lo1=$t1;
+$hi1=$t2;
+$aj=$s0;
+$bi=$s1;
+$nj=$s2;
+$tp=$s3;
+$alo=$s4;
+$ahi=$s5;
+$nlo=$s6;
+$nhi=$s7;
+$tj=$s8;
+$i=$s9;
+$j=$s10;
+$m1=$s11;
+
+$FRAMESIZE=14;
+
+$code=<<___;
+.text
+
+.set noat
+.set noreorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+___
+$code.=<<___ if ($flavour =~ /o32/i);
+ lw $n0,16($sp)
+ lw $num,20($sp)
+___
+$code.=<<___;
+ slt $at,$num,4
+ bnez $at,1f
+ li $t0,0
+ slt $at,$num,17 # on in-order CPU
+ bnezl $at,bn_mul_mont_internal
+ nop
+1: jr $ra
+ li $a0,0
+.end bn_mul_mont
+
+.align 5
+.ent bn_mul_mont_internal
+bn_mul_mont_internal:
+ .frame $fp,$FRAMESIZE*$SZREG,$ra
+ .mask 0x40000000|$SAVED_REGS_MASK,-$SZREG
+ $PTR_SUB $sp,$FRAMESIZE*$SZREG
+ $REG_S $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_S $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_S $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_S $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_S $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_S $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_S $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_S $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_S $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_S $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_S $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_S $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_S $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ move $fp,$sp
+
+ .set reorder
+ $LD $n0,0($n0)
+ $LD $bi,0($bp) # bp[0]
+ $LD $aj,0($ap) # ap[0]
+ $LD $nj,0($np) # np[0]
+
+ $PTR_SUB $sp,2*$BNSZ # place for two extra words
+ sll $num,`log($BNSZ)/log(2)`
+ li $at,-4096
+ $PTR_SUB $sp,$num
+ and $sp,$at
+
+ $MULTU $aj,$bi
+ $LD $alo,$BNSZ($ap)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $MULTU $lo0,$n0
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+.align 4
+.L1st:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $MULTU $nj,$m1
+ $ADDU $hi1,$at
+ addu $j,$BNSZ
+ $ST $lo1,($tp)
+ sltu $t0,$j,$num
+ mflo $nlo
+ mfhi $nhi
+
+ bnez $t0,.L1st
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+
+ $ADDU $lo1,$nlo,$hi1
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi1,$nhi,$t0
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+
+ $ST $lo1,($tp)
+
+ $ADDU $hi1,$hi0
+ sltu $at,$hi1,$hi0
+ $ST $hi1,$BNSZ($tp)
+ $ST $at,2*$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+.Louter:
+ $PTR_ADD $bi,$bp,$i
+ $LD $bi,($bi)
+ $LD $aj,($ap)
+ $LD $alo,$BNSZ($ap)
+ $LD $tj,($sp)
+
+ $MULTU $aj,$bi
+ $LD $nj,($np)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $ADDU $lo0,$tj
+ $MULTU $lo0,$n0
+ sltu $at,$lo0,$tj
+ $ADDU $hi0,$at
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+ $LD $tj,$BNSZ($tp)
+.align 4
+.Linner:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo0,$tj
+ addu $j,$BNSZ
+ $MULTU $nj,$m1
+ sltu $at,$lo0,$tj
+ $ADDU $lo1,$lo0
+ $ADDU $hi0,$at
+ sltu $t0,$lo1,$lo0
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $hi1,$t0
+ sltu $at,$j,$num
+ mflo $nlo
+ mfhi $nhi
+ $ST $lo1,($tp)
+ bnez $at,.Linner
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+ $ADDU $lo0,$tj
+ sltu $t0,$lo0,$tj
+ $ADDU $hi0,$t0
+
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo1,$hi1
+ $ADDU $hi1,$nhi,$at
+ $ADDU $lo1,$lo0
+ sltu $t0,$lo1,$lo0
+ $ADDU $hi1,$t0
+ $ST $lo1,($tp)
+
+ $ADDU $lo1,$hi1,$hi0
+ sltu $hi1,$lo1,$hi0
+ $ADDU $lo1,$tj
+ sltu $at,$lo1,$tj
+ $ADDU $hi1,$at
+ $ST $lo1,$BNSZ($tp)
+ $ST $hi1,2*$BNSZ($tp)
+
+ addu $i,$BNSZ
+ sltu $t0,$i,$num
+ bnez $t0,.Louter
+
+ .set noreorder
+ $PTR_ADD $tj,$sp,$num # &tp[num]
+ move $tp,$sp
+ move $ap,$sp
+ li $hi0,0 # clear borrow bit
+
+.align 4
+.Lsub: $LD $lo0,($tp)
+ $LD $lo1,($np)
+ $PTR_ADD $tp,$BNSZ
+ $PTR_ADD $np,$BNSZ
+ $SUBU $lo1,$lo0,$lo1 # tp[i]-np[i]
+ sgtu $at,$lo1,$lo0
+ $SUBU $lo0,$lo1,$hi0
+ sgtu $hi0,$lo0,$lo1
+ $ST $lo0,($rp)
+ or $hi0,$at
+ sltu $at,$tp,$tj
+ bnez $at,.Lsub
+ $PTR_ADD $rp,$BNSZ
+
+ $SUBU $hi0,$hi1,$hi0 # handle upmost overflow bit
+ move $tp,$sp
+ $PTR_SUB $rp,$num # restore rp
+ not $hi1,$hi0
+
+ and $ap,$hi0,$sp
+ and $bp,$hi1,$rp
+ or $ap,$ap,$bp # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: $LD $aj,($ap)
+ $PTR_ADD $ap,$BNSZ
+ $ST $zero,($tp)
+ $PTR_ADD $tp,$BNSZ
+ sltu $at,$tp,$tj
+ $ST $aj,($rp)
+ bnez $at,.Lcopy
+ $PTR_ADD $rp,$BNSZ
+
+ li $a0,1
+ li $t0,1
+
+ .set noreorder
+ move $sp,$fp
+ $REG_L $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_L $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_L $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_L $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_L $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_L $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_L $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_L $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_L $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_L $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_L $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_L $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE*$SZREG
+.end bn_mul_mont_internal
+.rdata
+.asciiz "Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/mips.pl b/openssl/crypto/bn/asm/mips.pl
new file mode 100644
index 000000000..c162a3ec2
--- /dev/null
+++ b/openssl/crypto/bn/asm/mips.pl
@@ -0,0 +1,2585 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project.
+#
+# Rights for redistribution and usage in source and binary forms are
+# granted according to the OpenSSL license. Warranty of any kind is
+# disclaimed.
+# ====================================================================
+
+
+# July 1999
+#
+# This is drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
+#
+# The module is designed to work with either of the "new" MIPS ABI(5),
+# namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
+# IRIX 5.x not only because it doesn't support new ABIs but also
+# because 5.x kernels put R4x00 CPU into 32-bit mode and all those
+# 64-bit instructions (daddu, dmultu, etc.) found below would only
+# cause illegal instruction exceptions:-(
+#
+# In addition the code depends on preprocessor flags set up by MIPSpro
+# compiler driver (either as or cc) and therefore (probably?) can't be
+# compiled by the GNU assembler. GNU C driver manages fine though...
+# I mean as long as -mmips-as is specified or is the default option,
+# because then it simply invokes /usr/bin/as which in turn takes
+# perfect care of the preprocessor definitions. Another neat feature
+# offered by the MIPSpro assembler is an optimization pass. This gave
+# me the opportunity to have the code looking more regular as all those
+# architecture dependent instruction rescheduling details were left to
+# the assembler. Cool, huh?
+#
+# Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
+# goes way over 3 times faster!
+#
+# <appro@fy.chalmers.se>
+
+# October 2010
+#
+# Adapt the module even for 32-bit ABIs and other OSes. The former was
+# achieved by mechanical replacement of 64-bit arithmetic instructions
+# such as dmultu, daddu, etc. with their 32-bit counterparts and
+# adjusting offsets denoting multiples of BN_ULONG. Above mentioned
+# >3x performance improvement naturally does not apply to 32-bit code
+# [because there is no instruction 32-bit compiler can't use], one
+# has to be content with 40-85% improvement depending on benchmark and
+# key length, more for longer keys.
+
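
As an orientation aid for the routines that follow, here is what the first one, bn_mul_add_words, computes, written out in plain C with 32-bit words: rp[i] += ap[i]*w with carry propagation, returning the final carry. The name mul_add_words and the fixed 32-bit word size are illustrative; it mirrors the portable fallback in bn_asm.c only in spirit, while the 64-bit flavours below do the same thing with dmultu/daddu one 64-bit word at a time.

    #include <stdint.h>

    /* illustrative reference semantics for bn_mul_add_words */
    static uint32_t mul_add_words(uint32_t *rp, const uint32_t *ap,
                                  int num, uint32_t w)
    {
        uint32_t carry = 0;

        while (num-- > 0) {
            uint64_t t = (uint64_t)(*ap++) * w + *rp + carry;
            *rp++ = (uint32_t)t;
            carry = (uint32_t)(t >> 32);
        }
        return carry;
    }
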
+$flavour = shift;
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $DIVU="ddivu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $SRL="dsrl";
+ $SLL="dsll";
+ $BNSZ=8;
+ $PTR_ADD="daddu";
+ $PTR_SUB="dsubu";
+ $SZREG=8;
+ $REG_S="sd";
+ $REG_L="ld";
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $DIVU="divu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $SRL="srl";
+ $SLL="sll";
+ $BNSZ=4;
+ $PTR_ADD="addu";
+ $PTR_SUB="subu";
+ $SZREG=4;
+ $REG_S="sw";
+ $REG_L="lw";
+ $code=".set mips2\n";
+}
+
+# Below is N32/64 register layout used in the original module.
+#
+($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+($ta0,$ta1,$ta2,$ta3)=($a4,$a5,$a6,$a7);
+#
+# No special adaptation is required for O32. NUBI on the other hand
+# is treated by saving/restoring ($v1,$t0..$t3).
+
+$gp=$v1 if ($flavour =~ /nubi/i);
+
+$minus4=$v1;
+
+$code.=<<___;
+.rdata
+.asciiz "mips3.s, Version 1.2"
+.asciiz "MIPS II/III/IV ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>"
+
+.text
+.set noat
+
+.align 5
+.globl bn_mul_add_words
+.ent bn_mul_add_words
+bn_mul_add_words:
+ .set noreorder
+ bgtz $a2,bn_mul_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words
+
+.align 5
+.ent bn_mul_add_words_internal
+bn_mul_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ $LD $t0,0($a1)
+ beqz $ta0,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ $LD $t2,$BNSZ($a1)
+ $LD $t3,$BNSZ($a0)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0 # All manuals say it "compares 32-bit
+ # values", but it seems to work fine
+ # even on 64-bit registers.
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ $MULTU $t2,$a3
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+
+ $LD $ta2,3*$BNSZ($a1)
+ $LD $ta3,3*$BNSZ($a0)
+ $ADDU $t3,$v0
+ sltu $v0,$t3,$v0
+ mflo $at
+ mfhi $t2
+ $ADDU $t3,$at
+ $ADDU $v0,$t2
+ $MULTU $ta0,$a3
+ sltu $at,$t3,$at
+ $ST $t3,$BNSZ($a0)
+ $ADDU $v0,$at
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ $ADDU $ta1,$v0
+ sltu $v0,$ta1,$v0
+ mflo $at
+ mfhi $ta0
+ $ADDU $ta1,$at
+ $ADDU $v0,$ta0
+ $MULTU $ta2,$a3
+ sltu $at,$ta1,$at
+ $ST $ta1,-2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+
+ and $ta0,$a2,$minus4
+ $ADDU $ta3,$v0
+ sltu $v0,$ta3,$v0
+ mflo $at
+ mfhi $ta2
+ $ADDU $ta3,$at
+ $ADDU $v0,$ta2
+ sltu $at,$ta3,$at
+ $ST $ta3,-$BNSZ($a0)
+ $ADDU $v0,$at
+ .set noreorder
+ bgtzl $ta0,.L_bn_mul_add_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a2,.L_bn_mul_add_words_return
+ nop
+
+.L_bn_mul_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,$BNSZ($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+.L_bn_mul_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words_internal
+
+.align 5
+.globl bn_mul_words
+.ent bn_mul_words
+bn_mul_words:
+ .set noreorder
+ bgtz $a2,bn_mul_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words
+
+.align 5
+.ent bn_mul_words_internal
+bn_mul_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ $LD $t0,0($a1)
+ beqz $ta0,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ $MULTU $t0,$a3
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $MULTU $t2,$a3
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $at
+ mfhi $t2
+ $ADDU $v0,$at
+ sltu $t3,$v0,$at
+ $MULTU $ta0,$a3
+ $ST $v0,-3*$BNSZ($a0)
+ $ADDU $v0,$t3,$t2
+
+ mflo $at
+ mfhi $ta0
+ $ADDU $v0,$at
+ sltu $ta1,$v0,$at
+ $MULTU $ta2,$a3
+ $ST $v0,-2*$BNSZ($a0)
+ $ADDU $v0,$ta1,$ta0
+
+ and $ta0,$a2,$minus4
+ mflo $at
+ mfhi $ta2
+ $ADDU $v0,$at
+ sltu $ta3,$v0,$at
+ $ST $v0,-$BNSZ($a0)
+ $ADDU $v0,$ta3,$ta2
+ .set noreorder
+ bgtzl $ta0,.L_bn_mul_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a2,.L_bn_mul_words_return
+ nop
+
+.L_bn_mul_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,2*$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+
+.L_bn_mul_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words_internal
+
+.align 5
+.globl bn_sqr_words
+.ent bn_sqr_words
+bn_sqr_words:
+ .set noreorder
+ bgtz $a2,bn_sqr_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_sqr_words
+
+.align 5
+.ent bn_sqr_words_internal
+bn_sqr_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ $LD $t0,0($a1)
+ beqz $ta0,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ $MULTU $t0,$t0
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+
+ $MULTU $t2,$t2
+ subu $a2,4
+ $PTR_ADD $a0,8*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $t3
+ mfhi $t2
+ $ST $t3,-6*$BNSZ($a0)
+ $ST $t2,-5*$BNSZ($a0)
+
+ $MULTU $ta0,$ta0
+ mflo $ta1
+ mfhi $ta0
+ $ST $ta1,-4*$BNSZ($a0)
+ $ST $ta0,-3*$BNSZ($a0)
+
+
+ $MULTU $ta2,$ta2
+ and $ta0,$a2,$minus4
+ mflo $ta3
+ mfhi $ta2
+ $ST $ta3,-2*$BNSZ($a0)
+ $ST $ta2,-$BNSZ($a0)
+
+ .set noreorder
+ bgtzl $ta0,.L_bn_sqr_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a2,.L_bn_sqr_words_return
+ nop
+
+.L_bn_sqr_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,2*$BNSZ($a0)
+ $ST $t0,3*$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$t0
+ mflo $t1
+ mfhi $t0
+ $ST $t1,4*$BNSZ($a0)
+ $ST $t0,5*$BNSZ($a0)
+
+.L_bn_sqr_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_sqr_words_internal
+
+.align 5
+.globl bn_add_words
+.ent bn_add_words
+bn_add_words:
+ .set noreorder
+ bgtz $a3,bn_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_add_words
+
+.align 5
+.ent bn_add_words_internal
+bn_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ $LD $t0,0($a1)
+ beqz $at,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ $ADDU $ta0,$t0
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta1,$t1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta3,$t3
+ sltu $t9,$ta3,$t3
+ $ADDU $t3,$ta3,$v0
+ sltu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ .set noreorder
+ bgtzl $at,.L_bn_add_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a3,.L_bn_add_words_return
+ nop
+
+.L_bn_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ $ADDU $ta0,$t0
+ subu $a3,1
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t1,$BNSZ($a1)
+ $LD $ta1,$BNSZ($a2)
+ $ADDU $ta1,$t1
+ subu $a3,1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_add_words_internal
+
+.align 5
+.globl bn_sub_words
+.ent bn_sub_words
+bn_sub_words:
+ .set noreorder
+ bgtz $a3,bn_sub_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$zero
+.end bn_sub_words
+
+.align 5
+.ent bn_sub_words_internal
+bn_sub_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ $LD $t0,0($a1)
+ beqz $at,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t3,$ta3
+ $SUBU $ta3,$t3,$ta3
+ $SUBU $t3,$ta3,$v0
+ sgtu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ .set noreorder
+ bgtzl $at,.L_bn_sub_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a3,.L_bn_sub_words_return
+ nop
+
+.L_bn_sub_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,1
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t1,$BNSZ($a1)
+ subu $a3,1
+ $LD $ta1,$BNSZ($a2)
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_sub_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_sub_words_internal
+
+.align 5
+.globl bn_div_3_words
+.ent bn_div_3_words
+bn_div_3_words:
+ .set noreorder
+ move $a3,$a0 # we know that bn_div_words does not
+ # touch $a3, $ta2, $ta3 and preserves $a2
+ # so that we can save two arguments
+ # and return address in registers
+ # instead of stack:-)
+
+ $LD $a0,($a3)
+ move $ta2,$a1
+ bne $a0,$a2,bn_div_3_words_internal
+ $LD $a1,-$BNSZ($a3)
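+				# if the top dividend word equals the
+				# divisor's top word, fall through and
+				# return ~0: the quotient estimate
+				# saturates to all ones.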
+ li $v0,-1
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words
+
+.align 5
+.ent bn_div_3_words_internal
+bn_div_3_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ move $ta3,$ra
+ bal bn_div_words
+ move $ra,$ta3
+ $MULTU $ta2,$v0
+ $LD $t2,-2*$BNSZ($a3)
+ move $ta0,$zero
+ mfhi $t1
+ mflo $t0
+ sltu $t8,$t1,$a1
+.L_bn_div_3_words_inner_loop:
+ bnez $t8,.L_bn_div_3_words_inner_loop_done
+ sgeu $at,$t2,$t0
+ seq $t9,$t1,$a1
+ and $at,$t9
+ sltu $t3,$t0,$ta2
+ $ADDU $a1,$a2
+ $SUBU $t1,$t3
+ $SUBU $t0,$ta2
+ sltu $t8,$t1,$a1
+ sltu $ta0,$a1,$a2
+ or $t8,$ta0
+ .set noreorder
+ beqzl $at,.L_bn_div_3_words_inner_loop
+ $SUBU $v0,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words_internal
+
+.align 5
+.globl bn_div_words
+.ent bn_div_words
+bn_div_words:
+ .set noreorder
+ bnez $a2,bn_div_words_internal
+ li $v0,-1 # I would rather signal div-by-zero
+ # which can be done with 'break 7'
+ jr $ra
+ move $a0,$v0
+.end bn_div_words
+
+.align 5
+.ent bn_div_words_internal
+bn_div_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ move $v1,$zero
+ bltz $a2,.L_bn_div_words_body
+ move $t9,$v1
+ $SLL $a2,1
+ bgtz $a2,.-4
+ addu $t9,1
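+				# the loop above counted the divisor's
+				# leading zero bits into $t9 while shifting
+				# $a2 left until its MSB became set.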
+
+ .set reorder
+ negu $t1,$t9
+ li $t2,-1
+ $SLL $t2,$t1
+ and $t2,$a0
+ $SRL $at,$a1,$t1
+ .set noreorder
+ bnezl $t2,.+8
+ break 6 # signal overflow
+ .set reorder
+ $SLL $a0,$t9
+ $SLL $a1,$t9
+ or $a0,$at
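+				# the double-word dividend $a0:$a1 has been
+				# shifted left by the same $t9 bits; the
+				# bnezl/break sequence above signals overflow
+				# if that would drop nonzero bits of $a0.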
+___
+$QT=$ta0;
+$HH=$ta1;
+$DH=$v1;
+$code.=<<___;
+.L_bn_div_words_body:
+ $SRL $DH,$a2,4*$BNSZ # bits
+ sgeu $at,$a0,$a2
+ .set noreorder
+ bnezl $at,.+8
+ $SUBU $a0,$a2
+ .set reorder
+
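+	# The quotient is produced in two rounds of half a word each: the
+	# current remainder word $a0 is divided by the divisor's top half
+	# $DH to get an estimate $QT (saturated to all ones when that would
+	# overflow), which the inner loop below corrects downwards.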
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div1
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div1:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop1:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v0,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop1_done
+ $SUBU $t1,$v0
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop1
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ $SLL $a1,4*$BNSZ # bits
+ $SUBU $a0,$t3,$t0
+ $SLL $v0,$QT,4*$BNSZ # bits
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div2
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div2:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop2:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v1,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop2_done
+ $SUBU $t1,$v1
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop2
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+
+ $SUBU $a0,$t3,$t0
+ or $v0,$QT
+ $SRL $v1,$a0,$t9 # $v1 contains remainder if anybody wants it
+ $SRL $a2,$t9 # restore $a2
+
+ .set noreorder
+ move $a1,$v1
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_words_internal
+___
+undef $HH; undef $QT; undef $DH;
+
+($a_0,$a_1,$a_2,$a_3)=($t0,$t1,$t2,$t3);
+($b_0,$b_1,$b_2,$b_3)=($ta0,$ta1,$ta2,$ta3);
+
+($a_4,$a_5,$a_6,$a_7)=($s0,$s2,$s4,$a1); # once we load a[7], no use for $a1
+($b_4,$b_5,$b_6,$b_7)=($s1,$s3,$s5,$a2); # once we load b[7], no use for $a2
+
+($t_1,$t_2,$c_1,$c_2,$c_3)=($t8,$t9,$v0,$v1,$a3);
+
+$code.=<<___;
+
+.align 5
+.globl bn_mul_comba8
+.ent bn_mul_comba8
+bn_mul_comba8:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,12*$SZREG,$ra
+ .mask 0x803ff008,-$SZREG
+ $PTR_SUB $sp,12*$SZREG
+ $REG_S $ra,11*$SZREG($sp)
+ $REG_S $s5,10*$SZREG($sp)
+ $REG_S $s4,9*$SZREG($sp)
+ $REG_S $s3,8*$SZREG($sp)
+ $REG_S $s2,7*$SZREG($sp)
+ $REG_S $s1,6*$SZREG($sp)
+ $REG_S $s0,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x003f0000,-$SZREG
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $s5,5*$SZREG($sp)
+ $REG_S $s4,4*$SZREG($sp)
+ $REG_S $s3,3*$SZREG($sp)
+ $REG_S $s2,2*$SZREG($sp)
+ $REG_S $s1,1*$SZREG($sp)
+ $REG_S $s0,0*$SZREG($sp)
+___
+$code.=<<___;
+
+ .set reorder
+ $LD $a_0,0($a1) # If compiled with -mips3 option on
+ # R5000 box assembler barks on this
+				# line with "should not have mult/div
+ # as last instruction in bb (R10K
+ # bug)" warning. If anybody out there
+ # has a clue about how to circumvent
+ # this do send me a note.
+ # <appro\@fy.chalmers.se>
+
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+
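+				# from here on each mul_add_c(a[i],b[j]) group
+				# adds the double-word product into the rotating
+				# three-word accumulator c1/c2/c3, with sltu
+				# picking up the carries; the next $MULTU is
+				# issued early so the multiplier runs while the
+				# carry bookkeeping of the previous product
+				# completes.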
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ $LD $b_4,4*$BNSZ($a2)
+ $LD $b_5,5*$BNSZ($a2)
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ $LD $b_6,6*$BNSZ($a2)
+ $LD $b_7,7*$BNSZ($a2)
+ $ST $c_1,0($a0) # r[0]=c1;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0) # r[1]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0) # r[2]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_0 # mul_add_c(a[4],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0) # r[3]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_4 # mul_add_c(a[0],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_5 # mul_add_c(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0) # r[4]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_4 # mul_add_c(a[1],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_1 # mul_add_c(a[4],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_0 # mul_add_c(a[5],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_0 # mul_add_c(a[6],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0) # r[5]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_1 # mul_add_c(a[5],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_2 # mul_add_c(a[4],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_4 # mul_add_c(a[2],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_5 # mul_add_c(a[1],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_6 # mul_add_c(a[0],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_7 # mul_add_c(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0) # r[6]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_6 # mul_add_c(a[1],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_5 # mul_add_c(a[2],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_4 # mul_add_c(a[3],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_3 # mul_add_c(a[4],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_2 # mul_add_c(a[5],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_1 # mul_add_c(a[6],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_0 # mul_add_c(a[7],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_1 # mul_add_c(a[7],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0) # r[7]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_2 # mul_add_c(a[6],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_3 # mul_add_c(a[5],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_5 # mul_add_c(a[3],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_6 # mul_add_c(a[2],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_7 # mul_add_c(a[1],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_7 # mul_add_c(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0) # r[8]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_6 # mul_add_c(a[3],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_5 # mul_add_c(a[4],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_4 # mul_add_c(a[5],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_3 # mul_add_c(a[6],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_2 # mul_add_c(a[7],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_3 # mul_add_c(a[7],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0) # r[9]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_4 # mul_add_c(a[6],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_6 # mul_add_c(a[4],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_7 # mul_add_c(a[3],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_7 # mul_add_c(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0) # r[10]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_6 # mul_add_c(a[5],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_5 # mul_add_c(a[6],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_4 # mul_add_c(a[7],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_5 # mul_add_c(a[7],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0) # r[11]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_7 # mul_add_c(a[5],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_7 # mul_add_c(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0) # r[12]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_6 # mul_add_c(a[7],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0) # r[13]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0) # r[14]=c3;
+ $ST $c_1,15*$BNSZ($a0) # r[15]=c1;
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s5,10*$SZREG($sp)
+ $REG_L $s4,9*$SZREG($sp)
+ $REG_L $s3,8*$SZREG($sp)
+ $REG_L $s2,7*$SZREG($sp)
+ $REG_L $s1,6*$SZREG($sp)
+ $REG_L $s0,5*$SZREG($sp)
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,12*$SZREG
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ $REG_L $s5,5*$SZREG($sp)
+ $REG_L $s4,4*$SZREG($sp)
+ $REG_L $s3,3*$SZREG($sp)
+ $REG_L $s2,2*$SZREG($sp)
+ $REG_L $s1,1*$SZREG($sp)
+ $REG_L $s0,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+.end bn_mul_comba8
+
+.align 5
+.globl bn_mul_comba4
+.ent bn_mul_comba4
+bn_mul_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_mul_comba4
+___
+
+($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3);
+
+$code.=<<___;
+
+.align 5
+.globl bn_sqr_comba8
+.ent bn_sqr_comba8
+bn_sqr_comba8:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
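+					# mul_add_c2 terms (off-diagonal products)
+					# are doubled before being accumulated:
+					# "slt x,t,$zero" saves the bit that the
+					# following "$SLL t,1" shifts out, so the
+					# carry of 2*(t_2:t_1) is not lost.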
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_0 # mul_add_c2(a[4],b[0],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$a_5 # mul_add_c2(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_4 # mul_add_c2(a[1],b[4],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $MULTU $a_6,$a_0 # mul_add_c2(a[6],b[0],c1,c2,c3);
+ $ADDU $c_2,$at
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_5,$a_1 # mul_add_c2(a[5],b[1],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_2 # mul_add_c2(a[4],b[2],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$a_7 # mul_add_c2(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_6 # mul_add_c2(a[1],b[6],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_2,$a_5 # mul_add_c2(a[2],b[5],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_3,$a_4 # mul_add_c2(a[3],b[4],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_7,$a_1 # mul_add_c2(a[7],b[1],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_2 # mul_add_c2(a[6],b[2],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_5,$a_3 # mul_add_c2(a[5],b[3],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_4,$a_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$a_7 # mul_add_c2(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_6 # mul_add_c2(a[3],b[6],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_5 # mul_add_c2(a[4],b[5],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_7,$a_3 # mul_add_c2(a[7],b[3],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_4 # mul_add_c2(a[6],b[4],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_5,$a_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$a_7 # mul_add_c2(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_5,$a_6 # mul_add_c2(a[5],b[6],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_7,$a_5 # mul_add_c2(a[7],b[5],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$a_7 # mul_add_c2(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_7,$a_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0)
+ $ST $c_1,15*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba8
+
+.align 5
+.globl bn_sqr_comba4
+.ent bn_sqr_comba4
+bn_sqr_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+	$MULTU	$a_1,$a_2		# mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba4
+___
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/modexp512-x86_64.pl b/openssl/crypto/bn/asm/modexp512-x86_64.pl
new file mode 100644
index 000000000..54aeb0192
--- /dev/null
+++ b/openssl/crypto/bn/asm/modexp512-x86_64.pl
@@ -0,0 +1,1496 @@
+#!/usr/bin/env perl
+#
+# Copyright (c) 2010-2011 Intel Corp.
+# Author: Vinodh.Gopal@intel.com
+# Jim Guilford
+# Erdinc.Ozturk@intel.com
+# Maxim.Perminov@intel.com
+#
+# More information about algorithm used can be found at:
+# http://www.cse.buffalo.edu/srds2009/escs2009_submission_Gopal.pdf
+#
+# ====================================================================
+# Copyright (c) 2011 The OpenSSL Project. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# 3. All advertising materials mentioning features or use of this
+# software must display the following acknowledgment:
+# "This product includes software developed by the OpenSSL Project
+# for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+#
+# 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+# endorse or promote products derived from this software without
+# prior written permission. For written permission, please contact
+# licensing@OpenSSL.org.
+#
+# 5. Products derived from this software may not be called "OpenSSL"
+# nor may "OpenSSL" appear in their names without prior written
+# permission of the OpenSSL Project.
+#
+# 6. Redistributions of any form whatsoever must retain the following
+# acknowledgment:
+# "This product includes software developed by the OpenSSL Project
+# for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+#
+# THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+# ====================================================================
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+use strict;
+my $code=".text\n\n";
+my $m=0;
+
+#
+# Define x512 macros
+#
+
+#MULSTEP_512_ADD MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src1, src2, add_src, tmp1, tmp2
+#
+# uses rax, rdx, and args
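+#
+# In effect this emits one row of a schoolbook multiply-accumulate:
+#	acc     = ASRC[0..7] + OP*SRC2[0..7]	(nine 64-bit limbs)
+#	DST     = limb 0 of acc			(stored to memory)
+#	X[1..7] = limbs 1..7 of acc		(kept in registers)
+#	X[0]    = limb 8 of acc			(carry into the next row)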
+sub MULSTEP_512_ADD
+{
+ my ($x, $DST, $SRC2, $ASRC, $OP, $TMP)=@_;
+ my @X=@$x; # make a copy
+$code.=<<___;
+ mov (+8*0)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [0]
+ mov ($ASRC), $X[0]
+ add %rax, $X[0]
+ adc \$0, %rdx
+ mov $X[0], $DST
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $TMP
+
+ mov (+8*$i)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ mov (+8*$i)($ASRC), $X[$i]
+ add %rax, $X[$i]
+ adc \$0, %rdx
+ add $TMP, $X[$i]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $X[0]
+___
+}
+
+#MULSTEP_512 MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src2, src1_val, tmp
+#
+# uses rax, rdx, and args
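+#
+# Same as MULSTEP_512_ADD except that the addend limbs are already in
+# X[0..7] instead of being loaded from memory:
+#	acc = X[0..7] + OP*SRC2[0..7], with DST and X updated as above.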
+sub MULSTEP_512
+{
+ my ($x, $DST, $SRC2, $OP, $TMP)=@_;
+ my @X=@$x; # make a copy
+$code.=<<___;
+ mov (+8*0)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [0]
+ add %rax, $X[0]
+ adc \$0, %rdx
+ mov $X[0], $DST
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $TMP
+
+ mov (+8*$i)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ add %rax, $X[$i]
+ adc \$0, %rdx
+ add $TMP, $X[$i]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $X[0]
+___
+}
+
+#
+# Swizzle Macros
+#
+
+# macro to copy data from flat space to swizzled table
+#MACRO swizzle pDst, pSrc, tmp1, tmp2
+# pDst and pSrc are modified
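+# Each 64-bit word is split into four 16-bit pieces stored 64 bytes apart,
+# and consecutive words of one entry land 256 bytes apart, so a 512-bit
+# entry is spread across the table in 2-byte slots. Presumably this makes
+# a later table lookup touch the same set of cache lines regardless of
+# which entry is fetched.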
+sub swizzle
+{
+ my ($pDst, $pSrc, $cnt, $d0)=@_;
+$code.=<<___;
+ mov \$8, $cnt
+loop_$m:
+ mov ($pSrc), $d0
+ mov $d0#w, ($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*1)($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*2)($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*3)($pDst)
+ lea 8($pSrc), $pSrc
+ lea 64*4($pDst), $pDst
+ dec $cnt
+ jnz loop_$m
+___
+
+ $m++;
+}
+
+# macro to copy data from swizzled table to flat space
+#MACRO unswizzle pDst, pSrc, tmp*3
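+# Inverse of swizzle: each 64-bit word is reassembled from its four 16-bit
+# pieces spaced 64 bytes apart, two words per iteration.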
+sub unswizzle
+{
+ my ($pDst, $pSrc, $cnt, $d0, $d1)=@_;
+$code.=<<___;
+ mov \$4, $cnt
+loop_$m:
+ movzxw (+64*3+256*0)($pSrc), $d0
+ movzxw (+64*3+256*1)($pSrc), $d1
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*2+256*0)($pSrc), $d0#w
+ mov (+64*2+256*1)($pSrc), $d1#w
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*1+256*0)($pSrc), $d0#w
+ mov (+64*1+256*1)($pSrc), $d1#w
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*0+256*0)($pSrc), $d0#w
+ mov (+64*0+256*1)($pSrc), $d1#w
+ mov $d0, (+8*0)($pDst)
+ mov $d1, (+8*1)($pDst)
+ lea 256*2($pSrc), $pSrc
+ lea 8*2($pDst), $pDst
+ sub \$1, $cnt
+ jnz loop_$m
+___
+
+ $m++;
+}
+
+#
+# Data Structures
+#
+
+# Reduce Data
+#
+#
+# Offset Value
+# 0C0 Carries
+# 0B8 X2[10]
+# 0B0 X2[9]
+# 0A8 X2[8]
+# 0A0 X2[7]
+# 098 X2[6]
+# 090 X2[5]
+# 088 X2[4]
+# 080 X2[3]
+# 078 X2[2]
+# 070 X2[1]
+# 068 X2[0]
+# 060 X1[12] P[10]
+# 058 X1[11] P[9] Z[8]
+# 050 X1[10] P[8] Z[7]
+# 048 X1[9] P[7] Z[6]
+# 040 X1[8] P[6] Z[5]
+# 038 X1[7] P[5] Z[4]
+# 030 X1[6] P[4] Z[3]
+# 028 X1[5] P[3] Z[2]
+# 020 X1[4] P[2] Z[1]
+# 018 X1[3] P[1] Z[0]
+# 010 X1[2] P[0] Y[2]
+# 008 X1[1] Q[1] Y[1]
+# 000 X1[0] Q[0] Y[0]
+
+my $X1_offset = 0; # 13 qwords
+my $X2_offset = $X1_offset + 13*8; # 11 qwords
+my $Carries_offset = $X2_offset + 11*8; # 1 qword
+my $Q_offset = 0; # 2 qwords
+my $P_offset = $Q_offset + 2*8; # 11 qwords
+my $Y_offset = 0; # 3 qwords
+my $Z_offset = $Y_offset + 3*8; # 9 qwords
+
+my $Red_Data_Size = $Carries_offset + 1*8; # (25 qwords)
+
+#
+# Stack Frame
+#
+#
+# offset value
+# ... <old stack contents>
+# ...
+# 280 Garray
+
+# 278 tmp16[15]
+# ... ...
+# 200 tmp16[0]
+
+# 1F8 tmp[7]
+# ... ...
+# 1C0 tmp[0]
+
+# 1B8 GT[7]
+# ... ...
+# 180 GT[0]
+
+# 178 Reduce Data
+# ... ...
+# 0B8 Reduce Data
+# 0B0 reserved
+# 0A8 reserved
+# 0A0 reserved
+# 098 reserved
+# 090 reserved
+# 088 reduce result addr
+# 080 exp[8]
+
+# ...
+# 048 exp[1]
+# 040 exp[0]
+
+# 038 reserved
+# 030 loop_idx
+# 028 pg
+# 020 i
+# 018 pData ; arg 4
+# 010 pG ; arg 2
+# 008 pResult ; arg 1
+# 000 rsp ; stack pointer before subtract
+
+my $rsp_offset = 0;
+my $pResult_offset = 8*1 + $rsp_offset;
+my $pG_offset = 8*1 + $pResult_offset;
+my $pData_offset = 8*1 + $pG_offset;
+my $i_offset = 8*1 + $pData_offset;
+my $pg_offset = 8*1 + $i_offset;
+my $loop_idx_offset = 8*1 + $pg_offset;
+my $reserved1_offset = 8*1 + $loop_idx_offset;
+my $exp_offset = 8*1 + $reserved1_offset;
+my $red_result_addr_offset= 8*9 + $exp_offset;
+my $reserved2_offset = 8*1 + $red_result_addr_offset;
+my $Reduce_Data_offset = 8*5 + $reserved2_offset;
+my $GT_offset = $Red_Data_Size + $Reduce_Data_offset;
+my $tmp_offset = 8*8 + $GT_offset;
+my $tmp16_offset = 8*8 + $tmp_offset;
+my $garray_offset = 8*16 + $tmp16_offset;
+my $mem_size = 8*8*32 + $garray_offset;
+
+#
+# Offsets within Reduce Data
+#
+#
+# struct MODF_2FOLD_MONT_512_C1_DATA {
+# UINT64 t[8][8];
+# UINT64 m[8];
+# UINT64 m1[8]; /* 2^768 % m */
+# UINT64 m2[8]; /* 2^640 % m */
+# UINT64 k1[2]; /* (- 1/m) % 2^128 */
+# };
+
+my $T = 0;
+my $M = 512; # = 8 * 8 * 8
+my $M1 = 576; # = 8 * 8 * 9 /* += 8 * 8 */
+my $M2 = 640; # = 8 * 8 * 10 /* += 8 * 8 */
+my $K1 = 704; # = 8 * 8 * 11 /* += 8 * 8 */
+
+#
+# FUNCTIONS
+#
+
+{{{
+#
+# MULADD_128x512 : Function to multiply 128-bits (2 qwords) by 512-bits (8 qwords)
+# and add 512-bits (8 qwords)
+# to get 640 bits (10 qwords)
+# Input: 128-bit mul source: [rdi+8*1], rbp
+# 512-bit mul source: [rsi+8*n]
+# 512-bit add source: r15, r14, ..., r9, r8
+# Output: r9, r8, r15, r14, r13, r12, r11, r10, [rcx+8*1], [rcx+8*0]
+# Clobbers all regs except: rcx, rsi, rdi
+$code.=<<___;
+.type MULADD_128x512,\@abi-omnipotent
+.align 16
+MULADD_128x512:
+___
+ &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
+$code.=<<___;
+ mov (+8*1)(%rdi), %rbp
+___
+ &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
+$code.=<<___;
+ ret
+.size MULADD_128x512,.-MULADD_128x512
+___
+}}}
+
+{{{
+#MULADD_256x512 MACRO pDst, pA, pB, OP, TMP, X7, X6, X5, X4, X3, X2, X1, X0
+#
+# Inputs: pDst: Destination (768 bits, 12 qwords)
+# pA: Multiplicand (1024 bits, 16 qwords)
+# pB: Multiplicand (512 bits, 8 qwords)
+# Dst = Ah * B + Al
+# where Ah is (in qwords) A[15:12] (256 bits) and Al is A[7:0] (512 bits)
+# Results in X3 X2 X1 X0 X7 X6 X5 X4 Dst[3:0]
+# Uses registers: arguments, RAX, RDX
+sub MULADD_256x512
+{
+ my ($pDst, $pA, $pB, $OP, $TMP, $X)=@_;
+$code.=<<___;
+ mov (+8*12)($pA), $OP
+___
+ &MULSTEP_512_ADD($X, "(+8*0)($pDst)", $pB, $pA, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*13)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*1)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*14)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*2)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*15)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*3)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+}
+
+#
+# mont_reduce(UINT64 *x, /* 1024 bits, 16 qwords */
+# UINT64 *m, /* 512 bits, 8 qwords */
+# MODF_2FOLD_MONT_512_C1_DATA *data,
+# UINT64 *r) /* 512 bits, 8 qwords */
+# Input: x (number to be reduced): tmp16 (Implicit)
+# m (modulus): [pM] (Implicit)
+# data (reduce data): [pData] (Implicit)
+# Output: r (result): Address in [red_res_addr]
+# result also in: r9, r8, r15, r14, r13, r12, r11, r10
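+
+# Exact-arithmetic outline of the reduction below (reference only, never
+# called; Math::BigInt as above). It spells out the two folds and the
+# final 128-bit Montgomery step. The assembly folds the stray carry bits
+# back in through the t[][] table and a single conditional subtraction,
+# where this sketch simply subtracts m until reduced, but the congruence
+# is the same: the result is x * 2^-128 mod m.
+sub fold_reduce_model {
+	my ($x, $m) = @_;                   # Math::BigInt: 0 <= x < 2^1024, m odd, 512-bit
+	my $b128 = Math::BigInt->bone->blsft(128);
+	my $b640 = Math::BigInt->bone->blsft(640);
+	my $b768 = Math::BigInt->bone->blsft(768);
+	my $m1   = $b768 % $m;                              # 2^768 % m
+	my $m2   = $b640 % $m;                              # 2^640 % m
+	my $k1   = $b128 - ($m % $b128)->bmodinv($b128);    # (-1/m) % 2^128
+	my $x1   = ($x >> 768) * $m1 + ($x % $b768);        # fold 1: x1 == x (mod m)
+	my $x2   = ($x1 >> 640) * $m2 + ($x1 % $b640);      # fold 2: x2 == x (mod m)
+	my $q    = ($x2 * $k1) % $b128;
+	my $r    = ($x2 + $q * $m) / $b128;                 # exact: low 128 bits cancel
+	$r -= $m while $r >= $m;
+	return $r;                                          # == x * 2^-128 mod m
+}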
+
+my @X=map("%r$_",(8..15));
+
+$code.=<<___;
+.type mont_reduce,\@abi-omnipotent
+.align 16
+mont_reduce:
+___
+
+my $STACK_DEPTH = 8;
+ #
+ # X1 = Xh * M1 + Xl
+$code.=<<___;
+ lea (+$Reduce_Data_offset+$X1_offset+$STACK_DEPTH)(%rsp), %rdi # pX1 (Dst) 769 bits, 13 qwords
+ mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rsi # pM1 (Bsrc) 512 bits, 8 qwords
+ add \$$M1, %rsi
+ lea (+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx # X (Asrc) 1024 bits, 16 qwords
+
+___
+
+ &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times
+ # results in r11, r10, r9, r8, r15, r14, r13, r12, X1[3:0]
+
+$code.=<<___;
+ xor %rax, %rax
+ # X1 += xl
+ add (+8*8)(%rcx), $X[4]
+ adc (+8*9)(%rcx), $X[5]
+ adc (+8*10)(%rcx), $X[6]
+ adc (+8*11)(%rcx), $X[7]
+ adc \$0, %rax
+ # X1 is now rax, r11-r8, r15-r12, tmp16[3:0]
+
+ #
+ # check for carry ;; carry stored in rax
+ mov $X[4], (+8*8)(%rdi) # rdi points to X1
+ mov $X[5], (+8*9)(%rdi)
+ mov $X[6], %rbp
+ mov $X[7], (+8*11)(%rdi)
+
+ mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
+
+ mov (+8*0)(%rdi), $X[4]
+ mov (+8*1)(%rdi), $X[5]
+ mov (+8*2)(%rdi), $X[6]
+ mov (+8*3)(%rdi), $X[7]
+
+ # X1 is now stored in: X1[11], rbp, X1[9:8], r15-r8
+ # rdi -> X1
+ # rsi -> M1
+
+ #
+ # X2 = Xh * M2 + Xl
+ # do first part (X2 = Xh * M2)
+ add \$8*10, %rdi # rdi -> pXh ; 128 bits, 2 qwords
+ # Xh is actually { [rdi+8*1], rbp }
+ add \$`$M2-$M1`, %rsi # rsi -> M2
+ lea (+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> pX2 ; 641 bits, 11 qwords
+___
+ unshift(@X,pop(@X)); unshift(@X,pop(@X));
+$code.=<<___;
+
+ call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8
+ # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
+ mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rax
+
+ # X2 += Xl
+ add (+8*8-8*10)(%rdi), $X[6] # (-8*10) is to adjust rdi -> Xh to Xl
+ adc (+8*9-8*10)(%rdi), $X[7]
+ mov $X[6], (+8*8)(%rcx)
+ mov $X[7], (+8*9)(%rcx)
+
+ adc %rax, %rax
+ mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
+
+ lea (+$Reduce_Data_offset+$Q_offset+$STACK_DEPTH)(%rsp), %rdi # rdi -> pQ ; 128 bits, 2 qwords
+ add \$`$K1-$M2`, %rsi # rsi -> pK1 ; 128 bits, 2 qwords
+
+ # MUL_128x128t128 rdi, rcx, rsi ; Q = X2 * K1 (bottom half)
+ # B1:B0 = rsi[1:0] = K1[1:0]
+ # A1:A0 = rcx[1:0] = X2[1:0]
+ # Result = rdi[1],rbp = Q[1],rbp
+ mov (%rsi), %r8 # B0
+ mov (+8*1)(%rsi), %rbx # B1
+
+ mov (%rcx), %rax # A0
+ mul %r8 # B0
+ mov %rax, %rbp
+ mov %rdx, %r9
+
+ mov (+8*1)(%rcx), %rax # A1
+ mul %r8 # B0
+ add %rax, %r9
+
+ mov (%rcx), %rax # A0
+ mul %rbx # B1
+ add %rax, %r9
+
+ mov %r9, (+8*1)(%rdi)
+ # end MUL_128x128t128
+
+ sub \$`$K1-$M`, %rsi
+
+ mov (%rcx), $X[6]
+ mov (+8*1)(%rcx), $X[7] # r9:r8 = X2[1:0]
+
+ call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8
+ # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
+
+ # load first half of m to rdx, rdi, rbx, rax
+ # moved this here for efficiency
+ mov (+8*0)(%rsi), %rax
+ mov (+8*1)(%rsi), %rbx
+ mov (+8*2)(%rsi), %rdi
+ mov (+8*3)(%rsi), %rdx
+
+ # continue with reduction
+ mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rbp
+
+ add (+8*8)(%rcx), $X[6]
+ adc (+8*9)(%rcx), $X[7]
+
+ #accumulate the final carry to rbp
+ adc %rbp, %rbp
+
+ # Add in overflow corrections: R = (X2>>128) += T[overflow]
+ # R = {r9, r8, r15, r14, ..., r10}
+ shl \$3, %rbp
+	mov	(+$pData_offset+$STACK_DEPTH)(%rsp), %rcx	# rcx -> Data (and points to T)
+ add %rcx, %rbp # pT ; 512 bits, 8 qwords, spread out
+
+ # rsi will be used to generate a mask after the addition
+ xor %rsi, %rsi
+
+ add (+8*8*0)(%rbp), $X[0]
+ adc (+8*8*1)(%rbp), $X[1]
+ adc (+8*8*2)(%rbp), $X[2]
+ adc (+8*8*3)(%rbp), $X[3]
+ adc (+8*8*4)(%rbp), $X[4]
+ adc (+8*8*5)(%rbp), $X[5]
+ adc (+8*8*6)(%rbp), $X[6]
+ adc (+8*8*7)(%rbp), $X[7]
+
+ # if there is a carry: rsi = 0xFFFFFFFFFFFFFFFF
+ # if carry is clear: rsi = 0x0000000000000000
+ sbb \$0, %rsi
+
+ # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
+ and %rsi, %rax
+ and %rsi, %rbx
+ and %rsi, %rdi
+ and %rsi, %rdx
+
+ mov \$1, %rbp
+ sub %rax, $X[0]
+ sbb %rbx, $X[1]
+ sbb %rdi, $X[2]
+ sbb %rdx, $X[3]
+
+ # if there is a borrow: rbp = 0
+ # if there is no borrow: rbp = 1
+ # this is used to save the borrows in between the first half and the 2nd half of the subtraction of m
+ sbb \$0, %rbp
+
+ #load second half of m to rdx, rdi, rbx, rax
+
+ add \$$M, %rcx
+ mov (+8*4)(%rcx), %rax
+ mov (+8*5)(%rcx), %rbx
+ mov (+8*6)(%rcx), %rdi
+ mov (+8*7)(%rcx), %rdx
+
+ # use the rsi mask as before
+ # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
+ and %rsi, %rax
+ and %rsi, %rbx
+ and %rsi, %rdi
+ and %rsi, %rdx
+
+ # if rbp = 0, there was a borrow before, it is moved to the carry flag
+ # if rbp = 1, there was not a borrow before, carry flag is cleared
+ sub \$1, %rbp
+
+ sbb %rax, $X[4]
+ sbb %rbx, $X[5]
+ sbb %rdi, $X[6]
+ sbb %rdx, $X[7]
+
+ # write R back to memory
+
+ mov (+$red_result_addr_offset+$STACK_DEPTH)(%rsp), %rsi
+ mov $X[0], (+8*0)(%rsi)
+ mov $X[1], (+8*1)(%rsi)
+ mov $X[2], (+8*2)(%rsi)
+ mov $X[3], (+8*3)(%rsi)
+ mov $X[4], (+8*4)(%rsi)
+ mov $X[5], (+8*5)(%rsi)
+ mov $X[6], (+8*6)(%rsi)
+ mov $X[7], (+8*7)(%rsi)
+
+ ret
+.size mont_reduce,.-mont_reduce
+___
+}}}
+
+{{{
+#MUL_512x512 MACRO pDst, pA, pB, x7, x6, x5, x4, x3, x2, x1, x0, tmp*2
+#
+# Inputs: pDst: Destination (1024 bits, 16 qwords)
+# pA: Multiplicand (512 bits, 8 qwords)
+# pB: Multiplicand (512 bits, 8 qwords)
+# Uses registers rax, rdx, args
+# B operand in [pB] and also in x7...x0
+sub MUL_512x512
+{
+ my ($pDst, $pA, $pB, $x, $OP, $TMP, $pDst_o)=@_;
+ my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
+ my @X=@$x; # make a copy
+
+$code.=<<___;
+ mov (+8*0)($pA), $OP
+
+ mov $X[0], %rax
+ mul $OP # rdx:rax = %OP * [0]
+ mov %rax, (+$pDst_o+8*0)($pDst)
+ mov %rdx, $X[0]
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov $X[$i], %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ add %rax, $X[$i-1]
+ adc \$0, %rdx
+ mov %rdx, $X[$i]
+___
+}
+
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov (+8*$i)($pA), $OP
+___
+
+ &MULSTEP_512(\@X, "(+$pDst_o+8*$i)($pDst)", $pB, $OP, $TMP);
+ push(@X,shift(@X));
+}
+
+$code.=<<___;
+ mov $X[0], (+$pDst_o+8*8)($pDst)
+ mov $X[1], (+$pDst_o+8*9)($pDst)
+ mov $X[2], (+$pDst_o+8*10)($pDst)
+ mov $X[3], (+$pDst_o+8*11)($pDst)
+ mov $X[4], (+$pDst_o+8*12)($pDst)
+ mov $X[5], (+$pDst_o+8*13)($pDst)
+ mov $X[6], (+$pDst_o+8*14)($pDst)
+ mov $X[7], (+$pDst_o+8*15)($pDst)
+___
+}
+
+#
+# mont_mul_a3b : subroutine to compute (Src1 * Src2) % M (all 512-bits)
+# Input: src1: Address of source 1: rdi
+# src2: Address of source 2: rsi
+# Output: dst: Address of destination: [red_res_addr]
+# src2 and result also in: r9, r8, r15, r14, r13, r12, r11, r10
+# Temp: Clobbers [tmp16], all registers
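+
+# Reference only (relies on fold_reduce_model sketched earlier): in exact
+# arithmetic the routine below is simply multiply-then-reduce.
+sub mont_mul_a3b_model {
+	my ($a, $b, $m) = @_;		# Math::BigInt, a,b < 2^512, m odd
+	return fold_reduce_model($a * $b, $m);
+}
+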
+$code.=<<___;
+.type mont_mul_a3b,\@abi-omnipotent
+.align 16
+mont_mul_a3b:
+ #
+ # multiply tmp = src1 * src2
+ # For multiply: dst = rcx, src1 = rdi, src2 = rsi
+ # stack depth is extra 8 from call
+___
+ &MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx");
+$code.=<<___;
+ #
+ # Dst = tmp % m
+ # Call reduce(tmp, m, data, dst)
+
+ # tail recursion optimization: jmp to mont_reduce and return from there
+ jmp mont_reduce
+ # call mont_reduce
+ # ret
+.size mont_mul_a3b,.-mont_mul_a3b
+___
+}}}
+
+{{{
+#SQR_512 MACRO pDest, pA, x7, x6, x5, x4, x3, x2, x1, x0, tmp*4
+#
+# Input in memory [pA] and also in x7...x0
+# Uses all argument registers plus rax and rdx
+#
+# This version computes all of the off-diagonal terms into memory,
+# and then it adds in the diagonal terms
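+
+# The identity behind the passes below (reference only; Math::BigInt as
+# above): with a = sum a_i*2^(64*i),
+#   a^2 = sum_i a_i^2 * 2^(128*i) + 2 * sum_{i<j} a_i*a_j * 2^(64*(i+j))
+# so the off-diagonal products can be accumulated first and doubled
+# before the diagonal squares are added in. Pass limbs as strings to
+# keep 64-bit values exact.
+sub sqr_512_model {
+	my @a = map { Math::BigInt->new($_) } @_;   # 8 limbs, least significant first
+	my $off  = Math::BigInt->bzero;
+	my $diag = Math::BigInt->bzero;
+	for my $i (0 .. $#a) {
+		$diag += $a[$i] * $a[$i] << (128 * $i);
+		for my $j ($i + 1 .. $#a) {
+			$off += $a[$i] * $a[$j] << (64 * ($i + $j));
+		}
+	}
+	return $diag + ($off << 1);                 # == (sum a_i * 2^(64*i))^2
+}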
+
+sub SQR_512
+{
+ my ($pDst, $pA, $x, $A, $tmp, $x7, $x6, $pDst_o)=@_;
+ my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
+ my @X=@$x; # make a copy
+$code.=<<___;
+ # ------------------
+ # first pass 01...07
+ # ------------------
+ mov $X[0], $A
+
+ mov $X[1],%rax
+ mul $A
+ mov %rax, (+$pDst_o+8*1)($pDst)
+___
+for(my $i=2;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $X[$i-2]
+ mov $X[$i],%rax
+ mul $A
+ add %rax, $X[$i-2]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $x7
+
+ mov $X[0], (+$pDst_o+8*2)($pDst)
+
+ # ------------------
+ # second pass 12...17
+ # ------------------
+
+ mov (+8*1)($pA), $A
+
+ mov (+8*2)($pA),%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*3)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*3)($pA),%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*4)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[3]
+ adc \$0, %rdx
+ add $X[0], $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[4]
+ adc \$0, %rdx
+ add $X[0], $X[4]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+
+ mov %rdx, $X[1]
+
+ # ------------------
+ # third pass 23...27
+ # ------------------
+ mov (+8*2)($pA), $A
+
+ mov (+8*3)($pA),%rax
+ mul $A
+ add %rax, $X[3]
+ adc \$0, %rdx
+ mov $X[3], (+$pDst_o+8*5)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[4]
+ adc \$0, %rdx
+ add $X[0], $X[4]
+ adc \$0, %rdx
+ mov $X[4], (+$pDst_o+8*6)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $X[2]
+
+ # ------------------
+ # fourth pass 34...37
+ # ------------------
+
+ mov (+8*3)($pA), $A
+
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ mov $X[5], (+$pDst_o+8*7)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+ mov $x7, (+$pDst_o+8*8)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+
+ mov %rdx, $X[5]
+
+ # ------------------
+ # fifth pass 45...47
+ # ------------------
+ mov (+8*4)($pA), $A
+
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*9)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*10)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[1]
+
+ # ------------------
+ # sixth pass 56...57
+ # ------------------
+ mov (+8*5)($pA), $A
+
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ mov $X[5], (+$pDst_o+8*11)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*12)($pDst)
+
+ mov %rdx, $X[2]
+
+ # ------------------
+ # seventh pass 67
+ # ------------------
+ mov $X[6], $A
+
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*13)($pDst)
+
+ mov %rdx, (+$pDst_o+8*14)($pDst)
+
+ # start finalize (add in squares, and double off-terms)
+ mov (+$pDst_o+8*1)($pDst), $X[0]
+ mov (+$pDst_o+8*2)($pDst), $X[1]
+ mov (+$pDst_o+8*3)($pDst), $X[2]
+ mov (+$pDst_o+8*4)($pDst), $X[3]
+ mov (+$pDst_o+8*5)($pDst), $X[4]
+ mov (+$pDst_o+8*6)($pDst), $X[5]
+
+ mov (+8*3)($pA), %rax
+ mul %rax
+ mov %rax, $x6
+ mov %rdx, $X[6]
+
+ add $X[0], $X[0]
+ adc $X[1], $X[1]
+ adc $X[2], $X[2]
+ adc $X[3], $X[3]
+ adc $X[4], $X[4]
+ adc $X[5], $X[5]
+ adc \$0, $X[6]
+
+ mov (+8*0)($pA), %rax
+ mul %rax
+ mov %rax, (+$pDst_o+8*0)($pDst)
+ mov %rdx, $A
+
+ mov (+8*1)($pA), %rax
+ mul %rax
+
+ add $A, $X[0]
+ adc %rax, $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $A
+ mov $X[0], (+$pDst_o+8*1)($pDst)
+ mov $X[1], (+$pDst_o+8*2)($pDst)
+
+ mov (+8*2)($pA), %rax
+ mul %rax
+
+ add $A, $X[2]
+ adc %rax, $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $A
+
+ mov $X[2], (+$pDst_o+8*3)($pDst)
+ mov $X[3], (+$pDst_o+8*4)($pDst)
+
+ xor $tmp, $tmp
+ add $A, $X[4]
+ adc $x6, $X[5]
+ adc \$0, $tmp
+
+ mov $X[4], (+$pDst_o+8*5)($pDst)
+ mov $X[5], (+$pDst_o+8*6)($pDst)
+
+	# $tmp has 0/1 in column 7
+	# $X[6] has a full value in column 7
+
+ mov (+$pDst_o+8*7)($pDst), $X[0]
+ mov (+$pDst_o+8*8)($pDst), $X[1]
+ mov (+$pDst_o+8*9)($pDst), $X[2]
+ mov (+$pDst_o+8*10)($pDst), $X[3]
+ mov (+$pDst_o+8*11)($pDst), $X[4]
+ mov (+$pDst_o+8*12)($pDst), $X[5]
+ mov (+$pDst_o+8*13)($pDst), $x6
+ mov (+$pDst_o+8*14)($pDst), $x7
+
+ mov $X[7], %rax
+ mul %rax
+ mov %rax, $X[7]
+ mov %rdx, $A
+
+ add $X[0], $X[0]
+ adc $X[1], $X[1]
+ adc $X[2], $X[2]
+ adc $X[3], $X[3]
+ adc $X[4], $X[4]
+ adc $X[5], $X[5]
+ adc $x6, $x6
+ adc $x7, $x7
+ adc \$0, $A
+
+ add $tmp, $X[0]
+
+ mov (+8*4)($pA), %rax
+ mul %rax
+
+ add $X[6], $X[0]
+ adc %rax, $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $tmp
+
+ mov $X[0], (+$pDst_o+8*7)($pDst)
+ mov $X[1], (+$pDst_o+8*8)($pDst)
+
+ mov (+8*5)($pA), %rax
+ mul %rax
+
+ add $tmp, $X[2]
+ adc %rax, $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $tmp
+
+ mov $X[2], (+$pDst_o+8*9)($pDst)
+ mov $X[3], (+$pDst_o+8*10)($pDst)
+
+ mov (+8*6)($pA), %rax
+ mul %rax
+
+ add $tmp, $X[4]
+ adc %rax, $X[5]
+ adc \$0, %rdx
+
+ mov $X[4], (+$pDst_o+8*11)($pDst)
+ mov $X[5], (+$pDst_o+8*12)($pDst)
+
+ add %rdx, $x6
+ adc $X[7], $x7
+ adc \$0, $A
+
+ mov $x6, (+$pDst_o+8*13)($pDst)
+ mov $x7, (+$pDst_o+8*14)($pDst)
+ mov $A, (+$pDst_o+8*15)($pDst)
+___
+}
+
+#
+# sqr_reduce: subroutine to compute Result = reduce(Result * Result)
+#
+# input and result also in: r9, r8, r15, r14, r13, r12, r11, r10
+#
+$code.=<<___;
+.type sqr_reduce,\@abi-omnipotent
+.align 16
+sqr_reduce:
+ mov (+$pResult_offset+8)(%rsp), %rcx
+___
+ &SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
+$code.=<<___;
+ # tail recursion optimization: jmp to mont_reduce and return from there
+ jmp mont_reduce
+ # call mont_reduce
+ # ret
+.size sqr_reduce,.-sqr_reduce
+___
+}}}
+
+#
+# MAIN FUNCTION
+#
+
+#mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */
+# UINT64 *g, /* 512 bits, 8 qwords */
+# UINT64 *exp, /* 512 bits, 8 qwords */
+# struct mod_ctx_512 *data)
+
+# window size = 5
+# table size = 2^5 = 32
+#table_entries equ 32
+#table_size equ table_entries * 8
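+
+# Simplified reference for the 5-bit fixed-window walk implemented below
+# (Math::BigInt as above; nothing calls this). It scans exp in 5-bit
+# slices from the top, doing five modular squarings and one table
+# multiply per slice; the assembly additionally keeps the table in
+# Montgomery form and trims the very first squaring run, but the
+# exponent accounting is the same.
+sub mod_exp_512_model {
+	my ($g, $e, $m) = @_;                       # Math::BigInt; m odd, e < 2^512
+	my @tbl = map { $g->copy->bmodpow($_, $m) } (0 .. 31);   # g^0 .. g^31
+	my $r = Math::BigInt->bone;
+	for (my $i = 510; $i >= 0; $i -= 5) {
+		$r->bmodpow(32, $m);                # five modular squarings
+		my $w = (($e >> $i) & 0x1f)->numify;    # 5-bit window exp[i+4:i]
+		$r->bmul($tbl[$w])->bmod($m);
+	}
+	return $r;                                  # == g^e mod m
+}
+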
+$code.=<<___;
+.globl mod_exp_512
+.type mod_exp_512,\@function,4
+mod_exp_512:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ # adjust stack down and then align it with cache boundary
+ mov %rsp, %r8
+ sub \$$mem_size, %rsp
+ and \$-64, %rsp
+
+ # store previous stack pointer and arguments
+ mov %r8, (+$rsp_offset)(%rsp)
+ mov %rdi, (+$pResult_offset)(%rsp)
+ mov %rsi, (+$pG_offset)(%rsp)
+ mov %rcx, (+$pData_offset)(%rsp)
+.Lbody:
+ # transform g into montgomery space
+ # GT = reduce(g * C2) = reduce(g * (2^256))
+ # reduce expects to have the input in [tmp16]
+ pxor %xmm4, %xmm4
+ movdqu (+16*0)(%rsi), %xmm0
+ movdqu (+16*1)(%rsi), %xmm1
+ movdqu (+16*2)(%rsi), %xmm2
+ movdqu (+16*3)(%rsi), %xmm3
+ movdqa %xmm4, (+$tmp16_offset+16*0)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*1)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
+ movdqa %xmm0, (+$tmp16_offset+16*2)(%rsp)
+ movdqa %xmm1, (+$tmp16_offset+16*3)(%rsp)
+ movdqa %xmm2, (+$tmp16_offset+16*4)(%rsp)
+ movdqa %xmm3, (+$tmp16_offset+16*5)(%rsp)
+
+ # load pExp before rdx gets blown away
+ movdqu (+16*0)(%rdx), %xmm0
+ movdqu (+16*1)(%rdx), %xmm1
+ movdqu (+16*2)(%rdx), %xmm2
+ movdqu (+16*3)(%rdx), %xmm3
+
+ lea (+$GT_offset)(%rsp), %rbx
+ mov %rbx, (+$red_result_addr_offset)(%rsp)
+ call mont_reduce
+
+ # Initialize tmp = C
+ lea (+$tmp_offset)(%rsp), %rcx
+ xor %rax, %rax
+ mov %rax, (+8*0)(%rcx)
+ mov %rax, (+8*1)(%rcx)
+ mov %rax, (+8*3)(%rcx)
+ mov %rax, (+8*4)(%rcx)
+ mov %rax, (+8*5)(%rcx)
+ mov %rax, (+8*6)(%rcx)
+ mov %rax, (+8*7)(%rcx)
+ mov %rax, (+$exp_offset+8*8)(%rsp)
+ movq \$1, (+8*2)(%rcx)
+
+ lea (+$garray_offset)(%rsp), %rbp
+ mov %rcx, %rsi # pTmp
+ mov %rbp, %rdi # Garray[][0]
+___
+
+ &swizzle("%rdi", "%rcx", "%rax", "%rbx");
+
+ # for (rax = 31; rax != 0; rax--) {
+ # tmp = reduce(tmp * G)
+ # swizzle(pg, tmp);
+ # pg += 2; }
+$code.=<<___;
+ mov \$31, %rax
+ mov %rax, (+$i_offset)(%rsp)
+ mov %rbp, (+$pg_offset)(%rsp)
+ # rsi -> pTmp
+ mov %rsi, (+$red_result_addr_offset)(%rsp)
+ mov (+8*0)(%rsi), %r10
+ mov (+8*1)(%rsi), %r11
+ mov (+8*2)(%rsi), %r12
+ mov (+8*3)(%rsi), %r13
+ mov (+8*4)(%rsi), %r14
+ mov (+8*5)(%rsi), %r15
+ mov (+8*6)(%rsi), %r8
+ mov (+8*7)(%rsi), %r9
+init_loop:
+ lea (+$GT_offset)(%rsp), %rdi
+ call mont_mul_a3b
+ lea (+$tmp_offset)(%rsp), %rsi
+ mov (+$pg_offset)(%rsp), %rbp
+ add \$2, %rbp
+ mov %rbp, (+$pg_offset)(%rsp)
+ mov %rsi, %rcx # rcx = rsi = addr of tmp
+___
+
+ &swizzle("%rbp", "%rcx", "%rax", "%rbx");
+$code.=<<___;
+ mov (+$i_offset)(%rsp), %rax
+ sub \$1, %rax
+ mov %rax, (+$i_offset)(%rsp)
+ jne init_loop
+
+ #
+ # Copy exponent onto stack
+ movdqa %xmm0, (+$exp_offset+16*0)(%rsp)
+ movdqa %xmm1, (+$exp_offset+16*1)(%rsp)
+ movdqa %xmm2, (+$exp_offset+16*2)(%rsp)
+ movdqa %xmm3, (+$exp_offset+16*3)(%rsp)
+
+
+ #
+ # Do exponentiation
+ # Initialize result to G[exp{511:507}]
+ mov (+$exp_offset+62)(%rsp), %eax
+ mov %rax, %rdx
+ shr \$11, %rax
+ and \$0x07FF, %edx
+ mov %edx, (+$exp_offset+62)(%rsp)
+ lea (+$garray_offset)(%rsp,%rax,2), %rsi
+ mov (+$pResult_offset)(%rsp), %rdx
+___
+
+ &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
+
+ #
+ # Loop variables
+ # rcx = [loop_idx] = index: 510-5 to 0 by 5
+$code.=<<___;
+ movq \$505, (+$loop_idx_offset)(%rsp)
+
+ mov (+$pResult_offset)(%rsp), %rcx
+ mov %rcx, (+$red_result_addr_offset)(%rsp)
+ mov (+8*0)(%rcx), %r10
+ mov (+8*1)(%rcx), %r11
+ mov (+8*2)(%rcx), %r12
+ mov (+8*3)(%rcx), %r13
+ mov (+8*4)(%rcx), %r14
+ mov (+8*5)(%rcx), %r15
+ mov (+8*6)(%rcx), %r8
+ mov (+8*7)(%rcx), %r9
+ jmp sqr_2
+
+main_loop_a3b:
+ call sqr_reduce
+ call sqr_reduce
+ call sqr_reduce
+sqr_2:
+ call sqr_reduce
+ call sqr_reduce
+
+ #
+ # Do multiply, first look up proper value in Garray
+ mov (+$loop_idx_offset)(%rsp), %rcx # bit index
+ mov %rcx, %rax
+ shr \$4, %rax # rax is word pointer
+ mov (+$exp_offset)(%rsp,%rax,2), %edx
+ and \$15, %rcx
+ shrq %cl, %rdx
+ and \$0x1F, %rdx
+
+ lea (+$garray_offset)(%rsp,%rdx,2), %rsi
+ lea (+$tmp_offset)(%rsp), %rdx
+ mov %rdx, %rdi
+___
+
+ &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
+ # rdi = tmp = pG
+
+ #
+ # Call mod_mul_a1(pDst, pSrc1, pSrc2, pM, pData)
+ # result result pG M Data
+$code.=<<___;
+ mov (+$pResult_offset)(%rsp), %rsi
+ call mont_mul_a3b
+
+ #
+ # finish loop
+ mov (+$loop_idx_offset)(%rsp), %rcx
+ sub \$5, %rcx
+ mov %rcx, (+$loop_idx_offset)(%rsp)
+ jge main_loop_a3b
+
+ #
+
+end_main_loop_a3b:
+ # transform result out of Montgomery space
+ # result = reduce(result)
+ mov (+$pResult_offset)(%rsp), %rdx
+ pxor %xmm4, %xmm4
+ movdqu (+16*0)(%rdx), %xmm0
+ movdqu (+16*1)(%rdx), %xmm1
+ movdqu (+16*2)(%rdx), %xmm2
+ movdqu (+16*3)(%rdx), %xmm3
+ movdqa %xmm4, (+$tmp16_offset+16*4)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*5)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
+ movdqa %xmm0, (+$tmp16_offset+16*0)(%rsp)
+ movdqa %xmm1, (+$tmp16_offset+16*1)(%rsp)
+ movdqa %xmm2, (+$tmp16_offset+16*2)(%rsp)
+ movdqa %xmm3, (+$tmp16_offset+16*3)(%rsp)
+ call mont_reduce
+
+	# If result > m, subtract m
+ # load result into r15:r8
+ mov (+$pResult_offset)(%rsp), %rax
+ mov (+8*0)(%rax), %r8
+ mov (+8*1)(%rax), %r9
+ mov (+8*2)(%rax), %r10
+ mov (+8*3)(%rax), %r11
+ mov (+8*4)(%rax), %r12
+ mov (+8*5)(%rax), %r13
+ mov (+8*6)(%rax), %r14
+ mov (+8*7)(%rax), %r15
+
+ # subtract m
+ mov (+$pData_offset)(%rsp), %rbx
+ add \$$M, %rbx
+
+ sub (+8*0)(%rbx), %r8
+ sbb (+8*1)(%rbx), %r9
+ sbb (+8*2)(%rbx), %r10
+ sbb (+8*3)(%rbx), %r11
+ sbb (+8*4)(%rbx), %r12
+ sbb (+8*5)(%rbx), %r13
+ sbb (+8*6)(%rbx), %r14
+ sbb (+8*7)(%rbx), %r15
+
+ # if Carry is clear, replace result with difference
+ mov (+8*0)(%rax), %rsi
+ mov (+8*1)(%rax), %rdi
+ mov (+8*2)(%rax), %rcx
+ mov (+8*3)(%rax), %rdx
+ cmovnc %r8, %rsi
+ cmovnc %r9, %rdi
+ cmovnc %r10, %rcx
+ cmovnc %r11, %rdx
+ mov %rsi, (+8*0)(%rax)
+ mov %rdi, (+8*1)(%rax)
+ mov %rcx, (+8*2)(%rax)
+ mov %rdx, (+8*3)(%rax)
+
+ mov (+8*4)(%rax), %rsi
+ mov (+8*5)(%rax), %rdi
+ mov (+8*6)(%rax), %rcx
+ mov (+8*7)(%rax), %rdx
+ cmovnc %r12, %rsi
+ cmovnc %r13, %rdi
+ cmovnc %r14, %rcx
+ cmovnc %r15, %rdx
+ mov %rsi, (+8*4)(%rax)
+ mov %rdi, (+8*5)(%rax)
+ mov %rcx, (+8*6)(%rax)
+ mov %rdx, (+8*7)(%rax)
+
+ mov (+$rsp_offset)(%rsp), %rsi
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbx
+ mov 40(%rsi),%rbp
+ lea 48(%rsi),%rsp
+.Lepilogue:
+ ret
+.size mod_exp_512, . - mod_exp_512
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mod_exp_512_se_handler,\@abi-omnipotent
+.align 16
+mod_exp_512_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody(%rip),%r10
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ mov $rsp_offset(%rax),%rax # pull saved Rsp
+
+ mov 32(%rax),%rbx
+ mov 40(%rax),%rbp
+ mov 24(%rax),%r12
+ mov 16(%rax),%r13
+ mov 8(%rax),%r14
+ mov 0(%rax),%r15
+ lea 48(%rax),%rax
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mod_exp_512_se_handler,.-mod_exp_512_se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_mod_exp_512
+ .rva .LSEH_end_mod_exp_512
+ .rva .LSEH_info_mod_exp_512
+
+.section .xdata
+.align 8
+.LSEH_info_mod_exp_512:
+ .byte 9,0,0,0
+ .rva mod_exp_512_se_handler
+___
+}
+
+sub reg_part {
+my ($reg,$conv)=@_;
+ if ($reg =~ /%r[0-9]+/) { $reg .= $conv; }
+ elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; }
+ elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; }
+ elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; }
+ return $reg;
+}
+
+$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/(\(\+[^)]+\))/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/parisc-mont.pl b/openssl/crypto/bn/asm/parisc-mont.pl
new file mode 100644
index 000000000..4a766a87f
--- /dev/null
+++ b/openssl/crypto/bn/asm/parisc-mont.pl
@@ -0,0 +1,993 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# On PA-7100LC this module performs ~90-50% better, less for longer
+# keys, than code generated by gcc 3.2 for PA-RISC 1.1. The latter means
+# that the compiler utilized the xmpyu instruction to perform 32x32=64-bit
+# multiplication, which in turn means that the "baseline" performance was
+# optimal with respect to instruction set capabilities. Fair comparison
+# with vendor compiler is problematic, because OpenSSL doesn't define
+# BN_LLONG [presumably] for historical reasons, which drives the compiler
+# toward 4 times 16x16=32-bit multiplications [plus complementary
+# shifts and additions] instead. This means that you should observe
+# several times improvement over code generated by vendor compiler
+# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
+# improvement coefficient was never collected on PA-7100LC, or any
+# other 1.1 CPU, because I don't have access to such machine with
+# vendor compiler. But to give you a taste, PA-RISC 1.1 code path
+# reportedly outperformed code generated by cc +DA1.1 +O3 by factor
+# of ~5x on PA-8600.
+#
+# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
+# reportedly ~2x faster than vendor compiler generated code [according
+# to a comment in pa-risc2[W].s]. Here comes a catch. The execution core
+# of this implementation is actually a 32-bit one, in the sense that it
+# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
+# 64-bit BN_LONGs... How do they interoperate then? No problem. This
+# module picks halves of 64-bit values in reverse order and pretends
+# they were 32-bit BN_LONGs. But can a 32-bit core compete with "pure"
+# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
+# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
+# i.e. there is no "wider" multiplication like on most other 64-bit
+# platforms. This means that even though it is effectively 32-bit, this
+# implementation performs the "64-bit" computational task in the same
+# number of arithmetic operations, most notably multiplications. It
+# requires more memory references, most notably to tp[num], but this
+# doesn't seem to exhaust memory port capacity. And indeed, the dedicated
+# PA-RISC 2.0 code path provides virtually the same performance as
+# pa-risc2[W].s: it's ~10% better for the shortest key length and ~10%
+# worse for the longest one.
+#
+# In case it wasn't clear: the module has two distinct code paths,
+# a PA-RISC 1.1 and a PA-RISC 2.0 one. The latter features carry-free
+# 64-bit additions and 64-bit integer loads, not to mention specific
+# instruction scheduling. In a 64-bit build naturally only the 2.0 code
+# path is assembled. In a 32-bit application context both code paths are
+# assembled, a PA-RISC 2.0 CPU is detected at run-time and the proper
+# path is taken automatically. Also, in a 32-bit build the module imposes
+# a couple of limitations: vector lengths have to be even and vector
+# addresses have to be 64-bit aligned. Normally neither is a problem:
+# most common key lengths are even and vectors are commonly malloc-ed,
+# which ensures alignment.
+#
+# Special thanks to polarhome.com for providing HP-UX account on
+# PA-RISC 1.1 machine, and to correspondent who chose to remain
+# anonymous for testing the code on PA-RISC 2.0 machine.
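+
+# Word-level reference model of what bn_mul_mont computes (illustration
+# only, nothing in this script uses it): one word of bp per outer
+# iteration, with n0 = -1/np[0] mod 2^w cancelling the lowest word of
+# the accumulator, and a single conditional subtraction at the end.
+use Math::BigInt;
+
+sub bn_mul_mont_model {
+	my ($x, $y, $n, $num, $w) = @_;     # Math::BigInt; n odd, x,y < n < 2^(w*num)
+	my $base = Math::BigInt->bone->blsft($w);           # 2^w
+	my $mask = $base->copy->bdec;                       # 2^w - 1
+	my $n0   = $base - ($n & $mask)->bmodinv($base);    # -1/n mod 2^w
+	my $t    = Math::BigInt->bzero;
+	for my $i (0 .. $num - 1) {
+		$t += $x * (($y >> ($w * $i)) & $mask);     # t += x * y[i]
+		my $m = (($t & $mask) * $n0) & $mask;       # m = t[0]*n0 mod 2^w
+		$t = ($t + $m * $n) >> $w;                  # lowest word cancels
+	}
+	$t -= $n if $t >= $n;                               # conditional final subtraction
+	return $t;                          # == x * y * 2^(-w*num) mod n
+}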
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+
+$flavour = shift;
+$output = shift;
+
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+ $LEVEL ="2.0W";
+ $SIZE_T =8;
+ $FRAME_MARKER =80;
+ $SAVED_RP =16;
+ $PUSH ="std";
+ $PUSHMA ="std,ma";
+ $POP ="ldd";
+ $POPMB ="ldd,mb";
+ $BN_SZ =$SIZE_T;
+} else {
+ $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
+ $SIZE_T =4;
+ $FRAME_MARKER =48;
+ $SAVED_RP =20;
+ $PUSH ="stw";
+ $PUSHMA ="stwm";
+ $POP ="ldw";
+ $POPMB ="ldwm";
+ $BN_SZ =$SIZE_T;
+ if (open CONF,"<${dir}../../opensslconf.h") {
+ while(<CONF>) {
+ if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
+ $BN_SZ=8;
+ $LEVEL="2.0";
+ last;
+ }
+ }
+ close CONF;
+ }
+}
+
+$FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
+ # [+ argument transfer]
+$LOCALS=$FRAME-$FRAME_MARKER;
+$FRAME+=32; # local variables
+
+$tp="%r31";
+$ti1="%r29";
+$ti0="%r28";
+
+$rp="%r26";
+$ap="%r25";
+$bp="%r24";
+$np="%r23";
+$n0="%r22"; # passed through stack in 32-bit
+$num="%r21"; # passed through stack in 32-bit
+$idx="%r20";
+$arrsz="%r19";
+
+$nm1="%r7";
+$nm0="%r6";
+$ab1="%r5";
+$ab0="%r4";
+
+$fp="%r3";
+$hi1="%r2";
+$hi0="%r1";
+
+$xfer=$n0;	# accommodates [-16..15] offset in fld[dw]s
+
+$fm0="%fr4"; $fti=$fm0;
+$fbi="%fr5L";
+$fn0="%fr5R";
+$fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
+$fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
+
+$code=<<___;
+ .LEVEL $LEVEL
+ .SPACE \$TEXT\$
+ .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+ .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
+ .ALIGN 64
+bn_mul_mont
+ .PROC
+ .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
+ .ENTRY
+ $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
+ $PUSHMA %r3,$FRAME(%sp)
+ $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
+ $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
+ $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
+ $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
+ $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
+ $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
+ $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
+ ldo -$FRAME(%sp),$fp
+___
+$code.=<<___ if ($SIZE_T==4);
+ ldw `-$FRAME_MARKER-4`($fp),$n0
+ ldw `-$FRAME_MARKER-8`($fp),$num
+ nop
+ nop ; alignment
+___
+$code.=<<___ if ($BN_SZ==4);
+ comiclr,<= 6,$num,%r0 ; are vectors long enough?
+ b L\$abort
+ ldi 0,%r28 ; signal "unhandled"
+ add,ev %r0,$num,$num ; is $num even?
+ b L\$abort
+ nop
+ or $ap,$np,$ti1
+ extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
+ b L\$abort
+ nop
+ nop ; alignment
+ nop
+
+ fldws 0($n0),${fn0}
+ fldws,ma 4($bp),${fbi} ; bp[0]
+___
+$code.=<<___ if ($BN_SZ==8);
+ comib,> 3,$num,L\$abort ; are vectors long enough?
+ ldi 0,%r28 ; signal "unhandled"
+ addl $num,$num,$num ; I operate on 32-bit values
+
+ fldws 4($n0),${fn0} ; only low part of n0
+ fldws 4($bp),${fbi} ; bp[0] in flipped word order
+___
+$code.=<<___;
+ fldds 0($ap),${fai} ; ap[0,1]
+ fldds 0($np),${fni} ; np[0,1]
+
+ sh2addl $num,%r0,$arrsz
+ ldi 31,$hi0
+ ldo 36($arrsz),$hi1 ; space for tp[num+1]
+ andcm $hi1,$hi0,$hi1 ; align
+ addl $hi1,%sp,%sp
+ $PUSH $fp,-$SIZE_T(%sp)
+
+ ldo `$LOCALS+16`($fp),$xfer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ addl $arrsz,$ap,$ap ; point at the end
+ addl $arrsz,$np,$np
+ subi 0,$arrsz,$idx ; j=0
+ ldo 8($idx),$idx ; j++++
+
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ fstds ${fab1},0($xfer)
+ fstds ${fnm1},8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+___
+$code.=<<___ if ($BN_SZ==4);
+ mtctl $hi0,%cr11 ; $hi0 still holds 31
+ extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
+ b L\$parisc11
+ nop
+___
+$code.=<<___; # PA-RISC 2.0 code-path
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $ab0,63,32,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ ldo 8($idx),$idx ; j++++
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ extrd,u $nm0,31,32,$hi1
+
+L\$1st
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ stw $nm1,-4($tp) ; tp[j-1]
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ addl $ab1,$nm1,$nm1
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[1]
+___
+$code.=<<___ if ($BN_SZ==8);
+ fldws 0($bp),${fbi} ; bp[1] in flipped word order
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ addl $hi1,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2]
+ flddx $idx($np),${fni} ; np[2]
+ ldo 8($idx),$idx ; j++++
+ ldd -16($xfer),$ab0 ; 33-bit value
+ ldd -8($xfer),$nm0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ extrd,u $ab0,31,32,$ti0 ; carry bit
+ extrd,u $ab0,63,32,$ab0
+ fstds ${fab1},0($xfer)
+ addl $ti0,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ extrd,u $nm0,31,32,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+L\$inner
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldw 4($tp),$ti0 ; tp[j]
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ti0,$ti0
+ addl $ti0,$ab0,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $nm1,31,32,$hi1
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldw 4($tp),$ti0 ; tp[j]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ addl $ti0,$ab0,$ab0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,31,32,$hi0
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[i]
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldi 12,$ti0 ; bp[i] in flipped word order
+ addl,ev %r0,$num,$num
+ ldi -4,$ti0
+ addl $ti0,$bp,$bp
+ fldws 0($bp),${fbi}
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0]
+ addl $hi0,$ab1,$ab1
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ addl $hi1,$hi0,$hi0
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone
+ addl $hi0,$ab1,$ab1
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ addl $hi1,$hi0,$hi0
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+___
+$code.=<<___ if ($BN_SZ==4);
+ ldws,ma 4($tp),$ti0
+ extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
+ b L\$sub_pa11
+ addl $tp,$arrsz,$tp
+L\$sub
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldd,ma 8($tp),$ti0
+L\$sub
+ ldd $idx($np),$hi0
+ shrpd $ti0,$ti0,32,$ti0 ; flip word order
+ std $ti0,-8($tp) ; save flipped value
+ sub,db $ti0,$hi0,$hi1
+ ldd,ma 8($tp),$ti0
+ addib,<> 8,$idx,L\$sub
+ std,ma $hi1,8($rp)
+
+ extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
+ sub,db $ti0,%r0,$hi1
+ ldo -8($tp),$tp
+___
+$code.=<<___;
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy
+ ldd $idx($np),$hi0
+ std,ma %r0,8($tp)
+ addib,<> 8,$idx,.-8 ; L\$copy
+ std,ma $hi0,8($rp)
+___
+
+if ($BN_SZ==4) { # PA-RISC 1.1 code-path
+$ablo=$ab0;
+$abhi=$ab1;
+$nmlo0=$nm0;
+$nmhi0=$nm1;
+$nmlo1="%r9";
+$nmhi1="%r8";
+
+$code.=<<___;
+ b L\$done
+ nop
+
+ .ALIGN 8
+L\$parisc11
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -12($xfer),$ablo
+ ldw -16($xfer),$hi0
+ ldw -4($xfer),$nmlo0
+ ldw -8($xfer),$nmhi0
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+ ldo 8($idx),$idx ; j++++
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ addc %r0,$nmhi0,$hi1
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+ nop
+
+L\$1st_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 12($xfer),$nmlo1
+ addc %r0,$abhi,$hi0
+ ldw 8($xfer),$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fab1},0($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ fstds ${fnm1},8($xfer)
+ add $hi1,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$hi1
+ ldw -16($xfer),$abhi
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ ldw -4($xfer),$nmlo0
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -8($xfer),$nmhi0
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ fstds ${fab0},-16($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ fstds ${fnm0},-8($xfer)
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ ldw 8($xfer),$nmhi1
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ add $hi0,$ablo,$ablo
+ fstds ${fab1},0($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -8($xfer),$nmhi0
+ addc %r0,$nmhi1,$hi1
+ ldw -4($xfer),$nmlo0
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[1]
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer_pa11
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+ ldw -16($xfer),$abhi ; carry bit actually
+ ldo 8($idx),$idx ; j++++
+ ldw -12($xfer),$ablo
+ ldw -8($xfer),$nmhi0
+ ldw -4($xfer),$nmlo0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ fstds ${fab1},0($xfer)
+ addl $abhi,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ addc %r0,$nmhi0,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+
+L\$inner_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ ldw 12($xfer),$nmlo1
+ add $ti1,$ablo,$ablo
+ ldw 8($xfer),$nmhi1
+ addc %r0,$abhi,$hi0
+ fstds ${fab1},0($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fnm1},8($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ ldw 8($tp),$ti1 ; tp[j]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -4($xfer),$nmlo0
+ add $hi0,$ablo,$ablo
+ ldw -8($xfer),$nmhi0
+ addc %r0,$abhi,$abhi
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ add $ti0,$ablo,$ablo
+ fstds ${fab0},-16($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm0},-8($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldw 8($xfer),$nmhi1
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ fstds ${fab1},0($xfer)
+ add $ti1,$ablo,$ablo
+ fstds ${fnm1},8($xfer)
+ addc %r0,$abhi,$hi0
+ ldw -16($xfer),$abhi
+ add $ablo,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$nmhi1
+ ldw -8($xfer),$nmhi0
+ add $hi1,$nmlo1,$nmlo1
+ ldw -4($xfer),$nmlo0
+ addc %r0,$nmhi1,$hi1
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$abhi
+ add $ti0,$ablo,$ablo
+ ldw 8($tp),$ti1 ; tp[j]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone_pa11; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[i]
+ flddx $idx($ap),${fai} ; ap[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer_pa11
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone_pa11
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32+4`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+ ldw -4($tp),$ti0
+ addl $tp,$arrsz,$tp
+L\$sub_pa11
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub_pa11
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy_pa11
+ ldwx $idx($np),$hi0
+ stws,ma %r0,4($tp)
+ addib,<> 4,$idx,L\$copy_pa11
+ stws,ma $hi0,4($rp)
+
+ nop ; alignment
+L\$done
+___
+}
+
+$code.=<<___;
+ ldi 1,%r28 ; signal "handled"
+ ldo $FRAME($fp),%sp ; destroy tp[num+1]
+
+ $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
+ $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
+ $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
+ $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
+ $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
+ $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
+ $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
+ $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
+L\$abort
+ bv (%r2)
+ .EXIT
+ $POPMB -$FRAME(%sp),%r3
+ .PROCEND
+ .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# Explicitly encode PA-RISC 2.0 instructions used in this module, so
+# that it can be compiled with .LEVEL 1.0. It should be noted that I
+# wouldn't have to do this, if GNU assembler understood .ALLOW 2.0
+# directive...
+
+my $ldd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "ldd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
+ { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
+ { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
+ $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $std = sub {
+ my ($mod,$args) = @_;
+ my $orig = "std$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
+ { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
+ $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $extrd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "extrd$mod\t$args";
+
+ # I only have ",u" completer, it's implicitly encoded...
+ if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
+ { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
+ my $len=32-$3;
+ $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
+ $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
+ { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
+ my $len=32-$2;
+ $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
+ $opcode |= (1<<13) if ($mod =~ /,\**=/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $shrpd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "shrpd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
+ { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
+ my $cpos=63-$3;
+ $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $sub = sub {
+ my ($mod,$args) = @_;
+ my $orig = "sub$mod\t$args";
+
+ if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
+ my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
+ $opcode|=(1<<10); # e1
+ $opcode|=(1<<8); # e2
+ $opcode|=(1<<5); # d
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
+ }
+ else { "\t".$orig; }
+};
+
+sub assemble {
+ my ($mnemonic,$mod,$args)=@_;
+ my $opcode = eval("\$$mnemonic");
+
+ ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ # flip word order in 64-bit mode...
+ s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
+ # assemble 2.0 instructions in 32-bit mode...
+ s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/ppc-mont.pl b/openssl/crypto/bn/asm/ppc-mont.pl
index 7849eae95..f9b6992cc 100644
--- a/openssl/crypto/bn/asm/ppc-mont.pl
+++ b/openssl/crypto/bn/asm/ppc-mont.pl
@@ -31,7 +31,6 @@ if ($flavour =~ /32/) {
$BNSZ= $BITS/8;
$SIZE_T=4;
$RZONE= 224;
- $FRAME= $SIZE_T*16;
$LD= "lwz"; # load
$LDU= "lwzu"; # load and update
@@ -51,7 +50,6 @@ if ($flavour =~ /32/) {
$BNSZ= $BITS/8;
$SIZE_T=8;
$RZONE= 288;
- $FRAME= $SIZE_T*16;
# same as above, but 64-bit mnemonics...
$LD= "ld"; # load
@@ -69,6 +67,9 @@ if ($flavour =~ /32/) {
$POP= $LD;
} else { die "nonsense $flavour"; }
+$FRAME=8*$SIZE_T+$RZONE;
+$LOCALS=8*$SIZE_T;
+
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
@@ -89,18 +90,18 @@ $aj="r10";
$nj="r11";
$tj="r12";
# non-volatile registers
-$i="r14";
-$j="r15";
-$tp="r16";
-$m0="r17";
-$m1="r18";
-$lo0="r19";
-$hi0="r20";
-$lo1="r21";
-$hi1="r22";
-$alo="r23";
-$ahi="r24";
-$nlo="r25";
+$i="r20";
+$j="r21";
+$tp="r22";
+$m0="r23";
+$m1="r24";
+$lo0="r25";
+$hi0="r26";
+$lo1="r27";
+$hi1="r28";
+$alo="r29";
+$ahi="r30";
+$nlo="r31";
#
$nhi="r0";
@@ -108,42 +109,48 @@ $code=<<___;
.machine "any"
.text
-.globl .bn_mul_mont
+.globl .bn_mul_mont_int
.align 4
-.bn_mul_mont:
+.bn_mul_mont_int:
cmpwi $num,4
mr $rp,r3 ; $rp is reassigned
li r3,0
bltlr
-
+___
+$code.=<<___ if ($BNSZ==4);
+ cmpwi $num,32 ; longer key performance is not better
+ bgelr
+___
+$code.=<<___;
slwi $num,$num,`log($BNSZ)/log(2)`
li $tj,-4096
- addi $ovf,$num,`$FRAME+$RZONE`
+ addi $ovf,$num,$FRAME
subf $ovf,$ovf,$sp ; $sp-$ovf
and $ovf,$ovf,$tj ; minimize TLB usage
subf $ovf,$sp,$ovf ; $ovf-$sp
+ mr $tj,$sp
srwi $num,$num,`log($BNSZ)/log(2)`
$STUX $sp,$sp,$ovf
- $PUSH r14,`4*$SIZE_T`($sp)
- $PUSH r15,`5*$SIZE_T`($sp)
- $PUSH r16,`6*$SIZE_T`($sp)
- $PUSH r17,`7*$SIZE_T`($sp)
- $PUSH r18,`8*$SIZE_T`($sp)
- $PUSH r19,`9*$SIZE_T`($sp)
- $PUSH r20,`10*$SIZE_T`($sp)
- $PUSH r21,`11*$SIZE_T`($sp)
- $PUSH r22,`12*$SIZE_T`($sp)
- $PUSH r23,`13*$SIZE_T`($sp)
- $PUSH r24,`14*$SIZE_T`($sp)
- $PUSH r25,`15*$SIZE_T`($sp)
+ $PUSH r20,`-12*$SIZE_T`($tj)
+ $PUSH r21,`-11*$SIZE_T`($tj)
+ $PUSH r22,`-10*$SIZE_T`($tj)
+ $PUSH r23,`-9*$SIZE_T`($tj)
+ $PUSH r24,`-8*$SIZE_T`($tj)
+ $PUSH r25,`-7*$SIZE_T`($tj)
+ $PUSH r26,`-6*$SIZE_T`($tj)
+ $PUSH r27,`-5*$SIZE_T`($tj)
+ $PUSH r28,`-4*$SIZE_T`($tj)
+ $PUSH r29,`-3*$SIZE_T`($tj)
+ $PUSH r30,`-2*$SIZE_T`($tj)
+ $PUSH r31,`-1*$SIZE_T`($tj)
$LD $n0,0($n0) ; pull n0[0] value
addi $num,$num,-2 ; adjust $num for counter register
$LD $m0,0($bp) ; m0=bp[0]
$LD $aj,0($ap) ; ap[0]
- addi $tp,$sp,$FRAME
+ addi $tp,$sp,$LOCALS
$UMULL $lo0,$aj,$m0 ; ap[0]*bp[0]
$UMULH $hi0,$aj,$m0
@@ -205,8 +212,8 @@ L1st:
Louter:
$LDX $m0,$bp,$i ; m0=bp[i]
$LD $aj,0($ap) ; ap[0]
- addi $tp,$sp,$FRAME
- $LD $tj,$FRAME($sp) ; tp[0]
+ addi $tp,$sp,$LOCALS
+ $LD $tj,$LOCALS($sp); tp[0]
$UMULL $lo0,$aj,$m0 ; ap[0]*bp[i]
$UMULH $hi0,$aj,$m0
$LD $aj,$BNSZ($ap) ; ap[1]
@@ -273,7 +280,7 @@ Linner:
addi $num,$num,2 ; restore $num
subfc $j,$j,$j ; j=0 and "clear" XER[CA]
- addi $tp,$sp,$FRAME
+ addi $tp,$sp,$LOCALS
mtctr $num
.align 4
@@ -299,23 +306,27 @@ Lcopy: ; copy or in-place refresh
addi $j,$j,$BNSZ
bdnz- Lcopy
- $POP r14,`4*$SIZE_T`($sp)
- $POP r15,`5*$SIZE_T`($sp)
- $POP r16,`6*$SIZE_T`($sp)
- $POP r17,`7*$SIZE_T`($sp)
- $POP r18,`8*$SIZE_T`($sp)
- $POP r19,`9*$SIZE_T`($sp)
- $POP r20,`10*$SIZE_T`($sp)
- $POP r21,`11*$SIZE_T`($sp)
- $POP r22,`12*$SIZE_T`($sp)
- $POP r23,`13*$SIZE_T`($sp)
- $POP r24,`14*$SIZE_T`($sp)
- $POP r25,`15*$SIZE_T`($sp)
- $POP $sp,0($sp)
+ $POP $tj,0($sp)
li r3,1
+ $POP r20,`-12*$SIZE_T`($tj)
+ $POP r21,`-11*$SIZE_T`($tj)
+ $POP r22,`-10*$SIZE_T`($tj)
+ $POP r23,`-9*$SIZE_T`($tj)
+ $POP r24,`-8*$SIZE_T`($tj)
+ $POP r25,`-7*$SIZE_T`($tj)
+ $POP r26,`-6*$SIZE_T`($tj)
+ $POP r27,`-5*$SIZE_T`($tj)
+ $POP r28,`-4*$SIZE_T`($tj)
+ $POP r29,`-3*$SIZE_T`($tj)
+ $POP r30,`-2*$SIZE_T`($tj)
+ $POP r31,`-1*$SIZE_T`($tj)
+ mr $sp,$tj
blr
.long 0
-.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@fy.chalmers.se>"
+ .byte 0,12,4,0,0x80,12,6,0
+ .long 0
+
+.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
diff --git a/openssl/crypto/bn/asm/ppc.pl b/openssl/crypto/bn/asm/ppc.pl
index f4093177e..1249ce229 100644
--- a/openssl/crypto/bn/asm/ppc.pl
+++ b/openssl/crypto/bn/asm/ppc.pl
@@ -389,7 +389,9 @@ $data=<<EOF;
$ST r9,`6*$BNSZ`(r3) #r[6]=c1
$ST r10,`7*$BNSZ`(r3) #r[7]=c2
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -814,8 +816,9 @@ $data=<<EOF;
blr
-
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -966,7 +969,9 @@ $data=<<EOF;
$ST r10,`6*$BNSZ`(r3) #r[6]=c1
$ST r11,`7*$BNSZ`(r3) #r[7]=c2
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1502,7 +1507,9 @@ $data=<<EOF;
$ST r12,`14*$BNSZ`(r3) #r[14]=c3;
$ST r10,`15*$BNSZ`(r3) #r[15]=c1;
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1550,8 +1557,9 @@ Lppcasm_sub_adios:
subfze r3,r0 # if carry bit is set then r3 = 0 else -1
andi. r3,r3,1 # keep only last bit.
blr
- .long 0x00000000
-
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1594,7 +1602,9 @@ Lppcasm_add_mainloop:
Lppcasm_add_adios:
addze r3,r0 #return carry bit.
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1707,7 +1717,9 @@ Lppcasm_div8:
Lppcasm_div9:
or r3,r8,r0
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1746,8 +1758,9 @@ Lppcasm_sqr_mainloop:
bdnz- Lppcasm_sqr_mainloop
Lppcasm_sqr_adios:
blr
- .long 0x00000000
-
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1850,7 +1863,9 @@ Lppcasm_mw_REM:
Lppcasm_mw_OVER:
addi r3,r12,0
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1973,7 +1988,9 @@ Lppcasm_maw_leftover:
Lppcasm_maw_adios:
addi r3,r12,0
blr
- .long 0x00000000
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
.align 4
EOF
$data =~ s/\`([^\`]*)\`/eval $1/gem;
diff --git a/openssl/crypto/bn/asm/ppc64-mont.pl b/openssl/crypto/bn/asm/ppc64-mont.pl
index 3449b3585..a14e769ad 100644
--- a/openssl/crypto/bn/asm/ppc64-mont.pl
+++ b/openssl/crypto/bn/asm/ppc64-mont.pl
@@ -45,23 +45,40 @@
# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
# in absolute terms, but it's apparently the way Power 6 is...
+# December 2009
+
+# Adapted for a 32-bit build, this module delivers a 25-120% (yes, more
+# than *twice* for longer keys) performance improvement over 32-bit
+# ppc-mont.pl on 1.8GHz PPC970. However! This implementation relies on
+# 64-bit integer operations, and the trouble is that most PPC
+# operating systems don't preserve upper halves of general purpose
+# registers upon 32-bit signal delivery. They do preserve them upon
+# context switch, but not signalling:-( This means that asynchronous
+# signals have to be blocked upon entry to this subroutine. Signal
+# masking (and of course complementary unmasking) has quite an impact
+# on performance, naturally larger for shorter keys. It's so severe
+# that 512-bit key performance can be as low as 1/3 of the expected one.
+# This is why this routine can be engaged for longer key operations
+# only on these OSes, see crypto/ppccap.c for further details. MacOS X
+# is an exception to this and doesn't require signal masking, and
+# that's where the above improvement coefficients were collected. For
+# the others the alternative would be to break the dependence on upper
+# halves of GPRs by sticking to 32-bit integer operations...
+
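To picture the signal masking this refers to: below is a minimal C sketch of a wrapper that blocks asynchronous signals around the call, assuming the bn_mul_mont_fpu64 entry point defined further down; the wrapper name and the exact prototype are illustrative, and the real dispatch logic lives in crypto/ppccap.c.

    #include <signal.h>

    /* Hypothetical wrapper: block asynchronous signals for the duration of
     * the FPU-based Montgomery routine, because 32-bit signal delivery does
     * not preserve the upper halves of 64-bit GPRs. Prototype is illustrative. */
    extern int bn_mul_mont_fpu64(unsigned long *rp, const unsigned long *ap,
                                 const unsigned long *bp, const unsigned long *np,
                                 const unsigned long *n0, int num);

    static int mont_fpu64_blocked(unsigned long *rp, const unsigned long *ap,
                                  const unsigned long *bp, const unsigned long *np,
                                  const unsigned long *n0, int num)
    {
        sigset_t all, saved;
        int ret;

        sigfillset(&all);                          /* mask everything maskable */
        sigprocmask(SIG_SETMASK, &all, &saved);
        ret = bn_mul_mont_fpu64(rp, ap, bp, np, n0, num);
        sigprocmask(SIG_SETMASK, &saved, NULL);    /* restore the caller's mask */
        return ret;
    }

The masking and unmasking on every call is exactly the overhead the comment above attributes the short-key slowdown to.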
$flavour = shift;
if ($flavour =~ /32/) {
$SIZE_T=4;
$RZONE= 224;
- $FRAME= $SIZE_T*12+8*12;
- $fname= "bn_mul_mont_ppc64";
+ $fname= "bn_mul_mont_fpu64";
$STUX= "stwux"; # store indexed and update
$PUSH= "stw";
$POP= "lwz";
- die "not implemented yet";
} elsif ($flavour =~ /64/) {
$SIZE_T=8;
$RZONE= 288;
- $FRAME= $SIZE_T*12+8*12;
- $fname= "bn_mul_mont";
+ $fname= "bn_mul_mont_fpu64";
# same as above, but 64-bit mnemonics...
$STUX= "stdux"; # store indexed and update
@@ -76,7 +93,7 @@ die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-$FRAME=($FRAME+63)&~63;
+$FRAME=64; # padded frame header
$TRANSFER=16*8;
$carry="r0";
@@ -93,16 +110,16 @@ $tp="r10";
$j="r11";
$i="r12";
# non-volatile registers
-$nap_d="r14"; # interleaved ap and np in double format
-$a0="r15"; # ap[0]
-$t0="r16"; # temporary registers
-$t1="r17";
-$t2="r18";
-$t3="r19";
-$t4="r20";
-$t5="r21";
-$t6="r22";
-$t7="r23";
+$nap_d="r22"; # interleaved ap and np in double format
+$a0="r23"; # ap[0]
+$t0="r24"; # temporary registers
+$t1="r25";
+$t2="r26";
+$t3="r27";
+$t4="r28";
+$t5="r29";
+$t6="r30";
+$t7="r31";
# PPC offers enough register bank capacity to unroll inner loops twice
#
@@ -132,28 +149,17 @@ $ba="f0"; $bb="f1"; $bc="f2"; $bd="f3";
$na="f4"; $nb="f5"; $nc="f6"; $nd="f7";
$dota="f8"; $dotb="f9";
$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13";
-$N0="f14"; $N1="f15"; $N2="f16"; $N3="f17";
-$T0a="f18"; $T0b="f19";
-$T1a="f20"; $T1b="f21";
-$T2a="f22"; $T2b="f23";
-$T3a="f24"; $T3b="f25";
+$N0="f20"; $N1="f21"; $N2="f22"; $N3="f23";
+$T0a="f24"; $T0b="f25";
+$T1a="f26"; $T1b="f27";
+$T2a="f28"; $T2b="f29";
+$T3a="f30"; $T3b="f31";
# sp----------->+-------------------------------+
# | saved sp |
# +-------------------------------+
-# | |
-# +-------------------------------+
-# | 10 saved gpr, r14-r23 |
-# . .
-# . .
-# +12*size_t +-------------------------------+
-# | 12 saved fpr, f14-f25 |
# . .
-# . .
-# +12*8 +-------------------------------+
-# | padding to 64 byte boundary |
-# . .
-# +X +-------------------------------+
+# +64 +-------------------------------+
# | 16 gpr<->fpr transfer zone |
# . .
# . .
@@ -173,6 +179,16 @@ $T3a="f24"; $T3b="f25";
# . .
# . .
# +-------------------------------+
+# . .
+# -12*size_t +-------------------------------+
+# | 10 saved gpr, r22-r31 |
+# . .
+# . .
+# -12*8 +-------------------------------+
+# | 12 saved fpr, f20-f31 |
+# . .
+# . .
+# +-------------------------------+
$code=<<___;
.machine "any"
@@ -181,14 +197,14 @@ $code=<<___;
.globl .$fname
.align 5
.$fname:
- cmpwi $num,4
+ cmpwi $num,`3*8/$SIZE_T`
mr $rp,r3 ; $rp is reassigned
li r3,0 ; possible "not handled" return code
bltlr-
- andi. r0,$num,1 ; $num has to be even
+ andi. r0,$num,`16/$SIZE_T-1` ; $num has to be "even"
bnelr-
- slwi $num,$num,3 ; num*=8
+ slwi $num,$num,`log($SIZE_T)/log(2)` ; num*=sizeof(BN_LONG)
li $i,-4096
slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num
add $tp,$tp,$num ; place for tp[num+1]
@@ -196,35 +212,50 @@ $code=<<___;
subf $tp,$tp,$sp ; $sp-$tp
and $tp,$tp,$i ; minimize TLB usage
subf $tp,$sp,$tp ; $tp-$sp
+ mr $i,$sp
$STUX $sp,$sp,$tp ; alloca
- $PUSH r14,`2*$SIZE_T`($sp)
- $PUSH r15,`3*$SIZE_T`($sp)
- $PUSH r16,`4*$SIZE_T`($sp)
- $PUSH r17,`5*$SIZE_T`($sp)
- $PUSH r18,`6*$SIZE_T`($sp)
- $PUSH r19,`7*$SIZE_T`($sp)
- $PUSH r20,`8*$SIZE_T`($sp)
- $PUSH r21,`9*$SIZE_T`($sp)
- $PUSH r22,`10*$SIZE_T`($sp)
- $PUSH r23,`11*$SIZE_T`($sp)
- stfd f14,`12*$SIZE_T+0`($sp)
- stfd f15,`12*$SIZE_T+8`($sp)
- stfd f16,`12*$SIZE_T+16`($sp)
- stfd f17,`12*$SIZE_T+24`($sp)
- stfd f18,`12*$SIZE_T+32`($sp)
- stfd f19,`12*$SIZE_T+40`($sp)
- stfd f20,`12*$SIZE_T+48`($sp)
- stfd f21,`12*$SIZE_T+56`($sp)
- stfd f22,`12*$SIZE_T+64`($sp)
- stfd f23,`12*$SIZE_T+72`($sp)
- stfd f24,`12*$SIZE_T+80`($sp)
- stfd f25,`12*$SIZE_T+88`($sp)
-
+ $PUSH r22,`-12*8-10*$SIZE_T`($i)
+ $PUSH r23,`-12*8-9*$SIZE_T`($i)
+ $PUSH r24,`-12*8-8*$SIZE_T`($i)
+ $PUSH r25,`-12*8-7*$SIZE_T`($i)
+ $PUSH r26,`-12*8-6*$SIZE_T`($i)
+ $PUSH r27,`-12*8-5*$SIZE_T`($i)
+ $PUSH r28,`-12*8-4*$SIZE_T`($i)
+ $PUSH r29,`-12*8-3*$SIZE_T`($i)
+ $PUSH r30,`-12*8-2*$SIZE_T`($i)
+ $PUSH r31,`-12*8-1*$SIZE_T`($i)
+ stfd f20,`-12*8`($i)
+ stfd f21,`-11*8`($i)
+ stfd f22,`-10*8`($i)
+ stfd f23,`-9*8`($i)
+ stfd f24,`-8*8`($i)
+ stfd f25,`-7*8`($i)
+ stfd f26,`-6*8`($i)
+ stfd f27,`-5*8`($i)
+ stfd f28,`-4*8`($i)
+ stfd f29,`-3*8`($i)
+ stfd f30,`-2*8`($i)
+ stfd f31,`-1*8`($i)
+___
+$code.=<<___ if ($SIZE_T==8);
ld $a0,0($ap) ; pull ap[0] value
ld $n0,0($n0) ; pull n0[0] value
ld $t3,0($bp) ; bp[0]
-
+___
+$code.=<<___ if ($SIZE_T==4);
+ mr $t1,$n0
+ lwz $a0,0($ap) ; pull ap[0,1] value
+ lwz $t0,4($ap)
+ lwz $n0,0($t1) ; pull n0[0,1] value
+ lwz $t1,4($t1)
+ lwz $t3,0($bp) ; bp[0,1]
+ lwz $t2,4($bp)
+ insrdi $a0,$t0,32,0
+ insrdi $n0,$t1,32,0
+ insrdi $t3,$t2,32,0
+___
+$code.=<<___;
addi $tp,$sp,`$FRAME+$TRANSFER+8+64`
li $i,-64
add $nap_d,$tp,$num
@@ -258,6 +289,8 @@ $code=<<___;
std $t5,`$FRAME+40`($sp)
std $t6,`$FRAME+48`($sp)
std $t7,`$FRAME+56`($sp)
+___
+$code.=<<___ if ($SIZE_T==8);
lwz $t0,4($ap) ; load a[j] as 32-bit word pair
lwz $t1,0($ap)
lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
@@ -266,6 +299,18 @@ $code=<<___;
lwz $t5,0($np)
lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
lwz $t7,8($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
lfd $ba,`$FRAME+0`($sp)
lfd $bb,`$FRAME+8`($sp)
lfd $bc,`$FRAME+16`($sp)
@@ -374,6 +419,8 @@ $code=<<___;
.align 5
L1st:
+___
+$code.=<<___ if ($SIZE_T==8);
lwz $t0,4($ap) ; load a[j] as 32-bit word pair
lwz $t1,0($ap)
lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
@@ -382,6 +429,18 @@ L1st:
lwz $t5,0($np)
lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
lwz $t7,8($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
std $t0,`$FRAME+64`($sp)
std $t1,`$FRAME+72`($sp)
std $t2,`$FRAME+80`($sp)
@@ -559,7 +618,17 @@ L1st:
li $i,8 ; i=1
.align 5
Louter:
+___
+$code.=<<___ if ($SIZE_T==8);
ldx $t3,$bp,$i ; bp[i]
+___
+$code.=<<___ if ($SIZE_T==4);
+ add $t0,$bp,$i
+ lwz $t3,0($t0) ; bp[i,i+1]
+ lwz $t0,4($t0)
+ insrdi $t3,$t0,32,0
+___
+$code.=<<___;
ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
mulld $t7,$a0,$t3 ; ap[0]*bp[i]
@@ -761,6 +830,13 @@ Linner:
stfd $T0b,`$FRAME+8`($sp)
add $t7,$t7,$carry
addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
stfd $T1a,`$FRAME+16`($sp)
stfd $T1b,`$FRAME+24`($sp)
insrdi $t4,$t7,16,0 ; 64..127 bits
@@ -768,6 +844,13 @@ Linner:
stfd $T2a,`$FRAME+32`($sp)
stfd $T2b,`$FRAME+40`($sp)
adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
stfd $T3a,`$FRAME+48`($sp)
stfd $T3b,`$FRAME+56`($sp)
addze $carry,$carry
@@ -816,7 +899,21 @@ Linner:
ld $t7,`$FRAME+72`($sp)
addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
addze $carry,$carry
std $t3,-16($tp) ; tp[j-1]
@@ -835,7 +932,9 @@ Linner:
subf $nap_d,$t7,$nap_d ; rewind pointer
cmpw $i,$num
blt- Louter
+___
+$code.=<<___ if ($SIZE_T==8);
subf $np,$num,$np ; rewind np
addi $j,$j,1 ; restore counter
subfc $i,$i,$i ; j=0 and "clear" XER[CA]
@@ -883,34 +982,105 @@ Lcopy: ; copy or in-place refresh
stdx $i,$t4,$i
addi $i,$i,16
bdnz- Lcopy
+___
+$code.=<<___ if ($SIZE_T==4);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ addi $np,$np,-4
+ addi $rp,$rp,-4
+ addi $ap,$sp,`$FRAME+$TRANSFER+4`
+ mtctr $j
+
+.align 4
+Lsub: ld $t0,8($tp) ; load tp[j..j+3] in 64-bit word order
+ ldu $t2,16($tp)
+ lwz $t4,4($np) ; load np[j..j+3] in 32-bit word order
+ lwz $t5,8($np)
+ lwz $t6,12($np)
+ lwzu $t7,16($np)
+ extrdi $t1,$t0,32,0
+ extrdi $t3,$t2,32,0
+ subfe $t4,$t4,$t0 ; tp[j]-np[j]
+ stw $t0,4($ap) ; save tp[j..j+3] in 32-bit word order
+ subfe $t5,$t5,$t1 ; tp[j+1]-np[j+1]
+ stw $t1,8($ap)
+ subfe $t6,$t6,$t2 ; tp[j+2]-np[j+2]
+ stw $t2,12($ap)
+ subfe $t7,$t7,$t3 ; tp[j+3]-np[j+3]
+ stwu $t3,16($ap)
+ stw $t4,4($rp)
+ stw $t5,8($rp)
+ stw $t6,12($rp)
+ stwu $t7,16($rp)
+ bdnz- Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ addi $tp,$sp,`$FRAME+$TRANSFER+4`
+ subf $rp,$num,$rp ; rewind rp
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ lwz $t0,4($ap)
+ lwz $t1,8($ap)
+ lwz $t2,12($ap)
+ lwzu $t3,16($ap)
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stw $t0,4($rp)
+ stw $t1,8($rp)
+ stw $t2,12($rp)
+ stwu $t3,16($rp)
+ std $i,8($tp) ; zap tp at once
+ stdu $i,16($tp)
+ bdnz- Lcopy
+___
- $POP r14,`2*$SIZE_T`($sp)
- $POP r15,`3*$SIZE_T`($sp)
- $POP r16,`4*$SIZE_T`($sp)
- $POP r17,`5*$SIZE_T`($sp)
- $POP r18,`6*$SIZE_T`($sp)
- $POP r19,`7*$SIZE_T`($sp)
- $POP r20,`8*$SIZE_T`($sp)
- $POP r21,`9*$SIZE_T`($sp)
- $POP r22,`10*$SIZE_T`($sp)
- $POP r23,`11*$SIZE_T`($sp)
- lfd f14,`12*$SIZE_T+0`($sp)
- lfd f15,`12*$SIZE_T+8`($sp)
- lfd f16,`12*$SIZE_T+16`($sp)
- lfd f17,`12*$SIZE_T+24`($sp)
- lfd f18,`12*$SIZE_T+32`($sp)
- lfd f19,`12*$SIZE_T+40`($sp)
- lfd f20,`12*$SIZE_T+48`($sp)
- lfd f21,`12*$SIZE_T+56`($sp)
- lfd f22,`12*$SIZE_T+64`($sp)
- lfd f23,`12*$SIZE_T+72`($sp)
- lfd f24,`12*$SIZE_T+80`($sp)
- lfd f25,`12*$SIZE_T+88`($sp)
- $POP $sp,0($sp)
+$code.=<<___;
+ $POP $i,0($sp)
li r3,1 ; signal "handled"
+ $POP r22,`-12*8-10*$SIZE_T`($i)
+ $POP r23,`-12*8-9*$SIZE_T`($i)
+ $POP r24,`-12*8-8*$SIZE_T`($i)
+ $POP r25,`-12*8-7*$SIZE_T`($i)
+ $POP r26,`-12*8-6*$SIZE_T`($i)
+ $POP r27,`-12*8-5*$SIZE_T`($i)
+ $POP r28,`-12*8-4*$SIZE_T`($i)
+ $POP r29,`-12*8-3*$SIZE_T`($i)
+ $POP r30,`-12*8-2*$SIZE_T`($i)
+ $POP r31,`-12*8-1*$SIZE_T`($i)
+ lfd f20,`-12*8`($i)
+ lfd f21,`-11*8`($i)
+ lfd f22,`-10*8`($i)
+ lfd f23,`-9*8`($i)
+ lfd f24,`-8*8`($i)
+ lfd f25,`-7*8`($i)
+ lfd f26,`-6*8`($i)
+ lfd f27,`-5*8`($i)
+ lfd f28,`-4*8`($i)
+ lfd f29,`-3*8`($i)
+ lfd f30,`-2*8`($i)
+ lfd f31,`-1*8`($i)
+ mr $sp,$i
blr
.long 0
-.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@fy.chalmers.se>"
+ .byte 0,12,4,0,0x8c,10,6,0
+ .long 0
+
+.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
diff --git a/openssl/crypto/bn/asm/s390x-gf2m.pl b/openssl/crypto/bn/asm/s390x-gf2m.pl
new file mode 100644
index 000000000..cd9f13eca
--- /dev/null
+++ b/openssl/crypto/bn/asm/s390x-gf2m.pl
@@ -0,0 +1,221 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of a low-hanging mechanical port from C for
+# the time being... gcc 4.3 appeared to generate poor code, therefore
+# the effort. And indeed, the module delivers 55%-90%(*) improvement
+# on the heaviest ECDSA verify and ECDH benchmarks for 163- and 571-bit
+# key lengths on z990, 30%-55%(*) on z10, and 70%-110%(*) on z196.
+# This is for the 64-bit build. In the 32-bit "highgprs" case the
+# improvement is even higher, for example on z990 it was measured at
+# 80%-150%. ECDSA sign is a modest 9%-12% faster. Keep in mind that
+# these coefficients are not the ones for bn_GF2m_mul_2x2 itself, as
+# not all CPU time is burnt in it...
+#
+# (*) gcc 4.1 was observed to deliver better results than gcc 4.3,
+# so that improvement coefficients can vary from one specific
+# setup to another.
+
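For orientation, here is a minimal C sketch of the table-driven carry-less multiplication that the _mul_1x1 routine below implements; the names are illustrative, not OpenSSL's, and the code only mirrors the idea (a 16-entry nibble-product table plus a masked fix-up for the three top bits of a), not the scheduled assembly.

    #include <stdint.h>

    /* Carry-less 64x64->128 bit multiplication over GF(2)[x] using a table of
     * nibble products, the same idea _mul_1x1 implements in assembly. */
    static void gf2m_mul_1x1(uint64_t *hi, uint64_t *lo, uint64_t a, uint64_t b)
    {
        uint64_t tab[16], top3 = a >> 61, h = 0, l = 0, m;
        uint64_t a1 = a & 0x1FFFFFFFFFFFFFFF;  /* low 61 bits, so a1<<3 cannot overflow */
        int i, j, n;

        for (i = 0; i < 16; i++) {             /* tab[i] = i(x)*a1(x) in GF(2)[x] */
            tab[i] = 0;
            for (j = 0; j < 4; j++)
                if (i & (1 << j))
                    tab[i] ^= a1 << j;
        }

        for (n = 0; n < 64; n += 4) {          /* consume b four bits at a time */
            uint64_t s = tab[(b >> n) & 0xF];
            l ^= s << n;
            if (n)
                h ^= s >> (64 - n);
        }

        /* fix up the three top bits of a that were masked off; the assembly
         * does the same thing with "broadcast" masks (srag + ngr) */
        m = 0 - ((top3 >> 0) & 1); l ^= (b << 61) & m; h ^= (b >> 3) & m;
        m = 0 - ((top3 >> 1) & 1); l ^= (b << 62) & m; h ^= (b >> 2) & m;
        m = 0 - ((top3 >> 2) & 1); l ^= (b << 63) & m; h ^= (b >> 1) & m;

        *hi = h; *lo = l;
    }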
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$rp="%r2";
+$a1="%r3";
+$a0="%r4";
+$b1="%r5";
+$b0="%r6";
+
+$ra="%r14";
+$sp="%r15";
+
+@T=("%r0","%r1");
+@i=("%r12","%r13");
+
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(6..11));
+($lo,$hi,$b)=map("%r$_",(3..5)); $a=$lo; $mask=$a8;
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@function
+.align 16
+_mul_1x1:
+ lgr $a1,$a
+ sllg $a2,$a,1
+ sllg $a4,$a,2
+ sllg $a8,$a,3
+
+ srag $lo,$a1,63 # broadcast 63rd bit
+ nihh $a1,0x1fff
+ srag @i[0],$a2,63 # broadcast 62nd bit
+ nihh $a2,0x3fff
+ srag @i[1],$a4,63 # broadcast 61st bit
+ nihh $a4,0x7fff
+ ngr $lo,$b
+ ngr @i[0],$b
+ ngr @i[1],$b
+
+ lghi @T[0],0
+ lgr $a12,$a1
+ stg @T[0],`$stdframe+0*8`($sp) # tab[0]=0
+ xgr $a12,$a2
+ stg $a1,`$stdframe+1*8`($sp) # tab[1]=a1
+ lgr $a48,$a4
+ stg $a2,`$stdframe+2*8`($sp) # tab[2]=a2
+ xgr $a48,$a8
+ stg $a12,`$stdframe+3*8`($sp) # tab[3]=a1^a2
+ xgr $a1,$a4
+
+ stg $a4,`$stdframe+4*8`($sp) # tab[4]=a4
+ xgr $a2,$a4
+ stg $a1,`$stdframe+5*8`($sp) # tab[5]=a1^a4
+ xgr $a12,$a4
+ stg $a2,`$stdframe+6*8`($sp) # tab[6]=a2^a4
+ xgr $a1,$a48
+ stg $a12,`$stdframe+7*8`($sp) # tab[7]=a1^a2^a4
+ xgr $a2,$a48
+
+ stg $a8,`$stdframe+8*8`($sp) # tab[8]=a8
+ xgr $a12,$a48
+ stg $a1,`$stdframe+9*8`($sp) # tab[9]=a1^a8
+ xgr $a1,$a4
+ stg $a2,`$stdframe+10*8`($sp) # tab[10]=a2^a8
+ xgr $a2,$a4
+ stg $a12,`$stdframe+11*8`($sp) # tab[11]=a1^a2^a8
+
+ xgr $a12,$a4
+ stg $a48,`$stdframe+12*8`($sp) # tab[12]=a4^a8
+ srlg $hi,$lo,1
+ stg $a1,`$stdframe+13*8`($sp) # tab[13]=a1^a4^a8
+ sllg $lo,$lo,63
+ stg $a2,`$stdframe+14*8`($sp) # tab[14]=a2^a4^a8
+ srlg @T[0],@i[0],2
+ stg $a12,`$stdframe+15*8`($sp) # tab[15]=a1^a2^a4^a8
+
+ lghi $mask,`0xf<<3`
+ sllg $a1,@i[0],62
+ sllg @i[0],$b,3
+ srlg @T[1],@i[1],3
+ ngr @i[0],$mask
+ sllg $a2,@i[1],61
+ srlg @i[1],$b,4-3
+ xgr $hi,@T[0]
+ ngr @i[1],$mask
+ xgr $lo,$a1
+ xgr $hi,@T[1]
+ xgr $lo,$a2
+
+ xg $lo,$stdframe(@i[0],$sp)
+ srlg @i[0],$b,8-3
+ ngr @i[0],$mask
+___
+for($n=1;$n<14;$n++) {
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ srlg @i[1],$b,`($n+2)*4`-3
+ sllg @T[0],@T[1],`$n*4`
+ ngr @i[1],$mask
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+___
+ push(@i,shift(@i)); push(@T,shift(@T));
+}
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ sllg @T[0],@T[1],`$n*4`
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+
+ lg @T[0],$stdframe(@i[0],$sp)
+ sllg @T[1],@T[0],`($n+1)*4`
+ srlg @T[0],@T[0],`64-($n+1)*4`
+ xgr $lo,@T[1]
+ xgr $hi,@T[0]
+
+ br $ra
+.size _mul_1x1,.-_mul_1x1
+
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@function
+.align 16
+bn_GF2m_mul_2x2:
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi %r1,-$stdframe-128
+ la %r0,0($sp)
+ la $sp,0(%r1,$sp) # alloca
+ st${g} %r0,0($sp) # back chain
+___
+if ($SIZE_T==8) {
+my @r=map("%r$_",(6..9));
+$code.=<<___;
+ bras $ra,_mul_1x1 # a1·b1
+ stmg $lo,$hi,16($rp)
+
+ lg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # a0·b0
+ stmg $lo,$hi,0($rp)
+
+ lg $a,`$stdframe+128+3*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+5*$SIZE_T`($sp)
+ xg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ xg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # (a0+a1)·(b0+b1)
+ lmg @r[0],@r[3],0($rp)
+
+ xgr $lo,$hi
+ xgr $hi,@r[1]
+ xgr $lo,@r[0]
+ xgr $hi,@r[2]
+ xgr $lo,@r[3]
+ xgr $hi,@r[3]
+ xgr $lo,$hi
+ stg $hi,16($rp)
+ stg $lo,8($rp)
+___
+} else {
+$code.=<<___;
+ sllg %r3,%r3,32
+ sllg %r5,%r5,32
+ or %r3,%r4
+ or %r5,%r6
+ bras $ra,_mul_1x1
+ rllg $lo,$lo,32
+ rllg $hi,$hi,32
+ stmg $lo,$hi,0($rp)
+___
+}
+$code.=<<___;
+ lm${g} %r6,%r15,`$stdframe+128+6*$SIZE_T`($sp)
+ br $ra
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.string "GF(2^m) Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/s390x-mont.pl b/openssl/crypto/bn/asm/s390x-mont.pl
index f61246f5b..9fd64e81e 100644
--- a/openssl/crypto/bn/asm/s390x-mont.pl
+++ b/openssl/crypto/bn/asm/s390x-mont.pl
@@ -32,6 +32,33 @@
# Reschedule to minimize/avoid Address Generation Interlock hazard,
# make inner loops counter-based.
+# November 2010.
+#
+# Adapt for the -m31 build. If the kernel supports what's called the
+# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to use
+# 64-bit instructions and achieve "64-bit" performance even in a 31-bit
+# legacy application context. The feature is not specific to any
+# particular processor, as long as it's a "z-CPU". The latter implies
+# that the code remains z/Architecture specific. Compatibility with
+# 32-bit BN_ULONG is achieved by swapping words after 64-bit loads; see the _dswap markers.
+# On z990 it was measured to perform 2.6-2.2 times better than
+# compiler-generated code, less for longer keys...
+
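The word swapping boils down to a 32-bit rotate: with 32-bit BN_ULONGs on this big-endian architecture, a 64-bit load of two adjacent words lands the less significant word in the upper half, and rotating by 32 restores the order 64-bit arithmetic expects. A one-function C illustration of what the _dswap marker (substituted with rllg at the bottom of the script) does:

    #include <stdint.h>

    /* Equivalent of the _dswap marker (rllg reg,reg,32): swap the 32-bit
     * halves so that bn[j] ends up in the low half and bn[j+1] in the high
     * half of the 64-bit register. */
    static inline uint64_t dswap64(uint64_t v)
    {
        return (v << 32) | (v >> 32);
    }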
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
$mn0="%r0";
$num="%r1";
@@ -60,34 +87,44 @@ $code.=<<___;
.globl bn_mul_mont
.type bn_mul_mont,\@function
bn_mul_mont:
- lgf $num,164($sp) # pull $num
- sla $num,3 # $num to enumerate bytes
+ lgf $num,`$stdframe+$SIZE_T-4`($sp) # pull $num
+ sla $num,`log($SIZE_T)/log(2)` # $num to enumerate bytes
la $bp,0($num,$bp)
- stg %r2,16($sp)
+ st${g} %r2,2*$SIZE_T($sp)
cghi $num,16 #
lghi %r2,0 #
blr %r14 # if($num<16) return 0;
+___
+$code.=<<___ if ($flavour =~ /3[12]/);
+ tmll $num,4
+ bnzr %r14 # if ($num&1) return 0;
+___
+$code.=<<___ if ($flavour !~ /3[12]/);
cghi $num,96 #
bhr %r14 # if($num>96) return 0;
+___
+$code.=<<___;
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
- stmg %r3,%r15,24($sp)
-
- lghi $rp,-160-8 # leave room for carry bit
+ lghi $rp,-$stdframe-8 # leave room for carry bit
lcgr $j,$num # -$num
lgr %r0,$sp
la $rp,0($rp,$sp)
la $sp,0($j,$rp) # alloca
- stg %r0,0($sp) # back chain
+ st${g} %r0,0($sp) # back chain
sra $num,3 # restore $num
la $bp,0($j,$bp) # restore $bp
ahi $num,-1 # adjust $num for inner loop
lg $n0,0($n0) # pull n0
+ _dswap $n0
lg $bi,0($bp)
+ _dswap $bi
lg $alo,0($ap)
+ _dswap $alo
mlgr $ahi,$bi # ap[0]*bp[0]
lgr $AHI,$ahi
@@ -95,6 +132,7 @@ bn_mul_mont:
msgr $mn0,$n0
lg $nlo,0($np) #
+ _dswap $nlo
mlgr $nhi,$mn0 # np[0]*m1
algr $nlo,$alo # +="tp[0]"
lghi $NHI,0
@@ -106,12 +144,14 @@ bn_mul_mont:
.align 16
.L1st:
lg $alo,0($j,$ap)
+ _dswap $alo
mlgr $ahi,$bi # ap[j]*bp[0]
algr $alo,$AHI
lghi $AHI,0
alcgr $AHI,$ahi
lg $nlo,0($j,$np)
+ _dswap $nlo
mlgr $nhi,$mn0 # np[j]*m1
algr $nlo,$NHI
lghi $NHI,0
@@ -119,22 +159,24 @@ bn_mul_mont:
algr $nlo,$alo
alcgr $NHI,$nhi
- stg $nlo,160-8($j,$sp) # tp[j-1]=
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
la $j,8($j) # j++
brct $count,.L1st
algr $NHI,$AHI
lghi $AHI,0
alcgr $AHI,$AHI # upmost overflow bit
- stg $NHI,160-8($j,$sp)
- stg $AHI,160($j,$sp)
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
la $bp,8($bp) # bp++
.Louter:
lg $bi,0($bp) # bp[i]
+ _dswap $bi
lg $alo,0($ap)
+ _dswap $alo
mlgr $ahi,$bi # ap[0]*bp[i]
- alg $alo,160($sp) # +=tp[0]
+ alg $alo,$stdframe($sp) # +=tp[0]
lghi $AHI,0
alcgr $AHI,$ahi
@@ -142,6 +184,7 @@ bn_mul_mont:
msgr $mn0,$n0 # tp[0]*n0
lg $nlo,0($np) # np[0]
+ _dswap $nlo
mlgr $nhi,$mn0 # np[0]*m1
algr $nlo,$alo # +="tp[0]"
lghi $NHI,0
@@ -153,14 +196,16 @@ bn_mul_mont:
.align 16
.Linner:
lg $alo,0($j,$ap)
+ _dswap $alo
mlgr $ahi,$bi # ap[j]*bp[i]
algr $alo,$AHI
lghi $AHI,0
alcgr $ahi,$AHI
- alg $alo,160($j,$sp)# +=tp[j]
+ alg $alo,$stdframe($j,$sp)# +=tp[j]
alcgr $AHI,$ahi
lg $nlo,0($j,$np)
+ _dswap $nlo
mlgr $nhi,$mn0 # np[j]*m1
algr $nlo,$NHI
lghi $NHI,0
@@ -168,31 +213,33 @@ bn_mul_mont:
algr $nlo,$alo # +="tp[j]"
alcgr $NHI,$nhi
- stg $nlo,160-8($j,$sp) # tp[j-1]=
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
la $j,8($j) # j++
brct $count,.Linner
algr $NHI,$AHI
lghi $AHI,0
alcgr $AHI,$AHI
- alg $NHI,160($j,$sp)# accumulate previous upmost overflow bit
+ alg $NHI,$stdframe($j,$sp)# accumulate previous upmost overflow bit
lghi $ahi,0
alcgr $AHI,$ahi # new upmost overflow bit
- stg $NHI,160-8($j,$sp)
- stg $AHI,160($j,$sp)
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
la $bp,8($bp) # bp++
- clg $bp,160+8+32($j,$sp) # compare to &bp[num]
+ cl${g} $bp,`$stdframe+8+4*$SIZE_T`($j,$sp) # compare to &bp[num]
jne .Louter
- lg $rp,160+8+16($j,$sp) # reincarnate rp
- la $ap,160($sp)
+ l${g} $rp,`$stdframe+8+2*$SIZE_T`($j,$sp) # reincarnate rp
+ la $ap,$stdframe($sp)
ahi $num,1 # restore $num, incidentally clears "borrow"
la $j,0(%r0)
lr $count,$num
.Lsub: lg $alo,0($j,$ap)
- slbg $alo,0($j,$np)
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ slbgr $alo,$nlo
stg $alo,0($j,$rp)
la $j,8($j)
brct $count,.Lsub
@@ -207,19 +254,24 @@ bn_mul_mont:
la $j,0(%r0)
lgr $count,$num
-.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh
- stg $j,160($j,$sp) # zap tp
+.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh
+ _dswap $alo
+ stg $j,$stdframe($j,$sp) # zap tp
stg $alo,0($j,$rp)
la $j,8($j)
brct $count,.Lcopy
- la %r1,160+8+48($j,$sp)
- lmg %r6,%r15,0(%r1)
+ la %r1,`$stdframe+8+6*$SIZE_T`($j,$sp)
+ lm${g} %r6,%r15,0(%r1)
lghi %r2,1 # signal "processed"
br %r14
.size bn_mul_mont,.-bn_mul_mont
.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
___
-print $code;
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ s/_dswap\s+(%r[0-9]+)/sprintf("rllg\t%s,%s,32",$1,$1) if($SIZE_T==4)/e;
+ print $_,"\n";
+}
close STDOUT;
diff --git a/openssl/crypto/bn/asm/x86-gf2m.pl b/openssl/crypto/bn/asm/x86-gf2m.pl
new file mode 100644
index 000000000..808a1e596
--- /dev/null
+++ b/openssl/crypto/bn/asm/x86-gf2m.pl
@@ -0,0 +1,313 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of a low-hanging mechanical port from C for
+# the time being... Except that it has three code paths: pure integer
+# code suitable for any x86 CPU, MMX code suitable for PIII and later
+# and PCLMULQDQ suitable for Westmere and later. Improvement varies
+# from one benchmark and µ-arch to another. Below are interval values
+# for 163- and 571-bit ECDH benchmarks relative to compiler-generated
+# code:
+#
+# PIII 16%-30%
+# P4 12%-12%
+# Opteron 18%-40%
+# Core2 19%-44%
+# Atom 38%-64%
+# Westmere 53%-121%(PCLMULQDQ)/20%-32%(MMX)
+# Sandy Bridge 72%-127%(PCLMULQDQ)/27%-23%(MMX)
+#
+# Note that the above improvement coefficients are not coefficients for
+# bn_GF2m_mul_2x2 itself. For example, the 120% ECDH improvement is the
+# result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster, the
+# benchmark is more and more dominated by other subroutines, most
+# notably by BN_GF2m_mod[_mul]_arr...
+
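All three code paths share the same skeleton: three 1x1 carry-less products combined Karatsuba-style into the 2x2 result. Below is a minimal C sketch of that combination for 32-bit words, with a naive bit loop standing in for _mul_1x1; the names are illustrative, not OpenSSL's.

    #include <stdint.h>

    /* Naive 32x32->64 carry-less multiply standing in for _mul_1x1;
     * purely illustrative. */
    static uint64_t clmul32(uint32_t a, uint32_t b)
    {
        uint64_t r = 0;
        int i;
        for (i = 0; i < 32; i++)
            if (b & (1u << i))
                r ^= (uint64_t)a << i;
        return r;
    }

    /* Karatsuba-style combination used by bn_GF2m_mul_2x2: three 1x1
     * products instead of four. r[0..3] receive the product, least
     * significant word first. */
    static void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                             uint32_t b1, uint32_t b0)
    {
        uint64_t hi  = clmul32(a1, b1);
        uint64_t lo  = clmul32(a0, b0);
        uint64_t mid = clmul32(a1 ^ a0, b1 ^ b0) ^ hi ^ lo;
        uint64_t t0  = lo ^ (mid << 32);   /* low 64 bits of the result  */
        uint64_t t1  = hi ^ (mid >> 32);   /* high 64 bits of the result */

        r[0] = (uint32_t)t0; r[1] = (uint32_t)(t0 >> 32);
        r[2] = (uint32_t)t1; r[3] = (uint32_t)(t1 >> 32);
    }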
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0,$x86only = $ARGV[$#ARGV] eq "386");
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+$a="eax";
+$b="ebx";
+($a1,$a2,$a4)=("ecx","edx","ebp");
+
+$R="mm0";
+@T=("mm1","mm2");
+($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5");
+@i=("esi","edi");
+
+ if (!$x86only) {
+&function_begin_B("_mul_1x1_mmx");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &and ($a1,0x3fffffff);
+ &lea ($a4,&DWP(0,$a2,$a2));
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &movd ($A,$a);
+ &movd ($B,$b);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &pxor ($B31,$B31);
+ &pxor ($B30,$B30);
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &pcmpgtd($B31,$A); # broadcast 31st bit
+ &paddd ($A,$A); # $A<<=1
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &pand ($B31,$B);
+ &pcmpgtd($B30,$A); # broadcast 30th bit
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+ &psllq ($B31,31);
+ &pand ($B30,$B);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &mov (@i[0],0x7);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($a4,@i[0]);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ &mov (@i[1],$a4);
+ &psllq ($B30,30);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &movd ($R,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],$a4);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],$a4);
+ &psllq (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &pxor ($R,@T[1]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &pxor ($R,$B30);
+ &psllq (@T[1],3*$n++);
+ &pxor ($R,@T[1]);
+
+ &movd (@T[0],&DWP(0,"esp",@i[0],4));
+ &pxor ($R,$B31);
+ &psllq (@T[0],3*$n);
+ &add ("esp",32+4);
+ &pxor ($R,@T[0]);
+ &ret ();
+&function_end_B("_mul_1x1_mmx");
+ }
+
+($lo,$hi)=("eax","edx");
+@T=("ecx","ebp");
+
+&function_begin_B("_mul_1x1_ialu");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &lea ($a4,&DWP(0,"",$a,4));
+ &and ($a1,0x3fffffff);
+ &lea (@i[1],&DWP(0,$lo,$lo));
+ &sar ($lo,31); # broadcast 31st bit
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+	&sar	(@i[1],31);	# broadcast 30th bit
+ &and ($lo,$b);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &and (@i[1],$b);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($hi,$lo);
+ &shl ($lo,31);
+ &mov (@T[0],@i[1]);
+ &shr ($hi,1);
+
+ &mov (@i[0],0x7);
+ &shl (@i[1],30);
+ &and (@i[0],$b);
+ &shr (@T[0],2);
+ &xor ($lo,@i[1]);
+
+ &shr ($b,3);
+ &mov (@i[1],0x7); # 5-byte instruction!?
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+ &xor ($lo,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],0x7);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],0x7);
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr (@T[0],32-3*$n);
+ &xor ($lo,@T[1]);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &mov (@i[1],&DWP(0,"esp",@i[0],4));
+ &shr (@T[0],32-3*$n); $n++;
+ &mov (@i[0],@i[1]);
+ &xor ($lo,@T[1]);
+ &shl (@i[1],3*$n);
+ &xor ($hi,@T[0]);
+ &shr (@i[0],32-3*$n);
+ &xor ($lo,@i[1]);
+ &xor ($hi,@i[0]);
+
+ &add ("esp",32+4);
+ &ret ();
+&function_end_B("_mul_1x1_ialu");
+
+# void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+&function_begin_B("bn_GF2m_mul_2x2");
+if (!$x86only) {
+ &picmeup("edx","OPENSSL_ia32cap_P");
+ &mov ("eax",&DWP(0,"edx"));
+ &mov ("edx",&DWP(4,"edx"));
+ &test ("eax",1<<23); # check MMX bit
+ &jz (&label("ialu"));
+if ($sse2) {
+ &test ("eax",1<<24); # check FXSR bit
+ &jz (&label("mmx"));
+ &test ("edx",1<<1); # check PCLMULQDQ bit
+ &jz (&label("mmx"));
+
+ &movups ("xmm0",&QWP(8,"esp"));
+ &shufps ("xmm0","xmm0",0b10110001);
+ &pclmulqdq ("xmm0","xmm0",1);
+ &mov ("eax",&DWP(4,"esp"));
+ &movups (&QWP(0,"eax"),"xmm0");
+ &ret ();
+
+&set_label("mmx",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_mmx"); # a1·b1
+ &movq ("mm7",$R);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # a0·b0
+ &movq ("mm6",$R);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1)
+ &pxor ($R,"mm7");
+ &mov ($a,&wparam(0));
+ &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0
+
+ &movq ($A,$R);
+ &psllq ($R,32);
+ &pop ("edi");
+ &psrlq ($A,32);
+ &pop ("esi");
+ &pxor ($R,"mm6");
+ &pop ("ebx");
+ &pxor ($A,"mm7");
+ &movq (&QWP(0,$a),$R);
+ &pop ("ebp");
+ &movq (&QWP(8,$a),$A);
+ &emms ();
+ &ret ();
+&set_label("ialu",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &stack_push(4+1);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_ialu"); # a1·b1
+ &mov (&DWP(8,"esp"),$lo);
+ &mov (&DWP(12,"esp"),$hi);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # a0·b0
+ &mov (&DWP(0,"esp"),$lo);
+ &mov (&DWP(4,"esp"),$hi);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1)
+
+ &mov ("ebp",&wparam(0));
+ @r=("ebx","ecx","edi","esi");
+ &mov (@r[0],&DWP(0,"esp"));
+ &mov (@r[1],&DWP(4,"esp"));
+ &mov (@r[2],&DWP(8,"esp"));
+ &mov (@r[3],&DWP(12,"esp"));
+
+ &xor ($lo,$hi);
+ &xor ($hi,@r[1]);
+ &xor ($lo,@r[0]);
+ &mov (&DWP(0,"ebp"),@r[0]);
+ &xor ($hi,@r[2]);
+ &mov (&DWP(12,"ebp"),@r[3]);
+ &xor ($lo,@r[3]);
+ &stack_pop(4+1);
+ &xor ($hi,@r[3]);
+ &pop ("edi");
+ &xor ($lo,$hi);
+ &pop ("esi");
+ &mov (&DWP(8,"ebp"),$hi);
+ &pop ("ebx");
+ &mov (&DWP(4,"ebp"),$lo);
+ &pop ("ebp");
+ &ret ();
+&function_end_B("bn_GF2m_mul_2x2");
+
+&asciz ("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/openssl/crypto/bn/asm/x86_64-gf2m.pl b/openssl/crypto/bn/asm/x86_64-gf2m.pl
new file mode 100644
index 000000000..1658acbbd
--- /dev/null
+++ b/openssl/crypto/bn/asm/x86_64-gf2m.pl
@@ -0,0 +1,389 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for
+# the time being... Except that it has two code paths: code suitable
+# for any x86_64 CPU and PCLMULQDQ one suitable for Westmere and
+# later. Improvement varies from one benchmark and µ-arch to another.
+# Vanilla code path is at most 20% faster than compiler-generated code
+# [not very impressive], while PCLMULQDQ - whole 85%-160% better on
+# 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
+# these coefficients are not ones for bn_GF2m_mul_2x2 itself, as not
+# all CPU time is burnt in it...
+
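The PCLMULQDQ path follows the same three-multiplication Karatsuba scheme, just with 64-bit halves and the carry-less multiply done in a single instruction. A rough intrinsics-based equivalent, assuming PCLMUL support and using illustrative names (the module itself is hand-written assembly):

    #include <emmintrin.h>   /* SSE2 */
    #include <wmmintrin.h>   /* PCLMULQDQ; compile with -mpclmul */
    #include <stdint.h>

    /* Three carry-less multiplies plus the Karatsuba fix-up, mirroring what
     * the fast path does in %xmm registers. */
    static void gf2m_mul_2x2_clmul(uint64_t r[4], uint64_t a1, uint64_t a0,
                                   uint64_t b1, uint64_t b0)
    {
        __m128i A1 = _mm_set_epi64x(0, a1), B1 = _mm_set_epi64x(0, b1);
        __m128i A0 = _mm_set_epi64x(0, a0), B0 = _mm_set_epi64x(0, b0);

        __m128i hi  = _mm_clmulepi64_si128(A1, B1, 0x00);   /* a1*b1 */
        __m128i lo  = _mm_clmulepi64_si128(A0, B0, 0x00);   /* a0*b0 */
        __m128i mid = _mm_clmulepi64_si128(_mm_xor_si128(A1, A0),
                                           _mm_xor_si128(B1, B0), 0x00);

        mid = _mm_xor_si128(mid, _mm_xor_si128(hi, lo));    /* middle term */
        lo  = _mm_xor_si128(lo, _mm_slli_si128(mid, 8));    /* fold into low 128 bits  */
        hi  = _mm_xor_si128(hi, _mm_srli_si128(mid, 8));    /* fold into high 128 bits */

        _mm_storeu_si128((__m128i *)&r[0], lo);
        _mm_storeu_si128((__m128i *)&r[2], hi);
    }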
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+($lo,$hi)=("%rax","%rdx"); $a=$lo;
+($i0,$i1)=("%rsi","%rdi");
+($t0,$t1)=("%rbx","%rcx");
+($b,$mask)=("%rbp","%r8");
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
+($R,$Tx)=("%xmm0","%xmm1");
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@abi-omnipotent
+.align 16
+_mul_1x1:
+ sub \$128+8,%rsp
+ mov \$-1,$a1
+ lea ($a,$a),$i0
+ shr \$3,$a1
+ lea (,$a,4),$i1
+ and $a,$a1 # a1=a&0x1fffffffffffffff
+ lea (,$a,8),$a8
+ sar \$63,$a # broadcast 63rd bit
+ lea ($a1,$a1),$a2
+ sar \$63,$i0 # broadcast 62nd bit
+ lea (,$a1,4),$a4
+ and $b,$a
+	sar	\$63,$i1		# broadcast 61st bit
+ mov $a,$hi # $a is $lo
+ shl \$63,$lo
+ and $b,$i0
+ shr \$1,$hi
+ mov $i0,$t1
+ shl \$62,$i0
+ and $b,$i1
+ shr \$2,$t1
+ xor $i0,$lo
+ mov $i1,$t0
+ shl \$61,$i1
+ xor $t1,$hi
+ shr \$3,$t0
+ xor $i1,$lo
+ xor $t0,$hi
+
+ mov $a1,$a12
+ movq \$0,0(%rsp) # tab[0]=0
+ xor $a2,$a12 # a1^a2
+ mov $a1,8(%rsp) # tab[1]=a1
+ mov $a4,$a48
+ mov $a2,16(%rsp) # tab[2]=a2
+ xor $a8,$a48 # a4^a8
+ mov $a12,24(%rsp) # tab[3]=a1^a2
+
+ xor $a4,$a1
+ mov $a4,32(%rsp) # tab[4]=a4
+ xor $a4,$a2
+ mov $a1,40(%rsp) # tab[5]=a1^a4
+ xor $a4,$a12
+ mov $a2,48(%rsp) # tab[6]=a2^a4
+ xor $a48,$a1 # a1^a4^a4^a8=a1^a8
+ mov $a12,56(%rsp) # tab[7]=a1^a2^a4
+	xor	$a48,$a2		# a2^a4^a4^a8=a2^a8
+
+ mov $a8,64(%rsp) # tab[8]=a8
+ xor $a48,$a12 # a1^a2^a4^a4^a8=a1^a2^a8
+ mov $a1,72(%rsp) # tab[9]=a1^a8
+ xor $a4,$a1 # a1^a8^a4
+ mov $a2,80(%rsp) # tab[10]=a2^a8
+ xor $a4,$a2 # a2^a8^a4
+ mov $a12,88(%rsp) # tab[11]=a1^a2^a8
+
+ xor $a4,$a12 # a1^a2^a8^a4
+ mov $a48,96(%rsp) # tab[12]=a4^a8
+ mov $mask,$i0
+ mov $a1,104(%rsp) # tab[13]=a1^a4^a8
+ and $b,$i0
+ mov $a2,112(%rsp) # tab[14]=a2^a4^a8
+ shr \$4,$b
+ mov $a12,120(%rsp) # tab[15]=a1^a2^a4^a8
+ mov $mask,$i1
+ and $b,$i1
+ shr \$4,$b
+
+ movq (%rsp,$i0,8),$R # half of calculations is done in SSE2
+ mov $mask,$i0
+ and $b,$i0
+ shr \$4,$b
+___
+ for ($n=1;$n<8;$n++) {
+ $code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $mask,$i1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ and $b,$i1
+ movq (%rsp,$i0,8),$Tx
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ pslldq \$$n,$Tx
+ mov $mask,$i0
+ shr \$4,$b
+ xor $t0,$hi
+ and $b,$i0
+ shr \$4,$b
+ pxor $Tx,$R
+___
+ }
+$code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ movq $R,$i0
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ psrldq \$8,$R
+ xor $t0,$hi
+ movq $R,$i1
+ xor $i0,$lo
+ xor $i1,$hi
+
+ add \$128+8,%rsp
+ ret
+.Lend_mul_1x1:
+.size _mul_1x1,.-_mul_1x1
+___
+
+($rp,$a1,$a0,$b1,$b0) = $win64? ("%rcx","%rdx","%r8", "%r9","%r10") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx","%r8"); # Unix order
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@abi-omnipotent
+.align 16
+bn_GF2m_mul_2x2:
+ mov OPENSSL_ia32cap_P(%rip),%rax
+ bt \$33,%rax
+ jnc .Lvanilla_mul_2x2
+
+ movq $a1,%xmm0
+ movq $b1,%xmm1
+ movq $a0,%xmm2
+___
+$code.=<<___ if ($win64);
+ movq 40(%rsp),%xmm3
+___
+$code.=<<___ if (!$win64);
+ movq $b0,%xmm3
+___
+$code.=<<___;
+ movdqa %xmm0,%xmm4
+ movdqa %xmm1,%xmm5
+ pclmulqdq \$0,%xmm1,%xmm0 # a1·b1
+ pxor %xmm2,%xmm4
+ pxor %xmm3,%xmm5
+ pclmulqdq \$0,%xmm3,%xmm2 # a0·b0
+ pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
+ xorps %xmm0,%xmm4
+ xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ movdqa %xmm4,%xmm5
+ pslldq \$8,%xmm4
+ psrldq \$8,%xmm5
+ pxor %xmm4,%xmm2
+ pxor %xmm5,%xmm0
+ movdqu %xmm2,0($rp)
+ movdqu %xmm0,16($rp)
+ ret
+
+.align 16
+.Lvanilla_mul_2x2:
+ lea -8*17(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ mov `8*17+40`(%rsp),$b0
+ mov %rdi,8*15(%rsp)
+ mov %rsi,8*16(%rsp)
+___
+$code.=<<___;
+ mov %r14,8*10(%rsp)
+ mov %r13,8*11(%rsp)
+ mov %r12,8*12(%rsp)
+ mov %rbp,8*13(%rsp)
+ mov %rbx,8*14(%rsp)
+.Lbody_mul_2x2:
+ mov $rp,32(%rsp) # save the arguments
+ mov $a1,40(%rsp)
+ mov $a0,48(%rsp)
+ mov $b1,56(%rsp)
+ mov $b0,64(%rsp)
+
+ mov \$0xf,$mask
+ mov $a1,$a
+ mov $b1,$b
+ call _mul_1x1 # a1·b1
+ mov $lo,16(%rsp)
+ mov $hi,24(%rsp)
+
+ mov 48(%rsp),$a
+ mov 64(%rsp),$b
+ call _mul_1x1 # a0·b0
+ mov $lo,0(%rsp)
+ mov $hi,8(%rsp)
+
+ mov 40(%rsp),$a
+ mov 56(%rsp),$b
+ xor 48(%rsp),$a
+ xor 64(%rsp),$b
+ call _mul_1x1 # (a0+a1)·(b0+b1)
+___
+ @r=("%rbx","%rcx","%rdi","%rsi");
+$code.=<<___;
+ mov 0(%rsp),@r[0]
+ mov 8(%rsp),@r[1]
+ mov 16(%rsp),@r[2]
+ mov 24(%rsp),@r[3]
+ mov 32(%rsp),%rbp
+
+ xor $hi,$lo
+ xor @r[1],$hi
+ xor @r[0],$lo
+ mov @r[0],0(%rbp)
+ xor @r[2],$hi
+ mov @r[3],24(%rbp)
+ xor @r[3],$lo
+ xor @r[3],$hi
+ xor $hi,$lo
+ mov $hi,16(%rbp)
+ mov $lo,8(%rbp)
+
+ mov 8*10(%rsp),%r14
+ mov 8*11(%rsp),%r13
+ mov 8*12(%rsp),%r12
+ mov 8*13(%rsp),%rbp
+ mov 8*14(%rsp),%rbx
+___
+$code.=<<___ if ($win64);
+ mov 8*15(%rsp),%rdi
+ mov 8*16(%rsp),%rsi
+___
+$code.=<<___;
+ lea 8*17(%rsp),%rsp
+ ret
+.Lend_mul_2x2:
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.asciz "GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody_mul_2x2(%rip),%r10
+ cmp %r10,%rbx # context->Rip<"prologue" label
+ jb .Lin_prologue
+
+ mov 8*10(%rax),%r14 # mimic epilogue
+ mov 8*11(%rax),%r13
+ mov 8*12(%rax),%r12
+ mov 8*13(%rax),%rbp
+ mov 8*14(%rax),%rbx
+ mov 8*15(%rax),%rdi
+ mov 8*16(%rax),%rsi
+
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+
+.Lin_prologue:
+ lea 8*17(%rax),%rax
+ mov %rax,152($context) # restore context->Rsp
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva _mul_1x1
+ .rva .Lend_mul_1x1
+ .rva .LSEH_info_1x1
+
+ .rva .Lvanilla_mul_2x2
+ .rva .Lend_mul_2x2
+ .rva .LSEH_info_2x2
+.section .xdata
+.align 8
+.LSEH_info_1x1:
+ .byte 0x01,0x07,0x02,0x00
+ .byte 0x07,0x01,0x11,0x00 # sub rsp,128+8
+.LSEH_info_2x2:
+ .byte 9,0,0,0
+ .rva se_handler
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/x86_64-mont.pl b/openssl/crypto/bn/asm/x86_64-mont.pl
index 3b7a6f243..5d79b35e1 100644
--- a/openssl/crypto/bn/asm/x86_64-mont.pl
+++ b/openssl/crypto/bn/asm/x86_64-mont.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -15,6 +15,20 @@
# respectful 50%. It remains to be seen if loop unrolling and
# dedicated squaring routine can provide further improvement...
+# July 2011.
+#
+# Add a dedicated squaring procedure. Performance improvement varies
+# from platform to platform, but on average it's ~5%/15%/25%/33%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
+# August 2011.
+#
+# Unroll and modulo-schedule inner loops in such manner that they
+# are "fallen through" for input lengths of 8, which is critical for
+# 1024-bit RSA *sign*. Average performance improvement in comparison
+# to *initial* version of this module from 2005 is ~0%/30%/40%/45%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
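The squaring shortcut exploits the symmetry of the cross products: everything but a[i]*a[i] is accumulated once, the partial result is doubled by a one-bit left shift, and the diagonal squares are then folded in, which is how the bn_sqr4x_mont code further down describes its own steps a) and b). A plain-C sketch of the idea, using __int128 for the 64x64 products; it illustrates the algorithm, not the unrolled assembly.

    #include <stdint.h>

    /* t[] receives the 2*n-word square of the n-word number a[]. */
    static void bn_sqr_sketch(uint64_t *t, const uint64_t *a, int n)
    {
        unsigned __int128 acc, s;
        uint64_t carry;
        int i, j;

        for (i = 0; i < 2 * n; i++) t[i] = 0;

        /* a) multiply-n-add everything but a[i]*a[i] */
        for (i = 0; i < n; i++) {
            carry = 0;
            for (j = i + 1; j < n; j++) {
                acc = (unsigned __int128)a[i] * a[j] + t[i + j] + carry;
                t[i + j] = (uint64_t)acc;
                carry = (uint64_t)(acc >> 64);
            }
            t[i + n] = carry;              /* t[i+n] is still zero here */
        }

        /* b) shift the result of a) one bit to the left ... */
        for (i = 2 * n - 1; i > 0; i--)
            t[i] = (t[i] << 1) | (t[i - 1] >> 63);
        t[0] <<= 1;

        /* ... and accumulate the a[i]*a[i] products */
        carry = 0;
        for (i = 0; i < n; i++) {
            acc = (unsigned __int128)a[i] * a[i];
            s = (unsigned __int128)t[2 * i] + (uint64_t)acc + carry;
            t[2 * i] = (uint64_t)s;
            s = (unsigned __int128)t[2 * i + 1] + (uint64_t)(acc >> 64) + (uint64_t)(s >> 64);
            t[2 * i + 1] = (uint64_t)s;
            carry = (uint64_t)(s >> 64);
        }
    }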
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
@@ -37,7 +51,6 @@ $n0="%r8"; # const BN_ULONG *n0,
$num="%r9"; # int num);
$lo0="%r10";
$hi0="%r11";
-$bp="%r12"; # reassign $bp
$hi1="%r13";
$i="%r14";
$j="%r15";
@@ -51,6 +64,16 @@ $code=<<___;
.type bn_mul_mont,\@function,6
.align 16
bn_mul_mont:
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+ cmp $ap,$bp
+ jne .Lmul4x_enter
+ jmp .Lsqr4x_enter
+
+.align 16
+.Lmul_enter:
push %rbx
push %rbp
push %r12
@@ -66,48 +89,66 @@ bn_mul_mont:
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
-.Lprologue:
- mov %rdx,$bp # $bp reassigned, remember?
-
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
xor $i,$i # i=0
xor $j,$j # j=0
- mov ($bp),$m0 # m0=bp[0]
- mov ($ap),%rax
+ mov $n0,$m1
mulq $m0 # ap[0]*bp[0]
mov %rax,$lo0
- mov %rdx,$hi0
+ mov ($np),%rax
- imulq $n0,%rax # "tp[0]"*n0
- mov %rax,$m1
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
- mulq ($np) # np[0]*m1
- add $lo0,%rax # discarded
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
adc \$0,%rdx
mov %rdx,$hi1
lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
.L1st:
+ add %rax,$hi1
mov ($ap,$j,8),%rax
- mulq $m0 # ap[j]*bp[0]
- add $hi0,%rax
adc \$0,%rdx
- mov %rax,$lo0
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
mov ($np,$j,8),%rax
- mov %rdx,$hi0
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
mulq $m1 # np[j]*m1
- add $hi1,%rax
- lea 1($j),$j # j++
+ cmp $num,$j
+ jne .L1st
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
adc \$0,%rdx
- add $lo0,%rax # np[j]*m1+ap[j]*bp[0]
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
- mov %rax,-16(%rsp,$j,8) # tp[j-1]
- cmp $num,$j
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
- jl .L1st
+ mov $lo0,$hi0
xor %rdx,%rdx
add $hi0,$hi1
@@ -116,50 +157,64 @@ bn_mul_mont:
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
lea 1($i),$i # i++
-.align 4
+ jmp .Louter
+.align 16
.Louter:
- xor $j,$j # j=0
-
mov ($bp,$i,8),$m0 # m0=bp[i]
- mov ($ap),%rax # ap[0]
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
mulq $m0 # ap[0]*bp[i]
- add (%rsp),%rax # ap[0]*bp[i]+tp[0]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
adc \$0,%rdx
- mov %rax,$lo0
- mov %rdx,$hi0
- imulq $n0,%rax # tp[0]*n0
- mov %rax,$m1
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
- mulq ($np,$j,8) # np[0]*m1
- add $lo0,%rax # discarded
- mov 8(%rsp),$lo0 # tp[1]
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
mov %rdx,$hi1
lea 1($j),$j # j++
-.align 4
+ jmp .Linner_enter
+
+.align 16
.Linner:
+ add %rax,$hi1
mov ($ap,$j,8),%rax
- mulq $m0 # ap[j]*bp[i]
- add $hi0,%rax
adc \$0,%rdx
- add %rax,$lo0 # ap[j]*bp[i]+tp[j]
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
mov ($np,$j,8),%rax
adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
mulq $m1 # np[j]*m1
- add $hi1,%rax
- lea 1($j),$j # j++
- adc \$0,%rdx
- add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j]
+ cmp $num,$j
+ jne .Linner
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov (%rsp,$j,8),$lo0
- cmp $num,$j
- mov %rax,-16(%rsp,$j,8) # tp[j-1]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
- jl .Linner
xor %rdx,%rdx
add $hi0,$hi1
@@ -173,35 +228,449 @@ bn_mul_mont:
cmp $num,$i
jl .Louter
- lea (%rsp),$ap # borrow ap for tp
- lea -1($num),$j # j=num-1
-
- mov ($ap),%rax # tp[0]
xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
jmp .Lsub
.align 16
.Lsub: sbb ($np,$i,8),%rax
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
- dec $j # doesn't affect CF!
mov 8($ap,$i,8),%rax # tp[i+1]
lea 1($i),$i # i++
- jge .Lsub
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub
sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
and %rax,$ap
not %rax
mov $rp,$np
and %rax,$np
- lea -1($num),$j
+ mov $num,$j # j=num
or $np,$ap # ap=borrow?tp:rp
.align 16
.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont,.-bn_mul_mont
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont,\@function,6
+.align 16
+bn_mul4x_mont:
+.Lmul4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov ${num}d,${num}d
+ lea 4($num),%r10
+ mov %rsp,%r11
+ neg %r10
+ lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
mov ($ap,$j,8),%rax
- mov %rax,($rp,$j,8) # rp[i]=tp[i]
- mov $i,(%rsp,$j,8) # zap temporary vector
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ mov ($bp,$i,8),$m0 # m0=bp[i]
+ xor $j,$j # j=0
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov $N[1],(%rsp) # tp[j-1]
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jl .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
dec $j
- jge .Lcopy
+ jnz .Lcopy4x
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
mov 8(%rsp,$num,8),%rsi # restore %rsp
mov \$1,%rax
mov (%rsi),%r15
@@ -211,9 +680,823 @@ bn_mul_mont:
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
-.Lepilogue:
+.Lmul4x_epilogue:
ret
-.size bn_mul_mont,.-bn_mul_mont
+.size bn_mul4x_mont,.-bn_mul4x_mont
+___
+}}}
+ {{{
+######################################################################
+# void bn_sqr4x_mont(
+my $rptr="%rdi"; # const BN_ULONG *rptr,
+my $aptr="%rsi"; # const BN_ULONG *aptr,
+my $bptr="%rdx"; # not used
+my $nptr="%rcx"; # const BN_ULONG *nptr,
+my $n0 ="%r8"; # const BN_ULONG *n0);
+my $num ="%r9"; # int num, has to be divisible by 4 and
+ # not less than 8
+
+my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
+my @A0=("%r10","%r11");
+my @A1=("%r12","%r13");
+my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
+
+$code.=<<___;
+.type bn_sqr4x_mont,\@function,6
+.align 16
+bn_sqr4x_mont:
+.Lsqr4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ shl \$3,${num}d # convert $num to bytes
+ xor %r10,%r10
+ mov %rsp,%r11 # put aside %rsp
+ sub $num,%r10 # -$num
+ mov ($n0),$n0 # *n0
+ lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
+ and \$-1024,%rsp # minimize TLB usage
+ ##############################################################
+ # Stack layout
+ #
+ # +0 saved $num, used in reduction section
+ # +8 &t[2*$num], used in reduction section
+ # +32 saved $rptr
+ # +40 saved $nptr
+ # +48 saved *n0
+ # +56 saved %rsp
+ # +64 t[2*$num]
+ #
+ mov $rptr,32(%rsp) # save $rptr
+ mov $nptr,40(%rsp)
+ mov $n0, 48(%rsp)
+ mov %r11, 56(%rsp) # save original %rsp
+.Lsqr4x_body:
+ ##############################################################
+ # Squaring part:
+ #
+ # a) multiply-n-add everything but a[i]*a[i];
+ # b) shift result of a) by 1 to the left and accumulate
+ # a[i]*a[i] products;
+ #
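For readers following the unrolled perlasm below, a minimal C sketch of the two-pass scheme described in the comment above may help. It is not the OpenSSL implementation, just the same idea with assumed 64-bit limbs and GCC/Clang's unsigned __int128 extension for double-wide products; sqr_sketch and its parameters are illustrative names.

    #include <stdint.h>

    typedef unsigned __int128 u128;         /* compiler extension, assumed */

    /* t[] holds 2*n limbs and is assumed zero-initialized by the caller. */
    static void sqr_sketch(uint64_t *t, const uint64_t *a, int n)
    {
        /* a) multiply-n-add all cross products a[i]*a[j], i < j */
        for (int i = 0; i < n; i++) {
            uint64_t carry = 0;
            for (int j = i + 1; j < n; j++) {
                u128 acc = (u128)a[i] * a[j] + t[i + j] + carry;
                t[i + j] = (uint64_t)acc;
                carry    = (uint64_t)(acc >> 64);
            }
            t[i + n] = carry;               /* fresh limb, plain store suffices */
        }

        /* b) shift left by one (double) and fold in the a[i]*a[i] diagonal */
        uint64_t carry = 0;
        for (int i = 0; i < n; i++) {
            u128 sq = (u128)a[i] * a[i];
            u128 lo = (u128)t[2 * i] * 2 + (uint64_t)sq + carry;
            u128 hi = (u128)t[2 * i + 1] * 2 + (uint64_t)(sq >> 64)
                      + (uint64_t)(lo >> 64);
            t[2 * i]     = (uint64_t)lo;
            t[2 * i + 1] = (uint64_t)hi;
            carry        = (uint64_t)(hi >> 64);
        }
    }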
+ lea 32(%r10),$i # $i=-($num-32)
+ lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
+
+ mov $num,$j # $j=$num
+
+ # comments apply to $num==8 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mul $a0 # a[1]*a[0]
+ mov %rax,$A0[0] # a[1]*a[0]
+ mov $ai,%rax # a[2]
+ mov %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ xor $A0[0],$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ lea -16($i),$j # j=-16
+
+
+ mov 8($aptr,$j),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ mov %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ mov %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 16($j),$j
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[3]
+ jmp .Lsqr4x_1st
+
+.align 16
+.Lsqr4x_1st:
+ mov ($aptr,$j),$ai # a[4]
+ xor $A1[0],$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],($tptr,$j) # t[4]
+
+
+ mov 8($aptr,$j),$ai # a[5]
+ xor $A1[1],$A1[1]
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],8($tptr,$j) # t[5]
+
+ mov 16($aptr,$j),$ai # a[6]
+ xor $A1[0],$A1[0]
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1] # a[5]*a[3]+t[6]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[6]*a[2]
+ add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],16($tptr,$j) # t[6]
+
+
+ mov 24($aptr,$j),$ai # a[7]
+ xor $A1[1],$A1[1]
+ mul $a1 # a[6]*a[5]
+ add %rax,$A1[0] # a[6]*a[5]+t[7]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 32($j),$j
+ adc \$0,$A0[1]
+ mul $a0 # a[7]*a[4]
+ add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[7]
+
+ cmp \$0,$j
+ jne .Lsqr4x_1st
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[7]*a[5]
+ add %rax,$A1[1]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[8]
+ lea 16($i),$i
+ mov $A1[0],8($tptr) # t[9]
+ jmp .Lsqr4x_outer
+
+.align 16
+.Lsqr4x_outer: # comments apply to $num==6 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mov -24($tptr,$i),$A0[0] # t[1]
+ xor $A0[1],$A0[1]
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1]
+ mov $ai,%rax # a[2]
+ adc %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ xor $A0[0],$A0[0]
+ add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
+ adc \$0,$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ lea -16($i),$j # j=-16
+ xor $A1[0],$A1[0]
+
+
+ mov 8($aptr,$j),$ai # a[3]
+ xor $A1[1],$A1[1]
+ add 8($tptr,$j),$A1[0]
+ adc \$0,$A1[1]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],8($tptr,$j) # t[3]
+
+ lea 16($j),$j
+ jmp .Lsqr4x_inner
+
+.align 16
+.Lsqr4x_inner:
+ mov ($aptr,$j),$ai # a[4]
+ xor $A1[0],$A1[0]
+ add ($tptr,$j),$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],($tptr,$j) # t[4]
+
+ mov 8($aptr,$j),$ai # a[5]
+ xor $A1[1],$A1[1]
+ add 8($tptr,$j),$A1[0]
+ adc \$0,$A1[1]
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 16($j),$j # j++
+ adc \$0,$A0[1]
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
+
+ cmp \$0,$j
+ jne .Lsqr4x_inner
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
+ mov $A1[0],8($tptr) # t[7], "preloaded t[3]" below
+
+ add \$16,$i
+ jnz .Lsqr4x_outer
+
+ # comments apply to $num==4 case
+ mov -32($aptr),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr),$ai # a[2]
+ mov %rax,$a1
+
+ xor $A0[1],$A0[1]
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
+ mov $ai,%rax # a[2]
+ adc %rdx,$A0[1]
+ mov $A0[0],-24($tptr) # t[1]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
+ adc \$0,$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr) # t[2]
+
+ mov -8($aptr),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
+ mov $ai,%rax
+ adc \$0,%rdx
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ mov %rdx,$A1[1]
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr) # t[3]
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1]
+ mov -16($aptr),%rax # a[2]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[4]
+ mov $A1[0],8($tptr) # t[5]
+
+ mul $ai # a[2]*a[3]
+___
+{
+my ($shift,$carry)=($a0,$a1);
+my @S=(@A1,$ai,$n0);
+$code.=<<___;
+ add \$16,$i
+ xor $shift,$shift
+ sub $num,$i # $i=16-$num
+ xor $carry,$carry
+
+ add $A1[0],%rax # t[5]
+ adc \$0,%rdx
+ mov %rax,8($tptr) # t[5]
+ mov %rdx,16($tptr) # t[6]
+ mov $carry,24($tptr) # t[7]
+
+ mov -16($aptr,$i),%rax # a[0]
+ lea 64(%rsp,$num,2),$tptr
+ xor $A0[0],$A0[0] # t[0]
+ mov -24($tptr,$i,2),$A0[1] # t[1]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr,$i,2)
+ adc %rdx,$S[3]
+ lea 16($i),$i
+ mov $S[3],-40($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ jmp .Lsqr4x_shift_n_add
+
+.align 16
+.Lsqr4x_shift_n_add:
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr,$i,2)
+ adc %rdx,$S[3]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ mov $S[3],-8($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov 8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],0($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],8($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 16($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],16($tptr,$i,2)
+ adc %rdx,$S[3]
+ mov $S[3],24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ add \$32,$i
+ jnz .Lsqr4x_shift_n_add
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
+ mov $S[1],-24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ adc %rax,$S[2]
+ adc %rdx,$S[3]
+ mov $S[2],-16($tptr)
+ mov $S[3],-8($tptr)
+___
+}
+##############################################################
+# Montgomery reduction part, "word-by-word" algorithm.
+#
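As a reference for the heavily scheduled assembly that follows, here is a hedged C sketch of the word-by-word Montgomery reduction. It is not the OpenSSL code: mont_reduce_sketch and its parameters are illustrative, n0 is assumed to be -n[0]^-1 mod 2^64 as stored in BN_MONT_CTX, and unsigned __int128 is a compiler extension.

    #include <stdint.h>

    /* t[] holds 2*num limbs; on return t[num..2*num-1] plus the returned top
     * bit hold the reduced value, still subject to one conditional
     * subtraction of n (see the post-condition code further down). */
    static uint64_t mont_reduce_sketch(uint64_t *t, const uint64_t *n,
                                       int num, uint64_t n0)
    {
        uint64_t topbit = 0;
        for (int i = 0; i < num; i++) {
            uint64_t m = t[i] * n0;        /* makes t[i] + m*n[0] == 0 mod 2^64 */
            uint64_t carry = 0;
            for (int j = 0; j < num; j++) {
                unsigned __int128 acc =
                    (unsigned __int128)m * n[j] + t[i + j] + carry;
                t[i + j] = (uint64_t)acc;
                carry    = (uint64_t)(acc >> 64);
            }
            /* fold the row carry and the delayed overflow bit into t[i+num] */
            unsigned __int128 acc = (unsigned __int128)t[i + num] + carry + topbit;
            t[i + num] = (uint64_t)acc;
            topbit     = (uint64_t)(acc >> 64);
        }
        return topbit;
    }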
+{
+my ($topbit,$nptr)=("%rbp",$aptr);
+my ($m0,$m1)=($a0,$a1);
+my @Ni=("%rbx","%r9");
+$code.=<<___;
+ mov 40(%rsp),$nptr # restore $nptr
+ mov 48(%rsp),$n0 # restore *n0
+ xor $j,$j
+ mov $num,0(%rsp) # save $num
+ sub $num,$j # $j=-$num
+ mov 64(%rsp),$A0[0] # t[0] # modsched #
+ mov $n0,$m0 # # modsched #
+ lea 64(%rsp,$num,2),%rax # end of t[] buffer
+ lea 64(%rsp,$num),$tptr # end of t[] window
+ mov %rax,8(%rsp) # save end of t[] buffer
+ lea ($nptr,$num),$nptr # end of n[] buffer
+ xor $topbit,$topbit # $topbit=0
+
+ mov 0($nptr,$j),%rax # n[0] # modsched #
+ mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
+ imulq $A0[0],$m0 # m0=t[0]*n0 # modsched #
+ mov %rax,$Ni[0] # # modsched #
+ jmp .Lsqr4x_mont_outer
+
+.align 16
+.Lsqr4x_mont_outer:
+ xor $A0[1],$A0[1]
+ mul $m0 # n[0]*m0
+ add %rax,$A0[0] # n[0]*m0+t[0]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+ mov $n0,$m1
+
+ xor $A0[0],$A0[0]
+ add 8($tptr,$j),$A0[1]
+ adc \$0,$A0[0]
+ mul $m0 # n[1]*m0
+ add %rax,$A0[1] # n[1]*m0+t[1]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+
+ imulq $A0[1],$m1
+
+ mov 16($nptr,$j),$Ni[0] # n[2]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[0]*m1
+ add %rax,$A1[0] # n[0]*m1+"t[1]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],8($tptr,$j) # "t[1]"
+
+ xor $A0[1],$A0[1]
+ add 16($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[2]*m0
+ add %rax,$A0[0] # n[2]*m0+t[2]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 24($nptr,$j),$Ni[1] # n[3]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[1]*m1
+ add %rax,$A1[1] # n[1]*m1+"t[2]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],16($tptr,$j) # "t[2]"
+
+ xor $A0[0],$A0[0]
+ add 24($tptr,$j),$A0[1]
+ lea 32($j),$j
+ adc \$0,$A0[0]
+ mul $m0 # n[3]*m0
+ add %rax,$A0[1] # n[3]*m0+t[3]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+ jmp .Lsqr4x_mont_inner
+
+.align 16
+.Lsqr4x_mont_inner:
+ mov ($nptr,$j),$Ni[0] # n[4]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[2]*m1
+ add %rax,$A1[0] # n[2]*m1+"t[3]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],-8($tptr,$j) # "t[3]"
+
+ xor $A0[1],$A0[1]
+ add ($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[4]*m0
+ add %rax,$A0[0] # n[4]*m0+t[4]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 8($nptr,$j),$Ni[1] # n[5]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[3]*m1
+ add %rax,$A1[1] # n[3]*m1+"t[4]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],($tptr,$j) # "t[4]"
+
+ xor $A0[0],$A0[0]
+ add 8($tptr,$j),$A0[1]
+ adc \$0,$A0[0]
+ mul $m0 # n[5]*m0
+ add %rax,$A0[1] # n[5]*m0+t[5]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+
+
+ mov 16($nptr,$j),$Ni[0] # n[6]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[4]*m1
+ add %rax,$A1[0] # n[4]*m1+"t[5]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],8($tptr,$j) # "t[5]"
+
+ xor $A0[1],$A0[1]
+ add 16($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[6]*m0
+ add %rax,$A0[0] # n[6]*m0+t[6]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 24($nptr,$j),$Ni[1] # n[7]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[5]*m1
+ add %rax,$A1[1] # n[5]*m1+"t[6]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],16($tptr,$j) # "t[6]"
+
+ xor $A0[0],$A0[0]
+ add 24($tptr,$j),$A0[1]
+ lea 32($j),$j
+ adc \$0,$A0[0]
+ mul $m0 # n[7]*m0
+ add %rax,$A0[1] # n[7]*m0+t[7]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+ cmp \$0,$j
+ jne .Lsqr4x_mont_inner
+
+ sub 0(%rsp),$j # $j=-$num # modsched #
+ mov $n0,$m0 # # modsched #
+
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[6]*m1
+ add %rax,$A1[0] # n[6]*m1+"t[7]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],-8($tptr) # "t[7]"
+
+ xor $A0[1],$A0[1]
+ add ($tptr),$A0[0] # +t[8]
+ adc \$0,$A0[1]
+ mov 0($nptr,$j),$Ni[0] # n[0] # modsched #
+ add $topbit,$A0[0]
+ adc \$0,$A0[1]
+
+ imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched #
+ xor $A1[0],$A1[0]
+ mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
+ add $A0[0],$A1[1]
+ mov 16($tptr,$j),$A0[0] # t[0] # modsched #
+ adc \$0,$A1[0]
+ mul $m1 # n[7]*m1
+ add %rax,$A1[1] # n[7]*m1+"t[8]"
+ mov $Ni[0],%rax # # modsched #
+ adc %rdx,$A1[0]
+ mov $A1[1],($tptr) # "t[8]"
+
+ xor $topbit,$topbit
+ add 8($tptr),$A1[0] # +t[9]
+ adc $topbit,$topbit
+ add $A0[1],$A1[0]
+ lea 16($tptr),$tptr # "t[$num]>>128"
+ adc \$0,$topbit
+ mov $A1[0],-8($tptr) # "t[9]"
+ cmp 8(%rsp),$tptr # are we done?
+ jb .Lsqr4x_mont_outer
+
+ mov 0(%rsp),$num # restore $num
+ mov $topbit,($tptr) # save $topbit
+___
+}
+##############################################################
+# Post-condition, 4x unrolled copy from bn_mul_mont
+#
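The sbb/and/not/or sequence used below to pick the copy source without a secret-dependent branch can be summarized by this small C sketch (illustrative names; borrow_mask is assumed to be either 0 or all-ones, which is exactly what the "sbb $0" trick produces).

    #include <stdint.h>
    #include <stddef.h>

    /* Copies num limbs into rp from either tp (borrow_mask == ~0, i.e. the
     * subtraction of n borrowed, so keep the unreduced value) or rp itself
     * (borrow_mask == 0). rp[i] is rewritten either way, so the store
     * pattern does not depend on the secret condition; the real code also
     * zeroes the temporary vector while copying. */
    static void copy_select_sketch(uint64_t *rp, const uint64_t *tp,
                                   uint64_t borrow_mask, size_t num)
    {
        const uint64_t *src = (const uint64_t *)
            (((uintptr_t)tp &  (uintptr_t)borrow_mask) |
             ((uintptr_t)rp & ~(uintptr_t)borrow_mask));
        for (size_t i = 0; i < num; i++)
            rp[i] = src[i];
    }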
+{
+my ($tptr,$nptr)=("%rbx",$aptr);
+my @ri=("%rax","%rdx","%r10","%r11");
+$code.=<<___;
+ mov 64(%rsp,$num),@ri[0] # tp[0]
+ lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
+ mov 40(%rsp),$nptr # restore $nptr
+ shr \$5,$num # num/4
+ mov 8($tptr),@ri[1] # t[1]
+ xor $i,$i # i=0 and clear CF!
+
+ mov 32(%rsp),$rptr # restore $rptr
+ sub 0($nptr),@ri[0]
+ mov 16($tptr),@ri[2] # t[2]
+ mov 24($tptr),@ri[3] # t[3]
+ sbb 8($nptr),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsqr4x_sub
+.align 16
+.Lsqr4x_sub:
+ mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($nptr,$i,8),@ri[2]
+ mov 32($tptr,$i,8),@ri[0] # tp[i+1]
+ mov 40($tptr,$i,8),@ri[1]
+ sbb 24($nptr,$i,8),@ri[3]
+ mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($nptr,$i,8),@ri[0]
+ mov 48($tptr,$i,8),@ri[2]
+ mov 56($tptr,$i,8),@ri[3]
+ sbb 40($nptr,$i,8),@ri[1]
+ lea 4($i),$i # i++
+ dec $j # doesn't affect CF!
+ jnz .Lsqr4x_sub
+
+ mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($tptr,$i,8),@ri[0] # load overflow bit
+ sbb 16($nptr,$i,8),@ri[2]
+ mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($nptr,$i,8),@ri[3]
+ mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$tptr
+ not @ri[0]
+ mov $rptr,$nptr
+ and @ri[0],$nptr
+ lea -1($num),$j
+ or $nptr,$tptr # tp=borrow?tp:rp
+
+ pxor %xmm0,%xmm0
+ lea 64(%rsp,$num,8),$nptr
+ movdqu ($tptr),%xmm1
+ lea ($nptr,$num,8),$nptr
+ movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
+ movdqa %xmm0,($nptr) # zap upper half of temporary vector
+ movdqu %xmm1,($rptr)
+ jmp .Lsqr4x_copy
+.align 16
+.Lsqr4x_copy: # copy or in-place refresh
+ movdqu 16($tptr,$i),%xmm2
+ movdqu 32($tptr,$i),%xmm1
+ movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
+ movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
+ movdqu %xmm2,16($rptr,$i)
+ movdqu %xmm1,32($rptr,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lsqr4x_copy
+
+ movdqu 16($tptr,$i),%xmm2
+ movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
+ movdqu %xmm2,16($rptr,$i)
+___
+}
+$code.=<<___;
+ mov 56(%rsp),%rsi # restore %rsp
+ mov \$1,%rax
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lsqr4x_epilogue:
+ ret
+.size bn_sqr4x_mont,.-bn_sqr4x_mont
+___
+}}}
+$code.=<<___;
.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align 16
___
@@ -228,9 +1511,9 @@ $disp="%r9";
$code.=<<___;
.extern __imp_RtlVirtualUnwind
-.type se_handler,\@abi-omnipotent
+.type mul_handler,\@abi-omnipotent
.align 16
-se_handler:
+mul_handler:
push %rsi
push %rdi
push %rbx
@@ -245,15 +1528,20 @@ se_handler:
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
- lea .Lprologue(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lprologue
- jb .Lin_prologue
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
mov 152($context),%rax # pull context->Rsp
- lea .Lepilogue(%rip),%r10
- cmp %r10,%rbx # context->Rip>=.Lepilogue
- jae .Lin_prologue
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
mov 192($context),%r10 # pull $num
mov 8(%rax,%r10,8),%rax # pull saved stack pointer
@@ -272,7 +1560,53 @@ se_handler:
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
-.Lin_prologue:
+ jmp .Lcommon_seh_tail
+.size mul_handler,.-mul_handler
+
+.type sqr_handler,\@abi-omnipotent
+.align 16
+sqr_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lsqr4x_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lsqr_body
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lsqr4x_epilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lsqr_epilogue
+ jae .Lcommon_seh_tail
+
+ mov 56(%rax),%rax # pull saved stack pointer
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
@@ -310,7 +1644,7 @@ se_handler:
pop %rdi
pop %rsi
ret
-.size se_handler,.-se_handler
+.size sqr_handler,.-sqr_handler
.section .pdata
.align 4
@@ -318,11 +1652,27 @@ se_handler:
.rva .LSEH_end_bn_mul_mont
.rva .LSEH_info_bn_mul_mont
+ .rva .LSEH_begin_bn_mul4x_mont
+ .rva .LSEH_end_bn_mul4x_mont
+ .rva .LSEH_info_bn_mul4x_mont
+
+ .rva .LSEH_begin_bn_sqr4x_mont
+ .rva .LSEH_end_bn_sqr4x_mont
+ .rva .LSEH_info_bn_sqr4x_mont
+
.section .xdata
.align 8
.LSEH_info_bn_mul_mont:
.byte 9,0,0,0
- .rva se_handler
+ .rva mul_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+.LSEH_info_bn_mul4x_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.LSEH_info_bn_sqr4x_mont:
+ .byte 9,0,0,0
+ .rva sqr_handler
___
}
diff --git a/openssl/crypto/bn/asm/x86_64-mont5.pl b/openssl/crypto/bn/asm/x86_64-mont5.pl
new file mode 100644
index 000000000..057cda28a
--- /dev/null
+++ b/openssl/crypto/bn/asm/x86_64-mont5.pl
@@ -0,0 +1,1070 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# August 2011.
+#
+# Companion to x86_64-mont.pl that optimizes cache-timing attack
+# countermeasures. The subroutines are produced by replacing bp[i]
+# references in their x86_64-mont.pl counterparts with cache-neutral
+# references to the powers table computed in BN_mod_exp_mont_consttime.
+# In addition, a subroutine that scatters elements of the powers table
+# is implemented, so that scattering/gathering can be tuned without
+# modifying bn_exp.c.
+
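A conceptual C model of the cache-neutral gather may be useful before diving into the perlasm. This is an assumption-laden sketch, not the real API: it supposes word j of all 2^5 powers is stored interleaved as tbl[32*j + idx], that a cache line holds 8 qwords, and it uses a plain comparison where the assembly uses the .Lmagic_masks constants.

    #include <stdint.h>

    /* Returns entry 'idx' of one 32-entry row while loading one qword from
     * each of the row's four cache lines, so the set of cache lines touched
     * does not depend on the secret index. */
    static uint64_t gather_word_sketch(const uint64_t row[32], unsigned idx)
    {
        unsigned slot = idx & 7;            /* position within its cache line */
        unsigned line = idx >> 3;           /* which of the 4 cache lines     */
        uint64_t r = 0;

        for (unsigned l = 0; l < 4; l++) {
            uint64_t mask = (uint64_t)0 - (uint64_t)(l == line);  /* 0 or ~0 */
            r |= row[8 * l + slot] & mask;
        }
        return r;
    }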
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+# int bn_mul_mont_gather5(
+$rp="%rdi"; # BN_ULONG *rp,
+$ap="%rsi"; # const BN_ULONG *ap,
+$bp="%rdx"; # const BN_ULONG *bp,
+$np="%rcx"; # const BN_ULONG *np,
+$n0="%r8"; # const BN_ULONG *n0,
+$num="%r9"; # int num,
+ # int idx); # 0 to 2^5-1, "index" in $bp holding
+ # pre-computed powers of a', interlaced
+ # in such manner that b[0] is $bp[idx],
+ # b[1] is [2^5+idx], etc.
+$lo0="%r10";
+$hi0="%r11";
+$hi1="%r13";
+$i="%r14";
+$j="%r15";
+$m0="%rbx";
+$m1="%rbp";
+
+$code=<<___;
+.text
+
+.globl bn_mul_mont_gather5
+.type bn_mul_mont_gather5,\@function,6
+.align 64
+bn_mul_mont_gather5:
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+ jmp .Lmul4x_enter
+
+.align 16
+.Lmul_enter:
+ mov ${num}d,${num}d
+ mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0x28(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+.Lmul_alloca:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ lea 2($num),%r11
+ neg %r11
+ lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ mov %r10,%r11
+ shr \$`log($N/8)/log(2)`,%r10
+ and \$`$N/8-1`,%r11
+ not %r10
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
+ lea 96($bp,%r11,8),$bp # pointer within 1st cache line
+ movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,%r10,8),%xmm5 # cache line contains element
+ movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ movq %xmm0,$m0 # m0=bp[0]
+
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$lo0
+ mov ($np),%rax
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .L1st
+
+ movq %xmm0,$m0 # bp[1]
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+ mov $lo0,$hi0
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ jmp .Louter
+.align 16
+.Louter:
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
+ mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .Linner
+
+ movq %xmm0,$m0 # bp[i+1]
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # pull upmost overflow bit
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ cmp $num,$i
+ jl .Louter
+
+ xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
+ jmp .Lsub
+.align 16
+.Lsub: sbb ($np,$i,8),%rax
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 8($ap,$i,8),%rax # tp[i+1]
+ lea 1($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub
+
+ sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
+ and %rax,$ap
+ not %rax
+ mov $rp,$np
+ and %rax,$np
+ mov $num,$j # j=num
+ or $np,$ap # ap=borrow?tp:rp
+.align 16
+.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+___
+$code.=<<___ if ($win64);
+ movaps (%rsi),%xmm6
+ movaps 0x10(%rsi),%xmm7
+ lea 0x28(%rsi),%rsi
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont_gather5,\@function,6
+.align 16
+bn_mul4x_mont_gather5:
+.Lmul4x_enter:
+ mov ${num}d,${num}d
+ mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0x28(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+.Lmul4x_alloca:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ lea 4($num),%r11
+ neg %r11
+ lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+4))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ mov %r10,%r11
+ shr \$`log($N/8)/log(2)`,%r10
+ and \$`$N/8-1`,%r11
+ not %r10
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
+ lea 96($bp,%r11,8),$bp # pointer within 1st cache line
+ movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,%r10,8),%xmm5 # cache line contains element
+ movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ movq %xmm0,$m0 # m0=bp[0]
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ movq %xmm0,$m0 # bp[1]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ xor $j,$j # j=0
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-40(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ movq %xmm0,$m0 # bp[i+1]
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jl .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lcopy4x
+
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+___
+$code.=<<___ if ($win64);
+ movaps (%rsi),%xmm6
+ movaps 0x10(%rsi),%xmm7
+ lea 0x28(%rsi),%rsi
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+___
+}}}
+
+{
+my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+my $out=$inp;
+my $STRIDE=2**5*8;
+my $N=$STRIDE/4;
+
+$code.=<<___;
+.globl bn_scatter5
+.type bn_scatter5,\@abi-omnipotent
+.align 16
+bn_scatter5:
+ cmp \$0, $num
+ jz .Lscatter_epilogue
+ lea ($tbl,$idx,8),$tbl
+.Lscatter:
+ mov ($inp),%rax
+ lea 8($inp),$inp
+ mov %rax,($tbl)
+ lea 32*8($tbl),$tbl
+ sub \$1,$num
+ jnz .Lscatter
+.Lscatter_epilogue:
+ ret
+.size bn_scatter5,.-bn_scatter5
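For reference, the routine above corresponds to roughly the following C (a sketch with assumed plain uint64_t types; the real entry point takes BN_ULONG pointers).

    #include <stdint.h>
    #include <stddef.h>

    /* Word j of inp[] is stored at tbl[idx + 32*j]: the 2^5 powers end up
     * interleaved, one qword of each power per 256-byte "row". */
    static void scatter5_sketch(const uint64_t *inp, size_t num,
                                uint64_t *tbl, size_t idx)
    {
        tbl += idx;
        while (num--) {
            *tbl = *inp++;
            tbl += 32;                      /* next row, same power index */
        }
    }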
+
+.globl bn_gather5
+.type bn_gather5,\@abi-omnipotent
+.align 16
+bn_gather5:
+___
+$code.=<<___ if ($win64);
+.LSEH_begin_bn_gather5:
+ # I can't trust assembler to use specific encoding:-(
+ .byte 0x48,0x83,0xec,0x28 #sub \$0x28,%rsp
+ .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
+ .byte 0x0f,0x29,0x7c,0x24,0x10 #movdqa %xmm7,0x10(%rsp)
+___
+$code.=<<___;
+ mov $idx,%r11
+ shr \$`log($N/8)/log(2)`,$idx
+ and \$`$N/8-1`,%r11
+ not $idx
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,$idx # 5 is "window size"
+ lea 96($tbl,%r11,8),$tbl # pointer within 1st cache line
+ movq 0(%rax,$idx,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,$idx,8),%xmm5 # cache line contains element
+ movq 16(%rax,$idx,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,$idx,8),%xmm7
+ jmp .Lgather
+.align 16
+.Lgather:
+ movq `0*$STRIDE/4-96`($tbl),%xmm0
+ movq `1*$STRIDE/4-96`($tbl),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($tbl),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($tbl),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($tbl),$tbl
+ por %xmm3,%xmm0
+
+ movq %xmm0,($out) # m0=bp[0]
+ lea 8($out),$out
+ sub \$1,$num
+ jnz .Lgather
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ lea 0x28(%rsp),%rsp
+___
+$code.=<<___;
+ ret
+.LSEH_end_bn_gather5:
+.size bn_gather5,.-bn_gather5
+___
+}
+$code.=<<___;
+.align 64
+.Lmagic_masks:
+ .long 0,0, 0,0, 0,0, -1,-1
+ .long 0,0, 0,0, 0,0, 0,0
+.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ lea `40+48`(%rax),%rax
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # end of alloca label
+ cmp %r10,%rbx # context->Rip<end of alloca label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+
+ movaps (%rax),%xmm0
+ movaps 16(%rax),%xmm1
+ lea `40+48`(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+ movups %xmm0,512($context) # restore context->Xmm6
+ movups %xmm1,528($context) # restore context->Xmm7
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mul_handler,.-mul_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont_gather5
+ .rva .LSEH_end_bn_mul_mont_gather5
+ .rva .LSEH_info_bn_mul_mont_gather5
+
+ .rva .LSEH_begin_bn_mul4x_mont_gather5
+ .rva .LSEH_end_bn_mul4x_mont_gather5
+ .rva .LSEH_info_bn_mul4x_mont_gather5
+
+ .rva .LSEH_begin_bn_gather5
+ .rva .LSEH_end_bn_gather5
+ .rva .LSEH_info_bn_gather5
+
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_alloca,.Lmul_body,.Lmul_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_mul4x_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_gather5:
+ .byte 0x01,0x0d,0x05,0x00
+ .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
+ .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6
+ .byte 0x04,0x42,0x00,0x00 #sub rsp,0x28
+.align 8
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/bn.h b/openssl/crypto/bn/bn.h
index a0bc47837..f34248ec4 100644
--- a/openssl/crypto/bn/bn.h
+++ b/openssl/crypto/bn/bn.h
@@ -558,6 +558,17 @@ int BN_is_prime_ex(const BIGNUM *p,int nchecks, BN_CTX *ctx, BN_GENCB *cb);
int BN_is_prime_fasttest_ex(const BIGNUM *p,int nchecks, BN_CTX *ctx,
int do_trial_division, BN_GENCB *cb);
+int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx);
+
+int BN_X931_derive_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
+ const BIGNUM *Xp, const BIGNUM *Xp1, const BIGNUM *Xp2,
+ const BIGNUM *e, BN_CTX *ctx, BN_GENCB *cb);
+int BN_X931_generate_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
+ BIGNUM *Xp1, BIGNUM *Xp2,
+ const BIGNUM *Xp,
+ const BIGNUM *e, BN_CTX *ctx,
+ BN_GENCB *cb);
+
BN_MONT_CTX *BN_MONT_CTX_new(void );
void BN_MONT_CTX_init(BN_MONT_CTX *ctx);
int BN_mod_mul_montgomery(BIGNUM *r,const BIGNUM *a,const BIGNUM *b,
@@ -612,6 +623,8 @@ int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
BN_RECP_CTX *recp, BN_CTX *ctx);
+#ifndef OPENSSL_NO_EC2M
+
/* Functions for arithmetic over binary polynomials represented by BIGNUMs.
*
* The BIGNUM::neg property of BIGNUMs representing binary polynomials is
@@ -663,6 +676,8 @@ int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a,
int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max);
int BN_GF2m_arr2poly(const int p[], BIGNUM *a);
+#endif
+
/* faster mod functions for the 'NIST primes'
* 0 <= a < p^2 */
int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
diff --git a/openssl/crypto/bn/bn_div.c b/openssl/crypto/bn/bn_div.c
index 802a43d64..52b330429 100644
--- a/openssl/crypto/bn/bn_div.c
+++ b/openssl/crypto/bn/bn_div.c
@@ -169,15 +169,13 @@ int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
#endif /* OPENSSL_NO_ASM */
-/* BN_div[_no_branch] computes dv := num / divisor, rounding towards
+/* BN_div computes dv := num / divisor, rounding towards
* zero, and sets up rm such that dv*divisor + rm = num holds.
* Thus:
* dv->neg == num->neg ^ divisor->neg (unless the result is zero)
* rm->neg == num->neg (unless the remainder is zero)
* If 'dv' or 'rm' is NULL, the respective value is not returned.
*/
-static int BN_div_no_branch(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
- const BIGNUM *divisor, BN_CTX *ctx);
int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_CTX *ctx)
{
@@ -186,6 +184,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_ULONG *resp,*wnump;
BN_ULONG d0,d1;
int num_n,div_n;
+ int no_branch=0;
/* Invalid zero-padding would have particularly bad consequences
* in the case of 'num', so don't just rely on bn_check_top() for this one
@@ -200,7 +199,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
if ((BN_get_flags(num, BN_FLG_CONSTTIME) != 0) || (BN_get_flags(divisor, BN_FLG_CONSTTIME) != 0))
{
- return BN_div_no_branch(dv, rm, num, divisor, ctx);
+ no_branch=1;
}
bn_check_top(dv);
@@ -214,7 +213,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
return(0);
}
- if (BN_ucmp(num,divisor) < 0)
+ if (!no_branch && BN_ucmp(num,divisor) < 0)
{
if (rm != NULL)
{ if (BN_copy(rm,num) == NULL) return(0); }
@@ -239,242 +238,25 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
norm_shift+=BN_BITS2;
if (!(BN_lshift(snum,num,norm_shift))) goto err;
snum->neg=0;
- div_n=sdiv->top;
- num_n=snum->top;
- loop=num_n-div_n;
- /* Lets setup a 'window' into snum
- * This is the part that corresponds to the current
- * 'area' being divided */
- wnum.neg = 0;
- wnum.d = &(snum->d[loop]);
- wnum.top = div_n;
- /* only needed when BN_ucmp messes up the values between top and max */
- wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */
-
- /* Get the top 2 words of sdiv */
- /* div_n=sdiv->top; */
- d0=sdiv->d[div_n-1];
- d1=(div_n == 1)?0:sdiv->d[div_n-2];
-
- /* pointer to the 'top' of snum */
- wnump= &(snum->d[num_n-1]);
-
- /* Setup to 'res' */
- res->neg= (num->neg^divisor->neg);
- if (!bn_wexpand(res,(loop+1))) goto err;
- res->top=loop;
- resp= &(res->d[loop-1]);
-
- /* space for temp */
- if (!bn_wexpand(tmp,(div_n+1))) goto err;
- if (BN_ucmp(&wnum,sdiv) >= 0)
+ if (no_branch)
{
- /* If BN_DEBUG_RAND is defined BN_ucmp changes (via
- * bn_pollute) the const bignum arguments =>
- * clean the values between top and max again */
- bn_clear_top2max(&wnum);
- bn_sub_words(wnum.d, wnum.d, sdiv->d, div_n);
- *resp=1;
- }
- else
- res->top--;
- /* if res->top == 0 then clear the neg value otherwise decrease
- * the resp pointer */
- if (res->top == 0)
- res->neg = 0;
- else
- resp--;
-
- for (i=0; i<loop-1; i++, wnump--, resp--)
- {
- BN_ULONG q,l0;
- /* the first part of the loop uses the top two words of
- * snum and sdiv to calculate a BN_ULONG q such that
- * | wnum - sdiv * q | < sdiv */
-#if defined(BN_DIV3W) && !defined(OPENSSL_NO_ASM)
- BN_ULONG bn_div_3_words(BN_ULONG*,BN_ULONG,BN_ULONG);
- q=bn_div_3_words(wnump,d1,d0);
-#else
- BN_ULONG n0,n1,rem=0;
-
- n0=wnump[0];
- n1=wnump[-1];
- if (n0 == d0)
- q=BN_MASK2;
- else /* n0 < d0 */
- {
-#ifdef BN_LLONG
- BN_ULLONG t2;
-
-#if defined(BN_LLONG) && defined(BN_DIV2W) && !defined(bn_div_words)
- q=(BN_ULONG)(((((BN_ULLONG)n0)<<BN_BITS2)|n1)/d0);
-#else
- q=bn_div_words(n0,n1,d0);
-#ifdef BN_DEBUG_LEVITTE
- fprintf(stderr,"DEBUG: bn_div_words(0x%08X,0x%08X,0x%08\
-X) -> 0x%08X\n",
- n0, n1, d0, q);
-#endif
-#endif
-
-#ifndef REMAINDER_IS_ALREADY_CALCULATED
- /*
- * rem doesn't have to be BN_ULLONG. The least we
- * know it's less that d0, isn't it?
- */
- rem=(n1-q*d0)&BN_MASK2;
-#endif
- t2=(BN_ULLONG)d1*q;
-
- for (;;)
- {
- if (t2 <= ((((BN_ULLONG)rem)<<BN_BITS2)|wnump[-2]))
- break;
- q--;
- rem += d0;
- if (rem < d0) break; /* don't let rem overflow */
- t2 -= d1;
- }
-#else /* !BN_LLONG */
- BN_ULONG t2l,t2h;
-
- q=bn_div_words(n0,n1,d0);
-#ifdef BN_DEBUG_LEVITTE
- fprintf(stderr,"DEBUG: bn_div_words(0x%08X,0x%08X,0x%08\
-X) -> 0x%08X\n",
- n0, n1, d0, q);
-#endif
-#ifndef REMAINDER_IS_ALREADY_CALCULATED
- rem=(n1-q*d0)&BN_MASK2;
-#endif
-
-#if defined(BN_UMULT_LOHI)
- BN_UMULT_LOHI(t2l,t2h,d1,q);
-#elif defined(BN_UMULT_HIGH)
- t2l = d1 * q;
- t2h = BN_UMULT_HIGH(d1,q);
-#else
+ /* Since we don't know whether snum is larger than sdiv,
+ * we pad snum with enough zeroes without changing its
+ * value.
+ */
+ if (snum->top <= sdiv->top+1)
{
- BN_ULONG ql, qh;
- t2l=LBITS(d1); t2h=HBITS(d1);
- ql =LBITS(q); qh =HBITS(q);
- mul64(t2l,t2h,ql,qh); /* t2=(BN_ULLONG)d1*q; */
+ if (bn_wexpand(snum, sdiv->top + 2) == NULL) goto err;
+ for (i = snum->top; i < sdiv->top + 2; i++) snum->d[i] = 0;
+ snum->top = sdiv->top + 2;
}
-#endif
-
- for (;;)
- {
- if ((t2h < rem) ||
- ((t2h == rem) && (t2l <= wnump[-2])))
- break;
- q--;
- rem += d0;
- if (rem < d0) break; /* don't let rem overflow */
- if (t2l < d1) t2h--; t2l -= d1;
- }
-#endif /* !BN_LLONG */
- }
-#endif /* !BN_DIV3W */
-
- l0=bn_mul_words(tmp->d,sdiv->d,div_n,q);
- tmp->d[div_n]=l0;
- wnum.d--;
- /* ingore top values of the bignums just sub the two
- * BN_ULONG arrays with bn_sub_words */
- if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n+1))
+ else
{
- /* Note: As we have considered only the leading
- * two BN_ULONGs in the calculation of q, sdiv * q
- * might be greater than wnum (but then (q-1) * sdiv
- * is less or equal than wnum)
- */
- q--;
- if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n))
- /* we can't have an overflow here (assuming
- * that q != 0, but if q == 0 then tmp is
- * zero anyway) */
- (*wnump)++;
+ if (bn_wexpand(snum, snum->top + 1) == NULL) goto err;
+ snum->d[snum->top] = 0;
+ snum->top ++;
}
- /* store part of the result */
- *resp = q;
- }
- bn_correct_top(snum);
- if (rm != NULL)
- {
- /* Keep a copy of the neg flag in num because if rm==num
- * BN_rshift() will overwrite it.
- */
- int neg = num->neg;
- BN_rshift(rm,snum,norm_shift);
- if (!BN_is_zero(rm))
- rm->neg = neg;
- bn_check_top(rm);
- }
- BN_CTX_end(ctx);
- return(1);
-err:
- bn_check_top(rm);
- BN_CTX_end(ctx);
- return(0);
- }
-
-
-/* BN_div_no_branch is a special version of BN_div. It does not contain
- * branches that may leak sensitive information.
- */
-static int BN_div_no_branch(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
- const BIGNUM *divisor, BN_CTX *ctx)
- {
- int norm_shift,i,loop;
- BIGNUM *tmp,wnum,*snum,*sdiv,*res;
- BN_ULONG *resp,*wnump;
- BN_ULONG d0,d1;
- int num_n,div_n;
-
- bn_check_top(dv);
- bn_check_top(rm);
- /* bn_check_top(num); */ /* 'num' has been checked in BN_div() */
- bn_check_top(divisor);
-
- if (BN_is_zero(divisor))
- {
- BNerr(BN_F_BN_DIV_NO_BRANCH,BN_R_DIV_BY_ZERO);
- return(0);
- }
-
- BN_CTX_start(ctx);
- tmp=BN_CTX_get(ctx);
- snum=BN_CTX_get(ctx);
- sdiv=BN_CTX_get(ctx);
- if (dv == NULL)
- res=BN_CTX_get(ctx);
- else res=dv;
- if (sdiv == NULL || res == NULL) goto err;
-
- /* First we normalise the numbers */
- norm_shift=BN_BITS2-((BN_num_bits(divisor))%BN_BITS2);
- if (!(BN_lshift(sdiv,divisor,norm_shift))) goto err;
- sdiv->neg=0;
- norm_shift+=BN_BITS2;
- if (!(BN_lshift(snum,num,norm_shift))) goto err;
- snum->neg=0;
-
- /* Since we don't know whether snum is larger than sdiv,
- * we pad snum with enough zeroes without changing its
- * value.
- */
- if (snum->top <= sdiv->top+1)
- {
- if (bn_wexpand(snum, sdiv->top + 2) == NULL) goto err;
- for (i = snum->top; i < sdiv->top + 2; i++) snum->d[i] = 0;
- snum->top = sdiv->top + 2;
- }
- else
- {
- if (bn_wexpand(snum, snum->top + 1) == NULL) goto err;
- snum->d[snum->top] = 0;
- snum->top ++;
}
div_n=sdiv->top;
@@ -500,12 +282,27 @@ static int BN_div_no_branch(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
/* Setup to 'res' */
res->neg= (num->neg^divisor->neg);
if (!bn_wexpand(res,(loop+1))) goto err;
- res->top=loop-1;
+ res->top=loop-no_branch;
resp= &(res->d[loop-1]);
/* space for temp */
if (!bn_wexpand(tmp,(div_n+1))) goto err;
+ if (!no_branch)
+ {
+ if (BN_ucmp(&wnum,sdiv) >= 0)
+ {
+ /* If BN_DEBUG_RAND is defined BN_ucmp changes (via
+ * bn_pollute) the const bignum arguments =>
+ * clean the values between top and max again */
+ bn_clear_top2max(&wnum);
+ bn_sub_words(wnum.d, wnum.d, sdiv->d, div_n);
+ *resp=1;
+ }
+ else
+ res->top--;
+ }
+
/* if res->top == 0 then clear the neg value otherwise decrease
* the resp pointer */
if (res->top == 0)
@@ -638,7 +435,7 @@ X) -> 0x%08X\n",
rm->neg = neg;
bn_check_top(rm);
}
- bn_correct_top(res);
+ if (no_branch) bn_correct_top(res);
BN_CTX_end(ctx);
return(1);
err:
@@ -646,5 +443,4 @@ err:
BN_CTX_end(ctx);
return(0);
}
-
#endif
diff --git a/openssl/crypto/bn/bn_exp.c b/openssl/crypto/bn/bn_exp.c
index d9b6c737f..2abf6fd67 100644
--- a/openssl/crypto/bn/bn_exp.c
+++ b/openssl/crypto/bn/bn_exp.c
@@ -113,6 +113,18 @@
#include "cryptlib.h"
#include "bn_lcl.h"
+#include <stdlib.h>
+#ifdef _WIN32
+# include <malloc.h>
+# ifndef alloca
+# define alloca _alloca
+# endif
+#elif defined(__GNUC__)
+# ifndef alloca
+# define alloca(s) __builtin_alloca((s))
+# endif
+#endif
+
/* maximum precomputation table size for *variable* sliding windows */
#define TABLE_SIZE 32
@@ -522,23 +534,17 @@ err:
* as cache lines are concerned. The following functions are used to transfer a BIGNUM
* from/to that table. */
-static int MOD_EXP_CTIME_COPY_TO_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int width)
+static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int width)
{
size_t i, j;
- if (bn_wexpand(b, top) == NULL)
- return 0;
- while (b->top < top)
- {
- b->d[b->top++] = 0;
- }
-
+ if (top > b->top)
+ top = b->top; /* this works because 'buf' is explicitly zeroed */
for (i = 0, j=idx; i < top * sizeof b->d[0]; i++, j+=width)
{
buf[j] = ((unsigned char*)b->d)[i];
}
- bn_correct_top(b);
return 1;
}
@@ -561,7 +567,7 @@ static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf
/* Given a pointer value, compute the next address that is a cache line multiple. */
#define MOD_EXP_CTIME_ALIGN(x_) \
- ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((BN_ULONG)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
+ ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
/* This variant of BN_mod_exp_mont() uses fixed windows and the special
* precomputation memory layout to limit data-dependency to a minimum
@@ -572,17 +578,15 @@ static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf
int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
{
- int i,bits,ret=0,idx,window,wvalue;
+ int i,bits,ret=0,window,wvalue;
int top;
- BIGNUM *r;
- const BIGNUM *aa;
BN_MONT_CTX *mont=NULL;
int numPowers;
unsigned char *powerbufFree=NULL;
int powerbufLen = 0;
unsigned char *powerbuf=NULL;
- BIGNUM *computeTemp=NULL, *am=NULL;
+ BIGNUM tmp, am;
bn_check_top(a);
bn_check_top(p);
@@ -602,10 +606,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
return ret;
}
- /* Initialize BIGNUM context and allocate intermediate result */
BN_CTX_start(ctx);
- r = BN_CTX_get(ctx);
- if (r == NULL) goto err;
/* Allocate a montgomery context if it was not supplied by the caller.
* If this is not done, things will break in the montgomery part.
@@ -620,40 +621,154 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
/* Get the window size to use with size of p. */
window = BN_window_bits_for_ctime_exponent_size(bits);
+#if defined(OPENSSL_BN_ASM_MONT5)
+ if (window==6 && bits<=1024) window=5; /* ~5% improvement of 2048-bit RSA sign */
+#endif
/* Allocate a buffer large enough to hold all of the pre-computed
- * powers of a.
+ * powers of am, am itself and tmp.
*/
numPowers = 1 << window;
- powerbufLen = sizeof(m->d[0])*top*numPowers;
+ powerbufLen = sizeof(m->d[0])*(top*numPowers +
+ ((2*top)>numPowers?(2*top):numPowers));
+#ifdef alloca
+ if (powerbufLen < 3072)
+ powerbufFree = alloca(powerbufLen+MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
+ else
+#endif
if ((powerbufFree=(unsigned char*)OPENSSL_malloc(powerbufLen+MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) == NULL)
goto err;
powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
memset(powerbuf, 0, powerbufLen);
- /* Initialize the intermediate result. Do this early to save double conversion,
- * once each for a^0 and intermediate result.
- */
- if (!BN_to_montgomery(r,BN_value_one(),mont,ctx)) goto err;
- if (!MOD_EXP_CTIME_COPY_TO_PREBUF(r, top, powerbuf, 0, numPowers)) goto err;
+#ifdef alloca
+ if (powerbufLen < 3072)
+ powerbufFree = NULL;
+#endif
- /* Initialize computeTemp as a^1 with montgomery precalcs */
- computeTemp = BN_CTX_get(ctx);
- am = BN_CTX_get(ctx);
- if (computeTemp==NULL || am==NULL) goto err;
+ /* lay down tmp and am right after powers table */
+ tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0])*top*numPowers);
+ am.d = tmp.d + top;
+ tmp.top = am.top = 0;
+ tmp.dmax = am.dmax = top;
+ tmp.neg = am.neg = 0;
+ tmp.flags = am.flags = BN_FLG_STATIC_DATA;
+
+ /* prepare a^0 in Montgomery domain */
+#if 1
+ if (!BN_to_montgomery(&tmp,BN_value_one(),mont,ctx)) goto err;
+#else
+ tmp.d[0] = (0-m->d[0])&BN_MASK2; /* 2^(top*BN_BITS2) - m */
+ for (i=1;i<top;i++)
+ tmp.d[i] = (~m->d[i])&BN_MASK2;
+ tmp.top = top;
+#endif
+ /* prepare a^1 in Montgomery domain */
if (a->neg || BN_ucmp(a,m) >= 0)
{
- if (!BN_mod(am,a,m,ctx))
- goto err;
- aa= am;
+ if (!BN_mod(&am,a,m,ctx)) goto err;
+ if (!BN_to_montgomery(&am,&am,mont,ctx)) goto err;
}
- else
- aa=a;
- if (!BN_to_montgomery(am,aa,mont,ctx)) goto err;
- if (!BN_copy(computeTemp, am)) goto err;
- if (!MOD_EXP_CTIME_COPY_TO_PREBUF(am, top, powerbuf, 1, numPowers)) goto err;
+ else if (!BN_to_montgomery(&am,a,mont,ctx)) goto err;
+
+#if defined(OPENSSL_BN_ASM_MONT5)
+ /* This optimization uses ideas from http://eprint.iacr.org/2011/239,
+ * specifically optimization of cache-timing attack countermeasures
+ * and pre-computation optimization. */
+
+ /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
+ * 512-bit RSA is hardly relevant, we omit it to spare size... */
+ if (window==5)
+ {
+ void bn_mul_mont_gather5(BN_ULONG *rp,const BN_ULONG *ap,
+ const void *table,const BN_ULONG *np,
+ const BN_ULONG *n0,int num,int power);
+ void bn_scatter5(const BN_ULONG *inp,size_t num,
+ void *table,size_t power);
+ void bn_gather5(BN_ULONG *out,size_t num,
+ void *table,size_t power);
+
+ BN_ULONG *np=mont->N.d, *n0=mont->n0;
+
+ /* BN_to_montgomery can contaminate words above .top
+ * [in BN_DEBUG[_DEBUG] build]... */
+ for (i=am.top; i<top; i++) am.d[i]=0;
+ for (i=tmp.top; i<top; i++) tmp.d[i]=0;
+
+ bn_scatter5(tmp.d,top,powerbuf,0);
+ bn_scatter5(am.d,am.top,powerbuf,1);
+ bn_mul_mont(tmp.d,am.d,am.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,2);
+
+#if 0
+ for (i=3; i<32; i++)
+ {
+ /* Calculate a^i = a^(i-1) * a */
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+#else
+ /* same as above, but uses squaring for 1/2 of operations */
+ for (i=4; i<32; i*=2)
+ {
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+ for (i=3; i<8; i+=2)
+ {
+ int j;
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ for (j=2*i; j<32; j*=2)
+ {
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,j);
+ }
+ }
+ for (; i<16; i+=2)
+ {
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,2*i);
+ }
+ for (; i<32; i+=2)
+ {
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+#endif
+ bits--;
+ for (wvalue=0, i=bits%5; i>=0; i--,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ bn_gather5(tmp.d,top,powerbuf,wvalue);
+
+ /* Scan the exponent one window at a time starting from the most
+ * significant bits.
+ */
+ while (bits >= 0)
+ {
+ for (wvalue=0, i=0; i<5; i++,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont_gather5(tmp.d,tmp.d,powerbuf,np,n0,top,wvalue);
+ }
+
+ tmp.top=top;
+ bn_correct_top(&tmp);
+ }
+ else
+#endif
+ {
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers)) goto err;
/* If the window size is greater than 1, then calculate
* val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
@@ -662,62 +777,54 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
*/
if (window > 1)
{
- for (i=2; i<numPowers; i++)
+ if (!BN_mod_mul_montgomery(&tmp,&am,&am,mont,ctx)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, numPowers)) goto err;
+ for (i=3; i<numPowers; i++)
{
/* Calculate a^i = a^(i-1) * a */
- if (!BN_mod_mul_montgomery(computeTemp,am,computeTemp,mont,ctx))
+ if (!BN_mod_mul_montgomery(&tmp,&am,&tmp,mont,ctx))
goto err;
- if (!MOD_EXP_CTIME_COPY_TO_PREBUF(computeTemp, top, powerbuf, i, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, numPowers)) goto err;
}
}
- /* Adjust the number of bits up to a multiple of the window size.
- * If the exponent length is not a multiple of the window size, then
- * this pads the most significant bits with zeros to normalize the
- * scanning loop to there's no special cases.
- *
- * * NOTE: Making the window size a power of two less than the native
- * * word size ensures that the padded bits won't go past the last
- * * word in the internal BIGNUM structure. Going past the end will
- * * still produce the correct result, but causes a different branch
- * * to be taken in the BN_is_bit_set function.
- */
- bits = ((bits+window-1)/window)*window;
- idx=bits-1; /* The top bit of the window */
-
- /* Scan the exponent one window at a time starting from the most
- * significant bits.
- */
- while (idx >= 0)
+ bits--;
+ for (wvalue=0, i=bits%window; i>=0; i--,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp,top,powerbuf,wvalue,numPowers)) goto err;
+
+ /* Scan the exponent one window at a time starting from the most
+ * significant bits.
+ */
+ while (bits >= 0)
{
wvalue=0; /* The 'value' of the window */
/* Scan the window, squaring the result as we go */
- for (i=0; i<window; i++,idx--)
+ for (i=0; i<window; i++,bits--)
{
- if (!BN_mod_mul_montgomery(r,r,r,mont,ctx)) goto err;
- wvalue = (wvalue<<1)+BN_is_bit_set(p,idx);
+ if (!BN_mod_mul_montgomery(&tmp,&tmp,&tmp,mont,ctx)) goto err;
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
}
/* Fetch the appropriate pre-computed value from the pre-buf */
- if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(computeTemp, top, powerbuf, wvalue, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, numPowers)) goto err;
/* Multiply the result into the intermediate result */
- if (!BN_mod_mul_montgomery(r,r,computeTemp,mont,ctx)) goto err;
+ if (!BN_mod_mul_montgomery(&tmp,&tmp,&am,mont,ctx)) goto err;
}
+ }
/* Convert the final result from montgomery to standard format */
- if (!BN_from_montgomery(rr,r,mont,ctx)) goto err;
+ if (!BN_from_montgomery(rr,&tmp,mont,ctx)) goto err;
ret=1;
err:
if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont);
if (powerbuf!=NULL)
{
OPENSSL_cleanse(powerbuf,powerbufLen);
- OPENSSL_free(powerbufFree);
+ if (powerbufFree) OPENSSL_free(powerbufFree);
}
- if (am!=NULL) BN_clear(am);
- if (computeTemp!=NULL) BN_clear(computeTemp);
BN_CTX_end(ctx);
return(ret);
}
@@ -988,4 +1095,3 @@ err:
bn_check_top(r);
return(ret);
}
-
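The reworked BN_mod_exp_mont_consttime keeps every precomputed power interleaved across the aligned powerbuf (and, when OPENSSL_BN_ASM_MONT5 is available, scattered and gathered five exponent bits at a time by bn_scatter5/bn_gather5). A minimal sketch of that layout, using hypothetical scatter/gather helpers rather than the library's own routines:

#include <stddef.h>
#include <stdint.h>

/* Minimal sketch (not the OpenSSL layout) of an interleaved power
 * table: entry 'idx' of a 'num'-limb value is stored with stride
 * 'width' (the number of table entries), so reading any one entry
 * touches the same cache lines as reading any other. */
static void scatter(uint64_t *table, const uint64_t *v, size_t num,
		    size_t width, size_t idx)
	{
	size_t i;
	for (i = 0; i < num; i++)
		table[i*width + idx] = v[i];
	}

static void gather(uint64_t *v, const uint64_t *table, size_t num,
		   size_t width, size_t idx)
	{
	size_t i;
	for (i = 0; i < num; i++)
		v[i] = table[i*width + idx];
	}

Because every gather reads the same sequence of cache lines, the secret exponent window does not influence which lines are pulled into the cache, which is the point of the prebuf layout described in the comment above.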
diff --git a/openssl/crypto/bn/bn_gf2m.c b/openssl/crypto/bn/bn_gf2m.c
index 432a3aa33..08ab9fd22 100644
--- a/openssl/crypto/bn/bn_gf2m.c
+++ b/openssl/crypto/bn/bn_gf2m.c
@@ -94,6 +94,8 @@
#include "cryptlib.h"
#include "bn_lcl.h"
+#ifndef OPENSSL_NO_EC2M
+
/* Maximum number of iterations before BN_GF2m_mod_solve_quad_arr should fail. */
#define MAX_ITERATIONS 50
@@ -122,6 +124,7 @@ static const BN_ULONG SQR_tb[16] =
SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF]
#endif
+#if !defined(OPENSSL_BN_ASM_GF2m)
/* Product of two polynomials a, b each with degree < BN_BITS2 - 1,
* result is a polynomial r with degree < 2 * BN_BITS - 1
* The caller MUST ensure that the variables have the right amount
@@ -216,7 +219,9 @@ static void bn_GF2m_mul_2x2(BN_ULONG *r, const BN_ULONG a1, const BN_ULONG a0, c
r[2] ^= m1 ^ r[1] ^ r[3]; /* h0 ^= m1 ^ l1 ^ h1; */
r[1] = r[3] ^ r[2] ^ r[0] ^ m1 ^ m0; /* l1 ^= l0 ^ h0 ^ m0; */
}
-
+#else
+void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+#endif
/* Add polynomials a and b and store result in r; r could be a or b, a and b
* could be equal; r is the bitwise XOR of a and b.
@@ -360,21 +365,17 @@ int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[])
int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p)
{
int ret = 0;
- const int max = BN_num_bits(p) + 1;
- int *arr=NULL;
+ int arr[6];
bn_check_top(a);
bn_check_top(p);
- if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
- ret = BN_GF2m_poly2arr(p, arr, max);
- if (!ret || ret > max)
+ ret = BN_GF2m_poly2arr(p, arr, sizeof(arr)/sizeof(arr[0]));
+ if (!ret || ret > (int)(sizeof(arr)/sizeof(arr[0])))
{
BNerr(BN_F_BN_GF2M_MOD,BN_R_INVALID_LENGTH);
- goto err;
+ return 0;
}
ret = BN_GF2m_mod_arr(r, a, arr);
bn_check_top(r);
-err:
- if (arr) OPENSSL_free(arr);
return ret;
}
@@ -521,7 +522,7 @@ err:
*/
int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
- BIGNUM *b, *c, *u, *v, *tmp;
+ BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp;
int ret = 0;
bn_check_top(a);
@@ -529,18 +530,18 @@ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
BN_CTX_start(ctx);
- b = BN_CTX_get(ctx);
- c = BN_CTX_get(ctx);
- u = BN_CTX_get(ctx);
- v = BN_CTX_get(ctx);
- if (v == NULL) goto err;
+ if ((b = BN_CTX_get(ctx))==NULL) goto err;
+ if ((c = BN_CTX_get(ctx))==NULL) goto err;
+ if ((u = BN_CTX_get(ctx))==NULL) goto err;
+ if ((v = BN_CTX_get(ctx))==NULL) goto err;
- if (!BN_one(b)) goto err;
if (!BN_GF2m_mod(u, a, p)) goto err;
- if (!BN_copy(v, p)) goto err;
-
if (BN_is_zero(u)) goto err;
+ if (!BN_copy(v, p)) goto err;
+#if 0
+ if (!BN_one(b)) goto err;
+
while (1)
{
while (!BN_is_odd(u))
@@ -565,13 +566,86 @@ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
if (!BN_GF2m_add(u, u, v)) goto err;
if (!BN_GF2m_add(b, b, c)) goto err;
}
+#else
+ {
+ int i, ubits = BN_num_bits(u),
+ vbits = BN_num_bits(v), /* v is copy of p */
+ top = p->top;
+ BN_ULONG *udp,*bdp,*vdp,*cdp;
+
+ bn_wexpand(u,top); udp = u->d;
+ for (i=u->top;i<top;i++) udp[i] = 0;
+ u->top = top;
+ bn_wexpand(b,top); bdp = b->d;
+ bdp[0] = 1;
+ for (i=1;i<top;i++) bdp[i] = 0;
+ b->top = top;
+ bn_wexpand(c,top); cdp = c->d;
+ for (i=0;i<top;i++) cdp[i] = 0;
+ c->top = top;
+ vdp = v->d; /* It pays off to "cache" *->d pointers, because
+ * it allows optimizer to be more aggressive.
+ * But we don't have to "cache" p->d, because *p
+ * is declared 'const'... */
+ while (1)
+ {
+ while (ubits && !(udp[0]&1))
+ {
+ BN_ULONG u0,u1,b0,b1,mask;
+ u0 = udp[0];
+ b0 = bdp[0];
+ mask = (BN_ULONG)0-(b0&1);
+ b0 ^= p->d[0]&mask;
+ for (i=0;i<top-1;i++)
+ {
+ u1 = udp[i+1];
+ udp[i] = ((u0>>1)|(u1<<(BN_BITS2-1)))&BN_MASK2;
+ u0 = u1;
+ b1 = bdp[i+1]^(p->d[i+1]&mask);
+ bdp[i] = ((b0>>1)|(b1<<(BN_BITS2-1)))&BN_MASK2;
+ b0 = b1;
+ }
+ udp[i] = u0>>1;
+ bdp[i] = b0>>1;
+ ubits--;
+ }
+
+ if (ubits<=BN_BITS2 && udp[0]==1) break;
+
+ if (ubits<vbits)
+ {
+ i = ubits; ubits = vbits; vbits = i;
+ tmp = u; u = v; v = tmp;
+ tmp = b; b = c; c = tmp;
+ udp = vdp; vdp = v->d;
+ bdp = cdp; cdp = c->d;
+ }
+ for(i=0;i<top;i++)
+ {
+ udp[i] ^= vdp[i];
+ bdp[i] ^= cdp[i];
+ }
+ if (ubits==vbits)
+ {
+ bn_correct_top(u);
+ ubits = BN_num_bits(u);
+ }
+ }
+ bn_correct_top(b);
+ }
+#endif
if (!BN_copy(r, b)) goto err;
bn_check_top(r);
ret = 1;
err:
+#ifdef BN_DEBUG /* BN_CTX_end would complain about the expanded form */
+ bn_correct_top(c);
+ bn_correct_top(u);
+ bn_correct_top(v);
+#endif
BN_CTX_end(ctx);
return ret;
}
@@ -1033,3 +1107,4 @@ int BN_GF2m_arr2poly(const int p[], BIGNUM *a)
return 1;
}
+#endif
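With OPENSSL_BN_ASM_GF2m defined, bn_GF2m_mul_2x2 is now supplied by the new assembler modules, and the existing C version stays as the fallback. A minimal sketch, not the OpenSSL implementation, of what a 64x64 carry-less (GF(2) polynomial) multiply computes:

#include <stdint.h>

/* Minimal sketch (not OpenSSL's table-driven code): carry-less
 * multiplication of two 64-bit polynomials over GF(2), returning the
 * low and high halves of the 128-bit product.  Unlike the library
 * code, this loop branches on the bits of 'a', so it is only meant to
 * show the arithmetic, not to be timing-safe. */
static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
	{
	uint64_t l = 0, h = 0;
	int i;

	for (i = 0; i < 64; i++)
		{
		if ((a >> i) & 1)
			{
			l ^= b << i;
			if (i)
				h ^= b >> (64 - i);
			}
		}
	*lo = l;
	*hi = h;
	}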
diff --git a/openssl/crypto/bn/bn_lcl.h b/openssl/crypto/bn/bn_lcl.h
index 8e5e98e3f..eecfd8cc9 100644
--- a/openssl/crypto/bn/bn_lcl.h
+++ b/openssl/crypto/bn/bn_lcl.h
@@ -238,7 +238,7 @@ extern "C" {
# if defined(__DECC)
# include <c_asm.h>
# define BN_UMULT_HIGH(a,b) (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
-# elif defined(__GNUC__)
+# elif defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret; \
asm ("umulh %1,%2,%0" \
@@ -247,7 +247,7 @@ extern "C" {
ret; })
# endif /* compiler */
# elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
-# if defined(__GNUC__)
+# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret; \
asm ("mulhdu %0,%1,%2" \
@@ -257,7 +257,7 @@ extern "C" {
# endif /* compiler */
# elif (defined(__x86_64) || defined(__x86_64__)) && \
(defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
-# if defined(__GNUC__)
+# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret,discard; \
asm ("mulq %3" \
@@ -280,6 +280,19 @@ extern "C" {
# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
# endif
+# elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("dmultu %1,%2" \
+ : "=h"(ret) \
+ : "r"(a), "r"(b) : "l"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b) \
+ asm ("dmultu %2,%3" \
+ : "=l"(low),"=h"(high) \
+ : "r"(a), "r"(b));
+# endif
# endif /* cpu */
#endif /* OPENSSL_NO_ASM */
@@ -459,6 +472,10 @@ extern "C" {
}
#endif /* !BN_LLONG */
+#if defined(OPENSSL_DOING_MAKEDEPEND) && defined(OPENSSL_FIPS)
+#undef bn_div_words
+#endif
+
void bn_mul_normal(BN_ULONG *r,BN_ULONG *a,int na,BN_ULONG *b,int nb);
void bn_mul_comba8(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b);
void bn_mul_comba4(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b);
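The new MIPS branch of BN_UMULT_HIGH/BN_UMULT_LOHI uses dmultu and the hi/lo registers; on compilers that provide a 128-bit integer type the same quantity can be written directly. A minimal portable sketch, assuming GCC or Clang on a 64-bit target:

#include <stdint.h>

/* Minimal sketch of what BN_UMULT_HIGH computes: the upper 64 bits of
 * a 64x64-bit product, here via the compiler's __int128 support. */
static uint64_t umult_high(uint64_t a, uint64_t b)
	{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
	}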
diff --git a/openssl/crypto/bn/bn_lib.c b/openssl/crypto/bn/bn_lib.c
index 5470fbe6e..7a5676de6 100644
--- a/openssl/crypto/bn/bn_lib.c
+++ b/openssl/crypto/bn/bn_lib.c
@@ -139,25 +139,6 @@ const BIGNUM *BN_value_one(void)
return(&const_one);
}
-char *BN_options(void)
- {
- static int init=0;
- static char data[16];
-
- if (!init)
- {
- init++;
-#ifdef BN_LLONG
- BIO_snprintf(data,sizeof data,"bn(%d,%d)",
- (int)sizeof(BN_ULLONG)*8,(int)sizeof(BN_ULONG)*8);
-#else
- BIO_snprintf(data,sizeof data,"bn(%d,%d)",
- (int)sizeof(BN_ULONG)*8,(int)sizeof(BN_ULONG)*8);
-#endif
- }
- return(data);
- }
-
int BN_num_bits_word(BN_ULONG l)
{
static const unsigned char bits[256]={
diff --git a/openssl/crypto/bn/bn_mont.c b/openssl/crypto/bn/bn_mont.c
index 1a866880f..427b5cf4d 100644
--- a/openssl/crypto/bn/bn_mont.c
+++ b/openssl/crypto/bn/bn_mont.c
@@ -177,31 +177,26 @@ err:
static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
{
BIGNUM *n;
- BN_ULONG *ap,*np,*rp,n0,v,*nrp;
- int al,nl,max,i,x,ri;
+ BN_ULONG *ap,*np,*rp,n0,v,carry;
+ int nl,max,i;
n= &(mont->N);
- /* mont->ri is the size of mont->N in bits (rounded up
- to the word size) */
- al=ri=mont->ri/BN_BITS2;
-
nl=n->top;
- if ((al == 0) || (nl == 0)) { ret->top=0; return(1); }
+ if (nl == 0) { ret->top=0; return(1); }
- max=(nl+al+1); /* allow for overflow (no?) XXX */
+ max=(2*nl); /* carry is stored separately */
if (bn_wexpand(r,max) == NULL) return(0);
r->neg^=n->neg;
np=n->d;
rp=r->d;
- nrp= &(r->d[nl]);
/* clear the top words of T */
#if 1
for (i=r->top; i<max; i++) /* memset? XXX */
- r->d[i]=0;
+ rp[i]=0;
#else
- memset(&(r->d[r->top]),0,(max-r->top)*sizeof(BN_ULONG));
+ memset(&(rp[r->top]),0,(max-r->top)*sizeof(BN_ULONG));
#endif
r->top=max;
@@ -210,7 +205,7 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
#ifdef BN_COUNT
fprintf(stderr,"word BN_from_montgomery_word %d * %d\n",nl,nl);
#endif
- for (i=0; i<nl; i++)
+ for (carry=0, i=0; i<nl; i++, rp++)
{
#ifdef __TANDEM
{
@@ -228,61 +223,33 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
#else
v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
#endif
- nrp++;
- rp++;
- if (((nrp[-1]+=v)&BN_MASK2) >= v)
- continue;
- else
- {
- if (((++nrp[0])&BN_MASK2) != 0) continue;
- if (((++nrp[1])&BN_MASK2) != 0) continue;
- for (x=2; (((++nrp[x])&BN_MASK2) == 0); x++) ;
- }
- }
- bn_correct_top(r);
-
- /* mont->ri will be a multiple of the word size and below code
- * is kind of BN_rshift(ret,r,mont->ri) equivalent */
- if (r->top <= ri)
- {
- ret->top=0;
- return(1);
+ v = (v+carry+rp[nl])&BN_MASK2;
+ carry |= (v != rp[nl]);
+ carry &= (v <= rp[nl]);
+ rp[nl]=v;
}
- al=r->top-ri;
-#define BRANCH_FREE 1
-#if BRANCH_FREE
- if (bn_wexpand(ret,ri) == NULL) return(0);
- x=0-(((al-ri)>>(sizeof(al)*8-1))&1);
- ret->top=x=(ri&~x)|(al&x); /* min(ri,al) */
+ if (bn_wexpand(ret,nl) == NULL) return(0);
+ ret->top=nl;
ret->neg=r->neg;
rp=ret->d;
- ap=&(r->d[ri]);
+ ap=&(r->d[nl]);
+#define BRANCH_FREE 1
+#if BRANCH_FREE
{
- size_t m1,m2;
-
- v=bn_sub_words(rp,ap,np,ri);
- /* this ----------------^^ works even in al<ri case
- * thanks to zealous zeroing of top of the vector in the
- * beginning. */
+ BN_ULONG *nrp;
+ size_t m;
- /* if (al==ri && !v) || al>ri) nrp=rp; else nrp=ap; */
- /* in other words if subtraction result is real, then
+ v=bn_sub_words(rp,ap,np,nl)-carry;
+ /* if subtraction result is real, then
* trick unconditional memcpy below to perform in-place
* "refresh" instead of actual copy. */
- m1=0-(size_t)(((al-ri)>>(sizeof(al)*8-1))&1); /* al<ri */
- m2=0-(size_t)(((ri-al)>>(sizeof(al)*8-1))&1); /* al>ri */
- m1|=m2; /* (al!=ri) */
- m1|=(0-(size_t)v); /* (al!=ri || v) */
- m1&=~m2; /* (al!=ri || v) && !al>ri */
- nrp=(BN_ULONG *)(((PTR_SIZE_INT)rp&~m1)|((PTR_SIZE_INT)ap&m1));
- }
+ m=(0-(size_t)v);
+ nrp=(BN_ULONG *)(((PTR_SIZE_INT)rp&~m)|((PTR_SIZE_INT)ap&m));
- /* 'i<ri' is chosen to eliminate dependency on input data, even
- * though it results in redundant copy in al<ri case. */
- for (i=0,ri-=4; i<ri; i+=4)
+ for (i=0,nl-=4; i<nl; i+=4)
{
BN_ULONG t1,t2,t3,t4;
@@ -295,40 +262,15 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
rp[i+2]=t3;
rp[i+3]=t4;
}
- for (ri+=4; i<ri; i++)
+ for (nl+=4; i<nl; i++)
rp[i]=nrp[i], ap[i]=0;
- bn_correct_top(r);
- bn_correct_top(ret);
+ }
#else
- if (bn_wexpand(ret,al) == NULL) return(0);
- ret->top=al;
- ret->neg=r->neg;
-
- rp=ret->d;
- ap=&(r->d[ri]);
- al-=4;
- for (i=0; i<al; i+=4)
- {
- BN_ULONG t1,t2,t3,t4;
-
- t1=ap[i+0];
- t2=ap[i+1];
- t3=ap[i+2];
- t4=ap[i+3];
- rp[i+0]=t1;
- rp[i+1]=t2;
- rp[i+2]=t3;
- rp[i+3]=t4;
- }
- al+=4;
- for (; i<al; i++)
- rp[i]=ap[i];
-
- if (BN_ucmp(ret, &(mont->N)) >= 0)
- {
- if (!BN_usub(ret,ret,&(mont->N))) return(0);
- }
+ if (bn_sub_words (rp,ap,np,nl)-carry)
+ memcpy(rp,ap,nl*sizeof(BN_ULONG));
#endif
+ bn_correct_top(r);
+ bn_correct_top(ret);
bn_check_top(ret);
return(1);
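The rewritten BN_from_montgomery_word replaces the old nrp carry-propagation loop with a separately tracked carry word and a simpler masked copy at the end. A minimal sketch, assuming 64-bit limbs and a compiler __int128 type and deliberately not the OpenSSL code, of the word-serial reduction it performs:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Minimal sketch (not the OpenSSL code) of word-serial Montgomery
 * reduction.  Input t has 2*num limbs and satisfies t < n*R with
 * R = 2^(64*num); n0 is -n[0]^{-1} mod 2^64.  On return r holds a
 * value congruent to t*R^{-1} mod n; the final "subtract n once if
 * still >= n" step is only indicated in a comment, since the patched
 * code performs it branch-free with bn_sub_words and a pointer mask. */
static void mont_reduce(uint64_t *r, uint64_t *t, const uint64_t *n,
			uint64_t n0, size_t num)
	{
	size_t i, j;
	uint64_t top = 0;	/* carry that has outgrown t[] */

	for (i = 0; i < num; i++)
		{
		uint64_t m = t[i] * n0;	/* makes t[i] + m*n[0] == 0 mod 2^64 */
		uint64_t carry = 0;
		unsigned __int128 acc;

		for (j = 0; j < num; j++)
			{
			acc = (unsigned __int128)m*n[j] + t[i+j] + carry;
			t[i+j] = (uint64_t)acc;
			carry  = (uint64_t)(acc >> 64);
			}
		acc = (unsigned __int128)t[i+num] + carry + top;
		t[i+num] = (uint64_t)acc;
		top = (uint64_t)(acc >> 64);
		}
	memcpy(r, t + num, num*sizeof(uint64_t));
	/* omitted: if (top || r >= n) subtract n once, without branches */
	}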
diff --git a/openssl/crypto/bn/bn_nist.c b/openssl/crypto/bn/bn_nist.c
index c6de03269..43caee477 100644
--- a/openssl/crypto/bn/bn_nist.c
+++ b/openssl/crypto/bn/bn_nist.c
@@ -319,6 +319,13 @@ static void nist_cp_bn(BN_ULONG *buf, BN_ULONG *a, int top)
:(to[(n)/2] =((m)&1)?(from[(m)/2]>>32):(from[(m)/2]&BN_MASK2l)))
#define bn_32_set_0(to, n) (((n)&1)?(to[(n)/2]&=BN_MASK2l):(to[(n)/2]=0));
#define bn_cp_32(to,n,from,m) ((m)>=0)?bn_cp_32_naked(to,n,from,m):bn_32_set_0(to,n)
+# if defined(L_ENDIAN)
+# if defined(__arch64__)
+# define NIST_INT64 long
+# else
+# define NIST_INT64 long long
+# endif
+# endif
#else
#define bn_cp_64(to, n, from, m) \
{ \
@@ -330,13 +337,15 @@ static void nist_cp_bn(BN_ULONG *buf, BN_ULONG *a, int top)
bn_32_set_0(to, (n)*2); \
bn_32_set_0(to, (n)*2+1); \
}
-#if BN_BITS2 == 32
#define bn_cp_32(to, n, from, m) (to)[n] = (m>=0)?((from)[m]):0;
#define bn_32_set_0(to, n) (to)[n] = (BN_ULONG)0;
-#endif
+# if defined(_WIN32) && !defined(__GNUC__)
+# define NIST_INT64 __int64
+# elif defined(BN_LLONG)
+# define NIST_INT64 long long
+# endif
#endif /* BN_BITS2 != 64 */
-
#define nist_set_192(to, from, a1, a2, a3) \
{ \
bn_cp_64(to, 0, from, (a3) - 3) \
@@ -350,9 +359,11 @@ int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int top = a->top, i;
int carry;
register BN_ULONG *r_d, *a_d = a->d;
- BN_ULONG t_d[BN_NIST_192_TOP],
- buf[BN_NIST_192_TOP],
- c_d[BN_NIST_192_TOP],
+ union {
+ BN_ULONG bn[BN_NIST_192_TOP];
+ unsigned int ui[BN_NIST_192_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_192_TOP],
*res;
PTR_SIZE_INT mask;
static const BIGNUM _bignum_nist_p_192_sqr = {
@@ -385,15 +396,48 @@ int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else
r_d = a_d;
- nist_cp_bn_0(buf, a_d + BN_NIST_192_TOP, top - BN_NIST_192_TOP, BN_NIST_192_TOP);
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_192_TOP, top - BN_NIST_192_TOP, BN_NIST_192_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[3*2-6];
+ acc += bp[5*2-6]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[3*2-5];
+ acc += bp[5*2-5]; rp[1] = (unsigned int)acc; acc >>= 32;
- nist_set_192(t_d, buf, 0, 3, 3);
+ acc += rp[2]; acc += bp[3*2-6];
+ acc += bp[4*2-6];
+ acc += bp[5*2-6]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[3*2-5];
+ acc += bp[4*2-5];
+ acc += bp[5*2-5]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[4*2-6];
+ acc += bp[5*2-6]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[4*2-5];
+ acc += bp[5*2-5]; rp[5] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_192_TOP];
+
+ nist_set_192(t_d, buf.bn, 0, 3, 3);
carry = (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
- nist_set_192(t_d, buf, 4, 4, 0);
+ nist_set_192(t_d, buf.bn, 4, 4, 0);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
- nist_set_192(t_d, buf, 5, 5, 5)
+ nist_set_192(t_d, buf.bn, 5, 5, 5)
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
-
+ }
+#endif
if (carry > 0)
carry = (int)bn_sub_words(r_d,r_d,_nist_p_192[carry-1],BN_NIST_192_TOP);
else
@@ -435,8 +479,7 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int top = a->top, i;
int carry;
BN_ULONG *r_d, *a_d = a->d;
- BN_ULONG t_d[BN_NIST_224_TOP],
- buf[BN_NIST_224_TOP],
+ BN_ULONG buf[BN_NIST_224_TOP],
c_d[BN_NIST_224_TOP],
*res;
PTR_SIZE_INT mask;
@@ -474,14 +517,54 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
#if BN_BITS2==64
/* copy upper 256 bits of 448 bit number ... */
- nist_cp_bn_0(t_d, a_d + (BN_NIST_224_TOP-1), top - (BN_NIST_224_TOP-1), BN_NIST_224_TOP);
+ nist_cp_bn_0(c_d, a_d + (BN_NIST_224_TOP-1), top - (BN_NIST_224_TOP-1), BN_NIST_224_TOP);
/* ... and right shift by 32 to obtain upper 224 bits */
- nist_set_224(buf, t_d, 14, 13, 12, 11, 10, 9, 8);
+ nist_set_224(buf, c_d, 14, 13, 12, 11, 10, 9, 8);
/* truncate lower part to 224 bits too */
r_d[BN_NIST_224_TOP-1] &= BN_MASK2l;
#else
nist_cp_bn_0(buf, a_d + BN_NIST_224_TOP, top - BN_NIST_224_TOP, BN_NIST_224_TOP);
#endif
+
+#if defined(NIST_INT64) && BN_BITS2!=64
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf;
+
+ acc = rp[0]; acc -= bp[7-7];
+ acc -= bp[11-7]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc -= bp[8-7];
+ acc -= bp[12-7]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc -= bp[9-7];
+ acc -= bp[13-7]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[7-7];
+ acc += bp[11-7];
+ acc -= bp[10-7]; rp[3] = (unsigned int)acc; acc>>= 32;
+
+ acc += rp[4]; acc += bp[8-7];
+ acc += bp[12-7];
+ acc -= bp[11-7]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[9-7];
+ acc += bp[13-7];
+ acc -= bp[12-7]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[10-7];
+ acc -= bp[13-7]; rp[6] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+# if BN_BITS2==64
+ rp[7] = carry;
+# endif
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_224_TOP];
+
nist_set_224(t_d, buf, 10, 9, 8, 7, 0, 0, 0);
carry = (int)bn_add_words(r_d, r_d, t_d, BN_NIST_224_TOP);
nist_set_224(t_d, buf, 0, 13, 12, 11, 0, 0, 0);
@@ -494,6 +577,8 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
#if BN_BITS2==64
carry = (int)(r_d[BN_NIST_224_TOP-1]>>32);
#endif
+ }
+#endif
u.f = bn_sub_words;
if (carry > 0)
{
@@ -548,9 +633,11 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int i, top = a->top;
int carry = 0;
register BN_ULONG *a_d = a->d, *r_d;
- BN_ULONG t_d[BN_NIST_256_TOP],
- buf[BN_NIST_256_TOP],
- c_d[BN_NIST_256_TOP],
+ union {
+ BN_ULONG bn[BN_NIST_256_TOP];
+ unsigned int ui[BN_NIST_256_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_256_TOP],
*res;
PTR_SIZE_INT mask;
union { bn_addsub_f f; PTR_SIZE_INT p; } u;
@@ -584,12 +671,87 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else
r_d = a_d;
- nist_cp_bn_0(buf, a_d + BN_NIST_256_TOP, top - BN_NIST_256_TOP, BN_NIST_256_TOP);
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_256_TOP, top - BN_NIST_256_TOP, BN_NIST_256_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[8-8];
+ acc += bp[9-8];
+ acc -= bp[11-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[9-8];
+ acc += bp[10-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8];
+ acc -= bp[15-8]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[10-8];
+ acc += bp[11-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8];
+ acc -= bp[15-8]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[11-8];
+ acc += bp[11-8];
+ acc += bp[12-8];
+ acc += bp[12-8];
+ acc += bp[13-8];
+ acc -= bp[15-8];
+ acc -= bp[8-8];
+ acc -= bp[9-8]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[12-8];
+ acc += bp[12-8];
+ acc += bp[13-8];
+ acc += bp[13-8];
+ acc += bp[14-8];
+ acc -= bp[9-8];
+ acc -= bp[10-8]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[13-8];
+ acc += bp[13-8];
+ acc += bp[14-8];
+ acc += bp[14-8];
+ acc += bp[15-8];
+ acc -= bp[10-8];
+ acc -= bp[11-8]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[14-8];
+ acc += bp[14-8];
+ acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[14-8];
+ acc += bp[13-8];
+ acc -= bp[8-8];
+ acc -= bp[9-8]; rp[6] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[7]; acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[8 -8];
+ acc -= bp[10-8];
+ acc -= bp[11-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8]; rp[7] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_256_TOP];
/*S1*/
- nist_set_256(t_d, buf, 15, 14, 13, 12, 11, 0, 0, 0);
+ nist_set_256(t_d, buf.bn, 15, 14, 13, 12, 11, 0, 0, 0);
/*S2*/
- nist_set_256(c_d, buf, 0, 15, 14, 13, 12, 0, 0, 0);
+ nist_set_256(c_d, buf.bn, 0, 15, 14, 13, 12, 0, 0, 0);
carry = (int)bn_add_words(t_d, t_d, c_d, BN_NIST_256_TOP);
/* left shift */
{
@@ -607,24 +769,26 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
}
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*S3*/
- nist_set_256(t_d, buf, 15, 14, 0, 0, 0, 10, 9, 8);
+ nist_set_256(t_d, buf.bn, 15, 14, 0, 0, 0, 10, 9, 8);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*S4*/
- nist_set_256(t_d, buf, 8, 13, 15, 14, 13, 11, 10, 9);
+ nist_set_256(t_d, buf.bn, 8, 13, 15, 14, 13, 11, 10, 9);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D1*/
- nist_set_256(t_d, buf, 10, 8, 0, 0, 0, 13, 12, 11);
+ nist_set_256(t_d, buf.bn, 10, 8, 0, 0, 0, 13, 12, 11);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D2*/
- nist_set_256(t_d, buf, 11, 9, 0, 0, 15, 14, 13, 12);
+ nist_set_256(t_d, buf.bn, 11, 9, 0, 0, 15, 14, 13, 12);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D3*/
- nist_set_256(t_d, buf, 12, 0, 10, 9, 8, 15, 14, 13);
+ nist_set_256(t_d, buf.bn, 12, 0, 10, 9, 8, 15, 14, 13);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D4*/
- nist_set_256(t_d, buf, 13, 0, 11, 10, 9, 0, 15, 14);
+ nist_set_256(t_d, buf.bn, 13, 0, 11, 10, 9, 0, 15, 14);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ }
+#endif
/* see BN_nist_mod_224 for explanation */
u.f = bn_sub_words;
if (carry > 0)
@@ -672,9 +836,11 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int i, top = a->top;
int carry = 0;
register BN_ULONG *r_d, *a_d = a->d;
- BN_ULONG t_d[BN_NIST_384_TOP],
- buf[BN_NIST_384_TOP],
- c_d[BN_NIST_384_TOP],
+ union {
+ BN_ULONG bn[BN_NIST_384_TOP];
+ unsigned int ui[BN_NIST_384_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_384_TOP],
*res;
PTR_SIZE_INT mask;
union { bn_addsub_f f; PTR_SIZE_INT p; } u;
@@ -709,10 +875,100 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else
r_d = a_d;
- nist_cp_bn_0(buf, a_d + BN_NIST_384_TOP, top - BN_NIST_384_TOP, BN_NIST_384_TOP);
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_384_TOP, top - BN_NIST_384_TOP, BN_NIST_384_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[12-12];
+ acc += bp[21-12];
+ acc += bp[20-12];
+ acc -= bp[23-12]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[13-12];
+ acc += bp[22-12];
+ acc += bp[23-12];
+ acc -= bp[12-12];
+ acc -= bp[20-12]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[14-12];
+ acc += bp[23-12];
+ acc -= bp[13-12];
+ acc -= bp[21-12]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[15-12];
+ acc += bp[12-12];
+ acc += bp[20-12];
+ acc += bp[21-12];
+ acc -= bp[14-12];
+ acc -= bp[22-12];
+ acc -= bp[23-12]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[21-12];
+ acc += bp[21-12];
+ acc += bp[16-12];
+ acc += bp[13-12];
+ acc += bp[12-12];
+ acc += bp[20-12];
+ acc += bp[22-12];
+ acc -= bp[15-12];
+ acc -= bp[23-12];
+ acc -= bp[23-12]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[22-12];
+ acc += bp[22-12];
+ acc += bp[17-12];
+ acc += bp[14-12];
+ acc += bp[13-12];
+ acc += bp[21-12];
+ acc += bp[23-12];
+ acc -= bp[16-12]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[23-12];
+ acc += bp[23-12];
+ acc += bp[18-12];
+ acc += bp[15-12];
+ acc += bp[14-12];
+ acc += bp[22-12];
+ acc -= bp[17-12]; rp[6] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[7]; acc += bp[19-12];
+ acc += bp[16-12];
+ acc += bp[15-12];
+ acc += bp[23-12];
+ acc -= bp[18-12]; rp[7] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[8]; acc += bp[20-12];
+ acc += bp[17-12];
+ acc += bp[16-12];
+ acc -= bp[19-12]; rp[8] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[9]; acc += bp[21-12];
+ acc += bp[18-12];
+ acc += bp[17-12];
+ acc -= bp[20-12]; rp[9] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[10]; acc += bp[22-12];
+ acc += bp[19-12];
+ acc += bp[18-12];
+ acc -= bp[21-12]; rp[10] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[11]; acc += bp[23-12];
+ acc += bp[20-12];
+ acc += bp[19-12];
+ acc -= bp[22-12]; rp[11] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_384_TOP];
/*S1*/
- nist_set_256(t_d, buf, 0, 0, 0, 0, 0, 23-4, 22-4, 21-4);
+ nist_set_256(t_d, buf.bn, 0, 0, 0, 0, 0, 23-4, 22-4, 21-4);
/* left shift */
{
register BN_ULONG *ap,t,c;
@@ -729,29 +985,31 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
carry = (int)bn_add_words(r_d+(128/BN_BITS2), r_d+(128/BN_BITS2),
t_d, BN_NIST_256_TOP);
/*S2 */
- carry += (int)bn_add_words(r_d, r_d, buf, BN_NIST_384_TOP);
+ carry += (int)bn_add_words(r_d, r_d, buf.bn, BN_NIST_384_TOP);
/*S3*/
- nist_set_384(t_d,buf,20,19,18,17,16,15,14,13,12,23,22,21);
+ nist_set_384(t_d,buf.bn,20,19,18,17,16,15,14,13,12,23,22,21);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*S4*/
- nist_set_384(t_d,buf,19,18,17,16,15,14,13,12,20,0,23,0);
+ nist_set_384(t_d,buf.bn,19,18,17,16,15,14,13,12,20,0,23,0);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*S5*/
- nist_set_384(t_d, buf,0,0,0,0,23,22,21,20,0,0,0,0);
+ nist_set_384(t_d, buf.bn,0,0,0,0,23,22,21,20,0,0,0,0);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*S6*/
- nist_set_384(t_d,buf,0,0,0,0,0,0,23,22,21,0,0,20);
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,23,22,21,0,0,20);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*D1*/
- nist_set_384(t_d,buf,22,21,20,19,18,17,16,15,14,13,12,23);
+ nist_set_384(t_d,buf.bn,22,21,20,19,18,17,16,15,14,13,12,23);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*D2*/
- nist_set_384(t_d,buf,0,0,0,0,0,0,0,23,22,21,20,0);
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,0,23,22,21,20,0);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*D3*/
- nist_set_384(t_d,buf,0,0,0,0,0,0,0,23,23,0,0,0);
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,0,23,23,0,0,0);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ }
+#endif
/* see BN_nist_mod_224 for explanation */
u.f = bn_sub_words;
if (carry > 0)
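Each of the NIST reduction routines gains a NIST_INT64 path that walks the operand in 32-bit limbs and keeps the fold terms and the running carry in a single 64-bit accumulator. A minimal sketch of the same folding for P-192 on 64-bit limbs, not the OpenSSL code: since 2^192 == 2^64 + 1 (mod p), the upper three limbs fold back onto the lower three.

#include <stdint.h>

/* Minimal sketch (not the OpenSSL code): p = 2^192 - 2^64 - 1, so a
 * 384-bit value a[0..5] (64-bit little-endian limbs) reduces to
 * (a2,a1,a0) + (0,a3,a3) + (a4,a4,0) + (a5,a5,a5).  The returned carry
 * counts how many 2^192's are still outstanding; the real code then
 * subtracts a precomputed multiple of p selected by that count and
 * finishes with a masked subtraction. */
static uint64_t p192_fold(uint64_t r[3], const uint64_t a[6])
	{
	unsigned __int128 acc;

	acc  = (unsigned __int128)a[0] + a[3] + a[5];
	r[0] = (uint64_t)acc; acc >>= 64;

	acc += (unsigned __int128)a[1] + a[3] + a[4] + a[5];
	r[1] = (uint64_t)acc; acc >>= 64;

	acc += (unsigned __int128)a[2] + a[4] + a[5];
	r[2] = (uint64_t)acc;

	return (uint64_t)(acc >> 64);
	}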
diff --git a/openssl/crypto/bn/bn_print.c b/openssl/crypto/bn/bn_print.c
index bebb466d0..1743b6a7e 100644
--- a/openssl/crypto/bn/bn_print.c
+++ b/openssl/crypto/bn/bn_print.c
@@ -357,3 +357,22 @@ end:
return(ret);
}
#endif
+
+char *BN_options(void)
+ {
+ static int init=0;
+ static char data[16];
+
+ if (!init)
+ {
+ init++;
+#ifdef BN_LLONG
+ BIO_snprintf(data,sizeof data,"bn(%d,%d)",
+ (int)sizeof(BN_ULLONG)*8,(int)sizeof(BN_ULONG)*8);
+#else
+ BIO_snprintf(data,sizeof data,"bn(%d,%d)",
+ (int)sizeof(BN_ULONG)*8,(int)sizeof(BN_ULONG)*8);
+#endif
+ }
+ return(data);
+ }
diff --git a/openssl/crypto/bn/bn_shift.c b/openssl/crypto/bn/bn_shift.c
index c4d301afc..a6fca2c42 100644
--- a/openssl/crypto/bn/bn_shift.c
+++ b/openssl/crypto/bn/bn_shift.c
@@ -99,7 +99,7 @@ int BN_lshift1(BIGNUM *r, const BIGNUM *a)
int BN_rshift1(BIGNUM *r, const BIGNUM *a)
{
BN_ULONG *ap,*rp,t,c;
- int i;
+ int i,j;
bn_check_top(r);
bn_check_top(a);
@@ -109,22 +109,25 @@ int BN_rshift1(BIGNUM *r, const BIGNUM *a)
BN_zero(r);
return(1);
}
+ i = a->top;
+ ap= a->d;
+ j = i-(ap[i-1]==1);
if (a != r)
{
- if (bn_wexpand(r,a->top) == NULL) return(0);
- r->top=a->top;
+ if (bn_wexpand(r,j) == NULL) return(0);
r->neg=a->neg;
}
- ap=a->d;
rp=r->d;
- c=0;
- for (i=a->top-1; i>=0; i--)
+ t=ap[--i];
+ c=(t&1)?BN_TBIT:0;
+ if (t>>=1) rp[i]=t;
+ while (i>0)
{
- t=ap[i];
+ t=ap[--i];
rp[i]=((t>>1)&BN_MASK2)|c;
c=(t&1)?BN_TBIT:0;
}
- bn_correct_top(r);
+ r->top=j;
bn_check_top(r);
return(1);
}
@@ -182,10 +185,11 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
BN_zero(r);
return(1);
}
+ i = (BN_num_bits(a)-n+(BN_BITS2-1))/BN_BITS2;
if (r != a)
{
r->neg=a->neg;
- if (bn_wexpand(r,a->top-nw+1) == NULL) return(0);
+ if (bn_wexpand(r,i) == NULL) return(0);
}
else
{
@@ -196,7 +200,7 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
f= &(a->d[nw]);
t=r->d;
j=a->top-nw;
- r->top=j;
+ r->top=i;
if (rb == 0)
{
@@ -212,9 +216,8 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
l= *(f++);
*(t++) =(tmp|(l<<lb))&BN_MASK2;
}
- *(t++) =(l>>rb)&BN_MASK2;
+ if ((l = (l>>rb)&BN_MASK2)) *(t) = l;
}
- bn_correct_top(r);
bn_check_top(r);
return(1);
}
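Both BN_rshift1 and BN_rshift now size the result exactly from the bit length instead of writing a possibly-zero top word and calling bn_correct_top afterwards. A minimal sketch of that shape for a plain little-endian word array, not the OpenSSL code and assuming a normalized, non-zero input:

#include <stddef.h>
#include <stdint.h>

/* Minimal sketch (not the OpenSSL code): shift a 'top'-word value right
 * by n bits and return the exact number of result words, computed up
 * front from the bit length.  Assumes a[top-1] != 0. */
static size_t rshift_words(uint64_t *r, const uint64_t *a, size_t top,
			   unsigned n)
	{
	size_t nw = n / 64, i, j, rtop;
	unsigned rb = n % 64;
	size_t abits = top * 64;
	uint64_t hi = a[top-1];

	while (!(hi >> 63))		/* open-coded BN_num_bits() */
		{
		hi <<= 1;
		abits--;
		}
	if (abits <= n)
		return 0;		/* everything shifted out */
	rtop = (abits - n + 63) / 64;	/* exact output word count */

	for (i = 0, j = nw; i < rtop; i++, j++)
		{
		uint64_t lo = a[j] >> rb;
		uint64_t up = (rb && j + 1 < top) ? a[j+1] << (64 - rb) : 0;
		r[i] = lo | up;
		}
	return rtop;
	}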
diff --git a/openssl/crypto/bn/bn_x931p.c b/openssl/crypto/bn/bn_x931p.c
new file mode 100644
index 000000000..04c5c874e
--- /dev/null
+++ b/openssl/crypto/bn/bn_x931p.c
@@ -0,0 +1,272 @@
+/* bn_x931p.c */
+/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
+ * project 2005.
+ */
+/* ====================================================================
+ * Copyright (c) 2005 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include <stdio.h>
+#include <openssl/bn.h>
+
+/* X9.31 routines for prime derivation */
+
+/* X9.31 prime derivation. This is used to generate the primes pi
+ * (p1, p2, q1, q2) from a parameter Xpi by checking successive odd
+ * integers.
+ */
+
+static int bn_x931_derive_pi(BIGNUM *pi, const BIGNUM *Xpi, BN_CTX *ctx,
+ BN_GENCB *cb)
+ {
+ int i = 0;
+ if (!BN_copy(pi, Xpi))
+ return 0;
+ if (!BN_is_odd(pi) && !BN_add_word(pi, 1))
+ return 0;
+ for(;;)
+ {
+ i++;
+ BN_GENCB_call(cb, 0, i);
+ /* NB 27 MR is specified in X9.31 */
+ if (BN_is_prime_fasttest_ex(pi, 27, ctx, 1, cb))
+ break;
+ if (!BN_add_word(pi, 2))
+ return 0;
+ }
+ BN_GENCB_call(cb, 2, i);
+ return 1;
+ }
+
+/* This is the main X9.31 prime derivation function. From parameters
+ * Xp1, Xp2 and Xp derive the prime p. If the parameters p1 or p2 are
+ * not NULL they will be returned too: this is needed for testing.
+ */
+
+int BN_X931_derive_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
+ const BIGNUM *Xp, const BIGNUM *Xp1, const BIGNUM *Xp2,
+ const BIGNUM *e, BN_CTX *ctx, BN_GENCB *cb)
+ {
+ int ret = 0;
+
+ BIGNUM *t, *p1p2, *pm1;
+
+ /* Only odd e supported */
+ if (!BN_is_odd(e))
+ return 0;
+
+ BN_CTX_start(ctx);
+ if (!p1)
+ p1 = BN_CTX_get(ctx);
+
+ if (!p2)
+ p2 = BN_CTX_get(ctx);
+
+ t = BN_CTX_get(ctx);
+
+ p1p2 = BN_CTX_get(ctx);
+
+ pm1 = BN_CTX_get(ctx);
+
+ if (!bn_x931_derive_pi(p1, Xp1, ctx, cb))
+ goto err;
+
+ if (!bn_x931_derive_pi(p2, Xp2, ctx, cb))
+ goto err;
+
+ if (!BN_mul(p1p2, p1, p2, ctx))
+ goto err;
+
+ /* First set p to value of Rp */
+
+ if (!BN_mod_inverse(p, p2, p1, ctx))
+ goto err;
+
+ if (!BN_mul(p, p, p2, ctx))
+ goto err;
+
+ if (!BN_mod_inverse(t, p1, p2, ctx))
+ goto err;
+
+ if (!BN_mul(t, t, p1, ctx))
+ goto err;
+
+ if (!BN_sub(p, p, t))
+ goto err;
+
+ if (p->neg && !BN_add(p, p, p1p2))
+ goto err;
+
+ /* p now equals Rp */
+
+ if (!BN_mod_sub(p, p, Xp, p1p2, ctx))
+ goto err;
+
+ if (!BN_add(p, p, Xp))
+ goto err;
+
+ /* p now equals Yp0 */
+
+ for (;;)
+ {
+ int i = 1;
+ BN_GENCB_call(cb, 0, i++);
+ if (!BN_copy(pm1, p))
+ goto err;
+ if (!BN_sub_word(pm1, 1))
+ goto err;
+ if (!BN_gcd(t, pm1, e, ctx))
+ goto err;
+ if (BN_is_one(t)
+ /* X9.31 specifies 8 MR and 1 Lucas test or any prime test
+ * offering similar or better guarantees; 50 MR is considerably
+ * better.
+ */
+ && BN_is_prime_fasttest_ex(p, 50, ctx, 1, cb))
+ break;
+ if (!BN_add(p, p, p1p2))
+ goto err;
+ }
+
+ BN_GENCB_call(cb, 3, 0);
+
+ ret = 1;
+
+ err:
+
+ BN_CTX_end(ctx);
+
+ return ret;
+ }
+
+/* Generate pair of parameters Xp, Xq for X9.31 prime generation.
+ * Note: nbits parameter is sum of number of bits in both.
+ */
+
+int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx)
+ {
+ BIGNUM *t;
+ int i;
+ /* Number of bits for each prime is of the form
+ * 512+128s for s = 0, 1, ...
+ */
+ if ((nbits < 1024) || (nbits & 0xff))
+ return 0;
+ nbits >>= 1;
+ /* The random value Xp must be between sqrt(2) * 2^(nbits-1) and
+ * 2^nbits - 1. By setting the top two bits we ensure that the lower
+ * bound is exceeded.
+ */
+ if (!BN_rand(Xp, nbits, 1, 0))
+ return 0;
+
+ BN_CTX_start(ctx);
+ t = BN_CTX_get(ctx);
+
+ for (i = 0; i < 1000; i++)
+ {
+ if (!BN_rand(Xq, nbits, 1, 0))
+ return 0;
+ /* Check that |Xp - Xq| > 2^(nbits - 100) */
+ BN_sub(t, Xp, Xq);
+ if (BN_num_bits(t) > (nbits - 100))
+ break;
+ }
+
+ BN_CTX_end(ctx);
+
+ if (i < 1000)
+ return 1;
+
+ return 0;
+
+ }
+
+/* Generate primes using X9.31 algorithm. Of the values p, p1, p2, Xp1
+ * and Xp2 only 'p' needs to be non-NULL. If any of the others are not NULL
+ * the relevant parameter will be stored in it.
+ *
+ * Due to the fact that |Xp - Xq| > 2^(nbits - 100) must be satisfied Xp and Xq
+ * are generated using the previous function and supplied as input.
+ */
+
+int BN_X931_generate_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
+ BIGNUM *Xp1, BIGNUM *Xp2,
+ const BIGNUM *Xp,
+ const BIGNUM *e, BN_CTX *ctx,
+ BN_GENCB *cb)
+ {
+ int ret = 0;
+
+ BN_CTX_start(ctx);
+ if (!Xp1)
+ Xp1 = BN_CTX_get(ctx);
+ if (!Xp2)
+ Xp2 = BN_CTX_get(ctx);
+
+ if (!BN_rand(Xp1, 101, 0, 0))
+ goto error;
+ if (!BN_rand(Xp2, 101, 0, 0))
+ goto error;
+ if (!BN_X931_derive_prime_ex(p, p1, p2, Xp, Xp1, Xp2, e, ctx, cb))
+ goto error;
+
+ ret = 1;
+
+ error:
+ BN_CTX_end(ctx);
+
+ return ret;
+
+ }
+
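Taken together, the new file provides the X9.31 flow: BN_X931_generate_Xpq picks the random seeds Xp and Xq with |Xp - Xq| > 2^(nbits-100), and BN_X931_generate_prime_ex derives a prime from one of them. A usage sketch built only on the functions added above; the wrapper name and the choice of 2048 bits are illustrative, and e must be odd (65537, say):

#include <openssl/bn.h>

/* Usage sketch for the new X9.31 routines: generate Xp and Xq for a
 * 2048-bit modulus, then derive the prime p from Xp.  Only 'p' needs
 * to be non-NULL in BN_X931_generate_prime_ex, and a NULL BN_GENCB is
 * accepted.  Xq would seed the matching derivation of q and is unused
 * in this sketch. */
static BIGNUM *x931_prime(const BIGNUM *e, BN_CTX *ctx)
	{
	BIGNUM *p = BN_new(), *Xp = BN_new(), *Xq = BN_new();

	if (p == NULL || Xp == NULL || Xq == NULL)
		goto err;
	if (!BN_X931_generate_Xpq(Xp, Xq, 2048, ctx))
		goto err;
	if (!BN_X931_generate_prime_ex(p, NULL, NULL, NULL, NULL, Xp, e,
				       ctx, NULL))
		goto err;
	BN_free(Xp);
	BN_free(Xq);
	return p;
err:
	BN_free(p);
	BN_free(Xp);
	BN_free(Xq);
	return NULL;
	}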
diff --git a/openssl/crypto/bn/bntest.c b/openssl/crypto/bn/bntest.c
index 0cd99c5b4..06f5954ac 100644
--- a/openssl/crypto/bn/bntest.c
+++ b/openssl/crypto/bn/bntest.c
@@ -262,7 +262,7 @@ int main(int argc, char *argv[])
message(out,"BN_mod_sqrt");
if (!test_sqrt(out,ctx)) goto err;
(void)BIO_flush(out);
-
+#ifndef OPENSSL_NO_EC2M
message(out,"BN_GF2m_add");
if (!test_gf2m_add(out)) goto err;
(void)BIO_flush(out);
@@ -298,7 +298,7 @@ int main(int argc, char *argv[])
message(out,"BN_GF2m_mod_solve_quad");
if (!test_gf2m_mod_solve_quad(out,ctx)) goto err;
(void)BIO_flush(out);
-
+#endif
BN_CTX_free(ctx);
BIO_free(out);
@@ -1061,7 +1061,7 @@ int test_exp(BIO *bp, BN_CTX *ctx)
BN_free(one);
return(1);
}
-
+#ifndef OPENSSL_NO_EC2M
int test_gf2m_add(BIO *bp)
{
BIGNUM a,b,c;
@@ -1636,7 +1636,7 @@ int test_gf2m_mod_solve_quad(BIO *bp,BN_CTX *ctx)
BN_free(e);
return ret;
}
-
+#endif
static int genprime_cb(int p, int n, BN_GENCB *arg)
{
char c='*';