author    marha <marha@users.sourceforge.net>  2012-04-10 11:41:26 +0200
committer marha <marha@users.sourceforge.net>  2012-04-10 11:41:26 +0200
commit    67326634496ef21b4acbf4cef2f05040d34aef9b (patch)
tree      f19fba7c7b691e44cd97482644e383e09ab98c49 /openssl/crypto/aes/asm
parent    c6f80401dc533b04341afe8d596960d1bc25efce (diff)
download  vcxsrv-67326634496ef21b4acbf4cef2f05040d34aef9b.tar.gz
          vcxsrv-67326634496ef21b4acbf4cef2f05040d34aef9b.tar.bz2
          vcxsrv-67326634496ef21b4acbf4cef2f05040d34aef9b.zip
Update to openssl-1.0.1
Diffstat (limited to 'openssl/crypto/aes/asm')
-rw-r--r--  openssl/crypto/aes/asm/aes-586.pl            |   14
-rw-r--r--  openssl/crypto/aes/asm/aes-armv4.pl          |  181
-rw-r--r--  openssl/crypto/aes/asm/aes-mips.pl           | 1611
-rw-r--r--  openssl/crypto/aes/asm/aes-parisc.pl         | 1021
-rw-r--r--  openssl/crypto/aes/asm/aes-ppc.pl            |  444
-rw-r--r--  openssl/crypto/aes/asm/aes-s390x.pl          | 1056
-rw-r--r--  openssl/crypto/aes/asm/aes-x86_64.pl         |   45
-rw-r--r--  openssl/crypto/aes/asm/aesni-sha1-x86_64.pl  | 1249
-rw-r--r--  openssl/crypto/aes/asm/aesni-x86.pl          | 2189
-rw-r--r--  openssl/crypto/aes/asm/aesni-x86_64.pl       | 3068
-rw-r--r--  openssl/crypto/aes/asm/bsaes-x86_64.pl       | 3004
-rw-r--r--  openssl/crypto/aes/asm/vpaes-x86.pl          |  901
-rw-r--r--  openssl/crypto/aes/asm/vpaes-x86_64.pl       | 1204
13 files changed, 15720 insertions(+), 267 deletions(-)
diff --git a/openssl/crypto/aes/asm/aes-586.pl b/openssl/crypto/aes/asm/aes-586.pl
index aab40e6f1..687ed811b 100644
--- a/openssl/crypto/aes/asm/aes-586.pl
+++ b/openssl/crypto/aes/asm/aes-586.pl
@@ -39,7 +39,7 @@
# but exhibits up to 10% improvement on other cores.
#
# Second version is "monolithic" replacement for aes_core.c, which in
-# addition to AES_[de|en]crypt implements AES_set_[de|en]cryption_key.
+# addition to AES_[de|en]crypt implements private_AES_set_[de|en]cryption_key.
# This made it possible to implement little-endian variant of the
# algorithm without modifying the base C code. Motivating factor for
# the undertaken effort was that it appeared that in tight IA-32
@@ -2854,12 +2854,12 @@ sub enckey()
&set_label("exit");
&function_end("_x86_AES_set_encrypt_key");
-# int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
-&function_begin_B("AES_set_encrypt_key");
+&function_begin_B("private_AES_set_encrypt_key");
&call ("_x86_AES_set_encrypt_key");
&ret ();
-&function_end_B("AES_set_encrypt_key");
+&function_end_B("private_AES_set_encrypt_key");
sub deckey()
{ my ($i,$key,$tp1,$tp2,$tp4,$tp8) = @_;
@@ -2916,9 +2916,9 @@ sub deckey()
&mov (&DWP(4*$i,$key),$tp1);
}
-# int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
-&function_begin_B("AES_set_decrypt_key");
+&function_begin_B("private_AES_set_decrypt_key");
&call ("_x86_AES_set_encrypt_key");
&cmp ("eax",0);
&je (&label("proceed"));
@@ -2974,7 +2974,7 @@ sub deckey()
&jb (&label("permute"));
&xor ("eax","eax"); # return success
-&function_end("AES_set_decrypt_key");
+&function_end("private_AES_set_decrypt_key");
&asciz("AES for x86, CRYPTOGAMS by <appro\@openssl.org>");
&asm_finish();
diff --git a/openssl/crypto/aes/asm/aes-armv4.pl b/openssl/crypto/aes/asm/aes-armv4.pl
index c51ee1fbf..943ce45ff 100644
--- a/openssl/crypto/aes/asm/aes-armv4.pl
+++ b/openssl/crypto/aes/asm/aes-armv4.pl
@@ -27,6 +27,11 @@
# Rescheduling for dual-issue pipeline resulted in 12% improvement on
# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~21.5 cycles per byte.
+
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
@@ -46,6 +51,7 @@ $key="r11";
$rounds="r12";
$code=<<___;
+#include "arm_arch.h"
.text
.code 32
@@ -166,7 +172,7 @@ AES_encrypt:
mov $rounds,r0 @ inp
mov $key,r2
sub $tbl,r3,#AES_encrypt-AES_Te @ Te
-
+#if __ARM_ARCH__<7
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
@@ -195,10 +201,33 @@ AES_encrypt:
orr $s3,$s3,$t1,lsl#8
orr $s3,$s3,$t2,lsl#16
orr $s3,$s3,$t3,lsl#24
-
+#else
+ ldr $s0,[$rounds,#0]
+ ldr $s1,[$rounds,#4]
+ ldr $s2,[$rounds,#8]
+ ldr $s3,[$rounds,#12]
+#ifdef __ARMEL__
+ rev $s0,$s0
+ rev $s1,$s1
+ rev $s2,$s2
+ rev $s3,$s3
+#endif
+#endif
bl _armv4_AES_encrypt
ldr $rounds,[sp],#4 @ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+ rev $s0,$s0
+ rev $s1,$s1
+ rev $s2,$s2
+ rev $s3,$s3
+#endif
+ str $s0,[$rounds,#0]
+ str $s1,[$rounds,#4]
+ str $s2,[$rounds,#8]
+ str $s3,[$rounds,#12]
+#else
mov $t1,$s0,lsr#24 @ write output in endian-neutral
mov $t2,$s0,lsr#16 @ manner...
mov $t3,$s0,lsr#8
@@ -227,11 +256,15 @@ AES_encrypt:
strb $t2,[$rounds,#13]
strb $t3,[$rounds,#14]
strb $s3,[$rounds,#15]
-
+#endif
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
+#endif
.size AES_encrypt,.-AES_encrypt
.type _armv4_AES_encrypt,%function
@@ -271,11 +304,11 @@ _armv4_AES_encrypt:
and $i2,lr,$s2,lsr#16 @ i1
eor $t3,$t3,$i3,ror#8
and $i3,lr,$s2
- eor $s1,$s1,$t1,ror#24
ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8]
+ eor $s1,$s1,$t1,ror#24
+ ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
mov $s2,$s2,lsr#24
- ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0]
eor $s0,$s0,$i1,ror#16
ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
@@ -284,16 +317,16 @@ _armv4_AES_encrypt:
and $i2,lr,$s3,lsr#8 @ i1
eor $t3,$t3,$i3,ror#16
and $i3,lr,$s3,lsr#16 @ i2
- eor $s2,$s2,$t2,ror#16
ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0]
+ eor $s2,$s2,$t2,ror#16
+ ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
mov $s3,$s3,lsr#24
- ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16]
eor $s0,$s0,$i1,ror#24
- ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
- eor $s1,$s1,$i2,ror#16
ldr $i1,[$key],#16
+ eor $s1,$s1,$i2,ror#16
+ ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
eor $s2,$s2,$i3,ror#8
ldr $t1,[$key,#-12]
eor $s3,$s3,$t3,ror#8
@@ -333,11 +366,11 @@ _armv4_AES_encrypt:
and $i2,lr,$s2,lsr#16 @ i1
eor $t3,$i3,$t3,lsl#8
and $i3,lr,$s2
- eor $s1,$t1,$s1,lsl#24
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8]
+ eor $s1,$t1,$s1,lsl#24
+ ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
mov $s2,$s2,lsr#24
- ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0]
eor $s0,$i1,$s0,lsl#8
ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
@@ -346,15 +379,15 @@ _armv4_AES_encrypt:
and $i2,lr,$s3,lsr#8 @ i1
eor $t3,$i3,$t3,lsl#8
and $i3,lr,$s3,lsr#16 @ i2
- eor $s2,$t2,$s2,lsl#24
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0]
+ eor $s2,$t2,$s2,lsl#24
+ ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
mov $s3,$s3,lsr#24
- ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16]
eor $s0,$i1,$s0,lsl#8
- ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
ldr $i1,[$key,#0]
+ ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
eor $s1,$s1,$i2,lsl#8
ldr $t1,[$key,#4]
eor $s2,$s2,$i3,lsl#16
@@ -371,10 +404,10 @@ _armv4_AES_encrypt:
ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
-.global AES_set_encrypt_key
-.type AES_set_encrypt_key,%function
+.global private_AES_set_encrypt_key
+.type private_AES_set_encrypt_key,%function
.align 5
-AES_set_encrypt_key:
+private_AES_set_encrypt_key:
sub r3,pc,#8 @ AES_set_encrypt_key
teq r0,#0
moveq r0,#-1
@@ -392,12 +425,13 @@ AES_set_encrypt_key:
bne .Labrt
.Lok: stmdb sp!,{r4-r12,lr}
- sub $tbl,r3,#AES_set_encrypt_key-AES_Te-1024 @ Te4
+ sub $tbl,r3,#private_AES_set_encrypt_key-AES_Te-1024 @ Te4
mov $rounds,r0 @ inp
mov lr,r1 @ bits
mov $key,r2 @ key
+#if __ARM_ARCH__<7
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
@@ -430,6 +464,22 @@ AES_set_encrypt_key:
orr $s3,$s3,$t3,lsl#24
str $s2,[$key,#-8]
str $s3,[$key,#-4]
+#else
+ ldr $s0,[$rounds,#0]
+ ldr $s1,[$rounds,#4]
+ ldr $s2,[$rounds,#8]
+ ldr $s3,[$rounds,#12]
+#ifdef __ARMEL__
+ rev $s0,$s0
+ rev $s1,$s1
+ rev $s2,$s2
+ rev $s3,$s3
+#endif
+ str $s0,[$key],#16
+ str $s1,[$key,#-12]
+ str $s2,[$key,#-8]
+ str $s3,[$key,#-4]
+#endif
teq lr,#128
bne .Lnot128
@@ -466,6 +516,7 @@ AES_set_encrypt_key:
b .Ldone
.Lnot128:
+#if __ARM_ARCH__<7
ldrb $i2,[$rounds,#19]
ldrb $t1,[$rounds,#18]
ldrb $t2,[$rounds,#17]
@@ -482,6 +533,16 @@ AES_set_encrypt_key:
str $i2,[$key],#8
orr $i3,$i3,$t3,lsl#24
str $i3,[$key,#-4]
+#else
+ ldr $i2,[$rounds,#16]
+ ldr $i3,[$rounds,#20]
+#ifdef __ARMEL__
+ rev $i2,$i2
+ rev $i3,$i3
+#endif
+ str $i2,[$key],#8
+ str $i3,[$key,#-4]
+#endif
teq lr,#192
bne .Lnot192
@@ -526,6 +587,7 @@ AES_set_encrypt_key:
b .L192_loop
.Lnot192:
+#if __ARM_ARCH__<7
ldrb $i2,[$rounds,#27]
ldrb $t1,[$rounds,#26]
ldrb $t2,[$rounds,#25]
@@ -542,6 +604,16 @@ AES_set_encrypt_key:
str $i2,[$key],#8
orr $i3,$i3,$t3,lsl#24
str $i3,[$key,#-4]
+#else
+ ldr $i2,[$rounds,#24]
+ ldr $i3,[$rounds,#28]
+#ifdef __ARMEL__
+ rev $i2,$i2
+ rev $i3,$i3
+#endif
+ str $i2,[$key],#8
+ str $i3,[$key,#-4]
+#endif
mov $rounds,#14
str $rounds,[$key,#240-32]
@@ -606,14 +678,14 @@ AES_set_encrypt_key:
.Labrt: tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-.size AES_set_encrypt_key,.-AES_set_encrypt_key
+.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-.global AES_set_decrypt_key
-.type AES_set_decrypt_key,%function
+.global private_AES_set_decrypt_key
+.type private_AES_set_decrypt_key,%function
.align 5
-AES_set_decrypt_key:
+private_AES_set_decrypt_key:
str lr,[sp,#-4]! @ push lr
- bl AES_set_encrypt_key
+ bl private_AES_set_encrypt_key
teq r0,#0
ldrne lr,[sp],#4 @ pop lr
bne .Labrt
@@ -692,11 +764,15 @@ $code.=<<___;
bne .Lmix
mov r0,#0
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-.size AES_set_decrypt_key,.-AES_set_decrypt_key
+#endif
+.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
.type AES_Td,%object
.align 5
@@ -811,7 +887,7 @@ AES_decrypt:
mov $rounds,r0 @ inp
mov $key,r2
sub $tbl,r3,#AES_decrypt-AES_Td @ Td
-
+#if __ARM_ARCH__<7
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
@@ -840,10 +916,33 @@ AES_decrypt:
orr $s3,$s3,$t1,lsl#8
orr $s3,$s3,$t2,lsl#16
orr $s3,$s3,$t3,lsl#24
-
+#else
+ ldr $s0,[$rounds,#0]
+ ldr $s1,[$rounds,#4]
+ ldr $s2,[$rounds,#8]
+ ldr $s3,[$rounds,#12]
+#ifdef __ARMEL__
+ rev $s0,$s0
+ rev $s1,$s1
+ rev $s2,$s2
+ rev $s3,$s3
+#endif
+#endif
bl _armv4_AES_decrypt
ldr $rounds,[sp],#4 @ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+ rev $s0,$s0
+ rev $s1,$s1
+ rev $s2,$s2
+ rev $s3,$s3
+#endif
+ str $s0,[$rounds,#0]
+ str $s1,[$rounds,#4]
+ str $s2,[$rounds,#8]
+ str $s3,[$rounds,#12]
+#else
mov $t1,$s0,lsr#24 @ write output in endian-neutral
mov $t2,$s0,lsr#16 @ manner...
mov $t3,$s0,lsr#8
@@ -872,11 +971,15 @@ AES_decrypt:
strb $t2,[$rounds,#13]
strb $t3,[$rounds,#14]
strb $s3,[$rounds,#15]
-
+#endif
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
+#endif
.size AES_decrypt,.-AES_decrypt
.type _armv4_AES_decrypt,%function
@@ -916,11 +1019,11 @@ _armv4_AES_decrypt:
and $i2,lr,$s2 @ i1
eor $t3,$i3,$t3,ror#8
and $i3,lr,$s2,lsr#16
- eor $s1,$s1,$t1,ror#8
ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8]
+ eor $s1,$s1,$t1,ror#8
+ ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
mov $s2,$s2,lsr#24
- ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16]
eor $s0,$s0,$i1,ror#16
ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
@@ -929,22 +1032,22 @@ _armv4_AES_decrypt:
and $i2,lr,$s3,lsr#8 @ i1
eor $t3,$i3,$t3,ror#8
and $i3,lr,$s3 @ i2
- eor $s2,$s2,$t2,ror#8
ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16]
+ eor $s2,$s2,$t2,ror#8
+ ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
mov $s3,$s3,lsr#24
- ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0]
eor $s0,$s0,$i1,ror#8
- ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
+ ldr $i1,[$key],#16
eor $s1,$s1,$i2,ror#16
+ ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
eor $s2,$s2,$i3,ror#24
- ldr $i1,[$key],#16
- eor $s3,$s3,$t3,ror#8
ldr $t1,[$key,#-12]
- ldr $t2,[$key,#-8]
eor $s0,$s0,$i1
+ ldr $t2,[$key,#-8]
+ eor $s3,$s3,$t3,ror#8
ldr $t3,[$key,#-4]
and $i1,lr,$s0,lsr#16
eor $s1,$s1,$t1
@@ -985,11 +1088,11 @@ _armv4_AES_decrypt:
and $i1,lr,$s2,lsr#8 @ i0
eor $t2,$t2,$i2,lsl#8
and $i2,lr,$s2 @ i1
- eor $t3,$t3,$i3,lsl#8
ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
+ eor $t3,$t3,$i3,lsl#8
+ ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
and $i3,lr,$s2,lsr#16
- ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
ldrb $s2,[$tbl,$s2,lsr#24] @ Td4[s2>>24]
eor $s0,$s0,$i1,lsl#8
ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
@@ -997,11 +1100,11 @@ _armv4_AES_decrypt:
and $i1,lr,$s3,lsr#16 @ i0
eor $s2,$t2,$s2,lsl#16
and $i2,lr,$s3,lsr#8 @ i1
- eor $t3,$t3,$i3,lsl#16
ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
+ eor $t3,$t3,$i3,lsl#16
+ ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
and $i3,lr,$s3 @ i2
- ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
ldrb $s3,[$tbl,$s3,lsr#24] @ Td4[s3>>24]
eor $s0,$s0,$i1,lsl#16
diff --git a/openssl/crypto/aes/asm/aes-mips.pl b/openssl/crypto/aes/asm/aes-mips.pl
new file mode 100644
index 000000000..2ce6deffc
--- /dev/null
+++ b/openssl/crypto/aes/asm/aes-mips.pl
@@ -0,0 +1,1611 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for MIPS
+
+# October 2010
+#
+# Code uses 1K[+256B] S-box and on single-issue core [such as R5000]
+# spends ~68 cycles per byte processed with 128-bit key. This is ~16%
+# faster than gcc-generated code, which is not very impressive. But
+# recall that compressed S-box requires extra processing, namely
+# additional rotations. Rotations are implemented with lwl/lwr pairs,
+# which is normally used for loading unaligned data. Another cool
+# thing about this module is its endian neutrality, which means that
+# it processes data without ever changing byte order...
+
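[Aside, not part of the patch: the "rotations via lwl/lwr" trick above works
because the tables at the bottom of this file are plain byte arrays, so an
lwl/lwr pair whose offsets straddle an entry boundary reads a byte-rotated
view of an aligned table word. A minimal Perl sketch of what the (3,2)
offset pair in .Loop_enc reads on a big-endian core (on little-endian
targets the offsets are remapped by the post-processing loop at the end of
this script):

    my $w   = "\xc6\x63\x63\xa5";               # Te0[0] as stored below
    my $rot = substr($w,3,1) . substr($w,0,3);  # lwl 3($i0) / lwr 2($i0)
    printf "%08x\n", unpack("N",$rot);          # a5c66363 = Te0[0] ror 8

That rotated word is exactly a Te1-style entry, which is how one 1KB table
stands in for four.]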
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange code in ABI neutral
+# manner. Therefore let's stick to NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. Following coding rules facilitate
+# interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
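[Aside, not part of the patch: the map() lines above are ordinary Perl; each
ABI register name is bound to a literal register number, so one assembly
template serves every ABI. A tiny illustration, using the NUBI argument
registers defined above:

    my ($a0,$a1,$a2,$a3) = map("\$$_",(4..7));
    print "$a0 $a1 $a2 $a3\n";   # prints: $4 $5 $6 $7
]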
+$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+ $PTR_ADD="dadd"; # incidentally works even on n32
+ $PTR_SUB="dsub"; # incidentally works even on n32
+ $REG_S="sd";
+ $REG_L="ld";
+ $PTR_SLL="dsll"; # incidentally works even on n32
+ $SZREG=8;
+} else {
+ $PTR_ADD="add";
+ $PTR_SUB="sub";
+ $REG_S="sw";
+ $REG_L="lw";
+ $PTR_SLL="sll";
+ $SZREG=4;
+}
+$pf = ($flavour =~ /nubi/i) ? $t0 : $t2;
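[Aside, not part of the patch: together with the register maps, these
$PTR_*/$REG_* variables are what keep the assembly templates below
ABI-neutral -- the same template line expands to 32- or 64-bit instructions
and frame offsets depending on $flavour. A minimal sketch with illustrative
operands (the 64 and 15*$SZREG values are made up for the demo):

    for my $flavour ("o32","64") {
        my ($PTR_SUB,$REG_S,$SZREG) =
            $flavour =~ /64|n32/i ? ("dsub","sd",8) : ("sub","sw",4);
        print "$flavour:\t$PTR_SUB \$sp,64\t$REG_S \$ra,",15*$SZREG,"(\$sp)\n";
    }
    # o32:  sub $sp,64    sw $ra,60($sp)
    # 64:   dsub $sp,64   sd $ra,120($sp)
]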
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0;
+
+for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); }
+open STDOUT,">$output";
+
+if (!defined($big_endian))
+{ $big_endian=(unpack('L',pack('N',1))==1); }
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
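[Aside, not part of the patch: two endianness probes appear here. The first
feeds the token MIPSEL through the target C preprocessor; if it comes back
unexpanded the macro is undefined, i.e. the target is big-endian. The
pack/unpack fallback (taken only if the first probe left $big_endian
undefined) tests the byte order of the build host instead:

    # pack 'N' writes 0x00000001 big-endian; unpack 'L' rereads it in
    # native order, so the test is true only on a big-endian host
    my $big_endian = (unpack('L',pack('N',1)) == 1);
    print $big_endian ? "big-endian\n" : "little-endian\n";
]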
+
+my ($MSB,$LSB)=(0,3); # automatically converted to little-endian
+
+$code.=<<___;
+.text
+#ifdef OPENSSL_FIPSCANISTER
+# include <openssl/fipssyms.h>
+#endif
+
+#if !defined(__vxworks) || defined(__pic__)
+.option pic2
+#endif
+.set noat
+___
+
+{{{
+my $FRAMESIZE=16*$SZREG;
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc0fff008 : 0xc0ff0000;
+
+my ($inp,$out,$key,$Tbl,$s0,$s1,$s2,$s3)=($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7);
+my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
+my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
+my ($key0,$cnt)=($gp,$fp);
+
+# instruction ordering is "stolen" from the output of MIPSpro assembler
+# invoked with -mips3 -O3 arguments...
+$code.=<<___;
+.align 5
+.ent _mips_AES_encrypt
+_mips_AES_encrypt:
+ .frame $sp,0,$ra
+ .set reorder
+ lw $t0,0($key)
+ lw $t1,4($key)
+ lw $t2,8($key)
+ lw $t3,12($key)
+ lw $cnt,240($key)
+ $PTR_ADD $key0,$key,16
+
+ xor $s0,$t0
+ xor $s1,$t1
+ xor $s2,$t2
+ xor $s3,$t3
+
+ sub $cnt,1
+ _xtr $i0,$s1,16-2
+.Loop_enc:
+ _xtr $i1,$s2,16-2
+ _xtr $i2,$s3,16-2
+ _xtr $i3,$s0,16-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lwl $t0,3($i0) # Te1[s1>>16]
+ lwl $t1,3($i1) # Te1[s2>>16]
+ lwl $t2,3($i2) # Te1[s3>>16]
+ lwl $t3,3($i3) # Te1[s0>>16]
+ lwr $t0,2($i0) # Te1[s1>>16]
+ lwr $t1,2($i1) # Te1[s2>>16]
+ lwr $t2,2($i2) # Te1[s3>>16]
+ lwr $t3,2($i3) # Te1[s0>>16]
+
+ _xtr $i0,$s2,8-2
+ _xtr $i1,$s3,8-2
+ _xtr $i2,$s0,8-2
+ _xtr $i3,$s1,8-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lwl $t4,2($i0) # Te2[s2>>8]
+ lwl $t5,2($i1) # Te2[s3>>8]
+ lwl $t6,2($i2) # Te2[s0>>8]
+ lwl $t7,2($i3) # Te2[s1>>8]
+ lwr $t4,1($i0) # Te2[s2>>8]
+ lwr $t5,1($i1) # Te2[s3>>8]
+ lwr $t6,1($i2) # Te2[s0>>8]
+ lwr $t7,1($i3) # Te2[s1>>8]
+
+ _xtr $i0,$s3,0-2
+ _xtr $i1,$s0,0-2
+ _xtr $i2,$s1,0-2
+ _xtr $i3,$s2,0-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lwl $t8,1($i0) # Te3[s3]
+ lwl $t9,1($i1) # Te3[s0]
+ lwl $t10,1($i2) # Te3[s1]
+ lwl $t11,1($i3) # Te3[s2]
+ lwr $t8,0($i0) # Te3[s3]
+ lwr $t9,0($i1) # Te3[s0]
+ lwr $t10,0($i2) # Te3[s1]
+ lwr $t11,0($i3) # Te3[s2]
+
+ _xtr $i0,$s0,24-2
+ _xtr $i1,$s1,24-2
+ _xtr $i2,$s2,24-2
+ _xtr $i3,$s3,24-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+ lw $t4,0($i0) # Te0[s0>>24]
+ lw $t5,0($i1) # Te0[s1>>24]
+ lw $t6,0($i2) # Te0[s2>>24]
+ lw $t7,0($i3) # Te0[s3>>24]
+
+ lw $s0,0($key0)
+ lw $s1,4($key0)
+ lw $s2,8($key0)
+ lw $s3,12($key0)
+
+ xor $t0,$t8
+ xor $t1,$t9
+ xor $t2,$t10
+ xor $t3,$t11
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+ sub $cnt,1
+ $PTR_ADD $key0,16
+ xor $s0,$t0
+ xor $s1,$t1
+ xor $s2,$t2
+ xor $s3,$t3
+ .set noreorder
+ bnez $cnt,.Loop_enc
+ _xtr $i0,$s1,16-2
+
+ .set reorder
+ _xtr $i1,$s2,16-2
+ _xtr $i2,$s3,16-2
+ _xtr $i3,$s0,16-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t0,2($i0) # Te4[s1>>16]
+ lbu $t1,2($i1) # Te4[s2>>16]
+ lbu $t2,2($i2) # Te4[s3>>16]
+ lbu $t3,2($i3) # Te4[s0>>16]
+
+ _xtr $i0,$s2,8-2
+ _xtr $i1,$s3,8-2
+ _xtr $i2,$s0,8-2
+ _xtr $i3,$s1,8-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t4,2($i0) # Te4[s2>>8]
+ lbu $t5,2($i1) # Te4[s3>>8]
+ lbu $t6,2($i2) # Te4[s0>>8]
+ lbu $t7,2($i3) # Te4[s1>>8]
+
+ _xtr $i0,$s0,24-2
+ _xtr $i1,$s1,24-2
+ _xtr $i2,$s2,24-2
+ _xtr $i3,$s3,24-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t8,2($i0) # Te4[s0>>24]
+ lbu $t9,2($i1) # Te4[s1>>24]
+ lbu $t10,2($i2) # Te4[s2>>24]
+ lbu $t11,2($i3) # Te4[s3>>24]
+
+ _xtr $i0,$s3,0-2
+ _xtr $i1,$s0,0-2
+ _xtr $i2,$s1,0-2
+ _xtr $i3,$s2,0-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+
+ _ins $t0,16
+ _ins $t1,16
+ _ins $t2,16
+ _ins $t3,16
+
+ _ins $t4,8
+ _ins $t5,8
+ _ins $t6,8
+ _ins $t7,8
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t4,2($i0) # Te4[s3]
+ lbu $t5,2($i1) # Te4[s0]
+ lbu $t6,2($i2) # Te4[s1]
+ lbu $t7,2($i3) # Te4[s2]
+
+ _ins $t8,24
+ _ins $t9,24
+ _ins $t10,24
+ _ins $t11,24
+
+ lw $s0,0($key0)
+ lw $s1,4($key0)
+ lw $s2,8($key0)
+ lw $s3,12($key0)
+
+ xor $t0,$t8
+ xor $t1,$t9
+ xor $t2,$t10
+ xor $t3,$t11
+
+ _ins $t4,0
+ _ins $t5,0
+ _ins $t6,0
+ _ins $t7,0
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+ xor $s0,$t0
+ xor $s1,$t1
+ xor $s2,$t2
+ xor $s3,$t3
+
+ jr $ra
+.end _mips_AES_encrypt
+
+.align 5
+.globl AES_encrypt
+.ent AES_encrypt
+AES_encrypt:
+ .frame $sp,$FRAMESIZE,$ra
+ .mask $SAVED_REGS_MASK,-$SZREG
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
+ .cpload $pf
+___
+$code.=<<___;
+ $PTR_SUB $sp,$FRAMESIZE
+ $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
+ $REG_S $s11,$FRAMESIZE-3*$SZREG($sp)
+ $REG_S $s10,$FRAMESIZE-4*$SZREG($sp)
+ $REG_S $s9,$FRAMESIZE-5*$SZREG($sp)
+ $REG_S $s8,$FRAMESIZE-6*$SZREG($sp)
+ $REG_S $s7,$FRAMESIZE-7*$SZREG($sp)
+ $REG_S $s6,$FRAMESIZE-8*$SZREG($sp)
+ $REG_S $s5,$FRAMESIZE-9*$SZREG($sp)
+ $REG_S $s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ $REG_S \$15,$FRAMESIZE-11*$SZREG($sp)
+ $REG_S \$14,$FRAMESIZE-12*$SZREG($sp)
+ $REG_S \$13,$FRAMESIZE-13*$SZREG($sp)
+ $REG_S \$12,$FRAMESIZE-14*$SZREG($sp)
+ $REG_S $gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
+ .cplocal $Tbl
+ .cpsetup $pf,$zero,AES_encrypt
+___
+$code.=<<___;
+ .set reorder
+ la $Tbl,AES_Te # PIC-ified 'load address'
+
+ lwl $s0,0+$MSB($inp)
+ lwl $s1,4+$MSB($inp)
+ lwl $s2,8+$MSB($inp)
+ lwl $s3,12+$MSB($inp)
+ lwr $s0,0+$LSB($inp)
+ lwr $s1,4+$LSB($inp)
+ lwr $s2,8+$LSB($inp)
+ lwr $s3,12+$LSB($inp)
+
+ bal _mips_AES_encrypt
+
+ swr $s0,0+$LSB($out)
+ swr $s1,4+$LSB($out)
+ swr $s2,8+$LSB($out)
+ swr $s3,12+$LSB($out)
+ swl $s0,0+$MSB($out)
+ swl $s1,4+$MSB($out)
+ swl $s2,8+$MSB($out)
+ swl $s3,12+$MSB($out)
+
+ .set noreorder
+ $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
+ $REG_L $s11,$FRAMESIZE-3*$SZREG($sp)
+ $REG_L $s10,$FRAMESIZE-4*$SZREG($sp)
+ $REG_L $s9,$FRAMESIZE-5*$SZREG($sp)
+ $REG_L $s8,$FRAMESIZE-6*$SZREG($sp)
+ $REG_L $s7,$FRAMESIZE-7*$SZREG($sp)
+ $REG_L $s6,$FRAMESIZE-8*$SZREG($sp)
+ $REG_L $s5,$FRAMESIZE-9*$SZREG($sp)
+ $REG_L $s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L \$15,$FRAMESIZE-11*$SZREG($sp)
+ $REG_L \$14,$FRAMESIZE-12*$SZREG($sp)
+ $REG_L \$13,$FRAMESIZE-13*$SZREG($sp)
+ $REG_L \$12,$FRAMESIZE-14*$SZREG($sp)
+ $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE
+.end AES_encrypt
+___
+
+$code.=<<___;
+.align 5
+.ent _mips_AES_decrypt
+_mips_AES_decrypt:
+ .frame $sp,0,$ra
+ .set reorder
+ lw $t0,0($key)
+ lw $t1,4($key)
+ lw $t2,8($key)
+ lw $t3,12($key)
+ lw $cnt,240($key)
+ $PTR_ADD $key0,$key,16
+
+ xor $s0,$t0
+ xor $s1,$t1
+ xor $s2,$t2
+ xor $s3,$t3
+
+ sub $cnt,1
+ _xtr $i0,$s3,16-2
+.Loop_dec:
+ _xtr $i1,$s0,16-2
+ _xtr $i2,$s1,16-2
+ _xtr $i3,$s2,16-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lwl $t0,3($i0) # Td1[s3>>16]
+ lwl $t1,3($i1) # Td1[s0>>16]
+ lwl $t2,3($i2) # Td1[s1>>16]
+ lwl $t3,3($i3) # Td1[s2>>16]
+ lwr $t0,2($i0) # Td1[s3>>16]
+ lwr $t1,2($i1) # Td1[s0>>16]
+ lwr $t2,2($i2) # Td1[s1>>16]
+ lwr $t3,2($i3) # Td1[s2>>16]
+
+ _xtr $i0,$s2,8-2
+ _xtr $i1,$s3,8-2
+ _xtr $i2,$s0,8-2
+ _xtr $i3,$s1,8-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lwl $t4,2($i0) # Td2[s2>>8]
+ lwl $t5,2($i1) # Td2[s3>>8]
+ lwl $t6,2($i2) # Td2[s0>>8]
+ lwl $t7,2($i3) # Td2[s1>>8]
+ lwr $t4,1($i0) # Td2[s2>>8]
+ lwr $t5,1($i1) # Td2[s3>>8]
+ lwr $t6,1($i2) # Td2[s0>>8]
+ lwr $t7,1($i3) # Td2[s1>>8]
+
+ _xtr $i0,$s1,0-2
+ _xtr $i1,$s2,0-2
+ _xtr $i2,$s3,0-2
+ _xtr $i3,$s0,0-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lwl $t8,1($i0) # Td3[s1]
+ lwl $t9,1($i1) # Td3[s2]
+ lwl $t10,1($i2) # Td3[s3]
+ lwl $t11,1($i3) # Td3[s0]
+ lwr $t8,0($i0) # Td3[s1]
+ lwr $t9,0($i1) # Td3[s2]
+ lwr $t10,0($i2) # Td3[s3]
+ lwr $t11,0($i3) # Td3[s0]
+
+ _xtr $i0,$s0,24-2
+ _xtr $i1,$s1,24-2
+ _xtr $i2,$s2,24-2
+ _xtr $i3,$s3,24-2
+ and $i0,0x3fc
+ and $i1,0x3fc
+ and $i2,0x3fc
+ and $i3,0x3fc
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+
+ lw $t4,0($i0) # Td0[s0>>24]
+ lw $t5,0($i1) # Td0[s1>>24]
+ lw $t6,0($i2) # Td0[s2>>24]
+ lw $t7,0($i3) # Td0[s3>>24]
+
+ lw $s0,0($key0)
+ lw $s1,4($key0)
+ lw $s2,8($key0)
+ lw $s3,12($key0)
+
+ xor $t0,$t8
+ xor $t1,$t9
+ xor $t2,$t10
+ xor $t3,$t11
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+ sub $cnt,1
+ $PTR_ADD $key0,16
+ xor $s0,$t0
+ xor $s1,$t1
+ xor $s2,$t2
+ xor $s3,$t3
+ .set noreorder
+ bnez $cnt,.Loop_dec
+ _xtr $i0,$s3,16-2
+
+ .set reorder
+ lw $t4,1024($Tbl) # prefetch Td4
+ lw $t5,1024+32($Tbl)
+ lw $t6,1024+64($Tbl)
+ lw $t7,1024+96($Tbl)
+ lw $t8,1024+128($Tbl)
+ lw $t9,1024+160($Tbl)
+ lw $t10,1024+192($Tbl)
+ lw $t11,1024+224($Tbl)
+
+ _xtr $i0,$s3,16
+ _xtr $i1,$s0,16
+ _xtr $i2,$s1,16
+ _xtr $i3,$s2,16
+ and $i0,0xff
+ and $i1,0xff
+ and $i2,0xff
+ and $i3,0xff
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t0,1024($i0) # Td4[s3>>16]
+ lbu $t1,1024($i1) # Td4[s0>>16]
+ lbu $t2,1024($i2) # Td4[s1>>16]
+ lbu $t3,1024($i3) # Td4[s2>>16]
+
+ _xtr $i0,$s2,8
+ _xtr $i1,$s3,8
+ _xtr $i2,$s0,8
+ _xtr $i3,$s1,8
+ and $i0,0xff
+ and $i1,0xff
+ and $i2,0xff
+ and $i3,0xff
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t4,1024($i0) # Td4[s2>>8]
+ lbu $t5,1024($i1) # Td4[s3>>8]
+ lbu $t6,1024($i2) # Td4[s0>>8]
+ lbu $t7,1024($i3) # Td4[s1>>8]
+
+ _xtr $i0,$s0,24
+ _xtr $i1,$s1,24
+ _xtr $i2,$s2,24
+ _xtr $i3,$s3,24
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t8,1024($i0) # Td4[s0>>24]
+ lbu $t9,1024($i1) # Td4[s1>>24]
+ lbu $t10,1024($i2) # Td4[s2>>24]
+ lbu $t11,1024($i3) # Td4[s3>>24]
+
+ _xtr $i0,$s1,0
+ _xtr $i1,$s2,0
+ _xtr $i2,$s3,0
+ _xtr $i3,$s0,0
+
+ _ins $t0,16
+ _ins $t1,16
+ _ins $t2,16
+ _ins $t3,16
+
+ _ins $t4,8
+ _ins $t5,8
+ _ins $t6,8
+ _ins $t7,8
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $t4,1024($i0) # Td4[s1]
+ lbu $t5,1024($i1) # Td4[s2]
+ lbu $t6,1024($i2) # Td4[s3]
+ lbu $t7,1024($i3) # Td4[s0]
+
+ _ins $t8,24
+ _ins $t9,24
+ _ins $t10,24
+ _ins $t11,24
+
+ lw $s0,0($key0)
+ lw $s1,4($key0)
+ lw $s2,8($key0)
+ lw $s3,12($key0)
+
+ _ins $t4,0
+ _ins $t5,0
+ _ins $t6,0
+ _ins $t7,0
+
+
+ xor $t0,$t8
+ xor $t1,$t9
+ xor $t2,$t10
+ xor $t3,$t11
+
+ xor $t0,$t4
+ xor $t1,$t5
+ xor $t2,$t6
+ xor $t3,$t7
+
+ xor $s0,$t0
+ xor $s1,$t1
+ xor $s2,$t2
+ xor $s3,$t3
+
+ jr $ra
+.end _mips_AES_decrypt
+
+.align 5
+.globl AES_decrypt
+.ent AES_decrypt
+AES_decrypt:
+ .frame $sp,$FRAMESIZE,$ra
+ .mask $SAVED_REGS_MASK,-$SZREG
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
+ .cpload $pf
+___
+$code.=<<___;
+ $PTR_SUB $sp,$FRAMESIZE
+ $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
+ $REG_S $s11,$FRAMESIZE-3*$SZREG($sp)
+ $REG_S $s10,$FRAMESIZE-4*$SZREG($sp)
+ $REG_S $s9,$FRAMESIZE-5*$SZREG($sp)
+ $REG_S $s8,$FRAMESIZE-6*$SZREG($sp)
+ $REG_S $s7,$FRAMESIZE-7*$SZREG($sp)
+ $REG_S $s6,$FRAMESIZE-8*$SZREG($sp)
+ $REG_S $s5,$FRAMESIZE-9*$SZREG($sp)
+ $REG_S $s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ $REG_S \$15,$FRAMESIZE-11*$SZREG($sp)
+ $REG_S \$14,$FRAMESIZE-12*$SZREG($sp)
+ $REG_S \$13,$FRAMESIZE-13*$SZREG($sp)
+ $REG_S \$12,$FRAMESIZE-14*$SZREG($sp)
+ $REG_S $gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
+ .cplocal $Tbl
+ .cpsetup $pf,$zero,AES_decrypt
+___
+$code.=<<___;
+ .set reorder
+ la $Tbl,AES_Td # PIC-ified 'load address'
+
+ lwl $s0,0+$MSB($inp)
+ lwl $s1,4+$MSB($inp)
+ lwl $s2,8+$MSB($inp)
+ lwl $s3,12+$MSB($inp)
+ lwr $s0,0+$LSB($inp)
+ lwr $s1,4+$LSB($inp)
+ lwr $s2,8+$LSB($inp)
+ lwr $s3,12+$LSB($inp)
+
+ bal _mips_AES_decrypt
+
+ swr $s0,0+$LSB($out)
+ swr $s1,4+$LSB($out)
+ swr $s2,8+$LSB($out)
+ swr $s3,12+$LSB($out)
+ swl $s0,0+$MSB($out)
+ swl $s1,4+$MSB($out)
+ swl $s2,8+$MSB($out)
+ swl $s3,12+$MSB($out)
+
+ .set noreorder
+ $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
+ $REG_L $s11,$FRAMESIZE-3*$SZREG($sp)
+ $REG_L $s10,$FRAMESIZE-4*$SZREG($sp)
+ $REG_L $s9,$FRAMESIZE-5*$SZREG($sp)
+ $REG_L $s8,$FRAMESIZE-6*$SZREG($sp)
+ $REG_L $s7,$FRAMESIZE-7*$SZREG($sp)
+ $REG_L $s6,$FRAMESIZE-8*$SZREG($sp)
+ $REG_L $s5,$FRAMESIZE-9*$SZREG($sp)
+ $REG_L $s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L \$15,$FRAMESIZE-11*$SZREG($sp)
+ $REG_L \$14,$FRAMESIZE-12*$SZREG($sp)
+ $REG_L \$13,$FRAMESIZE-13*$SZREG($sp)
+ $REG_L \$12,$FRAMESIZE-14*$SZREG($sp)
+ $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE
+.end AES_decrypt
+___
+}}}
+
+{{{
+my $FRAMESIZE=8*$SZREG;
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc000f008 : 0xc0000000;
+
+my ($inp,$bits,$key,$Tbl)=($a0,$a1,$a2,$a3);
+my ($rk0,$rk1,$rk2,$rk3,$rk4,$rk5,$rk6,$rk7)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
+my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
+my ($rcon,$cnt)=($gp,$fp);
+
+$code.=<<___;
+.align 5
+.ent _mips_AES_set_encrypt_key
+_mips_AES_set_encrypt_key:
+ .frame $sp,0,$ra
+ .set noreorder
+ beqz $inp,.Lekey_done
+ li $t0,-1
+ beqz $key,.Lekey_done
+ $PTR_ADD $rcon,$Tbl,1024+256
+
+ .set reorder
+ lwl $rk0,0+$MSB($inp) # load 128 bits
+ lwl $rk1,4+$MSB($inp)
+ lwl $rk2,8+$MSB($inp)
+ lwl $rk3,12+$MSB($inp)
+ li $at,128
+ lwr $rk0,0+$LSB($inp)
+ lwr $rk1,4+$LSB($inp)
+ lwr $rk2,8+$LSB($inp)
+ lwr $rk3,12+$LSB($inp)
+ .set noreorder
+ beq $bits,$at,.L128bits
+ li $cnt,10
+
+ .set reorder
+ lwl $rk4,16+$MSB($inp) # load 192 bits
+ lwl $rk5,20+$MSB($inp)
+ li $at,192
+ lwr $rk4,16+$LSB($inp)
+ lwr $rk5,20+$LSB($inp)
+ .set noreorder
+ beq $bits,$at,.L192bits
+ li $cnt,8
+
+ .set reorder
+ lwl $rk6,24+$MSB($inp) # load 256 bits
+ lwl $rk7,28+$MSB($inp)
+ li $at,256
+ lwr $rk6,24+$LSB($inp)
+ lwr $rk7,28+$LSB($inp)
+ .set noreorder
+ beq $bits,$at,.L256bits
+ li $cnt,7
+
+ b .Lekey_done
+ li $t0,-2
+
+.align 4
+.L128bits:
+ .set reorder
+ srl $i0,$rk3,16
+ srl $i1,$rk3,8
+ and $i0,0xff
+ and $i1,0xff
+ and $i2,$rk3,0xff
+ srl $i3,$rk3,24
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $i0,1024($i0)
+ lbu $i1,1024($i1)
+ lbu $i2,1024($i2)
+ lbu $i3,1024($i3)
+
+ sw $rk0,0($key)
+ sw $rk1,4($key)
+ sw $rk2,8($key)
+ sw $rk3,12($key)
+ sub $cnt,1
+ $PTR_ADD $key,16
+
+ _bias $i0,24
+ _bias $i1,16
+ _bias $i2,8
+ _bias $i3,0
+
+ xor $rk0,$i0
+ lw $i0,0($rcon)
+ xor $rk0,$i1
+ xor $rk0,$i2
+ xor $rk0,$i3
+ xor $rk0,$i0
+
+ xor $rk1,$rk0
+ xor $rk2,$rk1
+ xor $rk3,$rk2
+
+ .set noreorder
+ bnez $cnt,.L128bits
+ $PTR_ADD $rcon,4
+
+ sw $rk0,0($key)
+ sw $rk1,4($key)
+ sw $rk2,8($key)
+ li $cnt,10
+ sw $rk3,12($key)
+ li $t0,0
+ sw $cnt,80($key)
+ b .Lekey_done
+ $PTR_SUB $key,10*16
+
+.align 4
+.L192bits:
+ .set reorder
+ srl $i0,$rk5,16
+ srl $i1,$rk5,8
+ and $i0,0xff
+ and $i1,0xff
+ and $i2,$rk5,0xff
+ srl $i3,$rk5,24
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $i0,1024($i0)
+ lbu $i1,1024($i1)
+ lbu $i2,1024($i2)
+ lbu $i3,1024($i3)
+
+ sw $rk0,0($key)
+ sw $rk1,4($key)
+ sw $rk2,8($key)
+ sw $rk3,12($key)
+ sw $rk4,16($key)
+ sw $rk5,20($key)
+ sub $cnt,1
+ $PTR_ADD $key,24
+
+ _bias $i0,24
+ _bias $i1,16
+ _bias $i2,8
+ _bias $i3,0
+
+ xor $rk0,$i0
+ lw $i0,0($rcon)
+ xor $rk0,$i1
+ xor $rk0,$i2
+ xor $rk0,$i3
+ xor $rk0,$i0
+
+ xor $rk1,$rk0
+ xor $rk2,$rk1
+ xor $rk3,$rk2
+ xor $rk4,$rk3
+ xor $rk5,$rk4
+
+ .set noreorder
+ bnez $cnt,.L192bits
+ $PTR_ADD $rcon,4
+
+ sw $rk0,0($key)
+ sw $rk1,4($key)
+ sw $rk2,8($key)
+ li $cnt,12
+ sw $rk3,12($key)
+ li $t0,0
+ sw $cnt,48($key)
+ b .Lekey_done
+ $PTR_SUB $key,12*16
+
+.align 4
+.L256bits:
+ .set reorder
+ srl $i0,$rk7,16
+ srl $i1,$rk7,8
+ and $i0,0xff
+ and $i1,0xff
+ and $i2,$rk7,0xff
+ srl $i3,$rk7,24
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $i0,1024($i0)
+ lbu $i1,1024($i1)
+ lbu $i2,1024($i2)
+ lbu $i3,1024($i3)
+
+ sw $rk0,0($key)
+ sw $rk1,4($key)
+ sw $rk2,8($key)
+ sw $rk3,12($key)
+ sw $rk4,16($key)
+ sw $rk5,20($key)
+ sw $rk6,24($key)
+ sw $rk7,28($key)
+ sub $cnt,1
+
+ _bias $i0,24
+ _bias $i1,16
+ _bias $i2,8
+ _bias $i3,0
+
+ xor $rk0,$i0
+ lw $i0,0($rcon)
+ xor $rk0,$i1
+ xor $rk0,$i2
+ xor $rk0,$i3
+ xor $rk0,$i0
+
+ xor $rk1,$rk0
+ xor $rk2,$rk1
+ xor $rk3,$rk2
+ beqz $cnt,.L256bits_done
+
+ srl $i0,$rk3,24
+ srl $i1,$rk3,16
+ srl $i2,$rk3,8
+ and $i3,$rk3,0xff
+ and $i1,0xff
+ and $i2,0xff
+ $PTR_ADD $i0,$Tbl
+ $PTR_ADD $i1,$Tbl
+ $PTR_ADD $i2,$Tbl
+ $PTR_ADD $i3,$Tbl
+ lbu $i0,1024($i0)
+ lbu $i1,1024($i1)
+ lbu $i2,1024($i2)
+ lbu $i3,1024($i3)
+ sll $i0,24
+ sll $i1,16
+ sll $i2,8
+
+ xor $rk4,$i0
+ xor $rk4,$i1
+ xor $rk4,$i2
+ xor $rk4,$i3
+
+ xor $rk5,$rk4
+ xor $rk6,$rk5
+ xor $rk7,$rk6
+
+ $PTR_ADD $key,32
+ .set noreorder
+ b .L256bits
+ $PTR_ADD $rcon,4
+
+.L256bits_done:
+ sw $rk0,32($key)
+ sw $rk1,36($key)
+ sw $rk2,40($key)
+ li $cnt,14
+ sw $rk3,44($key)
+ li $t0,0
+ sw $cnt,48($key)
+ $PTR_SUB $key,12*16
+
+.Lekey_done:
+ jr $ra
+ nop
+.end _mips_AES_set_encrypt_key
+
+.globl AES_set_encrypt_key
+.ent AES_set_encrypt_key
+AES_set_encrypt_key:
+ .frame $sp,$FRAMESIZE,$ra
+ .mask $SAVED_REGS_MASK,-$SZREG
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
+ .cpload $pf
+___
+$code.=<<___;
+ $PTR_SUB $sp,$FRAMESIZE
+ $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ $REG_S $s3,$FRAMESIZE-3*$SZREG($sp)
+ $REG_S $s2,$FRAMESIZE-4*$SZREG($sp)
+ $REG_S $s1,$FRAMESIZE-5*$SZREG($sp)
+ $REG_S $s0,$FRAMESIZE-6*$SZREG($sp)
+ $REG_S $gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
+ .cplocal $Tbl
+ .cpsetup $pf,$zero,AES_set_encrypt_key
+___
+$code.=<<___;
+ .set reorder
+ la $Tbl,AES_Te # PIC-ified 'load address'
+
+ bal _mips_AES_set_encrypt_key
+
+ .set noreorder
+ move $a0,$t0
+ $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s3,$FRAMESIZE-11*$SZREG($sp)
+ $REG_L $s2,$FRAMESIZE-12*$SZREG($sp)
+ $REG_L $s1,$FRAMESIZE-13*$SZREG($sp)
+ $REG_L $s0,$FRAMESIZE-14*$SZREG($sp)
+ $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE
+.end AES_set_encrypt_key
+___
+
+my ($head,$tail)=($inp,$bits);
+my ($tp1,$tp2,$tp4,$tp8,$tp9,$tpb,$tpd,$tpe)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
+my ($m,$x80808080,$x7f7f7f7f,$x1b1b1b1b)=($at,$t0,$t1,$t2);
+$code.=<<___;
+.align 5
+.globl AES_set_decrypt_key
+.ent AES_set_decrypt_key
+AES_set_decrypt_key:
+ .frame $sp,$FRAMESIZE,$ra
+ .mask $SAVED_REGS_MASK,-$SZREG
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
+ .cpload $pf
+___
+$code.=<<___;
+ $PTR_SUB $sp,$FRAMESIZE
+ $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ $REG_S $s3,$FRAMESIZE-3*$SZREG($sp)
+ $REG_S $s2,$FRAMESIZE-4*$SZREG($sp)
+ $REG_S $s1,$FRAMESIZE-5*$SZREG($sp)
+ $REG_S $s0,$FRAMESIZE-6*$SZREG($sp)
+ $REG_S $gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
+ .cplocal $Tbl
+ .cpsetup $pf,$zero,AES_set_decrypt_key
+___
+$code.=<<___;
+ .set reorder
+ la $Tbl,AES_Te # PIC-ified 'load address'
+
+ bal _mips_AES_set_encrypt_key
+
+ bltz $t0,.Ldkey_done
+
+ sll $at,$cnt,4
+ $PTR_ADD $head,$key,0
+ $PTR_ADD $tail,$key,$at
+.align 4
+.Lswap:
+ lw $rk0,0($head)
+ lw $rk1,4($head)
+ lw $rk2,8($head)
+ lw $rk3,12($head)
+ lw $rk4,0($tail)
+ lw $rk5,4($tail)
+ lw $rk6,8($tail)
+ lw $rk7,12($tail)
+ sw $rk0,0($tail)
+ sw $rk1,4($tail)
+ sw $rk2,8($tail)
+ sw $rk3,12($tail)
+ $PTR_ADD $head,16
+ $PTR_SUB $tail,16
+ sw $rk4,-16($head)
+ sw $rk5,-12($head)
+ sw $rk6,-8($head)
+ sw $rk7,-4($head)
+ bne $head,$tail,.Lswap
+
+ lw $tp1,16($key) # modulo-scheduled
+ lui $x80808080,0x8080
+ sub $cnt,1
+ or $x80808080,0x8080
+ sll $cnt,2
+ $PTR_ADD $key,16
+ lui $x1b1b1b1b,0x1b1b
+ nor $x7f7f7f7f,$zero,$x80808080
+ or $x1b1b1b1b,0x1b1b
+.align 4
+.Lmix:
+ and $m,$tp1,$x80808080
+ and $tp2,$tp1,$x7f7f7f7f
+ srl $tp4,$m,7
+ addu $tp2,$tp2 # tp2<<1
+ subu $m,$tp4
+ and $m,$x1b1b1b1b
+ xor $tp2,$m
+
+ and $m,$tp2,$x80808080
+ and $tp4,$tp2,$x7f7f7f7f
+ srl $tp8,$m,7
+ addu $tp4,$tp4 # tp4<<1
+ subu $m,$tp8
+ and $m,$x1b1b1b1b
+ xor $tp4,$m
+
+ and $m,$tp4,$x80808080
+ and $tp8,$tp4,$x7f7f7f7f
+ srl $tp9,$m,7
+ addu $tp8,$tp8 # tp8<<1
+ subu $m,$tp9
+ and $m,$x1b1b1b1b
+ xor $tp8,$m
+
+ xor $tp9,$tp8,$tp1
+ xor $tpe,$tp8,$tp4
+ xor $tpb,$tp9,$tp2
+ xor $tpd,$tp9,$tp4
+
+ _ror $tp1,$tpd,16
+ xor $tpe,$tp2
+ _ror $tp2,$tpd,-16
+ xor $tpe,$tp1
+ _ror $tp1,$tp9,8
+ xor $tpe,$tp2
+ _ror $tp2,$tp9,-24
+ xor $tpe,$tp1
+ _ror $tp1,$tpb,24
+ xor $tpe,$tp2
+ _ror $tp2,$tpb,-8
+ xor $tpe,$tp1
+ lw $tp1,4($key) # modulo-scheduled
+ xor $tpe,$tp2
+ sub $cnt,1
+ sw $tpe,0($key)
+ $PTR_ADD $key,4
+ bnez $cnt,.Lmix
+
+ li $t0,0
+.Ldkey_done:
+ .set noreorder
+ move $a0,$t0
+ $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
+ $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s3,$FRAMESIZE-11*$SZREG($sp)
+ $REG_L $s2,$FRAMESIZE-12*$SZREG($sp)
+ $REG_L $s1,$FRAMESIZE-13*$SZREG($sp)
+ $REG_L $s0,$FRAMESIZE-14*$SZREG($sp)
+ $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE
+.end AES_set_decrypt_key
+___
+}}}
+
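[Aside, not part of the patch: the .Lmix loop above performs the usual
InvMixColumns conversion of the encryption round keys; its 0x80808080 /
0x7f7f7f7f / 0x1b1b1b1b constants implement a branch-free GF(2^8) doubling
("xtime") of all four bytes of a word at once. The same step in Perl, as a
sketch:

    sub xtime4 {
        my $w = shift;
        my $m = $w & 0x80808080;              # high bit of every byte
        $m = ($m - ($m >> 7)) & 0x1b1b1b1b;   # 0x80 -> 0x1b, 0x00 -> 0x00
        return ((($w & 0x7f7f7f7f) << 1) ^ $m) & 0xffffffff;
    }
    printf "%08x\n", xtime4(0x80402010);      # prints 1b804020

tp2, tp4 and tp8 above are tp1 doubled one, two and three times; the xors
that follow combine them into the 9-, 11-, 13- and 14-multiples that
InvMixColumns needs.]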
+######################################################################
+# Tables are kept in endian-neutral manner
+$code.=<<___;
+.rdata
+.align 6
+AES_Te:
+.byte 0xc6,0x63,0x63,0xa5, 0xf8,0x7c,0x7c,0x84 # Te0
+.byte 0xee,0x77,0x77,0x99, 0xf6,0x7b,0x7b,0x8d
+.byte 0xff,0xf2,0xf2,0x0d, 0xd6,0x6b,0x6b,0xbd
+.byte 0xde,0x6f,0x6f,0xb1, 0x91,0xc5,0xc5,0x54
+.byte 0x60,0x30,0x30,0x50, 0x02,0x01,0x01,0x03
+.byte 0xce,0x67,0x67,0xa9, 0x56,0x2b,0x2b,0x7d
+.byte 0xe7,0xfe,0xfe,0x19, 0xb5,0xd7,0xd7,0x62
+.byte 0x4d,0xab,0xab,0xe6, 0xec,0x76,0x76,0x9a
+.byte 0x8f,0xca,0xca,0x45, 0x1f,0x82,0x82,0x9d
+.byte 0x89,0xc9,0xc9,0x40, 0xfa,0x7d,0x7d,0x87
+.byte 0xef,0xfa,0xfa,0x15, 0xb2,0x59,0x59,0xeb
+.byte 0x8e,0x47,0x47,0xc9, 0xfb,0xf0,0xf0,0x0b
+.byte 0x41,0xad,0xad,0xec, 0xb3,0xd4,0xd4,0x67
+.byte 0x5f,0xa2,0xa2,0xfd, 0x45,0xaf,0xaf,0xea
+.byte 0x23,0x9c,0x9c,0xbf, 0x53,0xa4,0xa4,0xf7
+.byte 0xe4,0x72,0x72,0x96, 0x9b,0xc0,0xc0,0x5b
+.byte 0x75,0xb7,0xb7,0xc2, 0xe1,0xfd,0xfd,0x1c
+.byte 0x3d,0x93,0x93,0xae, 0x4c,0x26,0x26,0x6a
+.byte 0x6c,0x36,0x36,0x5a, 0x7e,0x3f,0x3f,0x41
+.byte 0xf5,0xf7,0xf7,0x02, 0x83,0xcc,0xcc,0x4f
+.byte 0x68,0x34,0x34,0x5c, 0x51,0xa5,0xa5,0xf4
+.byte 0xd1,0xe5,0xe5,0x34, 0xf9,0xf1,0xf1,0x08
+.byte 0xe2,0x71,0x71,0x93, 0xab,0xd8,0xd8,0x73
+.byte 0x62,0x31,0x31,0x53, 0x2a,0x15,0x15,0x3f
+.byte 0x08,0x04,0x04,0x0c, 0x95,0xc7,0xc7,0x52
+.byte 0x46,0x23,0x23,0x65, 0x9d,0xc3,0xc3,0x5e
+.byte 0x30,0x18,0x18,0x28, 0x37,0x96,0x96,0xa1
+.byte 0x0a,0x05,0x05,0x0f, 0x2f,0x9a,0x9a,0xb5
+.byte 0x0e,0x07,0x07,0x09, 0x24,0x12,0x12,0x36
+.byte 0x1b,0x80,0x80,0x9b, 0xdf,0xe2,0xe2,0x3d
+.byte 0xcd,0xeb,0xeb,0x26, 0x4e,0x27,0x27,0x69
+.byte 0x7f,0xb2,0xb2,0xcd, 0xea,0x75,0x75,0x9f
+.byte 0x12,0x09,0x09,0x1b, 0x1d,0x83,0x83,0x9e
+.byte 0x58,0x2c,0x2c,0x74, 0x34,0x1a,0x1a,0x2e
+.byte 0x36,0x1b,0x1b,0x2d, 0xdc,0x6e,0x6e,0xb2
+.byte 0xb4,0x5a,0x5a,0xee, 0x5b,0xa0,0xa0,0xfb
+.byte 0xa4,0x52,0x52,0xf6, 0x76,0x3b,0x3b,0x4d
+.byte 0xb7,0xd6,0xd6,0x61, 0x7d,0xb3,0xb3,0xce
+.byte 0x52,0x29,0x29,0x7b, 0xdd,0xe3,0xe3,0x3e
+.byte 0x5e,0x2f,0x2f,0x71, 0x13,0x84,0x84,0x97
+.byte 0xa6,0x53,0x53,0xf5, 0xb9,0xd1,0xd1,0x68
+.byte 0x00,0x00,0x00,0x00, 0xc1,0xed,0xed,0x2c
+.byte 0x40,0x20,0x20,0x60, 0xe3,0xfc,0xfc,0x1f
+.byte 0x79,0xb1,0xb1,0xc8, 0xb6,0x5b,0x5b,0xed
+.byte 0xd4,0x6a,0x6a,0xbe, 0x8d,0xcb,0xcb,0x46
+.byte 0x67,0xbe,0xbe,0xd9, 0x72,0x39,0x39,0x4b
+.byte 0x94,0x4a,0x4a,0xde, 0x98,0x4c,0x4c,0xd4
+.byte 0xb0,0x58,0x58,0xe8, 0x85,0xcf,0xcf,0x4a
+.byte 0xbb,0xd0,0xd0,0x6b, 0xc5,0xef,0xef,0x2a
+.byte 0x4f,0xaa,0xaa,0xe5, 0xed,0xfb,0xfb,0x16
+.byte 0x86,0x43,0x43,0xc5, 0x9a,0x4d,0x4d,0xd7
+.byte 0x66,0x33,0x33,0x55, 0x11,0x85,0x85,0x94
+.byte 0x8a,0x45,0x45,0xcf, 0xe9,0xf9,0xf9,0x10
+.byte 0x04,0x02,0x02,0x06, 0xfe,0x7f,0x7f,0x81
+.byte 0xa0,0x50,0x50,0xf0, 0x78,0x3c,0x3c,0x44
+.byte 0x25,0x9f,0x9f,0xba, 0x4b,0xa8,0xa8,0xe3
+.byte 0xa2,0x51,0x51,0xf3, 0x5d,0xa3,0xa3,0xfe
+.byte 0x80,0x40,0x40,0xc0, 0x05,0x8f,0x8f,0x8a
+.byte 0x3f,0x92,0x92,0xad, 0x21,0x9d,0x9d,0xbc
+.byte 0x70,0x38,0x38,0x48, 0xf1,0xf5,0xf5,0x04
+.byte 0x63,0xbc,0xbc,0xdf, 0x77,0xb6,0xb6,0xc1
+.byte 0xaf,0xda,0xda,0x75, 0x42,0x21,0x21,0x63
+.byte 0x20,0x10,0x10,0x30, 0xe5,0xff,0xff,0x1a
+.byte 0xfd,0xf3,0xf3,0x0e, 0xbf,0xd2,0xd2,0x6d
+.byte 0x81,0xcd,0xcd,0x4c, 0x18,0x0c,0x0c,0x14
+.byte 0x26,0x13,0x13,0x35, 0xc3,0xec,0xec,0x2f
+.byte 0xbe,0x5f,0x5f,0xe1, 0x35,0x97,0x97,0xa2
+.byte 0x88,0x44,0x44,0xcc, 0x2e,0x17,0x17,0x39
+.byte 0x93,0xc4,0xc4,0x57, 0x55,0xa7,0xa7,0xf2
+.byte 0xfc,0x7e,0x7e,0x82, 0x7a,0x3d,0x3d,0x47
+.byte 0xc8,0x64,0x64,0xac, 0xba,0x5d,0x5d,0xe7
+.byte 0x32,0x19,0x19,0x2b, 0xe6,0x73,0x73,0x95
+.byte 0xc0,0x60,0x60,0xa0, 0x19,0x81,0x81,0x98
+.byte 0x9e,0x4f,0x4f,0xd1, 0xa3,0xdc,0xdc,0x7f
+.byte 0x44,0x22,0x22,0x66, 0x54,0x2a,0x2a,0x7e
+.byte 0x3b,0x90,0x90,0xab, 0x0b,0x88,0x88,0x83
+.byte 0x8c,0x46,0x46,0xca, 0xc7,0xee,0xee,0x29
+.byte 0x6b,0xb8,0xb8,0xd3, 0x28,0x14,0x14,0x3c
+.byte 0xa7,0xde,0xde,0x79, 0xbc,0x5e,0x5e,0xe2
+.byte 0x16,0x0b,0x0b,0x1d, 0xad,0xdb,0xdb,0x76
+.byte 0xdb,0xe0,0xe0,0x3b, 0x64,0x32,0x32,0x56
+.byte 0x74,0x3a,0x3a,0x4e, 0x14,0x0a,0x0a,0x1e
+.byte 0x92,0x49,0x49,0xdb, 0x0c,0x06,0x06,0x0a
+.byte 0x48,0x24,0x24,0x6c, 0xb8,0x5c,0x5c,0xe4
+.byte 0x9f,0xc2,0xc2,0x5d, 0xbd,0xd3,0xd3,0x6e
+.byte 0x43,0xac,0xac,0xef, 0xc4,0x62,0x62,0xa6
+.byte 0x39,0x91,0x91,0xa8, 0x31,0x95,0x95,0xa4
+.byte 0xd3,0xe4,0xe4,0x37, 0xf2,0x79,0x79,0x8b
+.byte 0xd5,0xe7,0xe7,0x32, 0x8b,0xc8,0xc8,0x43
+.byte 0x6e,0x37,0x37,0x59, 0xda,0x6d,0x6d,0xb7
+.byte 0x01,0x8d,0x8d,0x8c, 0xb1,0xd5,0xd5,0x64
+.byte 0x9c,0x4e,0x4e,0xd2, 0x49,0xa9,0xa9,0xe0
+.byte 0xd8,0x6c,0x6c,0xb4, 0xac,0x56,0x56,0xfa
+.byte 0xf3,0xf4,0xf4,0x07, 0xcf,0xea,0xea,0x25
+.byte 0xca,0x65,0x65,0xaf, 0xf4,0x7a,0x7a,0x8e
+.byte 0x47,0xae,0xae,0xe9, 0x10,0x08,0x08,0x18
+.byte 0x6f,0xba,0xba,0xd5, 0xf0,0x78,0x78,0x88
+.byte 0x4a,0x25,0x25,0x6f, 0x5c,0x2e,0x2e,0x72
+.byte 0x38,0x1c,0x1c,0x24, 0x57,0xa6,0xa6,0xf1
+.byte 0x73,0xb4,0xb4,0xc7, 0x97,0xc6,0xc6,0x51
+.byte 0xcb,0xe8,0xe8,0x23, 0xa1,0xdd,0xdd,0x7c
+.byte 0xe8,0x74,0x74,0x9c, 0x3e,0x1f,0x1f,0x21
+.byte 0x96,0x4b,0x4b,0xdd, 0x61,0xbd,0xbd,0xdc
+.byte 0x0d,0x8b,0x8b,0x86, 0x0f,0x8a,0x8a,0x85
+.byte 0xe0,0x70,0x70,0x90, 0x7c,0x3e,0x3e,0x42
+.byte 0x71,0xb5,0xb5,0xc4, 0xcc,0x66,0x66,0xaa
+.byte 0x90,0x48,0x48,0xd8, 0x06,0x03,0x03,0x05
+.byte 0xf7,0xf6,0xf6,0x01, 0x1c,0x0e,0x0e,0x12
+.byte 0xc2,0x61,0x61,0xa3, 0x6a,0x35,0x35,0x5f
+.byte 0xae,0x57,0x57,0xf9, 0x69,0xb9,0xb9,0xd0
+.byte 0x17,0x86,0x86,0x91, 0x99,0xc1,0xc1,0x58
+.byte 0x3a,0x1d,0x1d,0x27, 0x27,0x9e,0x9e,0xb9
+.byte 0xd9,0xe1,0xe1,0x38, 0xeb,0xf8,0xf8,0x13
+.byte 0x2b,0x98,0x98,0xb3, 0x22,0x11,0x11,0x33
+.byte 0xd2,0x69,0x69,0xbb, 0xa9,0xd9,0xd9,0x70
+.byte 0x07,0x8e,0x8e,0x89, 0x33,0x94,0x94,0xa7
+.byte 0x2d,0x9b,0x9b,0xb6, 0x3c,0x1e,0x1e,0x22
+.byte 0x15,0x87,0x87,0x92, 0xc9,0xe9,0xe9,0x20
+.byte 0x87,0xce,0xce,0x49, 0xaa,0x55,0x55,0xff
+.byte 0x50,0x28,0x28,0x78, 0xa5,0xdf,0xdf,0x7a
+.byte 0x03,0x8c,0x8c,0x8f, 0x59,0xa1,0xa1,0xf8
+.byte 0x09,0x89,0x89,0x80, 0x1a,0x0d,0x0d,0x17
+.byte 0x65,0xbf,0xbf,0xda, 0xd7,0xe6,0xe6,0x31
+.byte 0x84,0x42,0x42,0xc6, 0xd0,0x68,0x68,0xb8
+.byte 0x82,0x41,0x41,0xc3, 0x29,0x99,0x99,0xb0
+.byte 0x5a,0x2d,0x2d,0x77, 0x1e,0x0f,0x0f,0x11
+.byte 0x7b,0xb0,0xb0,0xcb, 0xa8,0x54,0x54,0xfc
+.byte 0x6d,0xbb,0xbb,0xd6, 0x2c,0x16,0x16,0x3a
+
+.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 # Te4
+.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.byte 0x01,0x00,0x00,0x00, 0x02,0x00,0x00,0x00 # rcon
+.byte 0x04,0x00,0x00,0x00, 0x08,0x00,0x00,0x00
+.byte 0x10,0x00,0x00,0x00, 0x20,0x00,0x00,0x00
+.byte 0x40,0x00,0x00,0x00, 0x80,0x00,0x00,0x00
+.byte 0x1B,0x00,0x00,0x00, 0x36,0x00,0x00,0x00
+
+.align 6
+AES_Td:
+.byte 0x51,0xf4,0xa7,0x50, 0x7e,0x41,0x65,0x53 # Td0
+.byte 0x1a,0x17,0xa4,0xc3, 0x3a,0x27,0x5e,0x96
+.byte 0x3b,0xab,0x6b,0xcb, 0x1f,0x9d,0x45,0xf1
+.byte 0xac,0xfa,0x58,0xab, 0x4b,0xe3,0x03,0x93
+.byte 0x20,0x30,0xfa,0x55, 0xad,0x76,0x6d,0xf6
+.byte 0x88,0xcc,0x76,0x91, 0xf5,0x02,0x4c,0x25
+.byte 0x4f,0xe5,0xd7,0xfc, 0xc5,0x2a,0xcb,0xd7
+.byte 0x26,0x35,0x44,0x80, 0xb5,0x62,0xa3,0x8f
+.byte 0xde,0xb1,0x5a,0x49, 0x25,0xba,0x1b,0x67
+.byte 0x45,0xea,0x0e,0x98, 0x5d,0xfe,0xc0,0xe1
+.byte 0xc3,0x2f,0x75,0x02, 0x81,0x4c,0xf0,0x12
+.byte 0x8d,0x46,0x97,0xa3, 0x6b,0xd3,0xf9,0xc6
+.byte 0x03,0x8f,0x5f,0xe7, 0x15,0x92,0x9c,0x95
+.byte 0xbf,0x6d,0x7a,0xeb, 0x95,0x52,0x59,0xda
+.byte 0xd4,0xbe,0x83,0x2d, 0x58,0x74,0x21,0xd3
+.byte 0x49,0xe0,0x69,0x29, 0x8e,0xc9,0xc8,0x44
+.byte 0x75,0xc2,0x89,0x6a, 0xf4,0x8e,0x79,0x78
+.byte 0x99,0x58,0x3e,0x6b, 0x27,0xb9,0x71,0xdd
+.byte 0xbe,0xe1,0x4f,0xb6, 0xf0,0x88,0xad,0x17
+.byte 0xc9,0x20,0xac,0x66, 0x7d,0xce,0x3a,0xb4
+.byte 0x63,0xdf,0x4a,0x18, 0xe5,0x1a,0x31,0x82
+.byte 0x97,0x51,0x33,0x60, 0x62,0x53,0x7f,0x45
+.byte 0xb1,0x64,0x77,0xe0, 0xbb,0x6b,0xae,0x84
+.byte 0xfe,0x81,0xa0,0x1c, 0xf9,0x08,0x2b,0x94
+.byte 0x70,0x48,0x68,0x58, 0x8f,0x45,0xfd,0x19
+.byte 0x94,0xde,0x6c,0x87, 0x52,0x7b,0xf8,0xb7
+.byte 0xab,0x73,0xd3,0x23, 0x72,0x4b,0x02,0xe2
+.byte 0xe3,0x1f,0x8f,0x57, 0x66,0x55,0xab,0x2a
+.byte 0xb2,0xeb,0x28,0x07, 0x2f,0xb5,0xc2,0x03
+.byte 0x86,0xc5,0x7b,0x9a, 0xd3,0x37,0x08,0xa5
+.byte 0x30,0x28,0x87,0xf2, 0x23,0xbf,0xa5,0xb2
+.byte 0x02,0x03,0x6a,0xba, 0xed,0x16,0x82,0x5c
+.byte 0x8a,0xcf,0x1c,0x2b, 0xa7,0x79,0xb4,0x92
+.byte 0xf3,0x07,0xf2,0xf0, 0x4e,0x69,0xe2,0xa1
+.byte 0x65,0xda,0xf4,0xcd, 0x06,0x05,0xbe,0xd5
+.byte 0xd1,0x34,0x62,0x1f, 0xc4,0xa6,0xfe,0x8a
+.byte 0x34,0x2e,0x53,0x9d, 0xa2,0xf3,0x55,0xa0
+.byte 0x05,0x8a,0xe1,0x32, 0xa4,0xf6,0xeb,0x75
+.byte 0x0b,0x83,0xec,0x39, 0x40,0x60,0xef,0xaa
+.byte 0x5e,0x71,0x9f,0x06, 0xbd,0x6e,0x10,0x51
+.byte 0x3e,0x21,0x8a,0xf9, 0x96,0xdd,0x06,0x3d
+.byte 0xdd,0x3e,0x05,0xae, 0x4d,0xe6,0xbd,0x46
+.byte 0x91,0x54,0x8d,0xb5, 0x71,0xc4,0x5d,0x05
+.byte 0x04,0x06,0xd4,0x6f, 0x60,0x50,0x15,0xff
+.byte 0x19,0x98,0xfb,0x24, 0xd6,0xbd,0xe9,0x97
+.byte 0x89,0x40,0x43,0xcc, 0x67,0xd9,0x9e,0x77
+.byte 0xb0,0xe8,0x42,0xbd, 0x07,0x89,0x8b,0x88
+.byte 0xe7,0x19,0x5b,0x38, 0x79,0xc8,0xee,0xdb
+.byte 0xa1,0x7c,0x0a,0x47, 0x7c,0x42,0x0f,0xe9
+.byte 0xf8,0x84,0x1e,0xc9, 0x00,0x00,0x00,0x00
+.byte 0x09,0x80,0x86,0x83, 0x32,0x2b,0xed,0x48
+.byte 0x1e,0x11,0x70,0xac, 0x6c,0x5a,0x72,0x4e
+.byte 0xfd,0x0e,0xff,0xfb, 0x0f,0x85,0x38,0x56
+.byte 0x3d,0xae,0xd5,0x1e, 0x36,0x2d,0x39,0x27
+.byte 0x0a,0x0f,0xd9,0x64, 0x68,0x5c,0xa6,0x21
+.byte 0x9b,0x5b,0x54,0xd1, 0x24,0x36,0x2e,0x3a
+.byte 0x0c,0x0a,0x67,0xb1, 0x93,0x57,0xe7,0x0f
+.byte 0xb4,0xee,0x96,0xd2, 0x1b,0x9b,0x91,0x9e
+.byte 0x80,0xc0,0xc5,0x4f, 0x61,0xdc,0x20,0xa2
+.byte 0x5a,0x77,0x4b,0x69, 0x1c,0x12,0x1a,0x16
+.byte 0xe2,0x93,0xba,0x0a, 0xc0,0xa0,0x2a,0xe5
+.byte 0x3c,0x22,0xe0,0x43, 0x12,0x1b,0x17,0x1d
+.byte 0x0e,0x09,0x0d,0x0b, 0xf2,0x8b,0xc7,0xad
+.byte 0x2d,0xb6,0xa8,0xb9, 0x14,0x1e,0xa9,0xc8
+.byte 0x57,0xf1,0x19,0x85, 0xaf,0x75,0x07,0x4c
+.byte 0xee,0x99,0xdd,0xbb, 0xa3,0x7f,0x60,0xfd
+.byte 0xf7,0x01,0x26,0x9f, 0x5c,0x72,0xf5,0xbc
+.byte 0x44,0x66,0x3b,0xc5, 0x5b,0xfb,0x7e,0x34
+.byte 0x8b,0x43,0x29,0x76, 0xcb,0x23,0xc6,0xdc
+.byte 0xb6,0xed,0xfc,0x68, 0xb8,0xe4,0xf1,0x63
+.byte 0xd7,0x31,0xdc,0xca, 0x42,0x63,0x85,0x10
+.byte 0x13,0x97,0x22,0x40, 0x84,0xc6,0x11,0x20
+.byte 0x85,0x4a,0x24,0x7d, 0xd2,0xbb,0x3d,0xf8
+.byte 0xae,0xf9,0x32,0x11, 0xc7,0x29,0xa1,0x6d
+.byte 0x1d,0x9e,0x2f,0x4b, 0xdc,0xb2,0x30,0xf3
+.byte 0x0d,0x86,0x52,0xec, 0x77,0xc1,0xe3,0xd0
+.byte 0x2b,0xb3,0x16,0x6c, 0xa9,0x70,0xb9,0x99
+.byte 0x11,0x94,0x48,0xfa, 0x47,0xe9,0x64,0x22
+.byte 0xa8,0xfc,0x8c,0xc4, 0xa0,0xf0,0x3f,0x1a
+.byte 0x56,0x7d,0x2c,0xd8, 0x22,0x33,0x90,0xef
+.byte 0x87,0x49,0x4e,0xc7, 0xd9,0x38,0xd1,0xc1
+.byte 0x8c,0xca,0xa2,0xfe, 0x98,0xd4,0x0b,0x36
+.byte 0xa6,0xf5,0x81,0xcf, 0xa5,0x7a,0xde,0x28
+.byte 0xda,0xb7,0x8e,0x26, 0x3f,0xad,0xbf,0xa4
+.byte 0x2c,0x3a,0x9d,0xe4, 0x50,0x78,0x92,0x0d
+.byte 0x6a,0x5f,0xcc,0x9b, 0x54,0x7e,0x46,0x62
+.byte 0xf6,0x8d,0x13,0xc2, 0x90,0xd8,0xb8,0xe8
+.byte 0x2e,0x39,0xf7,0x5e, 0x82,0xc3,0xaf,0xf5
+.byte 0x9f,0x5d,0x80,0xbe, 0x69,0xd0,0x93,0x7c
+.byte 0x6f,0xd5,0x2d,0xa9, 0xcf,0x25,0x12,0xb3
+.byte 0xc8,0xac,0x99,0x3b, 0x10,0x18,0x7d,0xa7
+.byte 0xe8,0x9c,0x63,0x6e, 0xdb,0x3b,0xbb,0x7b
+.byte 0xcd,0x26,0x78,0x09, 0x6e,0x59,0x18,0xf4
+.byte 0xec,0x9a,0xb7,0x01, 0x83,0x4f,0x9a,0xa8
+.byte 0xe6,0x95,0x6e,0x65, 0xaa,0xff,0xe6,0x7e
+.byte 0x21,0xbc,0xcf,0x08, 0xef,0x15,0xe8,0xe6
+.byte 0xba,0xe7,0x9b,0xd9, 0x4a,0x6f,0x36,0xce
+.byte 0xea,0x9f,0x09,0xd4, 0x29,0xb0,0x7c,0xd6
+.byte 0x31,0xa4,0xb2,0xaf, 0x2a,0x3f,0x23,0x31
+.byte 0xc6,0xa5,0x94,0x30, 0x35,0xa2,0x66,0xc0
+.byte 0x74,0x4e,0xbc,0x37, 0xfc,0x82,0xca,0xa6
+.byte 0xe0,0x90,0xd0,0xb0, 0x33,0xa7,0xd8,0x15
+.byte 0xf1,0x04,0x98,0x4a, 0x41,0xec,0xda,0xf7
+.byte 0x7f,0xcd,0x50,0x0e, 0x17,0x91,0xf6,0x2f
+.byte 0x76,0x4d,0xd6,0x8d, 0x43,0xef,0xb0,0x4d
+.byte 0xcc,0xaa,0x4d,0x54, 0xe4,0x96,0x04,0xdf
+.byte 0x9e,0xd1,0xb5,0xe3, 0x4c,0x6a,0x88,0x1b
+.byte 0xc1,0x2c,0x1f,0xb8, 0x46,0x65,0x51,0x7f
+.byte 0x9d,0x5e,0xea,0x04, 0x01,0x8c,0x35,0x5d
+.byte 0xfa,0x87,0x74,0x73, 0xfb,0x0b,0x41,0x2e
+.byte 0xb3,0x67,0x1d,0x5a, 0x92,0xdb,0xd2,0x52
+.byte 0xe9,0x10,0x56,0x33, 0x6d,0xd6,0x47,0x13
+.byte 0x9a,0xd7,0x61,0x8c, 0x37,0xa1,0x0c,0x7a
+.byte 0x59,0xf8,0x14,0x8e, 0xeb,0x13,0x3c,0x89
+.byte 0xce,0xa9,0x27,0xee, 0xb7,0x61,0xc9,0x35
+.byte 0xe1,0x1c,0xe5,0xed, 0x7a,0x47,0xb1,0x3c
+.byte 0x9c,0xd2,0xdf,0x59, 0x55,0xf2,0x73,0x3f
+.byte 0x18,0x14,0xce,0x79, 0x73,0xc7,0x37,0xbf
+.byte 0x53,0xf7,0xcd,0xea, 0x5f,0xfd,0xaa,0x5b
+.byte 0xdf,0x3d,0x6f,0x14, 0x78,0x44,0xdb,0x86
+.byte 0xca,0xaf,0xf3,0x81, 0xb9,0x68,0xc4,0x3e
+.byte 0x38,0x24,0x34,0x2c, 0xc2,0xa3,0x40,0x5f
+.byte 0x16,0x1d,0xc3,0x72, 0xbc,0xe2,0x25,0x0c
+.byte 0x28,0x3c,0x49,0x8b, 0xff,0x0d,0x95,0x41
+.byte 0x39,0xa8,0x01,0x71, 0x08,0x0c,0xb3,0xde
+.byte 0xd8,0xb4,0xe4,0x9c, 0x64,0x56,0xc1,0x90
+.byte 0x7b,0xcb,0x84,0x61, 0xd5,0x32,0xb6,0x70
+.byte 0x48,0x6c,0x5c,0x74, 0xd0,0xb8,0x57,0x42
+
+.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 # Td4
+.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ # made-up _instructions, _xtr, _ins, _ror and _bias, cope
+ # with byte order dependencies...
+ if (/^\s+_/) {
+ s/(_[a-z]+\s+)(\$[0-9]+),([^,]+)(#.*)*$/$1$2,$2,$3/;
+
+ s/_xtr\s+(\$[0-9]+),(\$[0-9]+),([0-9]+(\-2)*)/
+ sprintf("srl\t$1,$2,%d",$big_endian ? eval($3)
+ : eval("24-$3"))/e or
+ s/_ins\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+ sprintf("sll\t$1,$2,%d",$big_endian ? eval($3)
+ : eval("24-$3"))/e or
+ s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
+ sprintf("srl\t$1,$2,%d",$big_endian ? eval($3)
+ : eval("$3*-1"))/e or
+ s/_bias\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+ sprintf("sll\t$1,$2,%d",$big_endian ? eval($3)
+ : eval("($3-16)&31"))/e;
+
+ s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
+ sprintf("sll\t$1,$2,$3")/e or
+ s/srl\s+(\$[0-9]+),(\$[0-9]+),0/
+ sprintf("and\t$1,$2,0xff")/e or
+ s/(sll\s+\$[0-9]+,\$[0-9]+,0)/#$1/;
+ }
+
+ # convert lwl/lwr and swr/swl to little-endian order
+ if (!$big_endian && /^\s+[sl]w[lr]\s+/) {
+ s/([sl]wl.*)([0-9]+)\((\$[0-9]+)\)/
+ sprintf("$1%d($3)",eval("$2-$2%4+($2%4-1)&3"))/e or
+ s/([sl]wr.*)([0-9]+)\((\$[0-9]+)\)/
+ sprintf("$1%d($3)",eval("$2-$2%4+($2%4+1)&3"))/e;
+ }
+
+ print $_,"\n";
+}
+
+close STDOUT;
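[Aside, not part of the patch: the rewriting loop above is easiest to see on
a concrete line. "_xtr dst,src,16-2" means "extract the s>>16 byte,
pre-scaled by 4 for table indexing": on big-endian it becomes a plain srl by
16-2 = 14, while on little-endian the loaded word is byte-reversed, so the
shift is 24-16-2 = 6. A shift that comes out negative is flipped into sll,
and a zero shift degenerates into a byte mask. A self-contained replay of
the relevant substitutions:

    for my $big_endian (1,0) {
        for my $line ("\t_xtr\t\$1,\$9,16-2","\t_xtr\t\$1,\$9,0-2") {
            local $_ = $line;
            s/_xtr\s+(\$[0-9]+),(\$[0-9]+),([0-9]+(\-2)*)/
                sprintf("srl\t$1,$2,%d",$big_endian ? eval($3)
                                                    : eval("24-$3"))/e;
            s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
                sprintf("sll\t$1,$2,$3")/e;    # negative shift -> sll
            print "big_endian=$big_endian:$_\n";
        }
    }
    # big_endian=1: srl $1,$9,14 / sll $1,$9,2
    # big_endian=0: srl $1,$9,6  / srl $1,$9,22
]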
diff --git a/openssl/crypto/aes/asm/aes-parisc.pl b/openssl/crypto/aes/asm/aes-parisc.pl
new file mode 100644
index 000000000..c36b6a227
--- /dev/null
+++ b/openssl/crypto/aes/asm/aes-parisc.pl
@@ -0,0 +1,1021 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for PA-RISC.
+#
+# June 2009.
+#
+# The module is a mechanical transliteration of aes-sparcv9.pl, but
+# with a twist: S-boxes are compressed even further, down to 1K+256B.
+# On PA-7100LC performance is ~40% better than gcc 3.2 generated code
+# and is about 33 cycles per byte processed with a 128-bit key. Newer
+# CPUs perform at 16 cycles per byte. It's not faster than code
+# generated by the vendor compiler, but recall that it uses compressed
+# S-boxes, which require extra processing.
+#
+# Special thanks to polarhome.com for providing an HP-UX account.
+
+$flavour = shift;
+$output = shift;
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+ $LEVEL ="2.0W";
+ $SIZE_T =8;
+ $FRAME_MARKER =80;
+ $SAVED_RP =16;
+ $PUSH ="std";
+ $PUSHMA ="std,ma";
+ $POP ="ldd";
+ $POPMB ="ldd,mb";
+} else {
+ $LEVEL ="1.0";
+ $SIZE_T =4;
+ $FRAME_MARKER =48;
+ $SAVED_RP =20;
+ $PUSH ="stw";
+ $PUSHMA ="stwm";
+ $POP ="ldw";
+ $POPMB ="ldwm";
+}
+
+$FRAME=16*$SIZE_T+$FRAME_MARKER;# 16 saved regs + frame marker
+ # [+ argument transfer]
+$inp="%r26"; # arg0
+$out="%r25"; # arg1
+$key="%r24"; # arg2
+
+($s0,$s1,$s2,$s3) = ("%r1","%r2","%r3","%r4");
+($t0,$t1,$t2,$t3) = ("%r5","%r6","%r7","%r8");
+
+($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
+ $acc8, $acc9,$acc10,$acc11,$acc12,$acc13,$acc14,$acc15) =
+("%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16",
+"%r17","%r18","%r19","%r20","%r21","%r22","%r23","%r26");
+
+$tbl="%r28";
+$rounds="%r29";
+
+$code=<<___;
+ .LEVEL $LEVEL
+ .SPACE \$TEXT\$
+ .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+ .EXPORT AES_encrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+ .ALIGN 64
+AES_encrypt
+ .PROC
+ .CALLINFO FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
+ .ENTRY
+ $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
+ $PUSHMA %r3,$FRAME(%sp)
+ $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
+ $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
+ $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
+ $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
+ $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
+ $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
+ $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
+ $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp)
+ $PUSH %r12,`-$FRAME+9*$SIZE_T`(%sp)
+ $PUSH %r13,`-$FRAME+10*$SIZE_T`(%sp)
+ $PUSH %r14,`-$FRAME+11*$SIZE_T`(%sp)
+ $PUSH %r15,`-$FRAME+12*$SIZE_T`(%sp)
+ $PUSH %r16,`-$FRAME+13*$SIZE_T`(%sp)
+ $PUSH %r17,`-$FRAME+14*$SIZE_T`(%sp)
+ $PUSH %r18,`-$FRAME+15*$SIZE_T`(%sp)
+
+ blr %r0,$tbl
+ ldi 3,$t0
+L\$enc_pic
+ andcm $tbl,$t0,$tbl
+ ldo L\$AES_Te-L\$enc_pic($tbl),$tbl
+
+ and $inp,$t0,$t0
+ sub $inp,$t0,$inp
+ ldw 0($inp),$s0
+ ldw 4($inp),$s1
+ ldw 8($inp),$s2
+ comib,= 0,$t0,L\$enc_inp_aligned
+ ldw 12($inp),$s3
+
+ sh3addl $t0,%r0,$t0
+ subi 32,$t0,$t0
+ mtctl $t0,%cr11
+ ldw 16($inp),$t1
+ vshd $s0,$s1,$s0
+ vshd $s1,$s2,$s1
+ vshd $s2,$s3,$s2
+ vshd $s3,$t1,$s3
+
+L\$enc_inp_aligned
+ bl _parisc_AES_encrypt,%r31
+ nop
+
+ extru,<> $out,31,2,%r0
+ b L\$enc_out_aligned
+ nop
+
+ _srm $s0,24,$acc0
+ _srm $s0,16,$acc1
+ stb $acc0,0($out)
+ _srm $s0,8,$acc2
+ stb $acc1,1($out)
+ _srm $s1,24,$acc4
+ stb $acc2,2($out)
+ _srm $s1,16,$acc5
+ stb $s0,3($out)
+ _srm $s1,8,$acc6
+ stb $acc4,4($out)
+ _srm $s2,24,$acc0
+ stb $acc5,5($out)
+ _srm $s2,16,$acc1
+ stb $acc6,6($out)
+ _srm $s2,8,$acc2
+ stb $s1,7($out)
+ _srm $s3,24,$acc4
+ stb $acc0,8($out)
+ _srm $s3,16,$acc5
+ stb $acc1,9($out)
+ _srm $s3,8,$acc6
+ stb $acc2,10($out)
+ stb $s2,11($out)
+ stb $acc4,12($out)
+ stb $acc5,13($out)
+ stb $acc6,14($out)
+ b L\$enc_done
+ stb $s3,15($out)
+
+L\$enc_out_aligned
+ stw $s0,0($out)
+ stw $s1,4($out)
+ stw $s2,8($out)
+ stw $s3,12($out)
+
+L\$enc_done
+ $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
+ $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
+ $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
+ $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
+ $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
+ $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
+ $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
+ $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
+ $POP `-$FRAME+8*$SIZE_T`(%sp),%r11
+ $POP `-$FRAME+9*$SIZE_T`(%sp),%r12
+ $POP `-$FRAME+10*$SIZE_T`(%sp),%r13
+ $POP `-$FRAME+11*$SIZE_T`(%sp),%r14
+ $POP `-$FRAME+12*$SIZE_T`(%sp),%r15
+ $POP `-$FRAME+13*$SIZE_T`(%sp),%r16
+ $POP `-$FRAME+14*$SIZE_T`(%sp),%r17
+ $POP `-$FRAME+15*$SIZE_T`(%sp),%r18
+ bv (%r2)
+ .EXIT
+ $POPMB -$FRAME(%sp),%r3
+ .PROCEND
+
+ .ALIGN 16
+_parisc_AES_encrypt
+ .PROC
+ .CALLINFO MILLICODE
+ .ENTRY
+ ldw 240($key),$rounds
+ ldw 0($key),$t0
+ ldw 4($key),$t1
+ ldw 8($key),$t2
+ _srm $rounds,1,$rounds
+ xor $t0,$s0,$s0
+ ldw 12($key),$t3
+ _srm $s0,24,$acc0
+ xor $t1,$s1,$s1
+ ldw 16($key),$t0
+ _srm $s1,16,$acc1
+ xor $t2,$s2,$s2
+ ldw 20($key),$t1
+ xor $t3,$s3,$s3
+ ldw 24($key),$t2
+ ldw 28($key),$t3
+L\$enc_loop
+ _srm $s2,8,$acc2
+ ldwx,s $acc0($tbl),$acc0
+ _srm $s3,0,$acc3
+ ldwx,s $acc1($tbl),$acc1
+ _srm $s1,24,$acc4
+ ldwx,s $acc2($tbl),$acc2
+ _srm $s2,16,$acc5
+ ldwx,s $acc3($tbl),$acc3
+ _srm $s3,8,$acc6
+ ldwx,s $acc4($tbl),$acc4
+ _srm $s0,0,$acc7
+ ldwx,s $acc5($tbl),$acc5
+ _srm $s2,24,$acc8
+ ldwx,s $acc6($tbl),$acc6
+ _srm $s3,16,$acc9
+ ldwx,s $acc7($tbl),$acc7
+ _srm $s0,8,$acc10
+ ldwx,s $acc8($tbl),$acc8
+ _srm $s1,0,$acc11
+ ldwx,s $acc9($tbl),$acc9
+ _srm $s3,24,$acc12
+ ldwx,s $acc10($tbl),$acc10
+ _srm $s0,16,$acc13
+ ldwx,s $acc11($tbl),$acc11
+ _srm $s1,8,$acc14
+ ldwx,s $acc12($tbl),$acc12
+ _srm $s2,0,$acc15
+ ldwx,s $acc13($tbl),$acc13
+ ldwx,s $acc14($tbl),$acc14
+ ldwx,s $acc15($tbl),$acc15
+ addib,= -1,$rounds,L\$enc_last
+ ldo 32($key),$key
+
+ _ror $acc1,8,$acc1
+ xor $acc0,$t0,$t0
+ ldw 0($key),$s0
+ _ror $acc2,16,$acc2
+ xor $acc1,$t0,$t0
+ ldw 4($key),$s1
+ _ror $acc3,24,$acc3
+ xor $acc2,$t0,$t0
+ ldw 8($key),$s2
+ _ror $acc5,8,$acc5
+ xor $acc3,$t0,$t0
+ ldw 12($key),$s3
+ _ror $acc6,16,$acc6
+ xor $acc4,$t1,$t1
+ _ror $acc7,24,$acc7
+ xor $acc5,$t1,$t1
+ _ror $acc9,8,$acc9
+ xor $acc6,$t1,$t1
+ _ror $acc10,16,$acc10
+ xor $acc7,$t1,$t1
+ _ror $acc11,24,$acc11
+ xor $acc8,$t2,$t2
+ _ror $acc13,8,$acc13
+ xor $acc9,$t2,$t2
+ _ror $acc14,16,$acc14
+ xor $acc10,$t2,$t2
+ _ror $acc15,24,$acc15
+ xor $acc11,$t2,$t2
+ xor $acc12,$acc14,$acc14
+ xor $acc13,$t3,$t3
+ _srm $t0,24,$acc0
+ xor $acc14,$t3,$t3
+ _srm $t1,16,$acc1
+ xor $acc15,$t3,$t3
+
+ _srm $t2,8,$acc2
+ ldwx,s $acc0($tbl),$acc0
+ _srm $t3,0,$acc3
+ ldwx,s $acc1($tbl),$acc1
+ _srm $t1,24,$acc4
+ ldwx,s $acc2($tbl),$acc2
+ _srm $t2,16,$acc5
+ ldwx,s $acc3($tbl),$acc3
+ _srm $t3,8,$acc6
+ ldwx,s $acc4($tbl),$acc4
+ _srm $t0,0,$acc7
+ ldwx,s $acc5($tbl),$acc5
+ _srm $t2,24,$acc8
+ ldwx,s $acc6($tbl),$acc6
+ _srm $t3,16,$acc9
+ ldwx,s $acc7($tbl),$acc7
+ _srm $t0,8,$acc10
+ ldwx,s $acc8($tbl),$acc8
+ _srm $t1,0,$acc11
+ ldwx,s $acc9($tbl),$acc9
+ _srm $t3,24,$acc12
+ ldwx,s $acc10($tbl),$acc10
+ _srm $t0,16,$acc13
+ ldwx,s $acc11($tbl),$acc11
+ _srm $t1,8,$acc14
+ ldwx,s $acc12($tbl),$acc12
+ _srm $t2,0,$acc15
+ ldwx,s $acc13($tbl),$acc13
+ _ror $acc1,8,$acc1
+ ldwx,s $acc14($tbl),$acc14
+
+ _ror $acc2,16,$acc2
+ xor $acc0,$s0,$s0
+ ldwx,s $acc15($tbl),$acc15
+ _ror $acc3,24,$acc3
+ xor $acc1,$s0,$s0
+ ldw 16($key),$t0
+ _ror $acc5,8,$acc5
+ xor $acc2,$s0,$s0
+ ldw 20($key),$t1
+ _ror $acc6,16,$acc6
+ xor $acc3,$s0,$s0
+ ldw 24($key),$t2
+ _ror $acc7,24,$acc7
+ xor $acc4,$s1,$s1
+ ldw 28($key),$t3
+ _ror $acc9,8,$acc9
+ xor $acc5,$s1,$s1
+ ldw 1024+0($tbl),%r0 ; prefetch te4
+ _ror $acc10,16,$acc10
+ xor $acc6,$s1,$s1
+ ldw 1024+32($tbl),%r0 ; prefetch te4
+ _ror $acc11,24,$acc11
+ xor $acc7,$s1,$s1
+ ldw 1024+64($tbl),%r0 ; prefetch te4
+ _ror $acc13,8,$acc13
+ xor $acc8,$s2,$s2
+ ldw 1024+96($tbl),%r0 ; prefetch te4
+ _ror $acc14,16,$acc14
+ xor $acc9,$s2,$s2
+ ldw 1024+128($tbl),%r0 ; prefetch te4
+ _ror $acc15,24,$acc15
+ xor $acc10,$s2,$s2
+ ldw 1024+160($tbl),%r0 ; prefetch te4
+ _srm $s0,24,$acc0
+ xor $acc11,$s2,$s2
+ ldw 1024+192($tbl),%r0 ; prefetch te4
+ xor $acc12,$acc14,$acc14
+ xor $acc13,$s3,$s3
+ ldw 1024+224($tbl),%r0 ; prefetch te4
+ _srm $s1,16,$acc1
+ xor $acc14,$s3,$s3
+ b L\$enc_loop
+ xor $acc15,$s3,$s3
+
+ .ALIGN 16
+L\$enc_last
+ ldo 1024($tbl),$rounds
+ _ror $acc1,8,$acc1
+ xor $acc0,$t0,$t0
+ ldw 0($key),$s0
+ _ror $acc2,16,$acc2
+ xor $acc1,$t0,$t0
+ ldw 4($key),$s1
+ _ror $acc3,24,$acc3
+ xor $acc2,$t0,$t0
+ ldw 8($key),$s2
+ _ror $acc5,8,$acc5
+ xor $acc3,$t0,$t0
+ ldw 12($key),$s3
+ _ror $acc6,16,$acc6
+ xor $acc4,$t1,$t1
+ _ror $acc7,24,$acc7
+ xor $acc5,$t1,$t1
+ _ror $acc9,8,$acc9
+ xor $acc6,$t1,$t1
+ _ror $acc10,16,$acc10
+ xor $acc7,$t1,$t1
+ _ror $acc11,24,$acc11
+ xor $acc8,$t2,$t2
+ _ror $acc13,8,$acc13
+ xor $acc9,$t2,$t2
+ _ror $acc14,16,$acc14
+ xor $acc10,$t2,$t2
+ _ror $acc15,24,$acc15
+ xor $acc11,$t2,$t2
+ xor $acc12,$acc14,$acc14
+ xor $acc13,$t3,$t3
+ _srm $t0,24,$acc0
+ xor $acc14,$t3,$t3
+ _srm $t1,16,$acc1
+ xor $acc15,$t3,$t3
+
+ _srm $t2,8,$acc2
+ ldbx $acc0($rounds),$acc0
+ _srm $t1,24,$acc4
+ ldbx $acc1($rounds),$acc1
+ _srm $t2,16,$acc5
+ _srm $t3,0,$acc3
+ ldbx $acc2($rounds),$acc2
+ ldbx $acc3($rounds),$acc3
+ _srm $t3,8,$acc6
+ ldbx $acc4($rounds),$acc4
+ _srm $t2,24,$acc8
+ ldbx $acc5($rounds),$acc5
+ _srm $t3,16,$acc9
+ _srm $t0,0,$acc7
+ ldbx $acc6($rounds),$acc6
+ ldbx $acc7($rounds),$acc7
+ _srm $t0,8,$acc10
+ ldbx $acc8($rounds),$acc8
+ _srm $t3,24,$acc12
+ ldbx $acc9($rounds),$acc9
+ _srm $t0,16,$acc13
+ _srm $t1,0,$acc11
+ ldbx $acc10($rounds),$acc10
+ _srm $t1,8,$acc14
+ ldbx $acc11($rounds),$acc11
+ ldbx $acc12($rounds),$acc12
+ ldbx $acc13($rounds),$acc13
+ _srm $t2,0,$acc15
+ ldbx $acc14($rounds),$acc14
+
+ dep $acc0,7,8,$acc3
+ ldbx $acc15($rounds),$acc15
+ dep $acc4,7,8,$acc7
+ dep $acc1,15,8,$acc3
+ dep $acc5,15,8,$acc7
+ dep $acc2,23,8,$acc3
+ dep $acc6,23,8,$acc7
+ xor $acc3,$s0,$s0
+ xor $acc7,$s1,$s1
+ dep $acc8,7,8,$acc11
+ dep $acc12,7,8,$acc15
+ dep $acc9,15,8,$acc11
+ dep $acc13,15,8,$acc15
+ dep $acc10,23,8,$acc11
+ dep $acc14,23,8,$acc15
+ xor $acc11,$s2,$s2
+
+ bv (%r31)
+ .EXIT
+ xor $acc15,$s3,$s3
+ .PROCEND
+
+ .ALIGN 64
+L\$AES_Te
+ .WORD 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+ .WORD 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+ .WORD 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+ .WORD 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+ .WORD 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+ .WORD 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+ .WORD 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+ .WORD 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+ .WORD 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+ .WORD 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+ .WORD 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+ .WORD 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+ .WORD 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+ .WORD 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+ .WORD 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+ .WORD 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+ .WORD 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+ .WORD 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+ .WORD 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+ .WORD 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+ .WORD 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+ .WORD 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+ .WORD 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+ .WORD 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+ .WORD 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+ .WORD 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+ .WORD 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+ .WORD 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+ .WORD 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+ .WORD 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+ .WORD 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+ .WORD 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+ .WORD 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+ .WORD 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+ .WORD 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+ .WORD 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+ .WORD 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+ .WORD 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+ .WORD 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+ .WORD 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+ .WORD 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+ .WORD 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+ .WORD 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+ .WORD 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+ .WORD 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+ .WORD 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+ .WORD 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+ .WORD 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+ .WORD 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+ .WORD 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+ .WORD 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+ .WORD 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+ .WORD 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+ .WORD 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+ .WORD 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+ .WORD 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+ .WORD 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+ .WORD 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+ .WORD 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+ .WORD 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+ .WORD 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+ .WORD 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+ .WORD 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+ .WORD 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+ .BYTE 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+ .BYTE 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+ .BYTE 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+ .BYTE 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+ .BYTE 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+ .BYTE 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+ .BYTE 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+ .BYTE 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+ .BYTE 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+ .BYTE 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+ .BYTE 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+ .BYTE 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+ .BYTE 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+ .BYTE 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+ .BYTE 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+ .BYTE 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+ .BYTE 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+ .BYTE 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+ .BYTE 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+ .BYTE 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+ .BYTE 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+ .BYTE 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+ .BYTE 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+ .BYTE 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+ .BYTE 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+ .BYTE 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+ .BYTE 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+ .BYTE 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+ .BYTE 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+ .BYTE 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+ .BYTE 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+ .BYTE 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+___
+
+$code.=<<___;
+ .EXPORT AES_decrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+ .ALIGN 16
+AES_decrypt
+ .PROC
+ .CALLINFO FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
+ .ENTRY
+ $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
+ $PUSHMA %r3,$FRAME(%sp)
+ $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
+ $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
+ $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
+ $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
+ $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
+ $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
+ $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
+ $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp)
+ $PUSH %r12,`-$FRAME+9*$SIZE_T`(%sp)
+ $PUSH %r13,`-$FRAME+10*$SIZE_T`(%sp)
+ $PUSH %r14,`-$FRAME+11*$SIZE_T`(%sp)
+ $PUSH %r15,`-$FRAME+12*$SIZE_T`(%sp)
+ $PUSH %r16,`-$FRAME+13*$SIZE_T`(%sp)
+ $PUSH %r17,`-$FRAME+14*$SIZE_T`(%sp)
+ $PUSH %r18,`-$FRAME+15*$SIZE_T`(%sp)
+
+ blr %r0,$tbl
+ ldi 3,$t0
+L\$dec_pic
+ andcm $tbl,$t0,$tbl
+ ldo L\$AES_Td-L\$dec_pic($tbl),$tbl
+
+ and $inp,$t0,$t0
+ sub $inp,$t0,$inp
+ ldw 0($inp),$s0
+ ldw 4($inp),$s1
+ ldw 8($inp),$s2
+ comib,= 0,$t0,L\$dec_inp_aligned
+ ldw 12($inp),$s3
+
+ sh3addl $t0,%r0,$t0
+ subi 32,$t0,$t0
+ mtctl $t0,%cr11
+ ldw 16($inp),$t1
+ vshd $s0,$s1,$s0
+ vshd $s1,$s2,$s1
+ vshd $s2,$s3,$s2
+ vshd $s3,$t1,$s3
+
+L\$dec_inp_aligned
+ bl _parisc_AES_decrypt,%r31
+ nop
+
+ extru,<> $out,31,2,%r0
+ b L\$dec_out_aligned
+ nop
+
+ _srm $s0,24,$acc0
+ _srm $s0,16,$acc1
+ stb $acc0,0($out)
+ _srm $s0,8,$acc2
+ stb $acc1,1($out)
+ _srm $s1,24,$acc4
+ stb $acc2,2($out)
+ _srm $s1,16,$acc5
+ stb $s0,3($out)
+ _srm $s1,8,$acc6
+ stb $acc4,4($out)
+ _srm $s2,24,$acc0
+ stb $acc5,5($out)
+ _srm $s2,16,$acc1
+ stb $acc6,6($out)
+ _srm $s2,8,$acc2
+ stb $s1,7($out)
+ _srm $s3,24,$acc4
+ stb $acc0,8($out)
+ _srm $s3,16,$acc5
+ stb $acc1,9($out)
+ _srm $s3,8,$acc6
+ stb $acc2,10($out)
+ stb $s2,11($out)
+ stb $acc4,12($out)
+ stb $acc5,13($out)
+ stb $acc6,14($out)
+ b L\$dec_done
+ stb $s3,15($out)
+
+L\$dec_out_aligned
+ stw $s0,0($out)
+ stw $s1,4($out)
+ stw $s2,8($out)
+ stw $s3,12($out)
+
+L\$dec_done
+ $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
+ $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
+ $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
+ $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
+ $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
+ $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
+ $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
+ $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
+ $POP `-$FRAME+8*$SIZE_T`(%sp),%r11
+ $POP `-$FRAME+9*$SIZE_T`(%sp),%r12
+ $POP `-$FRAME+10*$SIZE_T`(%sp),%r13
+ $POP `-$FRAME+11*$SIZE_T`(%sp),%r14
+ $POP `-$FRAME+12*$SIZE_T`(%sp),%r15
+ $POP `-$FRAME+13*$SIZE_T`(%sp),%r16
+ $POP `-$FRAME+14*$SIZE_T`(%sp),%r17
+ $POP `-$FRAME+15*$SIZE_T`(%sp),%r18
+ bv (%r2)
+ .EXIT
+ $POPMB -$FRAME(%sp),%r3
+ .PROCEND
+
+ .ALIGN 16
+_parisc_AES_decrypt
+ .PROC
+ .CALLINFO MILLICODE
+ .ENTRY
+ ldw 240($key),$rounds
+ ldw 0($key),$t0
+ ldw 4($key),$t1
+ ldw 8($key),$t2
+ ldw 12($key),$t3
+ _srm $rounds,1,$rounds
+ xor $t0,$s0,$s0
+ ldw 16($key),$t0
+ xor $t1,$s1,$s1
+ ldw 20($key),$t1
+ _srm $s0,24,$acc0
+ xor $t2,$s2,$s2
+ ldw 24($key),$t2
+ xor $t3,$s3,$s3
+ ldw 28($key),$t3
+ _srm $s3,16,$acc1
+L\$dec_loop
+ _srm $s2,8,$acc2
+ ldwx,s $acc0($tbl),$acc0
+ _srm $s1,0,$acc3
+ ldwx,s $acc1($tbl),$acc1
+ _srm $s1,24,$acc4
+ ldwx,s $acc2($tbl),$acc2
+ _srm $s0,16,$acc5
+ ldwx,s $acc3($tbl),$acc3
+ _srm $s3,8,$acc6
+ ldwx,s $acc4($tbl),$acc4
+ _srm $s2,0,$acc7
+ ldwx,s $acc5($tbl),$acc5
+ _srm $s2,24,$acc8
+ ldwx,s $acc6($tbl),$acc6
+ _srm $s1,16,$acc9
+ ldwx,s $acc7($tbl),$acc7
+ _srm $s0,8,$acc10
+ ldwx,s $acc8($tbl),$acc8
+ _srm $s3,0,$acc11
+ ldwx,s $acc9($tbl),$acc9
+ _srm $s3,24,$acc12
+ ldwx,s $acc10($tbl),$acc10
+ _srm $s2,16,$acc13
+ ldwx,s $acc11($tbl),$acc11
+ _srm $s1,8,$acc14
+ ldwx,s $acc12($tbl),$acc12
+ _srm $s0,0,$acc15
+ ldwx,s $acc13($tbl),$acc13
+ ldwx,s $acc14($tbl),$acc14
+ ldwx,s $acc15($tbl),$acc15
+ addib,= -1,$rounds,L\$dec_last
+ ldo 32($key),$key
+
+ _ror $acc1,8,$acc1
+ xor $acc0,$t0,$t0
+ ldw 0($key),$s0
+ _ror $acc2,16,$acc2
+ xor $acc1,$t0,$t0
+ ldw 4($key),$s1
+ _ror $acc3,24,$acc3
+ xor $acc2,$t0,$t0
+ ldw 8($key),$s2
+ _ror $acc5,8,$acc5
+ xor $acc3,$t0,$t0
+ ldw 12($key),$s3
+ _ror $acc6,16,$acc6
+ xor $acc4,$t1,$t1
+ _ror $acc7,24,$acc7
+ xor $acc5,$t1,$t1
+ _ror $acc9,8,$acc9
+ xor $acc6,$t1,$t1
+ _ror $acc10,16,$acc10
+ xor $acc7,$t1,$t1
+ _ror $acc11,24,$acc11
+ xor $acc8,$t2,$t2
+ _ror $acc13,8,$acc13
+ xor $acc9,$t2,$t2
+ _ror $acc14,16,$acc14
+ xor $acc10,$t2,$t2
+ _ror $acc15,24,$acc15
+ xor $acc11,$t2,$t2
+ xor $acc12,$acc14,$acc14
+ xor $acc13,$t3,$t3
+ _srm $t0,24,$acc0
+ xor $acc14,$t3,$t3
+ xor $acc15,$t3,$t3
+ _srm $t3,16,$acc1
+
+ _srm $t2,8,$acc2
+ ldwx,s $acc0($tbl),$acc0
+ _srm $t1,0,$acc3
+ ldwx,s $acc1($tbl),$acc1
+ _srm $t1,24,$acc4
+ ldwx,s $acc2($tbl),$acc2
+ _srm $t0,16,$acc5
+ ldwx,s $acc3($tbl),$acc3
+ _srm $t3,8,$acc6
+ ldwx,s $acc4($tbl),$acc4
+ _srm $t2,0,$acc7
+ ldwx,s $acc5($tbl),$acc5
+ _srm $t2,24,$acc8
+ ldwx,s $acc6($tbl),$acc6
+ _srm $t1,16,$acc9
+ ldwx,s $acc7($tbl),$acc7
+ _srm $t0,8,$acc10
+ ldwx,s $acc8($tbl),$acc8
+ _srm $t3,0,$acc11
+ ldwx,s $acc9($tbl),$acc9
+ _srm $t3,24,$acc12
+ ldwx,s $acc10($tbl),$acc10
+ _srm $t2,16,$acc13
+ ldwx,s $acc11($tbl),$acc11
+ _srm $t1,8,$acc14
+ ldwx,s $acc12($tbl),$acc12
+ _srm $t0,0,$acc15
+ ldwx,s $acc13($tbl),$acc13
+ _ror $acc1,8,$acc1
+ ldwx,s $acc14($tbl),$acc14
+
+ _ror $acc2,16,$acc2
+ xor $acc0,$s0,$s0
+ ldwx,s $acc15($tbl),$acc15
+ _ror $acc3,24,$acc3
+ xor $acc1,$s0,$s0
+ ldw 16($key),$t0
+ _ror $acc5,8,$acc5
+ xor $acc2,$s0,$s0
+ ldw 20($key),$t1
+ _ror $acc6,16,$acc6
+ xor $acc3,$s0,$s0
+ ldw 24($key),$t2
+ _ror $acc7,24,$acc7
+ xor $acc4,$s1,$s1
+ ldw 28($key),$t3
+ _ror $acc9,8,$acc9
+ xor $acc5,$s1,$s1
+ ldw 1024+0($tbl),%r0 ; prefetch td4
+ _ror $acc10,16,$acc10
+ xor $acc6,$s1,$s1
+ ldw 1024+32($tbl),%r0 ; prefetch td4
+ _ror $acc11,24,$acc11
+ xor $acc7,$s1,$s1
+ ldw 1024+64($tbl),%r0 ; prefetch td4
+ _ror $acc13,8,$acc13
+ xor $acc8,$s2,$s2
+ ldw 1024+96($tbl),%r0 ; prefetch td4
+ _ror $acc14,16,$acc14
+ xor $acc9,$s2,$s2
+ ldw 1024+128($tbl),%r0 ; prefetch td4
+ _ror $acc15,24,$acc15
+ xor $acc10,$s2,$s2
+ ldw 1024+160($tbl),%r0 ; prefetch td4
+ _srm $s0,24,$acc0
+ xor $acc11,$s2,$s2
+ ldw 1024+192($tbl),%r0 ; prefetch td4
+ xor $acc12,$acc14,$acc14
+ xor $acc13,$s3,$s3
+ ldw 1024+224($tbl),%r0 ; prefetch td4
+ xor $acc14,$s3,$s3
+ xor $acc15,$s3,$s3
+ b L\$dec_loop
+ _srm $s3,16,$acc1
+
+ .ALIGN 16
+L\$dec_last
+ ldo 1024($tbl),$rounds
+ _ror $acc1,8,$acc1
+ xor $acc0,$t0,$t0
+ ldw 0($key),$s0
+ _ror $acc2,16,$acc2
+ xor $acc1,$t0,$t0
+ ldw 4($key),$s1
+ _ror $acc3,24,$acc3
+ xor $acc2,$t0,$t0
+ ldw 8($key),$s2
+ _ror $acc5,8,$acc5
+ xor $acc3,$t0,$t0
+ ldw 12($key),$s3
+ _ror $acc6,16,$acc6
+ xor $acc4,$t1,$t1
+ _ror $acc7,24,$acc7
+ xor $acc5,$t1,$t1
+ _ror $acc9,8,$acc9
+ xor $acc6,$t1,$t1
+ _ror $acc10,16,$acc10
+ xor $acc7,$t1,$t1
+ _ror $acc11,24,$acc11
+ xor $acc8,$t2,$t2
+ _ror $acc13,8,$acc13
+ xor $acc9,$t2,$t2
+ _ror $acc14,16,$acc14
+ xor $acc10,$t2,$t2
+ _ror $acc15,24,$acc15
+ xor $acc11,$t2,$t2
+ xor $acc12,$acc14,$acc14
+ xor $acc13,$t3,$t3
+ _srm $t0,24,$acc0
+ xor $acc14,$t3,$t3
+ xor $acc15,$t3,$t3
+ _srm $t3,16,$acc1
+
+ _srm $t2,8,$acc2
+ ldbx $acc0($rounds),$acc0
+ _srm $t1,24,$acc4
+ ldbx $acc1($rounds),$acc1
+ _srm $t0,16,$acc5
+ _srm $t1,0,$acc3
+ ldbx $acc2($rounds),$acc2
+ ldbx $acc3($rounds),$acc3
+ _srm $t3,8,$acc6
+ ldbx $acc4($rounds),$acc4
+ _srm $t2,24,$acc8
+ ldbx $acc5($rounds),$acc5
+ _srm $t1,16,$acc9
+ _srm $t2,0,$acc7
+ ldbx $acc6($rounds),$acc6
+ ldbx $acc7($rounds),$acc7
+ _srm $t0,8,$acc10
+ ldbx $acc8($rounds),$acc8
+ _srm $t3,24,$acc12
+ ldbx $acc9($rounds),$acc9
+ _srm $t2,16,$acc13
+ _srm $t3,0,$acc11
+ ldbx $acc10($rounds),$acc10
+ _srm $t1,8,$acc14
+ ldbx $acc11($rounds),$acc11
+ ldbx $acc12($rounds),$acc12
+ ldbx $acc13($rounds),$acc13
+ _srm $t0,0,$acc15
+ ldbx $acc14($rounds),$acc14
+
+ dep $acc0,7,8,$acc3
+ ldbx $acc15($rounds),$acc15
+ dep $acc4,7,8,$acc7
+ dep $acc1,15,8,$acc3
+ dep $acc5,15,8,$acc7
+ dep $acc2,23,8,$acc3
+ dep $acc6,23,8,$acc7
+ xor $acc3,$s0,$s0
+ xor $acc7,$s1,$s1
+ dep $acc8,7,8,$acc11
+ dep $acc12,7,8,$acc15
+ dep $acc9,15,8,$acc11
+ dep $acc13,15,8,$acc15
+ dep $acc10,23,8,$acc11
+ dep $acc14,23,8,$acc15
+ xor $acc11,$s2,$s2
+
+ bv (%r31)
+ .EXIT
+ xor $acc15,$s3,$s3
+ .PROCEND
+
+ .ALIGN 64
+L\$AES_Td
+ .WORD 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+ .WORD 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+ .WORD 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+ .WORD 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+ .WORD 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+ .WORD 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+ .WORD 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+ .WORD 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+ .WORD 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+ .WORD 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+ .WORD 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+ .WORD 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+ .WORD 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+ .WORD 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+ .WORD 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+ .WORD 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+ .WORD 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+ .WORD 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+ .WORD 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+ .WORD 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+ .WORD 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+ .WORD 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+ .WORD 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+ .WORD 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+ .WORD 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+ .WORD 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+ .WORD 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+ .WORD 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+ .WORD 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+ .WORD 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+ .WORD 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+ .WORD 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+ .WORD 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+ .WORD 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+ .WORD 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+ .WORD 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+ .WORD 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+ .WORD 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+ .WORD 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+ .WORD 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+ .WORD 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+ .WORD 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+ .WORD 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+ .WORD 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+ .WORD 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+ .WORD 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+ .WORD 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+ .WORD 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+ .WORD 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+ .WORD 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+ .WORD 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+ .WORD 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+ .WORD 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+ .WORD 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+ .WORD 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+ .WORD 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+ .WORD 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+ .WORD 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+ .WORD 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+ .WORD 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+ .WORD 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+ .WORD 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+ .WORD 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+ .WORD 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+ .BYTE 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+ .BYTE 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+ .BYTE 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+ .BYTE 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+ .BYTE 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+ .BYTE 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+ .BYTE 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+ .BYTE 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+ .BYTE 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+ .BYTE 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+ .BYTE 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+ .BYTE 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+ .BYTE 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+ .BYTE 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+ .BYTE 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+ .BYTE 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+ .BYTE 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+ .BYTE 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+ .BYTE 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+ .BYTE 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+ .BYTE 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+ .BYTE 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+ .BYTE 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+ .BYTE 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+ .BYTE 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+ .BYTE 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+ .BYTE 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+ .BYTE 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+ .BYTE 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+ .BYTE 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+ .BYTE 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+ .BYTE 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+ .STRINGZ "AES for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+	# translate made-up instructions: _ror, _srm
+ s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/ or
+
+ s/_srm(\s+%r[0-9]+),([0-9]+),/
+ $SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
+ : sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
+
+ s/,\*/,/ if ($SIZE_T==4);
+ print $_,"\n";
+}
+close STDOUT;
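
As in the MIPS module, the loop above lowers made-up ops; here _srm r,N,d
extracts the byte whose least significant bit sits at bit N of r. A
standalone sketch of that rewrite (illustrative only; the sample operands
are invented) shows why the extract position is 31-N on 32-bit and 63-N on
64-bit: PA-RISC extract instructions number bit positions from the most
significant end.

#!/usr/bin/env perl
# Illustration of the _srm rewrite for both register widths.
foreach my $SIZE_T (4, 8) {
	my $line = "\t_srm\t%r5,24,%r9";
	$line =~ s/_srm(\s+%r[0-9]+),([0-9]+),/
		$SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
		           : sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
	print "SIZE_T=$SIZE_T:$line\n";
}
# prints "extru %r5,7,8,%r9" (32-bit) and "extrd,u %r5,39,8,%r9" (64-bit)
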
diff --git a/openssl/crypto/aes/asm/aes-ppc.pl b/openssl/crypto/aes/asm/aes-ppc.pl
index f82c5e181..7c52cbe5f 100644
--- a/openssl/crypto/aes/asm/aes-ppc.pl
+++ b/openssl/crypto/aes/asm/aes-ppc.pl
@@ -7,7 +7,7 @@
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
-# Needs more work: key setup, page boundaries, CBC routine...
+# Needs more work: key setup, CBC routine...
#
# ppc_AES_[en|de]crypt perform at 18 cycles per byte processed with a
# 128-bit key, which is ~40% better than 64-bit code generated by gcc
@@ -18,7 +18,7 @@
# February 2010
#
-# Rescheduling instructions to favour Power6 pipeline gives 10%
+# Rescheduling instructions to favour Power6 pipeline gave 10%
# performance improvement on the platform in question (and marginal
# improvement even on others). It should be noted that Power6 fails
# to process a byte in 18 cycles, only in 23, because it fails to issue
@@ -33,11 +33,13 @@ $flavour = shift;
if ($flavour =~ /64/) {
$SIZE_T =8;
+ $LRSAVE =2*$SIZE_T;
$STU ="stdu";
$POP ="ld";
$PUSH ="std";
} elsif ($flavour =~ /32/) {
$SIZE_T =4;
+ $LRSAVE =$SIZE_T;
$STU ="stwu";
$POP ="lwz";
$PUSH ="stw";
@@ -116,15 +118,19 @@ LAES_Te:
addi $Tbl0,$Tbl0,`128-8`
mtlr r0
blr
- .space `32-24`
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+ .space `64-9*4`
LAES_Td:
mflr r0
bcl 20,31,\$+4
mflr $Tbl0 ; vvvvvvvv "distance" between . and 1st data entry
- addi $Tbl0,$Tbl0,`128-8-32+2048+256`
+ addi $Tbl0,$Tbl0,`128-64-8+2048+256`
mtlr r0
blr
- .space `128-32-24`
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+ .space `128-64-9*4`
___
&_data_word(
0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
@@ -328,10 +334,9 @@ $code.=<<___;
.globl .AES_encrypt
.align 7
.AES_encrypt:
- mflr r0
$STU $sp,-$FRAME($sp)
+ mflr r0
- $PUSH r0,`$FRAME-$SIZE_T*21`($sp)
$PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
$PUSH r13,`$FRAME-$SIZE_T*19`($sp)
$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
@@ -352,7 +357,14 @@ $code.=<<___;
$PUSH r29,`$FRAME-$SIZE_T*3`($sp)
$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
+ $PUSH r0,`$FRAME+$LRSAVE`($sp)
+
+ andi. $t0,$inp,3
+ andi. $t1,$out,3
+ or. $t0,$t0,$t1
+ bne Lenc_unaligned
+Lenc_unaligned_ok:
lwz $s0,0($inp)
lwz $s1,4($inp)
lwz $s2,8($inp)
@@ -363,8 +375,80 @@ $code.=<<___;
stw $s1,4($out)
stw $s2,8($out)
stw $s3,12($out)
+ b Lenc_done
+
+Lenc_unaligned:
+ subfic $t0,$inp,4096
+ subfic $t1,$out,4096
+ andi. $t0,$t0,4096-16
+ beq Lenc_xpage
+ andi. $t1,$t1,4096-16
+ bne Lenc_unaligned_ok
+
+Lenc_xpage:
+ lbz $acc00,0($inp)
+ lbz $acc01,1($inp)
+ lbz $acc02,2($inp)
+ lbz $s0,3($inp)
+ lbz $acc04,4($inp)
+ lbz $acc05,5($inp)
+ lbz $acc06,6($inp)
+ lbz $s1,7($inp)
+ lbz $acc08,8($inp)
+ lbz $acc09,9($inp)
+ lbz $acc10,10($inp)
+ insrwi $s0,$acc00,8,0
+ lbz $s2,11($inp)
+ insrwi $s1,$acc04,8,0
+ lbz $acc12,12($inp)
+ insrwi $s0,$acc01,8,8
+ lbz $acc13,13($inp)
+ insrwi $s1,$acc05,8,8
+ lbz $acc14,14($inp)
+ insrwi $s0,$acc02,8,16
+ lbz $s3,15($inp)
+ insrwi $s1,$acc06,8,16
+ insrwi $s2,$acc08,8,0
+ insrwi $s3,$acc12,8,0
+ insrwi $s2,$acc09,8,8
+ insrwi $s3,$acc13,8,8
+ insrwi $s2,$acc10,8,16
+ insrwi $s3,$acc14,8,16
+
+ bl LAES_Te
+ bl Lppc_AES_encrypt_compact
+
+ extrwi $acc00,$s0,8,0
+ extrwi $acc01,$s0,8,8
+ stb $acc00,0($out)
+ extrwi $acc02,$s0,8,16
+ stb $acc01,1($out)
+ stb $acc02,2($out)
+ extrwi $acc04,$s1,8,0
+ stb $s0,3($out)
+ extrwi $acc05,$s1,8,8
+ stb $acc04,4($out)
+ extrwi $acc06,$s1,8,16
+ stb $acc05,5($out)
+ stb $acc06,6($out)
+ extrwi $acc08,$s2,8,0
+ stb $s1,7($out)
+ extrwi $acc09,$s2,8,8
+ stb $acc08,8($out)
+ extrwi $acc10,$s2,8,16
+ stb $acc09,9($out)
+ stb $acc10,10($out)
+ extrwi $acc12,$s3,8,0
+ stb $s2,11($out)
+ extrwi $acc13,$s3,8,8
+ stb $acc12,12($out)
+ extrwi $acc14,$s3,8,16
+ stb $acc13,13($out)
+ stb $acc14,14($out)
+ stb $s3,15($out)
- $POP r0,`$FRAME-$SIZE_T*21`($sp)
+Lenc_done:
+ $POP r0,`$FRAME+$LRSAVE`($sp)
$POP $toc,`$FRAME-$SIZE_T*20`($sp)
$POP r13,`$FRAME-$SIZE_T*19`($sp)
$POP r14,`$FRAME-$SIZE_T*18`($sp)
@@ -388,18 +472,21 @@ $code.=<<___;
mtlr r0
addi $sp,$sp,$FRAME
blr
+ .long 0
+ .byte 0,12,4,1,0x80,18,3,0
+ .long 0
.align 5
Lppc_AES_encrypt:
lwz $acc00,240($key)
- lwz $t0,0($key)
- lwz $t1,4($key)
- lwz $t2,8($key)
- lwz $t3,12($key)
addi $Tbl1,$Tbl0,3
+ lwz $t0,0($key)
addi $Tbl2,$Tbl0,2
+ lwz $t1,4($key)
addi $Tbl3,$Tbl0,1
+ lwz $t2,8($key)
addi $acc00,$acc00,-1
+ lwz $t3,12($key)
addi $key,$key,16
xor $s0,$s0,$t0
xor $s1,$s1,$t1
@@ -413,44 +500,44 @@ Lenc_loop:
rlwinm $acc02,$s2,`32-24+3`,21,28
rlwinm $acc03,$s3,`32-24+3`,21,28
lwz $t0,0($key)
- lwz $t1,4($key)
rlwinm $acc04,$s1,`32-16+3`,21,28
+ lwz $t1,4($key)
rlwinm $acc05,$s2,`32-16+3`,21,28
lwz $t2,8($key)
- lwz $t3,12($key)
rlwinm $acc06,$s3,`32-16+3`,21,28
+ lwz $t3,12($key)
rlwinm $acc07,$s0,`32-16+3`,21,28
lwzx $acc00,$Tbl0,$acc00
- lwzx $acc01,$Tbl0,$acc01
rlwinm $acc08,$s2,`32-8+3`,21,28
+ lwzx $acc01,$Tbl0,$acc01
rlwinm $acc09,$s3,`32-8+3`,21,28
lwzx $acc02,$Tbl0,$acc02
- lwzx $acc03,$Tbl0,$acc03
rlwinm $acc10,$s0,`32-8+3`,21,28
+ lwzx $acc03,$Tbl0,$acc03
rlwinm $acc11,$s1,`32-8+3`,21,28
lwzx $acc04,$Tbl1,$acc04
- lwzx $acc05,$Tbl1,$acc05
rlwinm $acc12,$s3,`0+3`,21,28
+ lwzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s0,`0+3`,21,28
lwzx $acc06,$Tbl1,$acc06
- lwzx $acc07,$Tbl1,$acc07
rlwinm $acc14,$s1,`0+3`,21,28
+ lwzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s2,`0+3`,21,28
lwzx $acc08,$Tbl2,$acc08
- lwzx $acc09,$Tbl2,$acc09
xor $t0,$t0,$acc00
+ lwzx $acc09,$Tbl2,$acc09
xor $t1,$t1,$acc01
lwzx $acc10,$Tbl2,$acc10
- lwzx $acc11,$Tbl2,$acc11
xor $t2,$t2,$acc02
+ lwzx $acc11,$Tbl2,$acc11
xor $t3,$t3,$acc03
lwzx $acc12,$Tbl3,$acc12
- lwzx $acc13,$Tbl3,$acc13
xor $t0,$t0,$acc04
+ lwzx $acc13,$Tbl3,$acc13
xor $t1,$t1,$acc05
lwzx $acc14,$Tbl3,$acc14
- lwzx $acc15,$Tbl3,$acc15
xor $t2,$t2,$acc06
+ lwzx $acc15,$Tbl3,$acc15
xor $t3,$t3,$acc07
xor $t0,$t0,$acc08
xor $t1,$t1,$acc09
@@ -466,60 +553,60 @@ Lenc_loop:
addi $Tbl2,$Tbl0,2048
nop
lwz $t0,0($key)
- lwz $t1,4($key)
rlwinm $acc00,$s0,`32-24`,24,31
+ lwz $t1,4($key)
rlwinm $acc01,$s1,`32-24`,24,31
lwz $t2,8($key)
- lwz $t3,12($key)
rlwinm $acc02,$s2,`32-24`,24,31
+ lwz $t3,12($key)
rlwinm $acc03,$s3,`32-24`,24,31
lwz $acc08,`2048+0`($Tbl0) ! prefetch Te4
- lwz $acc09,`2048+32`($Tbl0)
rlwinm $acc04,$s1,`32-16`,24,31
+ lwz $acc09,`2048+32`($Tbl0)
rlwinm $acc05,$s2,`32-16`,24,31
lwz $acc10,`2048+64`($Tbl0)
- lwz $acc11,`2048+96`($Tbl0)
rlwinm $acc06,$s3,`32-16`,24,31
+ lwz $acc11,`2048+96`($Tbl0)
rlwinm $acc07,$s0,`32-16`,24,31
lwz $acc12,`2048+128`($Tbl0)
- lwz $acc13,`2048+160`($Tbl0)
rlwinm $acc08,$s2,`32-8`,24,31
+ lwz $acc13,`2048+160`($Tbl0)
rlwinm $acc09,$s3,`32-8`,24,31
lwz $acc14,`2048+192`($Tbl0)
- lwz $acc15,`2048+224`($Tbl0)
rlwinm $acc10,$s0,`32-8`,24,31
+ lwz $acc15,`2048+224`($Tbl0)
rlwinm $acc11,$s1,`32-8`,24,31
lbzx $acc00,$Tbl2,$acc00
- lbzx $acc01,$Tbl2,$acc01
rlwinm $acc12,$s3,`0`,24,31
+ lbzx $acc01,$Tbl2,$acc01
rlwinm $acc13,$s0,`0`,24,31
lbzx $acc02,$Tbl2,$acc02
- lbzx $acc03,$Tbl2,$acc03
rlwinm $acc14,$s1,`0`,24,31
+ lbzx $acc03,$Tbl2,$acc03
rlwinm $acc15,$s2,`0`,24,31
lbzx $acc04,$Tbl2,$acc04
- lbzx $acc05,$Tbl2,$acc05
rlwinm $s0,$acc00,24,0,7
+ lbzx $acc05,$Tbl2,$acc05
rlwinm $s1,$acc01,24,0,7
lbzx $acc06,$Tbl2,$acc06
- lbzx $acc07,$Tbl2,$acc07
rlwinm $s2,$acc02,24,0,7
+ lbzx $acc07,$Tbl2,$acc07
rlwinm $s3,$acc03,24,0,7
lbzx $acc08,$Tbl2,$acc08
- lbzx $acc09,$Tbl2,$acc09
rlwimi $s0,$acc04,16,8,15
+ lbzx $acc09,$Tbl2,$acc09
rlwimi $s1,$acc05,16,8,15
lbzx $acc10,$Tbl2,$acc10
- lbzx $acc11,$Tbl2,$acc11
rlwimi $s2,$acc06,16,8,15
+ lbzx $acc11,$Tbl2,$acc11
rlwimi $s3,$acc07,16,8,15
lbzx $acc12,$Tbl2,$acc12
- lbzx $acc13,$Tbl2,$acc13
rlwimi $s0,$acc08,8,16,23
+ lbzx $acc13,$Tbl2,$acc13
rlwimi $s1,$acc09,8,16,23
lbzx $acc14,$Tbl2,$acc14
- lbzx $acc15,$Tbl2,$acc15
rlwimi $s2,$acc10,8,16,23
+ lbzx $acc15,$Tbl2,$acc15
rlwimi $s3,$acc11,8,16,23
or $s0,$s0,$acc12
or $s1,$s1,$acc13
@@ -530,29 +617,31 @@ Lenc_loop:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
.align 4
Lppc_AES_encrypt_compact:
lwz $acc00,240($key)
- lwz $t0,0($key)
- lwz $t1,4($key)
- lwz $t2,8($key)
- lwz $t3,12($key)
addi $Tbl1,$Tbl0,2048
+ lwz $t0,0($key)
lis $mask80,0x8080
+ lwz $t1,4($key)
lis $mask1b,0x1b1b
- addi $key,$key,16
+ lwz $t2,8($key)
ori $mask80,$mask80,0x8080
+ lwz $t3,12($key)
ori $mask1b,$mask1b,0x1b1b
+ addi $key,$key,16
mtctr $acc00
.align 4
Lenc_compact_loop:
xor $s0,$s0,$t0
xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
rlwinm $acc00,$s0,`32-24`,24,31
+ xor $s2,$s2,$t2
rlwinm $acc01,$s1,`32-24`,24,31
+ xor $s3,$s3,$t3
rlwinm $acc02,$s2,`32-24`,24,31
rlwinm $acc03,$s3,`32-24`,24,31
rlwinm $acc04,$s1,`32-16`,24,31
@@ -560,48 +649,48 @@ Lenc_compact_loop:
rlwinm $acc06,$s3,`32-16`,24,31
rlwinm $acc07,$s0,`32-16`,24,31
lbzx $acc00,$Tbl1,$acc00
- lbzx $acc01,$Tbl1,$acc01
rlwinm $acc08,$s2,`32-8`,24,31
+ lbzx $acc01,$Tbl1,$acc01
rlwinm $acc09,$s3,`32-8`,24,31
lbzx $acc02,$Tbl1,$acc02
- lbzx $acc03,$Tbl1,$acc03
rlwinm $acc10,$s0,`32-8`,24,31
+ lbzx $acc03,$Tbl1,$acc03
rlwinm $acc11,$s1,`32-8`,24,31
lbzx $acc04,$Tbl1,$acc04
- lbzx $acc05,$Tbl1,$acc05
rlwinm $acc12,$s3,`0`,24,31
+ lbzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s0,`0`,24,31
lbzx $acc06,$Tbl1,$acc06
- lbzx $acc07,$Tbl1,$acc07
rlwinm $acc14,$s1,`0`,24,31
+ lbzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s2,`0`,24,31
lbzx $acc08,$Tbl1,$acc08
- lbzx $acc09,$Tbl1,$acc09
rlwinm $s0,$acc00,24,0,7
+ lbzx $acc09,$Tbl1,$acc09
rlwinm $s1,$acc01,24,0,7
lbzx $acc10,$Tbl1,$acc10
- lbzx $acc11,$Tbl1,$acc11
rlwinm $s2,$acc02,24,0,7
+ lbzx $acc11,$Tbl1,$acc11
rlwinm $s3,$acc03,24,0,7
lbzx $acc12,$Tbl1,$acc12
- lbzx $acc13,$Tbl1,$acc13
rlwimi $s0,$acc04,16,8,15
+ lbzx $acc13,$Tbl1,$acc13
rlwimi $s1,$acc05,16,8,15
lbzx $acc14,$Tbl1,$acc14
- lbzx $acc15,$Tbl1,$acc15
rlwimi $s2,$acc06,16,8,15
+ lbzx $acc15,$Tbl1,$acc15
rlwimi $s3,$acc07,16,8,15
rlwimi $s0,$acc08,8,16,23
rlwimi $s1,$acc09,8,16,23
rlwimi $s2,$acc10,8,16,23
rlwimi $s3,$acc11,8,16,23
lwz $t0,0($key)
- lwz $t1,4($key)
or $s0,$s0,$acc12
+ lwz $t1,4($key)
or $s1,$s1,$acc13
lwz $t2,8($key)
- lwz $t3,12($key)
or $s2,$s2,$acc14
+ lwz $t3,12($key)
or $s3,$s3,$acc15
addi $key,$key,16
@@ -612,12 +701,12 @@ Lenc_compact_loop:
and $acc02,$s2,$mask80
and $acc03,$s3,$mask80
srwi $acc04,$acc00,7 # r1>>7
- srwi $acc05,$acc01,7
- srwi $acc06,$acc02,7
- srwi $acc07,$acc03,7
andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
+ srwi $acc05,$acc01,7
andc $acc09,$s1,$mask80
+ srwi $acc06,$acc02,7
andc $acc10,$s2,$mask80
+ srwi $acc07,$acc03,7
andc $acc11,$s3,$mask80
sub $acc00,$acc00,$acc04 # r1-(r1>>7)
sub $acc01,$acc01,$acc05
@@ -633,32 +722,32 @@ Lenc_compact_loop:
and $acc03,$acc03,$mask1b
xor $acc00,$acc00,$acc08 # r2
xor $acc01,$acc01,$acc09
+ rotlwi $acc12,$s0,16 # ROTATE(r0,16)
xor $acc02,$acc02,$acc10
+ rotlwi $acc13,$s1,16
xor $acc03,$acc03,$acc11
+ rotlwi $acc14,$s2,16
- rotlwi $acc12,$s0,16 # ROTATE(r0,16)
- rotlwi $acc13,$s1,16
- rotlwi $acc14,$s2,16
- rotlwi $acc15,$s3,16
xor $s0,$s0,$acc00 # r0^r2
+ rotlwi $acc15,$s3,16
xor $s1,$s1,$acc01
- xor $s2,$s2,$acc02
- xor $s3,$s3,$acc03
rotrwi $s0,$s0,24 # ROTATE(r2^r0,24)
+ xor $s2,$s2,$acc02
rotrwi $s1,$s1,24
+ xor $s3,$s3,$acc03
rotrwi $s2,$s2,24
- rotrwi $s3,$s3,24
xor $s0,$s0,$acc00 # ROTATE(r2^r0,24)^r2
+ rotrwi $s3,$s3,24
xor $s1,$s1,$acc01
xor $s2,$s2,$acc02
xor $s3,$s3,$acc03
rotlwi $acc08,$acc12,8 # ROTATE(r0,24)
- rotlwi $acc09,$acc13,8
- rotlwi $acc10,$acc14,8
- rotlwi $acc11,$acc15,8
xor $s0,$s0,$acc12 #
+ rotlwi $acc09,$acc13,8
xor $s1,$s1,$acc13
+ rotlwi $acc10,$acc14,8
xor $s2,$s2,$acc14
+ rotlwi $acc11,$acc15,8
xor $s3,$s3,$acc15
xor $s0,$s0,$acc08 #
xor $s1,$s1,$acc09
@@ -673,14 +762,15 @@ Lenc_compact_done:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
.globl .AES_decrypt
.align 7
.AES_decrypt:
- mflr r0
$STU $sp,-$FRAME($sp)
+ mflr r0
- $PUSH r0,`$FRAME-$SIZE_T*21`($sp)
$PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
$PUSH r13,`$FRAME-$SIZE_T*19`($sp)
$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
@@ -701,7 +791,14 @@ Lenc_compact_done:
$PUSH r29,`$FRAME-$SIZE_T*3`($sp)
$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
+ $PUSH r0,`$FRAME+$LRSAVE`($sp)
+ andi. $t0,$inp,3
+ andi. $t1,$out,3
+ or. $t0,$t0,$t1
+ bne Ldec_unaligned
+
+Ldec_unaligned_ok:
lwz $s0,0($inp)
lwz $s1,4($inp)
lwz $s2,8($inp)
@@ -712,8 +809,80 @@ Lenc_compact_done:
stw $s1,4($out)
stw $s2,8($out)
stw $s3,12($out)
+ b Ldec_done
+
+Ldec_unaligned:
+ subfic $t0,$inp,4096
+ subfic $t1,$out,4096
+ andi. $t0,$t0,4096-16
+ beq Ldec_xpage
+ andi. $t1,$t1,4096-16
+ bne Ldec_unaligned_ok
+
+Ldec_xpage:
+ lbz $acc00,0($inp)
+ lbz $acc01,1($inp)
+ lbz $acc02,2($inp)
+ lbz $s0,3($inp)
+ lbz $acc04,4($inp)
+ lbz $acc05,5($inp)
+ lbz $acc06,6($inp)
+ lbz $s1,7($inp)
+ lbz $acc08,8($inp)
+ lbz $acc09,9($inp)
+ lbz $acc10,10($inp)
+ insrwi $s0,$acc00,8,0
+ lbz $s2,11($inp)
+ insrwi $s1,$acc04,8,0
+ lbz $acc12,12($inp)
+ insrwi $s0,$acc01,8,8
+ lbz $acc13,13($inp)
+ insrwi $s1,$acc05,8,8
+ lbz $acc14,14($inp)
+ insrwi $s0,$acc02,8,16
+ lbz $s3,15($inp)
+ insrwi $s1,$acc06,8,16
+ insrwi $s2,$acc08,8,0
+ insrwi $s3,$acc12,8,0
+ insrwi $s2,$acc09,8,8
+ insrwi $s3,$acc13,8,8
+ insrwi $s2,$acc10,8,16
+ insrwi $s3,$acc14,8,16
+
+ bl LAES_Td
+ bl Lppc_AES_decrypt_compact
- $POP r0,`$FRAME-$SIZE_T*21`($sp)
+ extrwi $acc00,$s0,8,0
+ extrwi $acc01,$s0,8,8
+ stb $acc00,0($out)
+ extrwi $acc02,$s0,8,16
+ stb $acc01,1($out)
+ stb $acc02,2($out)
+ extrwi $acc04,$s1,8,0
+ stb $s0,3($out)
+ extrwi $acc05,$s1,8,8
+ stb $acc04,4($out)
+ extrwi $acc06,$s1,8,16
+ stb $acc05,5($out)
+ stb $acc06,6($out)
+ extrwi $acc08,$s2,8,0
+ stb $s1,7($out)
+ extrwi $acc09,$s2,8,8
+ stb $acc08,8($out)
+ extrwi $acc10,$s2,8,16
+ stb $acc09,9($out)
+ stb $acc10,10($out)
+ extrwi $acc12,$s3,8,0
+ stb $s2,11($out)
+ extrwi $acc13,$s3,8,8
+ stb $acc12,12($out)
+ extrwi $acc14,$s3,8,16
+ stb $acc13,13($out)
+ stb $acc14,14($out)
+ stb $s3,15($out)
+
+Ldec_done:
+ $POP r0,`$FRAME+$LRSAVE`($sp)
$POP $toc,`$FRAME-$SIZE_T*20`($sp)
$POP r13,`$FRAME-$SIZE_T*19`($sp)
$POP r14,`$FRAME-$SIZE_T*18`($sp)
@@ -737,18 +906,21 @@ Lenc_compact_done:
mtlr r0
addi $sp,$sp,$FRAME
blr
+ .long 0
+ .byte 0,12,4,1,0x80,18,3,0
+ .long 0
.align 5
Lppc_AES_decrypt:
lwz $acc00,240($key)
- lwz $t0,0($key)
- lwz $t1,4($key)
- lwz $t2,8($key)
- lwz $t3,12($key)
addi $Tbl1,$Tbl0,3
+ lwz $t0,0($key)
addi $Tbl2,$Tbl0,2
+ lwz $t1,4($key)
addi $Tbl3,$Tbl0,1
+ lwz $t2,8($key)
addi $acc00,$acc00,-1
+ lwz $t3,12($key)
addi $key,$key,16
xor $s0,$s0,$t0
xor $s1,$s1,$t1
@@ -762,44 +934,44 @@ Ldec_loop:
rlwinm $acc02,$s2,`32-24+3`,21,28
rlwinm $acc03,$s3,`32-24+3`,21,28
lwz $t0,0($key)
- lwz $t1,4($key)
rlwinm $acc04,$s3,`32-16+3`,21,28
+ lwz $t1,4($key)
rlwinm $acc05,$s0,`32-16+3`,21,28
lwz $t2,8($key)
- lwz $t3,12($key)
rlwinm $acc06,$s1,`32-16+3`,21,28
+ lwz $t3,12($key)
rlwinm $acc07,$s2,`32-16+3`,21,28
lwzx $acc00,$Tbl0,$acc00
- lwzx $acc01,$Tbl0,$acc01
rlwinm $acc08,$s2,`32-8+3`,21,28
+ lwzx $acc01,$Tbl0,$acc01
rlwinm $acc09,$s3,`32-8+3`,21,28
lwzx $acc02,$Tbl0,$acc02
- lwzx $acc03,$Tbl0,$acc03
rlwinm $acc10,$s0,`32-8+3`,21,28
+ lwzx $acc03,$Tbl0,$acc03
rlwinm $acc11,$s1,`32-8+3`,21,28
lwzx $acc04,$Tbl1,$acc04
- lwzx $acc05,$Tbl1,$acc05
rlwinm $acc12,$s1,`0+3`,21,28
+ lwzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s2,`0+3`,21,28
lwzx $acc06,$Tbl1,$acc06
- lwzx $acc07,$Tbl1,$acc07
rlwinm $acc14,$s3,`0+3`,21,28
+ lwzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s0,`0+3`,21,28
lwzx $acc08,$Tbl2,$acc08
- lwzx $acc09,$Tbl2,$acc09
xor $t0,$t0,$acc00
+ lwzx $acc09,$Tbl2,$acc09
xor $t1,$t1,$acc01
lwzx $acc10,$Tbl2,$acc10
- lwzx $acc11,$Tbl2,$acc11
xor $t2,$t2,$acc02
+ lwzx $acc11,$Tbl2,$acc11
xor $t3,$t3,$acc03
lwzx $acc12,$Tbl3,$acc12
- lwzx $acc13,$Tbl3,$acc13
xor $t0,$t0,$acc04
+ lwzx $acc13,$Tbl3,$acc13
xor $t1,$t1,$acc05
lwzx $acc14,$Tbl3,$acc14
- lwzx $acc15,$Tbl3,$acc15
xor $t2,$t2,$acc06
+ lwzx $acc15,$Tbl3,$acc15
xor $t3,$t3,$acc07
xor $t0,$t0,$acc08
xor $t1,$t1,$acc09
@@ -815,56 +987,56 @@ Ldec_loop:
addi $Tbl2,$Tbl0,2048
nop
lwz $t0,0($key)
- lwz $t1,4($key)
rlwinm $acc00,$s0,`32-24`,24,31
+ lwz $t1,4($key)
rlwinm $acc01,$s1,`32-24`,24,31
lwz $t2,8($key)
- lwz $t3,12($key)
rlwinm $acc02,$s2,`32-24`,24,31
+ lwz $t3,12($key)
rlwinm $acc03,$s3,`32-24`,24,31
lwz $acc08,`2048+0`($Tbl0) ! prefetch Td4
- lwz $acc09,`2048+32`($Tbl0)
rlwinm $acc04,$s3,`32-16`,24,31
+ lwz $acc09,`2048+32`($Tbl0)
rlwinm $acc05,$s0,`32-16`,24,31
lwz $acc10,`2048+64`($Tbl0)
- lwz $acc11,`2048+96`($Tbl0)
lbzx $acc00,$Tbl2,$acc00
+ lwz $acc11,`2048+96`($Tbl0)
lbzx $acc01,$Tbl2,$acc01
lwz $acc12,`2048+128`($Tbl0)
- lwz $acc13,`2048+160`($Tbl0)
rlwinm $acc06,$s1,`32-16`,24,31
+ lwz $acc13,`2048+160`($Tbl0)
rlwinm $acc07,$s2,`32-16`,24,31
lwz $acc14,`2048+192`($Tbl0)
- lwz $acc15,`2048+224`($Tbl0)
rlwinm $acc08,$s2,`32-8`,24,31
+ lwz $acc15,`2048+224`($Tbl0)
rlwinm $acc09,$s3,`32-8`,24,31
lbzx $acc02,$Tbl2,$acc02
- lbzx $acc03,$Tbl2,$acc03
rlwinm $acc10,$s0,`32-8`,24,31
+ lbzx $acc03,$Tbl2,$acc03
rlwinm $acc11,$s1,`32-8`,24,31
lbzx $acc04,$Tbl2,$acc04
- lbzx $acc05,$Tbl2,$acc05
rlwinm $acc12,$s1,`0`,24,31
+ lbzx $acc05,$Tbl2,$acc05
rlwinm $acc13,$s2,`0`,24,31
lbzx $acc06,$Tbl2,$acc06
- lbzx $acc07,$Tbl2,$acc07
rlwinm $acc14,$s3,`0`,24,31
+ lbzx $acc07,$Tbl2,$acc07
rlwinm $acc15,$s0,`0`,24,31
lbzx $acc08,$Tbl2,$acc08
- lbzx $acc09,$Tbl2,$acc09
rlwinm $s0,$acc00,24,0,7
+ lbzx $acc09,$Tbl2,$acc09
rlwinm $s1,$acc01,24,0,7
lbzx $acc10,$Tbl2,$acc10
- lbzx $acc11,$Tbl2,$acc11
rlwinm $s2,$acc02,24,0,7
+ lbzx $acc11,$Tbl2,$acc11
rlwinm $s3,$acc03,24,0,7
lbzx $acc12,$Tbl2,$acc12
- lbzx $acc13,$Tbl2,$acc13
rlwimi $s0,$acc04,16,8,15
+ lbzx $acc13,$Tbl2,$acc13
rlwimi $s1,$acc05,16,8,15
lbzx $acc14,$Tbl2,$acc14
- lbzx $acc15,$Tbl2,$acc15
rlwimi $s2,$acc06,16,8,15
+ lbzx $acc15,$Tbl2,$acc15
rlwimi $s3,$acc07,16,8,15
rlwimi $s0,$acc08,8,16,23
rlwimi $s1,$acc09,8,16,23
@@ -879,20 +1051,22 @@ Ldec_loop:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
.align 4
Lppc_AES_decrypt_compact:
lwz $acc00,240($key)
- lwz $t0,0($key)
- lwz $t1,4($key)
- lwz $t2,8($key)
- lwz $t3,12($key)
addi $Tbl1,$Tbl0,2048
+ lwz $t0,0($key)
lis $mask80,0x8080
+ lwz $t1,4($key)
lis $mask1b,0x1b1b
- addi $key,$key,16
+ lwz $t2,8($key)
ori $mask80,$mask80,0x8080
+ lwz $t3,12($key)
ori $mask1b,$mask1b,0x1b1b
+ addi $key,$key,16
___
$code.=<<___ if ($SIZE_T==8);
insrdi $mask80,$mask80,32,0
@@ -904,10 +1078,10 @@ $code.=<<___;
Ldec_compact_loop:
xor $s0,$s0,$t0
xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
rlwinm $acc00,$s0,`32-24`,24,31
+ xor $s2,$s2,$t2
rlwinm $acc01,$s1,`32-24`,24,31
+ xor $s3,$s3,$t3
rlwinm $acc02,$s2,`32-24`,24,31
rlwinm $acc03,$s3,`32-24`,24,31
rlwinm $acc04,$s3,`32-16`,24,31
@@ -915,48 +1089,48 @@ Ldec_compact_loop:
rlwinm $acc06,$s1,`32-16`,24,31
rlwinm $acc07,$s2,`32-16`,24,31
lbzx $acc00,$Tbl1,$acc00
- lbzx $acc01,$Tbl1,$acc01
rlwinm $acc08,$s2,`32-8`,24,31
+ lbzx $acc01,$Tbl1,$acc01
rlwinm $acc09,$s3,`32-8`,24,31
lbzx $acc02,$Tbl1,$acc02
- lbzx $acc03,$Tbl1,$acc03
rlwinm $acc10,$s0,`32-8`,24,31
+ lbzx $acc03,$Tbl1,$acc03
rlwinm $acc11,$s1,`32-8`,24,31
lbzx $acc04,$Tbl1,$acc04
- lbzx $acc05,$Tbl1,$acc05
rlwinm $acc12,$s1,`0`,24,31
+ lbzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s2,`0`,24,31
lbzx $acc06,$Tbl1,$acc06
- lbzx $acc07,$Tbl1,$acc07
rlwinm $acc14,$s3,`0`,24,31
+ lbzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s0,`0`,24,31
lbzx $acc08,$Tbl1,$acc08
- lbzx $acc09,$Tbl1,$acc09
rlwinm $s0,$acc00,24,0,7
+ lbzx $acc09,$Tbl1,$acc09
rlwinm $s1,$acc01,24,0,7
lbzx $acc10,$Tbl1,$acc10
- lbzx $acc11,$Tbl1,$acc11
rlwinm $s2,$acc02,24,0,7
+ lbzx $acc11,$Tbl1,$acc11
rlwinm $s3,$acc03,24,0,7
lbzx $acc12,$Tbl1,$acc12
- lbzx $acc13,$Tbl1,$acc13
rlwimi $s0,$acc04,16,8,15
+ lbzx $acc13,$Tbl1,$acc13
rlwimi $s1,$acc05,16,8,15
lbzx $acc14,$Tbl1,$acc14
- lbzx $acc15,$Tbl1,$acc15
rlwimi $s2,$acc06,16,8,15
+ lbzx $acc15,$Tbl1,$acc15
rlwimi $s3,$acc07,16,8,15
rlwimi $s0,$acc08,8,16,23
rlwimi $s1,$acc09,8,16,23
rlwimi $s2,$acc10,8,16,23
rlwimi $s3,$acc11,8,16,23
lwz $t0,0($key)
- lwz $t1,4($key)
or $s0,$s0,$acc12
+ lwz $t1,4($key)
or $s1,$s1,$acc13
lwz $t2,8($key)
- lwz $t3,12($key)
or $s2,$s2,$acc14
+ lwz $t3,12($key)
or $s3,$s3,$acc15
addi $key,$key,16
@@ -1030,12 +1204,12 @@ $code.=<<___ if ($SIZE_T==4);
and $acc02,$s2,$mask80
and $acc03,$s3,$mask80
srwi $acc04,$acc00,7 # r1>>7
- srwi $acc05,$acc01,7
- srwi $acc06,$acc02,7
- srwi $acc07,$acc03,7
andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
+ srwi $acc05,$acc01,7
andc $acc09,$s1,$mask80
+ srwi $acc06,$acc02,7
andc $acc10,$s2,$mask80
+ srwi $acc07,$acc03,7
andc $acc11,$s3,$mask80
sub $acc00,$acc00,$acc04 # r1-(r1>>7)
sub $acc01,$acc01,$acc05
@@ -1059,12 +1233,12 @@ $code.=<<___ if ($SIZE_T==4);
and $acc06,$acc02,$mask80
and $acc07,$acc03,$mask80
srwi $acc08,$acc04,7 # r1>>7
- srwi $acc09,$acc05,7
- srwi $acc10,$acc06,7
- srwi $acc11,$acc07,7
andc $acc12,$acc00,$mask80 # r2&0x7f7f7f7f
+ srwi $acc09,$acc05,7
andc $acc13,$acc01,$mask80
+ srwi $acc10,$acc06,7
andc $acc14,$acc02,$mask80
+ srwi $acc11,$acc07,7
andc $acc15,$acc03,$mask80
sub $acc04,$acc04,$acc08 # r1-(r1>>7)
sub $acc05,$acc05,$acc09
@@ -1085,13 +1259,13 @@ $code.=<<___ if ($SIZE_T==4);
and $acc08,$acc04,$mask80 # r1=r4&0x80808080
and $acc09,$acc05,$mask80
- and $acc10,$acc06,$mask80
- and $acc11,$acc07,$mask80
srwi $acc12,$acc08,7 # r1>>7
+ and $acc10,$acc06,$mask80
srwi $acc13,$acc09,7
+ and $acc11,$acc07,$mask80
srwi $acc14,$acc10,7
- srwi $acc15,$acc11,7
sub $acc08,$acc08,$acc12 # r1-(r1>>7)
+ srwi $acc15,$acc11,7
sub $acc09,$acc09,$acc13
sub $acc10,$acc10,$acc14
sub $acc11,$acc11,$acc15
@@ -1124,10 +1298,10 @@ ___
$code.=<<___;
rotrwi $s0,$s0,8 # = ROTATE(r0,8)
rotrwi $s1,$s1,8
- rotrwi $s2,$s2,8
- rotrwi $s3,$s3,8
xor $s0,$s0,$acc00 # ^= r2^r0
+ rotrwi $s2,$s2,8
xor $s1,$s1,$acc01
+ rotrwi $s3,$s3,8
xor $s2,$s2,$acc02
xor $s3,$s3,$acc03
xor $acc00,$acc00,$acc08
@@ -1135,32 +1309,32 @@ $code.=<<___;
xor $acc02,$acc02,$acc10
xor $acc03,$acc03,$acc11
xor $s0,$s0,$acc04 # ^= r4^r0
- xor $s1,$s1,$acc05
- xor $s2,$s2,$acc06
- xor $s3,$s3,$acc07
rotrwi $acc00,$acc00,24
+ xor $s1,$s1,$acc05
rotrwi $acc01,$acc01,24
+ xor $s2,$s2,$acc06
rotrwi $acc02,$acc02,24
+ xor $s3,$s3,$acc07
rotrwi $acc03,$acc03,24
xor $acc04,$acc04,$acc08
xor $acc05,$acc05,$acc09
xor $acc06,$acc06,$acc10
xor $acc07,$acc07,$acc11
xor $s0,$s0,$acc08 # ^= r8 [^((r4^r0)^(r2^r0)=r4^r2)]
- xor $s1,$s1,$acc09
- xor $s2,$s2,$acc10
- xor $s3,$s3,$acc11
rotrwi $acc04,$acc04,16
+ xor $s1,$s1,$acc09
rotrwi $acc05,$acc05,16
+ xor $s2,$s2,$acc10
rotrwi $acc06,$acc06,16
+ xor $s3,$s3,$acc11
rotrwi $acc07,$acc07,16
xor $s0,$s0,$acc00 # ^= ROTATE(r8^r2^r0,24)
- xor $s1,$s1,$acc01
- xor $s2,$s2,$acc02
- xor $s3,$s3,$acc03
rotrwi $acc08,$acc08,8
+ xor $s1,$s1,$acc01
rotrwi $acc09,$acc09,8
+ xor $s2,$s2,$acc02
rotrwi $acc10,$acc10,8
+ xor $s3,$s3,$acc03
rotrwi $acc11,$acc11,8
xor $s0,$s0,$acc04 # ^= ROTATE(r8^r4^r0,16)
xor $s1,$s1,$acc05
@@ -1179,7 +1353,9 @@ Ldec_compact_done:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
-.long 0
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+
.asciz "AES for PPC, CRYPTOGAMS by <appro\@openssl.org>"
.align 7
___
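
The unaligned paths added to AES_encrypt and AES_decrypt above fall back to
byte-wise loads and stores only when a 16-byte block would straddle a 4KB
page, presumably because an unaligned word access crossing into an unmapped
page could fault; the same check is applied to both $inp and $out. A hedged
Perl sketch of the subfic/andi. test follows (the function name is
invented):

#!/usr/bin/env perl
# Mirrors "subfic $t0,$inp,4096; andi. $t0,$t0,4096-16; beq Lenc_xpage":
# a zero result means fewer than 16 bytes remain in the current page
# (page-aligned addresses also land here, a harmless conservative case).
sub takes_xpage_path {
	my ($addr) = @_;
	my $m = $addr & 4095;			# offset within the 4KB page
	my $left = (4096 - $m) & (4096 - 16);	# what andi. leaves behind
	return $left == 0;			# beq branches on zero
}
printf("0x%04x -> %s\n", $_, takes_xpage_path($_) ? "byte path" : "word path")
    for (0x1001, 0x1ff0, 0x1ff9);
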
diff --git a/openssl/crypto/aes/asm/aes-s390x.pl b/openssl/crypto/aes/asm/aes-s390x.pl
index 7e0188929..f749a52d7 100644
--- a/openssl/crypto/aes/asm/aes-s390x.pl
+++ b/openssl/crypto/aes/asm/aes-s390x.pl
@@ -44,12 +44,57 @@
# Unlike the previous version, hardware support detection takes place only
# at the moment of key schedule setup, which is denoted in key->rounds.
# This is done because deferred key setup can't be made MT-safe, not
-# for key lengthes longer than 128 bits.
+# for keys longer than 128 bits.
#
# Add AES_cbc_encrypt, which gives an incredible performance improvement;
# it was measured to be ~6.6x. It's less than the previously mentioned 8x
# because the software implementation was optimized.
+# May 2010.
+#
+# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x
+# performance improvement over "generic" counter mode routine relying
+# on single-block, also hardware-assisted, AES_encrypt. "Up to" refers
+# to the fact that the exact throughput value depends on the current
+# stack frame alignment within a 4KB page. In the worst case you get
+# ~75% of the maximum, but *on average* it would be as much as ~98%,
+# meaning that the worst case is unlikely, like hitting a ravine on a
+# plateau.
+
+# November 2010.
+#
+# Adapt for -m31 build. If kernel supports what's called "highgprs"
+# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit
+# instructions and achieve "64-bit" performance even in a 31-bit legacy
+# application context. The feature is not specific to any particular
+# processor, as long as it's a "z-CPU". The latter implies that the code
+# remains z/Architecture specific. On z990 it was measured to perform
+# 2x better than code generated by gcc 4.3.
+
+# December 2010.
+#
+# Add support for z196 "cipher message with counter" instruction.
+# Note, however, that it's disengaged because it was measured to
+# perform ~12% worse than vanilla km-based code...
+
+# February 2011.
+#
+# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes
+# instructions, which deliver ~70% improvement at 8KB block size over
+# vanilla km-based code, and ~37% at a more typical 512-byte block size.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
$softonly=0; # allow hardware support
$t0="%r0"; $mask="%r0";
@@ -69,6 +114,8 @@ $rounds="%r13";
$ra="%r14";
$sp="%r15";
+$stdframe=16*$SIZE_T+4*8;
+
sub _data_word()
{ my $i;
while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
@@ -210,7 +257,7 @@ $code.=<<___ if (!$softonly);
.Lesoft:
___
$code.=<<___;
- stmg %r3,$ra,24($sp)
+ stm${g} %r3,$ra,3*$SIZE_T($sp)
llgf $s0,0($inp)
llgf $s1,4($inp)
@@ -220,20 +267,20 @@ $code.=<<___;
larl $tbl,AES_Te
bras $ra,_s390x_AES_encrypt
- lg $out,24($sp)
+ l${g} $out,3*$SIZE_T($sp)
st $s0,0($out)
st $s1,4($out)
st $s2,8($out)
st $s3,12($out)
- lmg %r6,$ra,48($sp)
+ lm${g} %r6,$ra,6*$SIZE_T($sp)
br $ra
.size AES_encrypt,.-AES_encrypt
.type _s390x_AES_encrypt,\@function
.align 16
_s390x_AES_encrypt:
- stg $ra,152($sp)
+ st${g} $ra,15*$SIZE_T($sp)
x $s0,0($key)
x $s1,4($key)
x $s2,8($key)
@@ -397,7 +444,7 @@ _s390x_AES_encrypt:
or $s2,$i3
or $s3,$t3
- lg $ra,152($sp)
+ l${g} $ra,15*$SIZE_T($sp)
xr $s0,$t0
xr $s1,$t2
x $s2,24($key)
@@ -536,7 +583,7 @@ $code.=<<___ if (!$softonly);
.Ldsoft:
___
$code.=<<___;
- stmg %r3,$ra,24($sp)
+ stm${g} %r3,$ra,3*$SIZE_T($sp)
llgf $s0,0($inp)
llgf $s1,4($inp)
@@ -546,20 +593,20 @@ $code.=<<___;
larl $tbl,AES_Td
bras $ra,_s390x_AES_decrypt
- lg $out,24($sp)
+ l${g} $out,3*$SIZE_T($sp)
st $s0,0($out)
st $s1,4($out)
st $s2,8($out)
st $s3,12($out)
- lmg %r6,$ra,48($sp)
+ lm${g} %r6,$ra,6*$SIZE_T($sp)
br $ra
.size AES_decrypt,.-AES_decrypt
.type _s390x_AES_decrypt,\@function
.align 16
_s390x_AES_decrypt:
- stg $ra,152($sp)
+ st${g} $ra,15*$SIZE_T($sp)
x $s0,0($key)
x $s1,4($key)
x $s2,8($key)
@@ -703,7 +750,7 @@ _s390x_AES_decrypt:
nr $i1,$mask
nr $i2,$mask
- lg $ra,152($sp)
+ l${g} $ra,15*$SIZE_T($sp)
or $s1,$t1
l $t0,16($key)
l $t1,20($key)
@@ -732,14 +779,14 @@ ___
$code.=<<___;
# void AES_set_encrypt_key(const unsigned char *in, int bits,
# AES_KEY *key) {
-.globl AES_set_encrypt_key
-.type AES_set_encrypt_key,\@function
+.globl private_AES_set_encrypt_key
+.type private_AES_set_encrypt_key,\@function
.align 16
-AES_set_encrypt_key:
+private_AES_set_encrypt_key:
lghi $t0,0
- clgr $inp,$t0
+ cl${g}r $inp,$t0
je .Lminus1
- clgr $key,$t0
+ cl${g}r $key,$t0
je .Lminus1
lghi $t0,128
@@ -797,7 +844,7 @@ ___
$code.=<<___;
.align 16
.Lekey_internal:
- stmg %r6,%r13,48($sp) # all non-volatile regs
+ stm${g} %r6,%r13,6*$SIZE_T($sp) # all non-volatile regs
larl $tbl,AES_Te+2048
@@ -858,7 +905,7 @@ $code.=<<___;
la $t3,4($t3) # i++
brct $rounds,.L128_loop
lghi %r2,0
- lmg %r6,%r13,48($sp)
+ lm${g} %r6,%r13,6*$SIZE_T($sp)
br $ra
.align 16
@@ -906,7 +953,7 @@ $code.=<<___;
st $s3,36($key)
brct $rounds,.L192_continue
lghi %r2,0
- lmg %r6,%r13,48($sp)
+ lm${g} %r6,%r13,6*$SIZE_T($sp)
br $ra
.align 16
@@ -968,7 +1015,7 @@ $code.=<<___;
st $s3,44($key)
brct $rounds,.L256_continue
lghi %r2,0
- lmg %r6,%r13,48($sp)
+ lm${g} %r6,%r13,6*$SIZE_T($sp)
br $ra
.align 16
@@ -1011,19 +1058,19 @@ $code.=<<___;
.Lminus1:
lghi %r2,-1
br $ra
-.size AES_set_encrypt_key,.-AES_set_encrypt_key
+.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
# void AES_set_decrypt_key(const unsigned char *in, int bits,
# AES_KEY *key) {
-.globl AES_set_decrypt_key
-.type AES_set_decrypt_key,\@function
+.globl private_AES_set_decrypt_key
+.type private_AES_set_decrypt_key,\@function
.align 16
-AES_set_decrypt_key:
- stg $key,32($sp) # I rely on AES_set_encrypt_key to
- stg $ra,112($sp) # save non-volatile registers!
+private_AES_set_decrypt_key:
+ st${g} $key,4*$SIZE_T($sp) # I rely on AES_set_encrypt_key to
+ st${g} $ra,14*$SIZE_T($sp) # save non-volatile registers!
bras $ra,AES_set_encrypt_key
- lg $key,32($sp)
- lg $ra,112($sp)
+ l${g} $key,4*$SIZE_T($sp)
+ l${g} $ra,14*$SIZE_T($sp)
ltgr %r2,%r2
bnzr $ra
___
@@ -1038,11 +1085,11 @@ $code.=<<___ if (!$softonly);
.align 16
.Ldkey_internal:
- stg $key,32($sp)
- stg $ra,40($sp)
+ st${g} $key,4*$SIZE_T($sp)
+ st${g} $ra,14*$SIZE_T($sp)
bras $ra,.Lekey_internal
- lg $key,32($sp)
- lg $ra,40($sp)
+ l${g} $key,4*$SIZE_T($sp)
+ l${g} $ra,14*$SIZE_T($sp)
___
$code.=<<___;
@@ -1123,13 +1170,14 @@ $code.=<<___;
la $key,4($key)
brct $rounds,.Lmix
- lmg %r6,%r13,48($sp)# as was saved by AES_set_encrypt_key!
+ lm${g} %r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key!
lghi %r2,0
br $ra
-.size AES_set_decrypt_key,.-AES_set_decrypt_key
+.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
___
-#void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+########################################################################
+# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# size_t length, const AES_KEY *key,
# unsigned char *ivec, const int enc)
{
@@ -1163,7 +1211,7 @@ $code.=<<___ if (!$softonly);
l %r0,240($key) # load kmc code
lghi $key,15 # res=len%16, len-=res;
ngr $key,$len
- slgr $len,$key
+ sl${g}r $len,$key
la %r1,16($sp) # parameter block - ivec || key
jz .Lkmc_truncated
.long 0xb92f0042 # kmc %r4,%r2
@@ -1181,34 +1229,34 @@ $code.=<<___ if (!$softonly);
tmll %r0,0x80
jnz .Lkmc_truncated_dec
lghi %r1,0
- stg %r1,128($sp)
- stg %r1,136($sp)
+ stg %r1,16*$SIZE_T($sp)
+ stg %r1,16*$SIZE_T+8($sp)
bras %r1,1f
- mvc 128(1,$sp),0($inp)
+ mvc 16*$SIZE_T(1,$sp),0($inp)
1: ex $key,0(%r1)
la %r1,16($sp) # restore parameter block
- la $inp,128($sp)
+ la $inp,16*$SIZE_T($sp)
lghi $len,16
.long 0xb92f0042 # kmc %r4,%r2
j .Lkmc_done
.align 16
.Lkmc_truncated_dec:
- stg $out,64($sp)
- la $out,128($sp)
+ st${g} $out,4*$SIZE_T($sp)
+ la $out,16*$SIZE_T($sp)
lghi $len,16
.long 0xb92f0042 # kmc %r4,%r2
- lg $out,64($sp)
+ l${g} $out,4*$SIZE_T($sp)
bras %r1,2f
- mvc 0(1,$out),128($sp)
+ mvc 0(1,$out),16*$SIZE_T($sp)
2: ex $key,0(%r1)
j .Lkmc_done
.align 16
.Lcbc_software:
___
$code.=<<___;
- stmg $key,$ra,40($sp)
+ stm${g} $key,$ra,5*$SIZE_T($sp)
lhi %r0,0
- cl %r0,164($sp)
+ cl %r0,`$stdframe+$SIZE_T-4`($sp)
je .Lcbc_decrypt
larl $tbl,AES_Te
@@ -1219,10 +1267,10 @@ $code.=<<___;
llgf $s3,12($ivp)
lghi $t0,16
- slgr $len,$t0
+ sl${g}r $len,$t0
brc 4,.Lcbc_enc_tail # if borrow
.Lcbc_enc_loop:
- stmg $inp,$out,16($sp)
+ stm${g} $inp,$out,2*$SIZE_T($sp)
x $s0,0($inp)
x $s1,4($inp)
x $s2,8($inp)
@@ -1231,7 +1279,7 @@ $code.=<<___;
bras $ra,_s390x_AES_encrypt
- lmg $inp,$key,16($sp)
+ lm${g} $inp,$key,2*$SIZE_T($sp)
st $s0,0($out)
st $s1,4($out)
st $s2,8($out)
@@ -1240,33 +1288,33 @@ $code.=<<___;
la $inp,16($inp)
la $out,16($out)
lghi $t0,16
- ltgr $len,$len
+ lt${g}r $len,$len
jz .Lcbc_enc_done
- slgr $len,$t0
+ sl${g}r $len,$t0
brc 4,.Lcbc_enc_tail # if borrow
j .Lcbc_enc_loop
.align 16
.Lcbc_enc_done:
- lg $ivp,48($sp)
+ l${g} $ivp,6*$SIZE_T($sp)
st $s0,0($ivp)
st $s1,4($ivp)
st $s2,8($ivp)
st $s3,12($ivp)
- lmg %r7,$ra,56($sp)
+ lm${g} %r7,$ra,7*$SIZE_T($sp)
br $ra
.align 16
.Lcbc_enc_tail:
aghi $len,15
lghi $t0,0
- stg $t0,128($sp)
- stg $t0,136($sp)
+ stg $t0,16*$SIZE_T($sp)
+ stg $t0,16*$SIZE_T+8($sp)
bras $t1,3f
- mvc 128(1,$sp),0($inp)
+ mvc 16*$SIZE_T(1,$sp),0($inp)
3: ex $len,0($t1)
lghi $len,0
- la $inp,128($sp)
+ la $inp,16*$SIZE_T($sp)
j .Lcbc_enc_loop
.align 16
@@ -1275,10 +1323,10 @@ $code.=<<___;
lg $t0,0($ivp)
lg $t1,8($ivp)
- stmg $t0,$t1,128($sp)
+ stmg $t0,$t1,16*$SIZE_T($sp)
.Lcbc_dec_loop:
- stmg $inp,$out,16($sp)
+ stm${g} $inp,$out,2*$SIZE_T($sp)
llgf $s0,0($inp)
llgf $s1,4($inp)
llgf $s2,8($inp)
@@ -1287,7 +1335,7 @@ $code.=<<___;
bras $ra,_s390x_AES_decrypt
- lmg $inp,$key,16($sp)
+ lm${g} $inp,$key,2*$SIZE_T($sp)
sllg $s0,$s0,32
sllg $s2,$s2,32
lr $s0,$s1
@@ -1295,15 +1343,15 @@ $code.=<<___;
lg $t0,0($inp)
lg $t1,8($inp)
- xg $s0,128($sp)
- xg $s2,136($sp)
+ xg $s0,16*$SIZE_T($sp)
+ xg $s2,16*$SIZE_T+8($sp)
lghi $s1,16
- slgr $len,$s1
+ sl${g}r $len,$s1
brc 4,.Lcbc_dec_tail # if borrow
brc 2,.Lcbc_dec_done # if zero
stg $s0,0($out)
stg $s2,8($out)
- stmg $t0,$t1,128($sp)
+ stmg $t0,$t1,16*$SIZE_T($sp)
la $inp,16($inp)
la $out,16($out)
@@ -1313,7 +1361,7 @@ $code.=<<___;
stg $s0,0($out)
stg $s2,8($out)
.Lcbc_dec_exit:
- lmg $ivp,$ra,48($sp)
+ lm${g} %r6,$ra,6*$SIZE_T($sp)
stmg $t0,$t1,0($ivp)
br $ra
@@ -1321,19 +1369,889 @@ $code.=<<___;
.align 16
.Lcbc_dec_tail:
aghi $len,15
- stg $s0,128($sp)
- stg $s2,136($sp)
+ stg $s0,16*$SIZE_T($sp)
+ stg $s2,16*$SIZE_T+8($sp)
bras $s1,4f
- mvc 0(1,$out),128($sp)
+ mvc 0(1,$out),16*$SIZE_T($sp)
4: ex $len,0($s1)
j .Lcbc_dec_exit
.size AES_cbc_encrypt,.-AES_cbc_encrypt
-.comm OPENSSL_s390xcap_P,8,8
+___
+}
+########################################################################
+# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+# size_t blocks, const AES_KEY *key,
+# const unsigned char *ivec)
+{
+my $inp="%r2";
+my $out="%r4"; # blocks and out are swapped
+my $len="%r3";
+my $key="%r5"; my $iv0="%r5";
+my $ivp="%r6";
+my $fp ="%r7";
+
+$code.=<<___;
+.globl AES_ctr32_encrypt
+.type AES_ctr32_encrypt,\@function
+.align 16
+AES_ctr32_encrypt:
+ xgr %r3,%r4 # flip %r3 and %r4, $out and $len
+ xgr %r4,%r3
+ xgr %r3,%r4
+ llgfr $len,$len # safe in ctr32 subroutine even in 64-bit case
+___
+$code.=<<___ if (!$softonly);
+ l %r0,240($key)
+ lhi %r1,16
+ clr %r0,%r1
+ jl .Lctr32_software
+
+ stm${g} %r6,$s3,6*$SIZE_T($sp)
+
+ slgr $out,$inp
+ la %r1,0($key) # %r1 is permanent copy of $key
+ lg $iv0,0($ivp) # load ivec
+ lg $ivp,8($ivp)
+
+ # prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for possible signal handling
+	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
+ lghi $s1,-4096
+ algr $s0,$sp
+ lgr $fp,$sp
+ ngr $s0,$s1 # align at page boundary
+ slgr $fp,$s0 # total buffer size
+ lgr $s2,$sp
+ lghi $s1,1024+16 # sl[g]fi is extended-immediate facility
+ slgr $fp,$s1 # deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+
+ la $sp,1024($s0) # alloca
+ srlg $fp,$fp,4 # convert bytes to blocks, minimum 16
+ st${g} $s2,0($sp) # back-chain
+ st${g} $fp,$SIZE_T($sp)
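+	# worked example, assuming the incoming $sp happened to be 4KB-
+	# aligned: $s0=($sp-1296)&-4096=$sp-4096, so the total buffer is
+	# 4096 bytes, of which 4096-1024-16=3056 are usable, i.e. 191
+	# counter blocks, and the new $sp is $s0+1024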
+
+ slgr $len,$fp
+ brc 1,.Lctr32_hw_switch # not zero, no borrow
+ algr $fp,$len # input is shorter than allocated buffer
+ lghi $len,0
+ st${g} $fp,$SIZE_T($sp)
+
+.Lctr32_hw_switch:
+___
+$code.=<<___ if (0); ######### kmctr code was measured to be ~12% slower
+ larl $s0,OPENSSL_s390xcap_P
+ lg $s0,8($s0)
+ tmhh $s0,0x0004 # check for message_security-assist-4
+ jz .Lctr32_km_loop
+
+ llgfr $s0,%r0
+ lgr $s1,%r1
+ lghi %r0,0
+ la %r1,16($sp)
+ .long 0xb92d2042 # kmctr %r4,%r2,%r2
+
+ llihh %r0,0x8000 # check if kmctr supports the function code
+ srlg %r0,%r0,0($s0)
+ ng %r0,16($sp)
+ lgr %r0,$s0
+ lgr %r1,$s1
+ jz .Lctr32_km_loop
+
+####### kmctr code
+ algr $out,$inp # restore $out
+	lgr	$s1,$len	# $s1 takes over $len
+ j .Lctr32_kmctr_loop
+.align 16
+.Lctr32_kmctr_loop:
+ la $s2,16($sp)
+ lgr $s3,$fp
+.Lctr32_kmctr_prepare:
+ stg $iv0,0($s2)
+ stg $ivp,8($s2)
+ la $s2,16($s2)
+ ahi $ivp,1 # 32-bit increment, preserves upper half
+ brct $s3,.Lctr32_kmctr_prepare
+
+ #la $inp,0($inp) # inp
+ sllg $len,$fp,4 # len
+ #la $out,0($out) # out
+ la $s2,16($sp) # iv
+ .long 0xb92da042 # kmctr $out,$s2,$inp
+ brc 1,.-4 # pay attention to "partial completion"
+
+ slgr $s1,$fp
+ brc 1,.Lctr32_kmctr_loop # not zero, no borrow
+ algr $fp,$s1
+ lghi $s1,0
+ brc 4+1,.Lctr32_kmctr_loop # not zero
+
+ l${g} $sp,0($sp)
+ lm${g} %r6,$s3,6*$SIZE_T($sp)
+ br $ra
+.align 16
+___
+$code.=<<___;
+.Lctr32_km_loop:
+ la $s2,16($sp)
+ lgr $s3,$fp
+.Lctr32_km_prepare:
+ stg $iv0,0($s2)
+ stg $ivp,8($s2)
+ la $s2,16($s2)
+ ahi $ivp,1 # 32-bit increment, preserves upper half
+ brct $s3,.Lctr32_km_prepare
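+	# the buffer at 16($sp) now holds $fp consecutive counter values;
+	# a single km below encrypts them all in place, and .Lctr32_km_xor
+	# turns that into counter mode by xor-ing the encrypted counters
+	# with the input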
+
+ la $s0,16($sp) # inp
+ sllg $s1,$fp,4 # len
+ la $s2,16($sp) # out
+ .long 0xb92e00a8 # km %r10,%r8
+ brc 1,.-4 # pay attention to "partial completion"
+
+ la $s2,16($sp)
+ lgr $s3,$fp
+ slgr $s2,$inp
+.Lctr32_km_xor:
+ lg $s0,0($inp)
+ lg $s1,8($inp)
+ xg $s0,0($s2,$inp)
+ xg $s1,8($s2,$inp)
+ stg $s0,0($out,$inp)
+ stg $s1,8($out,$inp)
+ la $inp,16($inp)
+ brct $s3,.Lctr32_km_xor
+
+ slgr $len,$fp
+ brc 1,.Lctr32_km_loop # not zero, no borrow
+ algr $fp,$len
+ lghi $len,0
+ brc 4+1,.Lctr32_km_loop # not zero
+
+ l${g} $s0,0($sp)
+ l${g} $s1,$SIZE_T($sp)
+ la $s2,16($sp)
+.Lctr32_km_zap:
+ stg $s0,0($s2)
+ stg $s0,8($s2)
+ la $s2,16($s2)
+ brct $s1,.Lctr32_km_zap
+
+ la $sp,0($s0)
+ lm${g} %r6,$s3,6*$SIZE_T($sp)
+ br $ra
+.align 16
+.Lctr32_software:
+___
+$code.=<<___;
+ stm${g} $key,$ra,5*$SIZE_T($sp)
+ sl${g}r $inp,$out
+ larl $tbl,AES_Te
+ llgf $t1,12($ivp)
+
+.Lctr32_loop:
+ stm${g} $inp,$out,2*$SIZE_T($sp)
+ llgf $s0,0($ivp)
+ llgf $s1,4($ivp)
+ llgf $s2,8($ivp)
+ lgr $s3,$t1
+ st $t1,16*$SIZE_T($sp)
+ lgr %r4,$key
+
+ bras $ra,_s390x_AES_encrypt
+
+ lm${g} $inp,$ivp,2*$SIZE_T($sp)
+ llgf $t1,16*$SIZE_T($sp)
+ x $s0,0($inp,$out)
+ x $s1,4($inp,$out)
+ x $s2,8($inp,$out)
+ x $s3,12($inp,$out)
+ stm $s0,$s3,0($out)
+
+ la $out,16($out)
+ ahi $t1,1 # 32-bit increment
+ brct $len,.Lctr32_loop
+
+ lm${g} %r6,$ra,6*$SIZE_T($sp)
+ br $ra
+.size AES_ctr32_encrypt,.-AES_ctr32_encrypt
+___
+}
+
+########################################################################
+# void AES_xts_encrypt(const char *inp,char *out,size_t len,
+# const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
+{
+my $inp="%r2";
+my $out="%r4"; # len and out are swapped
+my $len="%r3";
+my $key1="%r5"; # $i1
+my $key2="%r6"; # $i2
+my $fp="%r7"; # $i3
+my $tweak=16*$SIZE_T+16; # or $stdframe-16, bottom of the frame...
+
+$code.=<<___;
+.type _s390x_xts_km,\@function
+.align 16
+_s390x_xts_km:
+___
+$code.=<<___ if(1);
+ llgfr $s0,%r0 # put aside the function code
+ lghi $s1,0x7f
+ nr $s1,%r0
+ lghi %r0,0 # query capability vector
+ la %r1,2*$SIZE_T($sp)
+ .long 0xb92e0042 # km %r4,%r2
+ llihh %r1,0x8000
+ srlg %r1,%r1,32($s1) # check for 32+function code
+ ng %r1,2*$SIZE_T($sp)
+ lgr %r0,$s0 # restore the function code
+ la %r1,0($key1) # restore $key1
+ jz .Lxts_km_vanilla
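+	# (km-xts-aes-128/-256 are function codes 50/52, i.e. the plain
+	# km-aes codes plus 32, hence the "32+function code" probe above)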
+
+ lmg $i2,$i3,$tweak($sp) # put aside the tweak value
+ algr $out,$inp
+
+ oill %r0,32 # switch to xts function code
+ aghi $s1,-18 #
+ sllg $s1,$s1,3 # (function code - 18)*8, 0 or 16
+ la %r1,$tweak-16($sp)
+ slgr %r1,$s1 # parameter block position
+ lmg $s0,$s3,0($key1) # load 256 bits of key material,
+ stmg $s0,$s3,0(%r1) # and copy it to parameter block.
+ # yes, it contains junk and overlaps
+ # with the tweak in 128-bit case.
+ # it's done to avoid conditional
+ # branch.
+ stmg $i2,$i3,$tweak($sp) # "re-seat" the tweak value
+
+ .long 0xb92e0042 # km %r4,%r2
+ brc 1,.-4 # pay attention to "partial completion"
+
+ lrvg $s0,$tweak+0($sp) # load the last tweak
+ lrvg $s1,$tweak+8($sp)
+ stmg %r0,%r3,$tweak-32(%r1) # wipe copy of the key
+
+ nill %r0,0xffdf # switch back to original function code
+ la %r1,0($key1) # restore pointer to $key1
+ slgr $out,$inp
+
+ llgc $len,2*$SIZE_T-1($sp)
+ nill $len,0x0f # $len%=16
+ br $ra
+
+.align 16
+.Lxts_km_vanilla:
+___
+$code.=<<___;
+ # prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for possible signal handling
+	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
+ lghi $s1,-4096
+ algr $s0,$sp
+ lgr $fp,$sp
+ ngr $s0,$s1 # align at page boundary
+ slgr $fp,$s0 # total buffer size
+ lgr $s2,$sp
+ lghi $s1,1024+16 # sl[g]fi is extended-immediate facility
+ slgr $fp,$s1 # deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+
+ la $sp,1024($s0) # alloca
+ nill $fp,0xfff0 # round to 16*n
+ st${g} $s2,0($sp) # back-chain
+ nill $len,0xfff0 # redundant
+ st${g} $fp,$SIZE_T($sp)
+
+ slgr $len,$fp
+ brc 1,.Lxts_km_go # not zero, no borrow
+ algr $fp,$len # input is shorter than allocated buffer
+ lghi $len,0
+ st${g} $fp,$SIZE_T($sp)
+
+.Lxts_km_go:
+ lrvg $s0,$tweak+0($s2) # load the tweak value in little-endian
+ lrvg $s1,$tweak+8($s2)
+
+ la $s2,16($sp) # vector of ascending tweak values
+ slgr $s2,$inp
+ srlg $s3,$fp,4
+ j .Lxts_km_start
+
+.Lxts_km_loop:
+ la $s2,16($sp)
+ slgr $s2,$inp
+ srlg $s3,$fp,4
+.Lxts_km_prepare:
+ lghi $i1,0x87
+ srag $i2,$s1,63 # broadcast upper bit
+ ngr $i1,$i2 # rem
+ srlg $i2,$s0,63 # carry bit from lower half
+ sllg $s0,$s0,1
+ sllg $s1,$s1,1
+ xgr $s0,$i1
+ ogr $s1,$i2
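+	# the sequence above multiplies the 128-bit tweak by x in
+	# GF(2^128) modulo x^128+x^7+x^2+x+1: shift left by one bit
+	# and, if the top bit fell off, xor 0x87 into the low byte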
+.Lxts_km_start:
+ lrvgr $i1,$s0 # flip byte order
+ lrvgr $i2,$s1
+ stg $i1,0($s2,$inp)
+ stg $i2,8($s2,$inp)
+ xg $i1,0($inp)
+ xg $i2,8($inp)
+ stg $i1,0($out,$inp)
+ stg $i2,8($out,$inp)
+ la $inp,16($inp)
+ brct $s3,.Lxts_km_prepare
+
+ slgr $inp,$fp # rewind $inp
+ la $s2,0($out,$inp)
+ lgr $s3,$fp
+ .long 0xb92e00aa # km $s2,$s2
+ brc 1,.-4 # pay attention to "partial completion"
+
+ la $s2,16($sp)
+ slgr $s2,$inp
+ srlg $s3,$fp,4
+.Lxts_km_xor:
+ lg $i1,0($out,$inp)
+ lg $i2,8($out,$inp)
+ xg $i1,0($s2,$inp)
+ xg $i2,8($s2,$inp)
+ stg $i1,0($out,$inp)
+ stg $i2,8($out,$inp)
+ la $inp,16($inp)
+ brct $s3,.Lxts_km_xor
+
+ slgr $len,$fp
+ brc 1,.Lxts_km_loop # not zero, no borrow
+ algr $fp,$len
+ lghi $len,0
+ brc 4+1,.Lxts_km_loop # not zero
+
+ l${g} $i1,0($sp) # back-chain
+ llgf $fp,`2*$SIZE_T-4`($sp) # bytes used
+ la $i2,16($sp)
+ srlg $fp,$fp,4
+.Lxts_km_zap:
+ stg $i1,0($i2)
+ stg $i1,8($i2)
+ la $i2,16($i2)
+ brct $fp,.Lxts_km_zap
+
+ la $sp,0($i1)
+ llgc $len,2*$SIZE_T-1($i1)
+ nill $len,0x0f # $len%=16
+ bzr $ra
+
+ # generate one more tweak...
+ lghi $i1,0x87
+ srag $i2,$s1,63 # broadcast upper bit
+ ngr $i1,$i2 # rem
+ srlg $i2,$s0,63 # carry bit from lower half
+ sllg $s0,$s0,1
+ sllg $s1,$s1,1
+ xgr $s0,$i1
+ ogr $s1,$i2
+
+ ltr $len,$len # clear zero flag
+ br $ra
+.size _s390x_xts_km,.-_s390x_xts_km
+
+.globl AES_xts_encrypt
+.type AES_xts_encrypt,\@function
+.align 16
+AES_xts_encrypt:
+ xgr %r3,%r4 # flip %r3 and %r4, $out and $len
+ xgr %r4,%r3
+ xgr %r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+ llgfr $len,$len
+___
+$code.=<<___;
+ st${g} $len,1*$SIZE_T($sp) # save copy of $len
+	srag	$len,$len,4	# formally wrong, because it propagates
+				# the sign bit, but who can afford asking
+ # to process more than 2^63-1 bytes?
+ # I use it, because it sets condition
+ # code...
+ bcr 8,$ra # abort if zero (i.e. less than 16)
+___
+$code.=<<___ if (!$softonly);
+ llgf %r0,240($key2)
+ lhi %r1,16
+ clr %r0,%r1
+ jl .Lxts_enc_software
+
+ stm${g} %r6,$s3,6*$SIZE_T($sp)
+ st${g} $ra,14*$SIZE_T($sp)
+
+ sllg $len,$len,4 # $len&=~15
+ slgr $out,$inp
+
+ # generate the tweak value
+ l${g} $s3,$stdframe($sp) # pointer to iv
+ la $s2,$tweak($sp)
+ lmg $s0,$s1,0($s3)
+ lghi $s3,16
+ stmg $s0,$s1,0($s2)
+ la %r1,0($key2) # $key2 is not needed anymore
+ .long 0xb92e00aa # km $s2,$s2, generate the tweak
+ brc 1,.-4 # can this happen?
+
+ l %r0,240($key1)
+ la %r1,0($key1) # $key1 is not needed anymore
+ bras $ra,_s390x_xts_km
+ jz .Lxts_enc_km_done
+
+ aghi $inp,-16 # take one step back
+ la $i3,0($out,$inp) # put aside real $out
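+	# ciphertext stealing: copy the $len trailing plaintext bytes over
+	# the head of the last full ciphertext block, moving the displaced
+	# ciphertext bytes into the short output tail; the patched block
+	# is then encrypted once more with the extra tweak generated above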
+.Lxts_enc_km_steal:
+ llgc $i1,16($inp)
+ llgc $i2,0($out,$inp)
+ stc $i1,0($out,$inp)
+ stc $i2,16($out,$inp)
+ la $inp,1($inp)
+ brct $len,.Lxts_enc_km_steal
+
+ la $s2,0($i3)
+ lghi $s3,16
+ lrvgr $i1,$s0 # flip byte order
+ lrvgr $i2,$s1
+ xg $i1,0($s2)
+ xg $i2,8($s2)
+ stg $i1,0($s2)
+ stg $i2,8($s2)
+ .long 0xb92e00aa # km $s2,$s2
+ brc 1,.-4 # can this happen?
+ lrvgr $i1,$s0 # flip byte order
+ lrvgr $i2,$s1
+ xg $i1,0($i3)
+ xg $i2,8($i3)
+ stg $i1,0($i3)
+ stg $i2,8($i3)
+
+.Lxts_enc_km_done:
+ l${g} $ra,14*$SIZE_T($sp)
+ st${g} $sp,$tweak($sp) # wipe tweak
+	st${g}	$sp,$tweak+8($sp)
+ lm${g} %r6,$s3,6*$SIZE_T($sp)
+ br $ra
+.align 16
+.Lxts_enc_software:
+___
+$code.=<<___;
+ stm${g} %r6,$ra,6*$SIZE_T($sp)
+
+ slgr $out,$inp
+
+ xgr $s0,$s0 # clear upper half
+ xgr $s1,$s1
+ lrv $s0,$stdframe+4($sp) # load secno
+ lrv $s1,$stdframe+0($sp)
+ xgr $s2,$s2
+ xgr $s3,$s3
+ stm${g} %r2,%r5,2*$SIZE_T($sp)
+ la $key,0($key2)
+ larl $tbl,AES_Te
+ bras $ra,_s390x_AES_encrypt # generate the tweak
+ lm${g} %r2,%r5,2*$SIZE_T($sp)
+ stm $s0,$s3,$tweak($sp) # save the tweak
+ j .Lxts_enc_enter
+
+.align 16
+.Lxts_enc_loop:
+ lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
+ lrvg $s3,$tweak+8($sp)
+ lghi %r1,0x87
+ srag %r0,$s3,63 # broadcast upper bit
+ ngr %r1,%r0 # rem
+ srlg %r0,$s1,63 # carry bit from lower half
+ sllg $s1,$s1,1
+ sllg $s3,$s3,1
+ xgr $s1,%r1
+ ogr $s3,%r0
+ lrvgr $s1,$s1 # flip byte order
+ lrvgr $s3,$s3
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ stg $s1,$tweak+0($sp) # save the tweak
+ llgfr $s1,$s1
+ srlg $s2,$s3,32
+ stg $s3,$tweak+8($sp)
+ llgfr $s3,$s3
+ la $inp,16($inp) # $inp+=16
+.Lxts_enc_enter:
+ x $s0,0($inp) # ^=*($inp)
+ x $s1,4($inp)
+ x $s2,8($inp)
+ x $s3,12($inp)
+ stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing
+ la $key,0($key1)
+ bras $ra,_s390x_AES_encrypt
+ lm${g} %r2,%r5,2*$SIZE_T($sp)
+ x $s0,$tweak+0($sp) # ^=tweak
+ x $s1,$tweak+4($sp)
+ x $s2,$tweak+8($sp)
+ x $s3,$tweak+12($sp)
+ st $s0,0($out,$inp)
+ st $s1,4($out,$inp)
+ st $s2,8($out,$inp)
+ st $s3,12($out,$inp)
+ brct${g} $len,.Lxts_enc_loop
+
+ llgc $len,`2*$SIZE_T-1`($sp)
+ nill $len,0x0f # $len%16
+ jz .Lxts_enc_done
+
+ la $i3,0($inp,$out) # put aside real $out
+.Lxts_enc_steal:
+ llgc %r0,16($inp)
+ llgc %r1,0($out,$inp)
+ stc %r0,0($out,$inp)
+ stc %r1,16($out,$inp)
+ la $inp,1($inp)
+ brct $len,.Lxts_enc_steal
+ la $out,0($i3) # restore real $out
+
+ # generate last tweak...
+ lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
+ lrvg $s3,$tweak+8($sp)
+ lghi %r1,0x87
+ srag %r0,$s3,63 # broadcast upper bit
+ ngr %r1,%r0 # rem
+ srlg %r0,$s1,63 # carry bit from lower half
+ sllg $s1,$s1,1
+ sllg $s3,$s3,1
+ xgr $s1,%r1
+ ogr $s3,%r0
+ lrvgr $s1,$s1 # flip byte order
+ lrvgr $s3,$s3
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ stg $s1,$tweak+0($sp) # save the tweak
+ llgfr $s1,$s1
+ srlg $s2,$s3,32
+ stg $s3,$tweak+8($sp)
+ llgfr $s3,$s3
+
+	x	$s0,0($out)		# ^=*(inp)|stolen cipher-text
+ x $s1,4($out)
+ x $s2,8($out)
+ x $s3,12($out)
+ st${g} $out,4*$SIZE_T($sp)
+ la $key,0($key1)
+ bras $ra,_s390x_AES_encrypt
+ l${g} $out,4*$SIZE_T($sp)
+ x $s0,`$tweak+0`($sp) # ^=tweak
+ x $s1,`$tweak+4`($sp)
+ x $s2,`$tweak+8`($sp)
+ x $s3,`$tweak+12`($sp)
+ st $s0,0($out)
+ st $s1,4($out)
+ st $s2,8($out)
+ st $s3,12($out)
+
+.Lxts_enc_done:
+ stg $sp,$tweak+0($sp) # wipe tweak
+	stg	$sp,$tweak+8($sp)
+ lm${g} %r6,$ra,6*$SIZE_T($sp)
+ br $ra
+.size AES_xts_encrypt,.-AES_xts_encrypt
+___
+# void AES_xts_decrypt(const char *inp,char *out,size_t len,
+# const AES_KEY *key1, const AES_KEY *key2,u64 secno);
+#
+$code.=<<___;
+.globl AES_xts_decrypt
+.type AES_xts_decrypt,\@function
+.align 16
+AES_xts_decrypt:
+ xgr %r3,%r4 # flip %r3 and %r4, $out and $len
+ xgr %r4,%r3
+ xgr %r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+ llgfr $len,$len
+___
+$code.=<<___;
+ st${g} $len,1*$SIZE_T($sp) # save copy of $len
+ aghi $len,-16
+ bcr 4,$ra # abort if less than zero. formally
+ # wrong, because $len is unsigned,
+ # but who can afford asking to
+ # process more than 2^63-1 bytes?
+ tmll $len,0x0f
+ jnz .Lxts_dec_proceed
+ aghi $len,16
+.Lxts_dec_proceed:
+___
+$code.=<<___ if (!$softonly);
+ llgf %r0,240($key2)
+ lhi %r1,16
+ clr %r0,%r1
+ jl .Lxts_dec_software
+
+ stm${g} %r6,$s3,6*$SIZE_T($sp)
+ st${g} $ra,14*$SIZE_T($sp)
+
+ nill $len,0xfff0 # $len&=~15
+ slgr $out,$inp
+
+ # generate the tweak value
+ l${g} $s3,$stdframe($sp) # pointer to iv
+ la $s2,$tweak($sp)
+ lmg $s0,$s1,0($s3)
+ lghi $s3,16
+ stmg $s0,$s1,0($s2)
+ la %r1,0($key2) # $key2 is not needed past this point
+ .long 0xb92e00aa # km $s2,$s2, generate the tweak
+ brc 1,.-4 # can this happen?
+
+ l %r0,240($key1)
+ la %r1,0($key1) # $key1 is not needed anymore
+
+ ltgr $len,$len
+ jz .Lxts_dec_km_short
+ bras $ra,_s390x_xts_km
+ jz .Lxts_dec_km_done
+
+ lrvgr $s2,$s0 # make copy in reverse byte order
+ lrvgr $s3,$s1
+ j .Lxts_dec_km_2ndtweak
+
+.Lxts_dec_km_short:
+ llgc $len,`2*$SIZE_T-1`($sp)
+ nill $len,0x0f # $len%=16
+ lrvg $s0,$tweak+0($sp) # load the tweak
+ lrvg $s1,$tweak+8($sp)
+ lrvgr $s2,$s0 # make copy in reverse byte order
+ lrvgr $s3,$s1
+
+.Lxts_dec_km_2ndtweak:
+ lghi $i1,0x87
+ srag $i2,$s1,63 # broadcast upper bit
+ ngr $i1,$i2 # rem
+ srlg $i2,$s0,63 # carry bit from lower half
+ sllg $s0,$s0,1
+ sllg $s1,$s1,1
+ xgr $s0,$i1
+ ogr $s1,$i2
+ lrvgr $i1,$s0 # flip byte order
+ lrvgr $i2,$s1
+
+ xg $i1,0($inp)
+ xg $i2,8($inp)
+ stg $i1,0($out,$inp)
+ stg $i2,8($out,$inp)
+ la $i2,0($out,$inp)
+ lghi $i3,16
+ .long 0xb92e0066 # km $i2,$i2
+ brc 1,.-4 # can this happen?
+ lrvgr $i1,$s0
+ lrvgr $i2,$s1
+ xg $i1,0($out,$inp)
+ xg $i2,8($out,$inp)
+ stg $i1,0($out,$inp)
+ stg $i2,8($out,$inp)
+
+ la $i3,0($out,$inp) # put aside real $out
+.Lxts_dec_km_steal:
+ llgc $i1,16($inp)
+ llgc $i2,0($out,$inp)
+ stc $i1,0($out,$inp)
+ stc $i2,16($out,$inp)
+ la $inp,1($inp)
+ brct $len,.Lxts_dec_km_steal
+
+ lgr $s0,$s2
+ lgr $s1,$s3
+ xg $s0,0($i3)
+ xg $s1,8($i3)
+ stg $s0,0($i3)
+ stg $s1,8($i3)
+ la $s0,0($i3)
+ lghi $s1,16
+ .long 0xb92e0088 # km $s0,$s0
+ brc 1,.-4 # can this happen?
+ xg $s2,0($i3)
+ xg $s3,8($i3)
+ stg $s2,0($i3)
+ stg $s3,8($i3)
+.Lxts_dec_km_done:
+ l${g} $ra,14*$SIZE_T($sp)
+ st${g} $sp,$tweak($sp) # wipe tweak
+	st${g}	$sp,$tweak+8($sp)
+ lm${g} %r6,$s3,6*$SIZE_T($sp)
+ br $ra
+.align 16
+.Lxts_dec_software:
+___
+$code.=<<___;
+ stm${g} %r6,$ra,6*$SIZE_T($sp)
+
+ srlg $len,$len,4
+ slgr $out,$inp
+
+ xgr $s0,$s0 # clear upper half
+ xgr $s1,$s1
+ lrv $s0,$stdframe+4($sp) # load secno
+ lrv $s1,$stdframe+0($sp)
+ xgr $s2,$s2
+ xgr $s3,$s3
+ stm${g} %r2,%r5,2*$SIZE_T($sp)
+ la $key,0($key2)
+ larl $tbl,AES_Te
+ bras $ra,_s390x_AES_encrypt # generate the tweak
+ lm${g} %r2,%r5,2*$SIZE_T($sp)
+ larl $tbl,AES_Td
+ lt${g}r $len,$len
+ stm $s0,$s3,$tweak($sp) # save the tweak
+ jz .Lxts_dec_short
+ j .Lxts_dec_enter
+
+.align 16
+.Lxts_dec_loop:
+ lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
+ lrvg $s3,$tweak+8($sp)
+ lghi %r1,0x87
+ srag %r0,$s3,63 # broadcast upper bit
+ ngr %r1,%r0 # rem
+ srlg %r0,$s1,63 # carry bit from lower half
+ sllg $s1,$s1,1
+ sllg $s3,$s3,1
+ xgr $s1,%r1
+ ogr $s3,%r0
+ lrvgr $s1,$s1 # flip byte order
+ lrvgr $s3,$s3
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ stg $s1,$tweak+0($sp) # save the tweak
+ llgfr $s1,$s1
+ srlg $s2,$s3,32
+ stg $s3,$tweak+8($sp)
+ llgfr $s3,$s3
+.Lxts_dec_enter:
+ x $s0,0($inp) # tweak^=*(inp)
+ x $s1,4($inp)
+ x $s2,8($inp)
+ x $s3,12($inp)
+ stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing
+ la $key,0($key1)
+ bras $ra,_s390x_AES_decrypt
+ lm${g} %r2,%r5,2*$SIZE_T($sp)
+ x $s0,$tweak+0($sp) # ^=tweak
+ x $s1,$tweak+4($sp)
+ x $s2,$tweak+8($sp)
+ x $s3,$tweak+12($sp)
+ st $s0,0($out,$inp)
+ st $s1,4($out,$inp)
+ st $s2,8($out,$inp)
+ st $s3,12($out,$inp)
+ la $inp,16($inp)
+ brct${g} $len,.Lxts_dec_loop
+
+ llgc $len,`2*$SIZE_T-1`($sp)
+ nill $len,0x0f # $len%16
+ jz .Lxts_dec_done
+
+ # generate pair of tweaks...
+ lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
+ lrvg $s3,$tweak+8($sp)
+ lghi %r1,0x87
+ srag %r0,$s3,63 # broadcast upper bit
+ ngr %r1,%r0 # rem
+ srlg %r0,$s1,63 # carry bit from lower half
+ sllg $s1,$s1,1
+ sllg $s3,$s3,1
+ xgr $s1,%r1
+ ogr $s3,%r0
+ lrvgr $i2,$s1 # flip byte order
+ lrvgr $i3,$s3
+ stmg $i2,$i3,$tweak($sp) # save the 1st tweak
+ j .Lxts_dec_2ndtweak
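+	# note the order, swapped relative to encryption: with ciphertext
+	# stealing the last *full* block is decrypted with the 2nd tweak,
+	# and the reassembled final block with the 1st one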
+
+.align 16
+.Lxts_dec_short:
+ llgc $len,`2*$SIZE_T-1`($sp)
+ nill $len,0x0f # $len%16
+ lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
+ lrvg $s3,$tweak+8($sp)
+.Lxts_dec_2ndtweak:
+ lghi %r1,0x87
+ srag %r0,$s3,63 # broadcast upper bit
+ ngr %r1,%r0 # rem
+ srlg %r0,$s1,63 # carry bit from lower half
+ sllg $s1,$s1,1
+ sllg $s3,$s3,1
+ xgr $s1,%r1
+ ogr $s3,%r0
+ lrvgr $s1,$s1 # flip byte order
+ lrvgr $s3,$s3
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ stg $s1,$tweak-16+0($sp) # save the 2nd tweak
+ llgfr $s1,$s1
+ srlg $s2,$s3,32
+ stg $s3,$tweak-16+8($sp)
+ llgfr $s3,$s3
+
+ x $s0,0($inp) # tweak_the_2nd^=*(inp)
+ x $s1,4($inp)
+ x $s2,8($inp)
+ x $s3,12($inp)
+ stm${g} %r2,%r3,2*$SIZE_T($sp)
+ la $key,0($key1)
+ bras $ra,_s390x_AES_decrypt
+ lm${g} %r2,%r5,2*$SIZE_T($sp)
+ x $s0,$tweak-16+0($sp) # ^=tweak_the_2nd
+ x $s1,$tweak-16+4($sp)
+ x $s2,$tweak-16+8($sp)
+ x $s3,$tweak-16+12($sp)
+ st $s0,0($out,$inp)
+ st $s1,4($out,$inp)
+ st $s2,8($out,$inp)
+ st $s3,12($out,$inp)
+
+ la $i3,0($out,$inp) # put aside real $out
+.Lxts_dec_steal:
+ llgc %r0,16($inp)
+ llgc %r1,0($out,$inp)
+ stc %r0,0($out,$inp)
+ stc %r1,16($out,$inp)
+ la $inp,1($inp)
+ brct $len,.Lxts_dec_steal
+ la $out,0($i3) # restore real $out
+
+ lm $s0,$s3,$tweak($sp) # load the 1st tweak
+ x $s0,0($out) # tweak^=*(inp)|stolen cipher-text
+ x $s1,4($out)
+ x $s2,8($out)
+ x $s3,12($out)
+ st${g} $out,4*$SIZE_T($sp)
+ la $key,0($key1)
+ bras $ra,_s390x_AES_decrypt
+ l${g} $out,4*$SIZE_T($sp)
+ x $s0,$tweak+0($sp) # ^=tweak
+ x $s1,$tweak+4($sp)
+ x $s2,$tweak+8($sp)
+ x $s3,$tweak+12($sp)
+ st $s0,0($out)
+ st $s1,4($out)
+ st $s2,8($out)
+ st $s3,12($out)
+ stg $sp,$tweak-16+0($sp) # wipe 2nd tweak
+ stg $sp,$tweak-16+8($sp)
+.Lxts_dec_done:
+ stg $sp,$tweak+0($sp) # wipe tweak
+	stg	$sp,$tweak+8($sp)
+ lm${g} %r6,$ra,6*$SIZE_T($sp)
+ br $ra
+.size AES_xts_decrypt,.-AES_xts_decrypt
___
}
$code.=<<___;
.string "AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+.comm OPENSSL_s390xcap_P,16,8
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
+close STDOUT; # force flush
diff --git a/openssl/crypto/aes/asm/aes-x86_64.pl b/openssl/crypto/aes/asm/aes-x86_64.pl
index a545e892a..48fa857d5 100644
--- a/openssl/crypto/aes/asm/aes-x86_64.pl
+++ b/openssl/crypto/aes/asm/aes-x86_64.pl
@@ -588,6 +588,9 @@ $code.=<<___;
.globl AES_encrypt
.type AES_encrypt,\@function,3
.align 16
+.globl asm_AES_encrypt
+.hidden asm_AES_encrypt
+asm_AES_encrypt:
AES_encrypt:
push %rbx
push %rbp
@@ -1184,6 +1187,9 @@ $code.=<<___;
.globl AES_decrypt
.type AES_decrypt,\@function,3
.align 16
+.globl asm_AES_decrypt
+.hidden asm_AES_decrypt
+asm_AES_decrypt:
AES_decrypt:
push %rbx
push %rbp
@@ -1277,13 +1283,13 @@ $code.=<<___;
___
}
-# int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
$code.=<<___;
-.globl AES_set_encrypt_key
-.type AES_set_encrypt_key,\@function,3
+.globl private_AES_set_encrypt_key
+.type private_AES_set_encrypt_key,\@function,3
.align 16
-AES_set_encrypt_key:
+private_AES_set_encrypt_key:
push %rbx
push %rbp
push %r12 # redundant, but allows to share
@@ -1304,7 +1310,7 @@ AES_set_encrypt_key:
add \$56,%rsp
.Lenc_key_epilogue:
ret
-.size AES_set_encrypt_key,.-AES_set_encrypt_key
+.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
.type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
.align 16
@@ -1547,13 +1553,13 @@ $code.=<<___;
___
}
-# int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
$code.=<<___;
-.globl AES_set_decrypt_key
-.type AES_set_decrypt_key,\@function,3
+.globl private_AES_set_decrypt_key
+.type private_AES_set_decrypt_key,\@function,3
.align 16
-AES_set_decrypt_key:
+private_AES_set_decrypt_key:
push %rbx
push %rbp
push %r12
@@ -1622,7 +1628,7 @@ $code.=<<___;
add \$56,%rsp
.Ldec_key_epilogue:
ret
-.size AES_set_decrypt_key,.-AES_set_decrypt_key
+.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
___
# void AES_cbc_encrypt (const void char *inp, unsigned char *out,
@@ -1648,6 +1654,9 @@ $code.=<<___;
.type AES_cbc_encrypt,\@function,6
.align 16
.extern OPENSSL_ia32cap_P
+.globl asm_AES_cbc_encrypt
+.hidden asm_AES_cbc_encrypt
+asm_AES_cbc_encrypt:
AES_cbc_encrypt:
cmp \$0,%rdx # check length
je .Lcbc_epilogue
@@ -2766,13 +2775,13 @@ cbc_se_handler:
.rva .LSEH_end_AES_decrypt
.rva .LSEH_info_AES_decrypt
- .rva .LSEH_begin_AES_set_encrypt_key
- .rva .LSEH_end_AES_set_encrypt_key
- .rva .LSEH_info_AES_set_encrypt_key
+ .rva .LSEH_begin_private_AES_set_encrypt_key
+ .rva .LSEH_end_private_AES_set_encrypt_key
+ .rva .LSEH_info_private_AES_set_encrypt_key
- .rva .LSEH_begin_AES_set_decrypt_key
- .rva .LSEH_end_AES_set_decrypt_key
- .rva .LSEH_info_AES_set_decrypt_key
+ .rva .LSEH_begin_private_AES_set_decrypt_key
+ .rva .LSEH_end_private_AES_set_decrypt_key
+ .rva .LSEH_info_private_AES_set_decrypt_key
.rva .LSEH_begin_AES_cbc_encrypt
.rva .LSEH_end_AES_cbc_encrypt
@@ -2788,11 +2797,11 @@ cbc_se_handler:
.byte 9,0,0,0
.rva block_se_handler
.rva .Ldec_prologue,.Ldec_epilogue # HandlerData[]
-.LSEH_info_AES_set_encrypt_key:
+.LSEH_info_private_AES_set_encrypt_key:
.byte 9,0,0,0
.rva key_se_handler
.rva .Lenc_key_prologue,.Lenc_key_epilogue # HandlerData[]
-.LSEH_info_AES_set_decrypt_key:
+.LSEH_info_private_AES_set_decrypt_key:
.byte 9,0,0,0
.rva key_se_handler
.rva .Ldec_key_prologue,.Ldec_key_epilogue # HandlerData[]
diff --git a/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl b/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl
new file mode 100644
index 000000000..c6f6b3334
--- /dev/null
+++ b/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl
@@ -0,0 +1,1249 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# June 2011
+#
+# This is AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
+# in http://download.intel.com/design/intarch/papers/323686.pdf, is
+# that since AESNI-CBC encrypt exhibits *very* low instruction-level
+# parallelism, interleaving it with another algorithm would allow
+# better utilization of processor resources and better performance.
+# SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and
+# AESNI code is weaved into it. Below are performance numbers in
+# cycles per processed byte, less is better, for standalone AESNI-CBC
+# encrypt, sum of the latter and standalone SHA1, and "stitched"
+# subroutine:
+#
+# AES-128-CBC +SHA1 stitch gain
+# Westmere 3.77[+5.6] 9.37 6.65 +41%
+# Sandy Bridge 5.05[+5.2(6.3)] 10.25(11.35) 6.16(7.08) +67%(+60%)
+#
+# AES-192-CBC
+# Westmere 4.51 10.11 6.97 +45%
+# Sandy Bridge 6.05 11.25(12.35) 6.34(7.27) +77%(+70%)
+#
+# AES-256-CBC
+# Westmere 5.25 10.85 7.25 +50%
+# Sandy Bridge 7.05 12.25(13.35) 7.06(7.70) +74%(+73%)
+#
+# (*)	There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
+# background information. Above numbers in parentheses are SSSE3
+# results collected on AVX-capable CPU, i.e. apply on OSes that
+# don't support AVX.
+#
+# Needless to say, it makes no sense to implement a "stitched" *decrypt*
+# subroutine: because *both* AESNI-CBC decrypt and SHA1 already fully
+# utilize instruction-level parallelism, stitching would not give any
+# gain. Well, there might be some, e.g. because of better cache
+# locality... For reference, here are performance results for
+# standalone AESNI-CBC decrypt:
+#
+# AES-128-CBC AES-192-CBC AES-256-CBC
+# Westmere 1.31 1.55 1.80
+# Sandy Bridge 0.93 1.06 1.22
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
+ $1>=2.19);
+$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
+ $1>=2.09);
+$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
+ $1>=10);
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+# void aesni_cbc_sha1_enc(const void *inp,
+# void *out,
+# size_t length,
+# const AES_KEY *key,
+# unsigned char *iv,
+# SHA_CTX *ctx,
+# const void *in0);
+
+$code.=<<___;
+.text
+.extern OPENSSL_ia32cap_P
+
+.globl aesni_cbc_sha1_enc
+.type aesni_cbc_sha1_enc,\@abi-omnipotent
+.align 16
+aesni_cbc_sha1_enc:
+ # caller should check for SSSE3 and AES-NI bits
+ mov OPENSSL_ia32cap_P+0(%rip),%r10d
+ mov OPENSSL_ia32cap_P+4(%rip),%r11d
+___
+$code.=<<___ if ($avx);
+ and \$`1<<28`,%r11d # mask AVX bit
+ and \$`1<<30`,%r10d # mask "Intel CPU" bit
+ or %r11d,%r10d
+ cmp \$`1<<28|1<<30`,%r10d
+ je aesni_cbc_sha1_enc_avx
+___
+$code.=<<___;
+ jmp aesni_cbc_sha1_enc_ssse3
+ ret
+.size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
+___
+
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
+my @T=("%esi","%edi");
+my $j=0; my $jj=0; my $r=0; my $sn=0;
+my $K_XX_XX="%r11";
+my ($iv,$in,$rndkey0)=map("%xmm$_",(11..13));
+my @rndkey=("%xmm14","%xmm15");
+
+sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+ my $arg = pop;
+ $arg = "\$$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
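+# (e.g. &psrld(@Tx[0],31) appends "psrld $31,%xmm8": the last Perl
+# argument becomes the first, AT&T-order, operand and bare numbers are
+# prefixed with "$" to become immediates)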
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+$code.=<<___;
+.type aesni_cbc_sha1_enc_ssse3,\@function,6
+.align 16
+aesni_cbc_sha1_enc_ssse3:
+ mov `($win64?56:8)`(%rsp),$inp # load 7th argument
+ #shr \$6,$len # debugging artefact
+ #jz .Lepilogue_ssse3 # debugging artefact
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea `-104-($win64?10*16:0)`(%rsp),%rsp
+ #mov $in0,$inp # debugging artefact
+ #lea 64(%rsp),$ctx # debugging artefact
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,96+0(%rsp)
+ movaps %xmm7,96+16(%rsp)
+ movaps %xmm8,96+32(%rsp)
+ movaps %xmm9,96+48(%rsp)
+ movaps %xmm10,96+64(%rsp)
+ movaps %xmm11,96+80(%rsp)
+ movaps %xmm12,96+96(%rsp)
+ movaps %xmm13,96+112(%rsp)
+ movaps %xmm14,96+128(%rsp)
+ movaps %xmm15,96+144(%rsp)
+.Lprologue_ssse3:
+___
+$code.=<<___;
+ mov $in0,%r12 # reassign arguments
+ mov $out,%r13
+ mov $len,%r14
+ mov $key,%r15
+ movdqu ($ivp),$iv # load IV
+ mov $ivp,88(%rsp) # save $ivp
+___
+my ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
+my $rounds="${ivp}d";
+$code.=<<___;
+ shl \$6,$len
+ sub $in0,$out
+ mov 240($key),$rounds
+ add $inp,$len # end of input
+
+ lea K_XX_XX(%rip),$K_XX_XX
+ mov 0($ctx),$A # load context
+ mov 4($ctx),$B
+ mov 8($ctx),$C
+ mov 12($ctx),$D
+ mov $B,@T[0] # magic seed
+ mov 16($ctx),$E
+
+ movdqa 64($K_XX_XX),@X[2] # pbswap mask
+ movdqa 0($K_XX_XX),@Tx[1] # K_00_19
+ movdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
+ movdqu 16($inp),@X[-3&7]
+ movdqu 32($inp),@X[-2&7]
+ movdqu 48($inp),@X[-1&7]
+ pshufb @X[2],@X[-4&7] # byte swap
+ add \$64,$inp
+ pshufb @X[2],@X[-3&7]
+ pshufb @X[2],@X[-2&7]
+ pshufb @X[2],@X[-1&7]
+ paddd @Tx[1],@X[-4&7] # add K_00_19
+ paddd @Tx[1],@X[-3&7]
+ paddd @Tx[1],@X[-2&7]
+ movdqa @X[-4&7],0(%rsp) # X[]+K xfer to IALU
+ psubd @Tx[1],@X[-4&7] # restore X[]
+ movdqa @X[-3&7],16(%rsp)
+ psubd @Tx[1],@X[-3&7]
+ movdqa @X[-2&7],32(%rsp)
+ psubd @Tx[1],@X[-2&7]
+ movups ($key),$rndkey0 # $key[0]
+ movups 16($key),$rndkey[0] # forward reference
+ jmp .Loop_ssse3
+___
+
+my $aesenc=sub {
+ use integer;
+ my ($n,$k)=($r/10,$r%10);
+ if ($k==0) {
+ $code.=<<___;
+ movups `16*$n`($in0),$in # load input
+ xorps $rndkey0,$in
+___
+ $code.=<<___ if ($n);
+ movups $iv,`16*($n-1)`($out,$in0) # write output
+___
+ $code.=<<___;
+ xorps $in,$iv
+ aesenc $rndkey[0],$iv
+ movups `32+16*$k`($key),$rndkey[1]
+___
+ } elsif ($k==9) {
+ $sn++;
+ $code.=<<___;
+ cmp \$11,$rounds
+ jb .Laesenclast$sn
+ movups `32+16*($k+0)`($key),$rndkey[1]
+ aesenc $rndkey[0],$iv
+ movups `32+16*($k+1)`($key),$rndkey[0]
+ aesenc $rndkey[1],$iv
+ je .Laesenclast$sn
+ movups `32+16*($k+2)`($key),$rndkey[1]
+ aesenc $rndkey[0],$iv
+ movups `32+16*($k+3)`($key),$rndkey[0]
+ aesenc $rndkey[1],$iv
+.Laesenclast$sn:
+ aesenclast $rndkey[0],$iv
+ movups 16($key),$rndkey[1] # forward reference
+___
+ } else {
+ $code.=<<___;
+ aesenc $rndkey[0],$iv
+ movups `32+16*$k`($key),$rndkey[1]
+___
+ }
+ $r++; unshift(@rndkey,pop(@rndkey));
+};
+
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
+ my ($a,$b,$c,$d,$e);
+
+ &movdqa (@X[0],@X[-3&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa (@Tx[0],@X[-1&7]);
+ &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &paddd (@Tx[1],@X[-1&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psrldq (@Tx[0],4); # "X[-3]", 3 dwords
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &pxor (@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &movdqa (@Tx[2],@X[0]);
+ &movdqa (@Tx[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &pslldq (@Tx[2],12); # "X[0]"<<96, extract one dword
+ &paddd (@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &psrld (@Tx[0],31);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa (@Tx[1],@Tx[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &psrld (@Tx[2],30);
+ &por (@X[0],@Tx[0]); # "X[0]"<<<=1
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &pslld (@Tx[1],2);
+ &pxor (@X[0],@Tx[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &pxor (@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+ push(@Tx,shift(@Tx));
+}
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
+ my ($a,$b,$c,$d,$e);
+
+ &movdqa (@Tx[0],@X[-1&7]) if ($Xi==8);
+ eval(shift(@insns)); # body_20_39
+ &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
+ &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+
+ &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
+ eval(shift(@insns));
+ eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
+ if ($Xi%5) {
+ &movdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+ } else { # ... or load next one
+ &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
+ }
+ &paddd (@Tx[1],@X[-1&7]);
+ eval(shift(@insns)); # ror
+ eval(shift(@insns));
+
+ &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-6]"
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+
+ &movdqa (@Tx[0],@X[0]);
+ &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ eval(shift(@insns));
+
+ &pslld (@X[0],2);
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ &psrld (@Tx[0],30);
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ eval(shift(@insns));
+
+ &por (@X[0],@Tx[0]); # "X[0]"<<<=2
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ &movdqa (@Tx[1],@X[0]) if ($Xi<19);
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ eval(shift(@insns));
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+ push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_ssse3_80()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ eval(shift(@insns));
+ &paddd (@Tx[1],@X[-1&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ &cmp ($inp,$len);
+ &je (".Ldone_ssse3");
+
+ unshift(@Tx,pop(@Tx));
+
+ &movdqa (@X[2],"64($K_XX_XX)"); # pbswap mask
+ &movdqa (@Tx[1],"0($K_XX_XX)"); # K_00_19
+ &movdqu (@X[-4&7],"0($inp)"); # load input
+ &movdqu (@X[-3&7],"16($inp)");
+ &movdqu (@X[-2&7],"32($inp)");
+ &movdqu (@X[-1&7],"48($inp)");
+ &pshufb (@X[-4&7],@X[2]); # byte swap
+ &add ($inp,64);
+
+ $Xi=0;
+}
+
+sub Xloop_ssse3()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &pshufb (@X[($Xi-3)&7],@X[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &paddd (@X[($Xi-4)&7],@Tx[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &movdqa (eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]); # X[]+K xfer to IALU
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &psubd (@X[($Xi-4)&7],@Tx[1]);
+
+ foreach (@insns) { eval; }
+ $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ foreach (@insns) { eval; }
+}
+
+sub body_00_19 () {
+ use integer;
+ my ($k,$n);
+ my @r=(
+ '($a,$b,$c,$d,$e)=@V;'.
+ '&add ($e,eval(4*($j&15))."(%rsp)");', # X[]+K xfer
+ '&xor ($c,$d);',
+ '&mov (@T[1],$a);', # $b in next round
+ '&$_rol ($a,5);',
+ '&and (@T[0],$c);', # ($b&($c^$d))
+ '&xor ($c,$d);', # restore $c
+ '&xor (@T[0],$d);',
+ '&add ($e,$a);',
+ '&$_ror ($b,$j?7:2);', # $b>>>2
+ '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ );
+ $n = scalar(@r);
+ $k = (($jj+1)*12/20)*20*$n/12; # 12 aesencs per these 20 rounds
+ @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
+ $jj++;
+ return @r;
+}
+
+sub body_20_39 () {
+ use integer;
+ my ($k,$n);
+ my @r=(
+ '($a,$b,$c,$d,$e)=@V;'.
+ '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer
+ '&xor (@T[0],$d);', # ($b^$d)
+ '&mov (@T[1],$a);', # $b in next round
+ '&$_rol ($a,5);',
+ '&xor (@T[0],$c);', # ($b^$d^$c)
+ '&add ($e,$a);',
+ '&$_ror ($b,7);', # $b>>>2
+ '&add ($e,@T[0]);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ );
+ $n = scalar(@r);
+ $k = (($jj+1)*8/20)*20*$n/8; # 8 aesencs per these 20 rounds
+ @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
+ $jj++;
+ return @r;
+}
+
+sub body_40_59 () {
+ use integer;
+ my ($k,$n);
+ my @r=(
+ '($a,$b,$c,$d,$e)=@V;'.
+ '&mov (@T[1],$c);',
+ '&xor ($c,$d);',
+ '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer
+ '&and (@T[1],$d);',
+ '&and (@T[0],$c);', # ($b&($c^$d))
+ '&$_ror ($b,7);', # $b>>>2
+ '&add ($e,@T[1]);',
+ '&mov (@T[1],$a);', # $b in next round
+ '&$_rol ($a,5);',
+ '&add ($e,@T[0]);',
+ '&xor ($c,$d);', # restore $c
+ '&add ($e,$a);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+ );
+ $n = scalar(@r);
+ $k=(($jj+1)*12/20)*20*$n/12; # 12 aesencs per these 20 rounds
+ @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
+ $jj++;
+ return @r;
+}
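+# The $k/$jj arithmetic above spreads each body's aesenc budget evenly
+# over its rounds: 12+8+12+8=40 aesencs per 80 SHA1 rounds, i.e. four
+# 10-round AES-128 blocks, matching the 64 bytes of CBC output produced
+# per 64-byte SHA1 block (AES-192/256 run their extra rounds in the
+# $k==9 tail of $aesenc).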
+$code.=<<___;
+.align 16
+.Loop_ssse3:
+___
+ &Xupdate_ssse3_16_31(\&body_00_19);
+ &Xupdate_ssse3_16_31(\&body_00_19);
+ &Xupdate_ssse3_16_31(\&body_00_19);
+ &Xupdate_ssse3_16_31(\&body_00_19);
+ &Xupdate_ssse3_32_79(\&body_00_19);
+ &Xupdate_ssse3_32_79(\&body_20_39);
+ &Xupdate_ssse3_32_79(\&body_20_39);
+ &Xupdate_ssse3_32_79(\&body_20_39);
+ &Xupdate_ssse3_32_79(\&body_20_39);
+ &Xupdate_ssse3_32_79(\&body_20_39);
+ &Xupdate_ssse3_32_79(\&body_40_59);
+ &Xupdate_ssse3_32_79(\&body_40_59);
+ &Xupdate_ssse3_32_79(\&body_40_59);
+ &Xupdate_ssse3_32_79(\&body_40_59);
+ &Xupdate_ssse3_32_79(\&body_40_59);
+ &Xupdate_ssse3_32_79(\&body_20_39);
+ &Xuplast_ssse3_80(\&body_20_39); # can jump to "done"
+
+ $saved_j=$j; @saved_V=@V;
+ $saved_r=$r; @saved_rndkey=@rndkey;
+
+ &Xloop_ssse3(\&body_20_39);
+ &Xloop_ssse3(\&body_20_39);
+ &Xloop_ssse3(\&body_20_39);
+
+$code.=<<___;
+ movups $iv,48($out,$in0) # write output
+ lea 64($in0),$in0
+
+ add 0($ctx),$A # update context
+ add 4($ctx),@T[0]
+ add 8($ctx),$C
+ add 12($ctx),$D
+ mov $A,0($ctx)
+ add 16($ctx),$E
+ mov @T[0],4($ctx)
+ mov @T[0],$B # magic seed
+ mov $C,8($ctx)
+ mov $D,12($ctx)
+ mov $E,16($ctx)
+ jmp .Loop_ssse3
+
+.align 16
+.Ldone_ssse3:
+___
+ $jj=$j=$saved_j; @V=@saved_V;
+ $r=$saved_r; @rndkey=@saved_rndkey;
+
+ &Xtail_ssse3(\&body_20_39);
+ &Xtail_ssse3(\&body_20_39);
+ &Xtail_ssse3(\&body_20_39);
+
+$code.=<<___;
+ movups $iv,48($out,$in0) # write output
+ mov 88(%rsp),$ivp # restore $ivp
+
+ add 0($ctx),$A # update context
+ add 4($ctx),@T[0]
+ add 8($ctx),$C
+ mov $A,0($ctx)
+ add 12($ctx),$D
+ mov @T[0],4($ctx)
+ add 16($ctx),$E
+ mov $C,8($ctx)
+ mov $D,12($ctx)
+ mov $E,16($ctx)
+ movups $iv,($ivp) # write IV
+___
+$code.=<<___ if ($win64);
+ movaps 96+0(%rsp),%xmm6
+ movaps 96+16(%rsp),%xmm7
+ movaps 96+32(%rsp),%xmm8
+ movaps 96+48(%rsp),%xmm9
+ movaps 96+64(%rsp),%xmm10
+ movaps 96+80(%rsp),%xmm11
+ movaps 96+96(%rsp),%xmm12
+ movaps 96+112(%rsp),%xmm13
+ movaps 96+128(%rsp),%xmm14
+ movaps 96+144(%rsp),%xmm15
+___
+$code.=<<___;
+ lea `104+($win64?10*16:0)`(%rsp),%rsi
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_ssse3:
+ ret
+.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
+___
+
+$j=$jj=$r=$sn=0;
+
+if ($avx) {
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
+my @T=("%esi","%edi");
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
+
+$code.=<<___;
+.type aesni_cbc_sha1_enc_avx,\@function,6
+.align 16
+aesni_cbc_sha1_enc_avx:
+ mov `($win64?56:8)`(%rsp),$inp # load 7th argument
+ #shr \$6,$len # debugging artefact
+ #jz .Lepilogue_avx # debugging artefact
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea `-104-($win64?10*16:0)`(%rsp),%rsp
+ #mov $in0,$inp # debugging artefact
+ #lea 64(%rsp),$ctx # debugging artefact
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,96+0(%rsp)
+ movaps %xmm7,96+16(%rsp)
+ movaps %xmm8,96+32(%rsp)
+ movaps %xmm9,96+48(%rsp)
+ movaps %xmm10,96+64(%rsp)
+ movaps %xmm11,96+80(%rsp)
+ movaps %xmm12,96+96(%rsp)
+ movaps %xmm13,96+112(%rsp)
+ movaps %xmm14,96+128(%rsp)
+ movaps %xmm15,96+144(%rsp)
+.Lprologue_avx:
+___
+$code.=<<___;
+ vzeroall
+ mov $in0,%r12 # reassign arguments
+ mov $out,%r13
+ mov $len,%r14
+ mov $key,%r15
+ vmovdqu ($ivp),$iv # load IV
+ mov $ivp,88(%rsp) # save $ivp
+___
+my ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
+my $rounds="${ivp}d";
+$code.=<<___;
+ shl \$6,$len
+ sub $in0,$out
+ mov 240($key),$rounds
+ add \$112,$key # size optimization
+ add $inp,$len # end of input
+
+ lea K_XX_XX(%rip),$K_XX_XX
+ mov 0($ctx),$A # load context
+ mov 4($ctx),$B
+ mov 8($ctx),$C
+ mov 12($ctx),$D
+ mov $B,@T[0] # magic seed
+ mov 16($ctx),$E
+
+ vmovdqa 64($K_XX_XX),@X[2] # pbswap mask
+ vmovdqa 0($K_XX_XX),@Tx[1] # K_00_19
+ vmovdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
+ vmovdqu 16($inp),@X[-3&7]
+ vmovdqu 32($inp),@X[-2&7]
+ vmovdqu 48($inp),@X[-1&7]
+ vpshufb @X[2],@X[-4&7],@X[-4&7] # byte swap
+ add \$64,$inp
+ vpshufb @X[2],@X[-3&7],@X[-3&7]
+ vpshufb @X[2],@X[-2&7],@X[-2&7]
+ vpshufb @X[2],@X[-1&7],@X[-1&7]
+ vpaddd @Tx[1],@X[-4&7],@X[0] # add K_00_19
+ vpaddd @Tx[1],@X[-3&7],@X[1]
+ vpaddd @Tx[1],@X[-2&7],@X[2]
+ vmovdqa @X[0],0(%rsp) # X[]+K xfer to IALU
+ vmovdqa @X[1],16(%rsp)
+ vmovdqa @X[2],32(%rsp)
+ vmovups -112($key),$rndkey0 # $key[0]
+ vmovups 16-112($key),$rndkey[0] # forward reference
+ jmp .Loop_avx
+___
+
+my $aesenc=sub {
+ use integer;
+ my ($n,$k)=($r/10,$r%10);
+ if ($k==0) {
+ $code.=<<___;
+ vmovups `16*$n`($in0),$in # load input
+ vxorps $rndkey0,$in,$in
+___
+ $code.=<<___ if ($n);
+ vmovups $iv,`16*($n-1)`($out,$in0) # write output
+___
+ $code.=<<___;
+ vxorps $in,$iv,$iv
+ vaesenc $rndkey[0],$iv,$iv
+ vmovups `32+16*$k-112`($key),$rndkey[1]
+___
+ } elsif ($k==9) {
+ $sn++;
+ $code.=<<___;
+ cmp \$11,$rounds
+ jb .Lvaesenclast$sn
+ vaesenc $rndkey[0],$iv,$iv
+ vmovups `32+16*($k+0)-112`($key),$rndkey[1]
+ vaesenc $rndkey[1],$iv,$iv
+ vmovups `32+16*($k+1)-112`($key),$rndkey[0]
+ je .Lvaesenclast$sn
+ vaesenc $rndkey[0],$iv,$iv
+ vmovups `32+16*($k+2)-112`($key),$rndkey[1]
+ vaesenc $rndkey[1],$iv,$iv
+ vmovups `32+16*($k+3)-112`($key),$rndkey[0]
+.Lvaesenclast$sn:
+ vaesenclast $rndkey[0],$iv,$iv
+ vmovups 16-112($key),$rndkey[1] # forward reference
+___
+ } else {
+ $code.=<<___;
+ vaesenc $rndkey[0],$iv,$iv
+ vmovups `32+16*$k-112`($key),$rndkey[1]
+___
+ }
+ $r++; unshift(@rndkey,pop(@rndkey));
+};
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
+ my ($a,$b,$c,$d,$e);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpsrld (@Tx[0],@X[0],31);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpslldq(@Tx[2],@X[0],12); # "X[0]"<<96, extract one dword
+ &vpaddd (@X[0],@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpsrld (@Tx[1],@Tx[2],30);
+ &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=1
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpslld (@Tx[2],@Tx[2],2);
+ &vpxor (@X[0],@X[0],@Tx[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@Tx[2]); # "X[0]"^=("X[0]">>96)<<<2
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vmovdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+ push(@Tx,shift(@Tx));
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
+ my ($a,$b,$c,$d,$e);
+
+ &vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
+ &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+
+ &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
+ eval(shift(@insns));
+ eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
+ if ($Xi%5) {
+ &vmovdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+ } else { # ... or load next one
+ &vmovdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
+ }
+ &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
+ eval(shift(@insns)); # ror
+ eval(shift(@insns));
+
+ &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-6]"
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+
+ &vpsrld (@Tx[0],@X[0],30);
+ &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ eval(shift(@insns));
+
+ &vpslld (@X[0],@X[0],2);
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # ror
+ eval(shift(@insns));
+
+ &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=2
+ eval(shift(@insns)); # body_20_39
+ eval(shift(@insns));
+ &vmovdqa (@Tx[1],@X[0]) if ($Xi<19);
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns)); # rol
+ eval(shift(@insns));
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+ push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ eval(shift(@insns));
+ &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ &cmp ($inp,$len);
+ &je (".Ldone_avx");
+
+ unshift(@Tx,pop(@Tx));
+
+ &vmovdqa(@X[2],"64($K_XX_XX)"); # pbswap mask
+ &vmovdqa(@Tx[1],"0($K_XX_XX)"); # K_00_19
+ &vmovdqu(@X[-4&7],"0($inp)"); # load input
+ &vmovdqu(@X[-3&7],"16($inp)");
+ &vmovdqu(@X[-2&7],"32($inp)");
+ &vmovdqu(@X[-1&7],"48($inp)");
+ &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap
+ &add ($inp,64);
+
+ $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]); # X[]+K xfer to IALU
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ foreach (@insns) { eval; }
+ $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
+ my ($a,$b,$c,$d,$e);
+
+ foreach (@insns) { eval; }
+}
+
+$code.=<<___;
+.align 16
+.Loop_avx:
+___
+ &Xupdate_avx_16_31(\&body_00_19);
+ &Xupdate_avx_16_31(\&body_00_19);
+ &Xupdate_avx_16_31(\&body_00_19);
+ &Xupdate_avx_16_31(\&body_00_19);
+ &Xupdate_avx_32_79(\&body_00_19);
+ &Xupdate_avx_32_79(\&body_20_39);
+ &Xupdate_avx_32_79(\&body_20_39);
+ &Xupdate_avx_32_79(\&body_20_39);
+ &Xupdate_avx_32_79(\&body_20_39);
+ &Xupdate_avx_32_79(\&body_20_39);
+ &Xupdate_avx_32_79(\&body_40_59);
+ &Xupdate_avx_32_79(\&body_40_59);
+ &Xupdate_avx_32_79(\&body_40_59);
+ &Xupdate_avx_32_79(\&body_40_59);
+ &Xupdate_avx_32_79(\&body_40_59);
+ &Xupdate_avx_32_79(\&body_20_39);
+ &Xuplast_avx_80(\&body_20_39); # can jump to "done"
+
+ $saved_j=$j; @saved_V=@V;
+ $saved_r=$r; @saved_rndkey=@rndkey;
+
+ &Xloop_avx(\&body_20_39);
+ &Xloop_avx(\&body_20_39);
+ &Xloop_avx(\&body_20_39);
+
+$code.=<<___;
+ vmovups $iv,48($out,$in0) # write output
+ lea 64($in0),$in0
+
+ add 0($ctx),$A # update context
+ add 4($ctx),@T[0]
+ add 8($ctx),$C
+ add 12($ctx),$D
+ mov $A,0($ctx)
+ add 16($ctx),$E
+ mov @T[0],4($ctx)
+ mov @T[0],$B # magic seed
+ mov $C,8($ctx)
+ mov $D,12($ctx)
+ mov $E,16($ctx)
+ jmp .Loop_avx
+
+.align 16
+.Ldone_avx:
+___
+ $jj=$j=$saved_j; @V=@saved_V;
+ $r=$saved_r; @rndkey=@saved_rndkey;
+
+ &Xtail_avx(\&body_20_39);
+ &Xtail_avx(\&body_20_39);
+ &Xtail_avx(\&body_20_39);
+
+$code.=<<___;
+ vmovups $iv,48($out,$in0) # write output
+ mov 88(%rsp),$ivp # restore $ivp
+
+ add 0($ctx),$A # update context
+ add 4($ctx),@T[0]
+ add 8($ctx),$C
+ mov $A,0($ctx)
+ add 12($ctx),$D
+ mov @T[0],4($ctx)
+ add 16($ctx),$E
+ mov $C,8($ctx)
+ mov $D,12($ctx)
+ mov $E,16($ctx)
+ vmovups $iv,($ivp) # write IV
+ vzeroall
+___
+$code.=<<___ if ($win64);
+ movaps 96+0(%rsp),%xmm6
+ movaps 96+16(%rsp),%xmm7
+ movaps 96+32(%rsp),%xmm8
+ movaps 96+48(%rsp),%xmm9
+ movaps 96+64(%rsp),%xmm10
+ movaps 96+80(%rsp),%xmm11
+ movaps 96+96(%rsp),%xmm12
+ movaps 96+112(%rsp),%xmm13
+ movaps 96+128(%rsp),%xmm14
+ movaps 96+144(%rsp),%xmm15
+___
+$code.=<<___;
+ lea `104+($win64?10*16:0)`(%rsp),%rsi
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_avx:
+ ret
+.size aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
+___
+}
+$code.=<<___;
+.align 64
+K_XX_XX:
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
+
+.asciz "AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type ssse3_handler,\@abi-omnipotent
+.align 16
+ssse3_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ lea 96(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+ lea `104+10*16`(%rax),%rax # adjust stack pointer
+
+ mov 0(%rax),%r15
+ mov 8(%rax),%r14
+ mov 16(%rax),%r13
+ mov 24(%rax),%r12
+ mov 32(%rax),%rbp
+ mov 40(%rax),%rbx
+ lea 48(%rax),%rax
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size ssse3_handler,.-ssse3_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_aesni_cbc_sha1_enc_ssse3
+ .rva .LSEH_end_aesni_cbc_sha1_enc_ssse3
+ .rva .LSEH_info_aesni_cbc_sha1_enc_ssse3
+___
+$code.=<<___ if ($avx);
+ .rva .LSEH_begin_aesni_cbc_sha1_enc_avx
+ .rva .LSEH_end_aesni_cbc_sha1_enc_avx
+ .rva .LSEH_info_aesni_cbc_sha1_enc_avx
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_aesni_cbc_sha1_enc_ssse3:
+ .byte 9,0,0,0
+ .rva ssse3_handler
+ .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_aesni_cbc_sha1_enc_avx:
+ .byte 9,0,0,0
+ .rva ssse3_handler
+ .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
+___
+}
+
+####################################################################
+sub rex {
+ local *opcode=shift;
+ my ($dst,$src)=@_;
+ my $rex=0;
+
+ $rex|=0x04 if($dst>=8);
+ $rex|=0x01 if($src>=8);
+ push @opcode,$rex|0x40 if($rex);
+}
+
+sub aesni {
+ my $line=shift;
+ my @opcode=(0x66);
+
+ if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ my %opcodelet = (
+ "aesenc" => 0xdc, "aesenclast" => 0xdd
+ );
+ return undef if (!defined($opcodelet{$1}));
+ rex(\@opcode,$3,$2);
+ push @opcode,0x0f,0x38,$opcodelet{$1};
+ push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ }
+ return $line;
+}
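+
+# For illustration (a hypothetical input line, not taken from this
+# file): "aesenc %xmm9,%xmm10" would be rewritten by the translator
+# above as
+#	.byte	0x66,0x45,0x0f,0x38,0xdc,0xd1
+# i.e. the 0x66 prefix, REX 0x45 (0x40|R|B, both registers >= 8), the
+# 0x0f,0x38 escape, opcode 0xdc and ModR/M 0xc0|((10&7)<<3)|(9&7).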
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/aes/asm/aesni-x86.pl b/openssl/crypto/aes/asm/aesni-x86.pl
new file mode 100644
index 000000000..3dc345b58
--- /dev/null
+++ b/openssl/crypto/aes/asm/aesni-x86.pl
@@ -0,0 +1,2189 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for Intel AES-NI extension. In
+# OpenSSL context it's used with Intel engine, but can also be used as
+# drop-in replacement for crypto/aes/asm/aes-586.pl [see below for
+# details].
+#
+# Performance.
+#
+# To start with, see the corresponding paragraph in aesni-x86_64.pl...
+# Instead of filling a table similar to the one found there, I've
+# chosen to summarize *comparison* results for raw ECB, CTR and CBC
+# benchmarks. The simplified table below represents 32-bit performance
+# relative to the 64-bit one at every given point. Ratios vary across
+# encryption modes, hence the interval values.
+#
+# 16-byte 64-byte 256-byte 1-KB 8-KB
+# 53-67% 67-84% 91-94% 95-98% 97-99.5%
+#
+# Lower ratios for smaller block sizes are perfectly understandable,
+# because function call overhead is higher in 32-bit mode. Performance
+# on the largest, 8-KB, blocks is virtually the same: 32-bit code is
+# less than 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
+
+# January 2011
+#
+# See aesni-x86_64.pl for details. Unlike the x86_64 version, this
+# module interleaves at most 6 aes[enc|dec] instructions, because
+# there are not enough registers for an 8x interleave [which should be
+# optimal for Sandy Bridge]. In fact, the performance results for the
+# 6x interleave factor presented in aesni-x86_64.pl (except for CTR)
+# are for this module.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
+# one byte out of 8KB with a 128-bit key; Sandy Bridge spends 1.09.
+
+$PREFIX="aesni"; # if $PREFIX is set to "AES", the script
+ # generates drop-in replacement for
+ # crypto/aes/asm/aes-586.pl:-)
+$inline=1; # inline _aesni_[en|de]crypt
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+if ($PREFIX eq "aesni") { $movekey=*movups; }
+else { $movekey=*movups; }
+
+$len="eax";
+$rounds="ecx";
+$key="edx";
+$inp="esi";
+$out="edi";
+$rounds_="ebx"; # backup copy for $rounds
+$key_="ebp"; # backup copy for $key
+
+$rndkey0="xmm0";
+$rndkey1="xmm1";
+$inout0="xmm2";
+$inout1="xmm3";
+$inout2="xmm4";
+$inout3="xmm5"; $in1="xmm5";
+$inout4="xmm6"; $in0="xmm6";
+$inout5="xmm7"; $ivec="xmm7";
+
+# AESNI extension
+sub aeskeygenassist
+{ my($dst,$src,$imm)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm); }
+}
+sub aescommon
+{ my($opcodelet,$dst,$src)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
+}
+sub aesimc { aescommon(0xdb,@_); }
+sub aesenc { aescommon(0xdc,@_); }
+sub aesenclast { aescommon(0xdd,@_); }
+sub aesdec { aescommon(0xde,@_); }
+sub aesdeclast { aescommon(0xdf,@_); }
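+
+# E.g. &aesenc($inout0,$rndkey1), i.e. xmm2/xmm1, assembles via the
+# helpers above to ".byte 0x66,0x0f,0x38,0xdc,0xd1", the encoding of
+# "aesenc %xmm1,%xmm2"; no REX handling is needed here since only
+# xmm0-xmm7 exist in 32-bit mode.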
+
+# Inline version of internal aesni_[en|de]crypt1
+{ my $sn;
+sub aesni_inline_generate1
+{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
+ $sn++;
+
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &$movekey ($rndkey1,&QWP(16,$key));
+ &xorps ($ivec,$rndkey0) if (defined($ivec));
+ &lea ($key,&DWP(32,$key));
+ &xorps ($inout,$ivec) if (defined($ivec));
+ &xorps ($inout,$rndkey0) if (!defined($ivec));
+ &set_label("${p}1_loop_$sn");
+ eval"&aes${p} ($inout,$rndkey1)";
+ &dec ($rounds);
+ &$movekey ($rndkey1,&QWP(0,$key));
+ &lea ($key,&DWP(16,$key));
+ &jnz (&label("${p}1_loop_$sn"));
+ eval"&aes${p}last ($inout,$rndkey1)";
+}}
+
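+# The fully unrolled variant below dispatches on the round count pulled
+# from the key schedule: &cmp($rounds,11)/&jb takes the 128-bit entry
+# point, &je the 192-bit one, and larger values fall through the two
+# extra 256-bit rounds into the shared tail.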
+sub aesni_generate1 # fully unrolled loop
+{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
+
+ &function_begin_B("_aesni_${p}rypt1");
+ &movups ($rndkey0,&QWP(0,$key));
+ &$movekey ($rndkey1,&QWP(0x10,$key));
+ &xorps ($inout,$rndkey0);
+ &$movekey ($rndkey0,&QWP(0x20,$key));
+ &lea ($key,&DWP(0x30,$key));
+ &cmp ($rounds,11);
+ &jb (&label("${p}128"));
+ &lea ($key,&DWP(0x20,$key));
+ &je (&label("${p}192"));
+ &lea ($key,&DWP(0x20,$key));
+ eval"&aes${p} ($inout,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(-0x40,$key));
+ eval"&aes${p} ($inout,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(-0x30,$key));
+ &set_label("${p}192");
+ eval"&aes${p} ($inout,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(-0x20,$key));
+ eval"&aes${p} ($inout,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(-0x10,$key));
+ &set_label("${p}128");
+ eval"&aes${p} ($inout,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(0,$key));
+ eval"&aes${p} ($inout,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0x10,$key));
+ eval"&aes${p} ($inout,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(0x20,$key));
+ eval"&aes${p} ($inout,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0x30,$key));
+ eval"&aes${p} ($inout,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(0x40,$key));
+ eval"&aes${p} ($inout,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0x50,$key));
+ eval"&aes${p} ($inout,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(0x60,$key));
+ eval"&aes${p} ($inout,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0x70,$key));
+ eval"&aes${p} ($inout,$rndkey1)";
+ eval"&aes${p}last ($inout,$rndkey0)";
+ &ret();
+ &function_end_B("_aesni_${p}rypt1");
+}
+
+# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("enc") if (!$inline);
+&function_begin_B("${PREFIX}_encrypt");
+ &mov ("eax",&wparam(0));
+ &mov ($key,&wparam(2));
+ &movups ($inout0,&QWP(0,"eax"));
+ &mov ($rounds,&DWP(240,$key));
+ &mov ("eax",&wparam(1));
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+ &movups (&QWP(0,"eax"),$inout0);
+ &ret ();
+&function_end_B("${PREFIX}_encrypt");
+
+# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("dec") if(!$inline);
+&function_begin_B("${PREFIX}_decrypt");
+ &mov ("eax",&wparam(0));
+ &mov ($key,&wparam(2));
+ &movups ($inout0,&QWP(0,"eax"));
+ &mov ($rounds,&DWP(240,$key));
+ &mov ("eax",&wparam(1));
+ if ($inline)
+ { &aesni_inline_generate1("dec"); }
+ else
+ { &call ("_aesni_decrypt1"); }
+ &movups (&QWP(0,"eax"),$inout0);
+ &ret ();
+&function_end_B("${PREFIX}_decrypt");
+
+# _aesni_[en|de]cryptN are private interfaces, where N denotes the
+# interleave factor. Why was a 3x subroutine originally used in loops?
+# Even though aes[enc|dec] latency was originally 6, it could be
+# scheduled only every *2nd* cycle. Thus 3x interleave provided optimal
+# utilization, i.e. the subroutine's throughput was virtually the same
+# as that of the non-interleaved one [for up to 3 input blocks], which
+# is why it makes no sense to implement a 2x subroutine. aes[enc|dec]
+# latency in the next processor generation is 8, but the instructions
+# can be scheduled every cycle. The optimal interleave for the new
+# processor is therefore 8x, but it's unfeasible to accommodate that
+# in the XMM registers addressable in 32-bit mode, so 6x is used
+# instead...
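+#
+# (In other words, with latency L and dispatch interval D it takes
+# ceil(L/D) independent blocks to keep the unit busy: ceil(6/2)=3 on
+# the original core and ceil(8/1)=8 on the newer one; the latter does
+# not fit the eight XMM registers visible to 32-bit code.)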
+
+sub aesni_generate3
+{ my $p=shift;
+
+ &function_begin_B("_aesni_${p}rypt3");
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &shr ($rounds,1);
+ &$movekey ($rndkey1,&QWP(16,$key));
+ &lea ($key,&DWP(32,$key));
+ &xorps ($inout0,$rndkey0);
+ &pxor ($inout1,$rndkey0);
+ &pxor ($inout2,$rndkey0);
+ &$movekey ($rndkey0,&QWP(0,$key));
+
+ &set_label("${p}3_loop");
+ eval"&aes${p} ($inout0,$rndkey1)";
+ eval"&aes${p} ($inout1,$rndkey1)";
+ &dec ($rounds);
+ eval"&aes${p} ($inout2,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(16,$key));
+ eval"&aes${p} ($inout0,$rndkey0)";
+ eval"&aes${p} ($inout1,$rndkey0)";
+ &lea ($key,&DWP(32,$key));
+ eval"&aes${p} ($inout2,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &jnz (&label("${p}3_loop"));
+ eval"&aes${p} ($inout0,$rndkey1)";
+ eval"&aes${p} ($inout1,$rndkey1)";
+ eval"&aes${p} ($inout2,$rndkey1)";
+ eval"&aes${p}last ($inout0,$rndkey0)";
+ eval"&aes${p}last ($inout1,$rndkey0)";
+ eval"&aes${p}last ($inout2,$rndkey0)";
+ &ret();
+ &function_end_B("_aesni_${p}rypt3");
+}
+
+# 4x interleave is implemented to improve small-block performance,
+# most notably [and naturally] the 4-block case by ~30%. One can argue
+# that 5x should have been implemented as well, but the improvement
+# would be <20%, so it's not worth it...
+sub aesni_generate4
+{ my $p=shift;
+
+ &function_begin_B("_aesni_${p}rypt4");
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &$movekey ($rndkey1,&QWP(16,$key));
+ &shr ($rounds,1);
+ &lea ($key,&DWP(32,$key));
+ &xorps ($inout0,$rndkey0);
+ &pxor ($inout1,$rndkey0);
+ &pxor ($inout2,$rndkey0);
+ &pxor ($inout3,$rndkey0);
+ &$movekey ($rndkey0,&QWP(0,$key));
+
+ &set_label("${p}4_loop");
+ eval"&aes${p} ($inout0,$rndkey1)";
+ eval"&aes${p} ($inout1,$rndkey1)";
+ &dec ($rounds);
+ eval"&aes${p} ($inout2,$rndkey1)";
+ eval"&aes${p} ($inout3,$rndkey1)";
+ &$movekey ($rndkey1,&QWP(16,$key));
+ eval"&aes${p} ($inout0,$rndkey0)";
+ eval"&aes${p} ($inout1,$rndkey0)";
+ &lea ($key,&DWP(32,$key));
+ eval"&aes${p} ($inout2,$rndkey0)";
+ eval"&aes${p} ($inout3,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &jnz (&label("${p}4_loop"));
+
+ eval"&aes${p} ($inout0,$rndkey1)";
+ eval"&aes${p} ($inout1,$rndkey1)";
+ eval"&aes${p} ($inout2,$rndkey1)";
+ eval"&aes${p} ($inout3,$rndkey1)";
+ eval"&aes${p}last ($inout0,$rndkey0)";
+ eval"&aes${p}last ($inout1,$rndkey0)";
+ eval"&aes${p}last ($inout2,$rndkey0)";
+ eval"&aes${p}last ($inout3,$rndkey0)";
+ &ret();
+ &function_end_B("_aesni_${p}rypt4");
+}
+
+sub aesni_generate6
+{ my $p=shift;
+
+ &function_begin_B("_aesni_${p}rypt6");
+ &static_label("_aesni_${p}rypt6_enter");
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &shr ($rounds,1);
+ &$movekey ($rndkey1,&QWP(16,$key));
+ &lea ($key,&DWP(32,$key));
+ &xorps ($inout0,$rndkey0);
+ &pxor ($inout1,$rndkey0); # pxor does better here
+ eval"&aes${p} ($inout0,$rndkey1)";
+ &pxor ($inout2,$rndkey0);
+ eval"&aes${p} ($inout1,$rndkey1)";
+ &pxor ($inout3,$rndkey0);
+ &dec ($rounds);
+ eval"&aes${p} ($inout2,$rndkey1)";
+ &pxor ($inout4,$rndkey0);
+ eval"&aes${p} ($inout3,$rndkey1)";
+ &pxor ($inout5,$rndkey0);
+ eval"&aes${p} ($inout4,$rndkey1)";
+ &$movekey ($rndkey0,&QWP(0,$key));
+ eval"&aes${p} ($inout5,$rndkey1)";
+ &jmp (&label("_aesni_${p}rypt6_enter"));
+
+ &set_label("${p}6_loop",16);
+ eval"&aes${p} ($inout0,$rndkey1)";
+ eval"&aes${p} ($inout1,$rndkey1)";
+ &dec ($rounds);
+ eval"&aes${p} ($inout2,$rndkey1)";
+ eval"&aes${p} ($inout3,$rndkey1)";
+ eval"&aes${p} ($inout4,$rndkey1)";
+ eval"&aes${p} ($inout5,$rndkey1)";
+ &set_label("_aesni_${p}rypt6_enter",16);
+ &$movekey ($rndkey1,&QWP(16,$key));
+ eval"&aes${p} ($inout0,$rndkey0)";
+ eval"&aes${p} ($inout1,$rndkey0)";
+ &lea ($key,&DWP(32,$key));
+ eval"&aes${p} ($inout2,$rndkey0)";
+ eval"&aes${p} ($inout3,$rndkey0)";
+ eval"&aes${p} ($inout4,$rndkey0)";
+ eval"&aes${p} ($inout5,$rndkey0)";
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &jnz (&label("${p}6_loop"));
+
+ eval"&aes${p} ($inout0,$rndkey1)";
+ eval"&aes${p} ($inout1,$rndkey1)";
+ eval"&aes${p} ($inout2,$rndkey1)";
+ eval"&aes${p} ($inout3,$rndkey1)";
+ eval"&aes${p} ($inout4,$rndkey1)";
+ eval"&aes${p} ($inout5,$rndkey1)";
+ eval"&aes${p}last ($inout0,$rndkey0)";
+ eval"&aes${p}last ($inout1,$rndkey0)";
+ eval"&aes${p}last ($inout2,$rndkey0)";
+ eval"&aes${p}last ($inout3,$rndkey0)";
+ eval"&aes${p}last ($inout4,$rndkey0)";
+ eval"&aes${p}last ($inout5,$rndkey0)";
+ &ret();
+ &function_end_B("_aesni_${p}rypt6");
+}
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+
+if ($PREFIX eq "aesni") {
+######################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+# size_t length, const AES_KEY *key,
+# int enc);
+&function_begin("aesni_ecb_encrypt");
+ &mov ($inp,&wparam(0));
+ &mov ($out,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ($key,&wparam(3));
+ &mov ($rounds_,&wparam(4));
+ &and ($len,-16);
+ &jz (&label("ecb_ret"));
+ &mov ($rounds,&DWP(240,$key));
+ &test ($rounds_,$rounds_);
+ &jz (&label("ecb_decrypt"));
+
+ &mov ($key_,$key); # backup $key
+ &mov ($rounds_,$rounds); # backup $rounds
+ &cmp ($len,0x60);
+ &jb (&label("ecb_enc_tail"));
+
+ &movdqu ($inout0,&QWP(0,$inp));
+ &movdqu ($inout1,&QWP(0x10,$inp));
+ &movdqu ($inout2,&QWP(0x20,$inp));
+ &movdqu ($inout3,&QWP(0x30,$inp));
+ &movdqu ($inout4,&QWP(0x40,$inp));
+ &movdqu ($inout5,&QWP(0x50,$inp));
+ &lea ($inp,&DWP(0x60,$inp));
+ &sub ($len,0x60);
+ &jmp (&label("ecb_enc_loop6_enter"));
+
+&set_label("ecb_enc_loop6",16);
+ &movups (&QWP(0,$out),$inout0);
+ &movdqu ($inout0,&QWP(0,$inp));
+ &movups (&QWP(0x10,$out),$inout1);
+ &movdqu ($inout1,&QWP(0x10,$inp));
+ &movups (&QWP(0x20,$out),$inout2);
+ &movdqu ($inout2,&QWP(0x20,$inp));
+ &movups (&QWP(0x30,$out),$inout3);
+ &movdqu ($inout3,&QWP(0x30,$inp));
+ &movups (&QWP(0x40,$out),$inout4);
+ &movdqu ($inout4,&QWP(0x40,$inp));
+ &movups (&QWP(0x50,$out),$inout5);
+ &lea ($out,&DWP(0x60,$out));
+ &movdqu ($inout5,&QWP(0x50,$inp));
+ &lea ($inp,&DWP(0x60,$inp));
+&set_label("ecb_enc_loop6_enter");
+
+ &call ("_aesni_encrypt6");
+
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds,$rounds_); # restore $rounds
+ &sub ($len,0x60);
+ &jnc (&label("ecb_enc_loop6"));
+
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &movups (&QWP(0x40,$out),$inout4);
+ &movups (&QWP(0x50,$out),$inout5);
+ &lea ($out,&DWP(0x60,$out));
+ &add ($len,0x60);
+ &jz (&label("ecb_ret"));
+
+&set_label("ecb_enc_tail");
+ &movups ($inout0,&QWP(0,$inp));
+ &cmp ($len,0x20);
+ &jb (&label("ecb_enc_one"));
+ &movups ($inout1,&QWP(0x10,$inp));
+ &je (&label("ecb_enc_two"));
+ &movups ($inout2,&QWP(0x20,$inp));
+ &cmp ($len,0x40);
+ &jb (&label("ecb_enc_three"));
+ &movups ($inout3,&QWP(0x30,$inp));
+ &je (&label("ecb_enc_four"));
+ &movups ($inout4,&QWP(0x40,$inp));
+ &xorps ($inout5,$inout5);
+ &call ("_aesni_encrypt6");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &movups (&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_one",16);
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+ &movups (&QWP(0,$out),$inout0);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_enc_two",16);
+ &xorps ($inout2,$inout2);
+ &call ("_aesni_encrypt3");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_enc_three",16);
+ &call ("_aesni_encrypt3");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_enc_four",16);
+ &call ("_aesni_encrypt4");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &jmp (&label("ecb_ret"));
+######################################################################
+&set_label("ecb_decrypt",16);
+ &mov ($key_,$key); # backup $key
+ &mov ($rounds_,$rounds); # backup $rounds
+ &cmp ($len,0x60);
+ &jb (&label("ecb_dec_tail"));
+
+ &movdqu ($inout0,&QWP(0,$inp));
+ &movdqu ($inout1,&QWP(0x10,$inp));
+ &movdqu ($inout2,&QWP(0x20,$inp));
+ &movdqu ($inout3,&QWP(0x30,$inp));
+ &movdqu ($inout4,&QWP(0x40,$inp));
+ &movdqu ($inout5,&QWP(0x50,$inp));
+ &lea ($inp,&DWP(0x60,$inp));
+ &sub ($len,0x60);
+ &jmp (&label("ecb_dec_loop6_enter"));
+
+&set_label("ecb_dec_loop6",16);
+ &movups (&QWP(0,$out),$inout0);
+ &movdqu ($inout0,&QWP(0,$inp));
+ &movups (&QWP(0x10,$out),$inout1);
+ &movdqu ($inout1,&QWP(0x10,$inp));
+ &movups (&QWP(0x20,$out),$inout2);
+ &movdqu ($inout2,&QWP(0x20,$inp));
+ &movups (&QWP(0x30,$out),$inout3);
+ &movdqu ($inout3,&QWP(0x30,$inp));
+ &movups (&QWP(0x40,$out),$inout4);
+ &movdqu ($inout4,&QWP(0x40,$inp));
+ &movups (&QWP(0x50,$out),$inout5);
+ &lea ($out,&DWP(0x60,$out));
+ &movdqu ($inout5,&QWP(0x50,$inp));
+ &lea ($inp,&DWP(0x60,$inp));
+&set_label("ecb_dec_loop6_enter");
+
+ &call ("_aesni_decrypt6");
+
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds,$rounds_); # restore $rounds
+ &sub ($len,0x60);
+ &jnc (&label("ecb_dec_loop6"));
+
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &movups (&QWP(0x40,$out),$inout4);
+ &movups (&QWP(0x50,$out),$inout5);
+ &lea ($out,&DWP(0x60,$out));
+ &add ($len,0x60);
+ &jz (&label("ecb_ret"));
+
+&set_label("ecb_dec_tail");
+ &movups ($inout0,&QWP(0,$inp));
+ &cmp ($len,0x20);
+ &jb (&label("ecb_dec_one"));
+ &movups ($inout1,&QWP(0x10,$inp));
+ &je (&label("ecb_dec_two"));
+ &movups ($inout2,&QWP(0x20,$inp));
+ &cmp ($len,0x40);
+ &jb (&label("ecb_dec_three"));
+ &movups ($inout3,&QWP(0x30,$inp));
+ &je (&label("ecb_dec_four"));
+ &movups ($inout4,&QWP(0x40,$inp));
+ &xorps ($inout5,$inout5);
+ &call ("_aesni_decrypt6");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &movups (&QWP(0x40,$out),$inout4);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_dec_one",16);
+ if ($inline)
+ { &aesni_inline_generate1("dec"); }
+ else
+ { &call ("_aesni_decrypt1"); }
+ &movups (&QWP(0,$out),$inout0);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_dec_two",16);
+ &xorps ($inout2,$inout2);
+ &call ("_aesni_decrypt3");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_dec_three",16);
+ &call ("_aesni_decrypt3");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &jmp (&label("ecb_ret"));
+
+&set_label("ecb_dec_four",16);
+ &call ("_aesni_decrypt4");
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+
+&set_label("ecb_ret");
+&function_end("aesni_ecb_encrypt");
+
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+# size_t blocks, const AES_KEY *key,
+# const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on a 64-bit counter and
+# does not update *ivec! Nor does it finalize the CMAC value
+# (see engine/eng_aesni.c for details)
+#
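+# Per block, encryption computes out = inp ^ E_k(ctr) and folds the
+# plaintext into the MAC as cmac = E_k(cmac ^ inp); decryption folds
+# the recovered plaintext instead. These two AES invocations are what
+# the 2x-interleaved loops below run side by side.
+#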
+{ my $cmac=$inout1;
+&function_begin("aesni_ccm64_encrypt_blocks");
+ &mov ($inp,&wparam(0));
+ &mov ($out,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ($key,&wparam(3));
+ &mov ($rounds_,&wparam(4));
+ &mov ($rounds,&wparam(5));
+ &mov ($key_,"esp");
+ &sub ("esp",60);
+ &and ("esp",-16); # align stack
+ &mov (&DWP(48,"esp"),$key_);
+
+ &movdqu ($ivec,&QWP(0,$rounds_)); # load ivec
+ &movdqu ($cmac,&QWP(0,$rounds)); # load cmac
+ &mov ($rounds,&DWP(240,$key));
+
+ # compose byte-swap control mask for pshufb on stack
+ &mov (&DWP(0,"esp"),0x0c0d0e0f);
+ &mov (&DWP(4,"esp"),0x08090a0b);
+ &mov (&DWP(8,"esp"),0x04050607);
+ &mov (&DWP(12,"esp"),0x00010203);
+
+ # compose counter increment vector on stack
+ &mov ($rounds_,1);
+ &xor ($key_,$key_);
+ &mov (&DWP(16,"esp"),$rounds_);
+ &mov (&DWP(20,"esp"),$key_);
+ &mov (&DWP(24,"esp"),$key_);
+ &mov (&DWP(28,"esp"),$key_);
+
+ &shr ($rounds,1);
+ &lea ($key_,&DWP(0,$key));
+ &movdqa ($inout3,&QWP(0,"esp"));
+ &movdqa ($inout0,$ivec);
+ &mov ($rounds_,$rounds);
+ &pshufb ($ivec,$inout3);
+
+&set_label("ccm64_enc_outer");
+ &$movekey ($rndkey0,&QWP(0,$key_));
+ &mov ($rounds,$rounds_);
+ &movups ($in0,&QWP(0,$inp));
+
+ &xorps ($inout0,$rndkey0);
+ &$movekey ($rndkey1,&QWP(16,$key_));
+ &xorps ($rndkey0,$in0);
+ &lea ($key,&DWP(32,$key_));
+ &xorps ($cmac,$rndkey0); # cmac^=inp
+ &$movekey ($rndkey0,&QWP(0,$key));
+
+&set_label("ccm64_enc2_loop");
+ &aesenc ($inout0,$rndkey1);
+ &dec ($rounds);
+ &aesenc ($cmac,$rndkey1);
+ &$movekey ($rndkey1,&QWP(16,$key));
+ &aesenc ($inout0,$rndkey0);
+ &lea ($key,&DWP(32,$key));
+ &aesenc ($cmac,$rndkey0);
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &jnz (&label("ccm64_enc2_loop"));
+ &aesenc ($inout0,$rndkey1);
+ &aesenc ($cmac,$rndkey1);
+ &paddq ($ivec,&QWP(16,"esp"));
+ &aesenclast ($inout0,$rndkey0);
+ &aesenclast ($cmac,$rndkey0);
+
+ &dec ($len);
+ &lea ($inp,&DWP(16,$inp));
+ &xorps ($in0,$inout0); # inp^=E(ivec)
+ &movdqa ($inout0,$ivec);
+ &movups (&QWP(0,$out),$in0); # save output
+ &lea ($out,&DWP(16,$out));
+ &pshufb ($inout0,$inout3);
+ &jnz (&label("ccm64_enc_outer"));
+
+ &mov ("esp",&DWP(48,"esp"));
+ &mov ($out,&wparam(5));
+ &movups (&QWP(0,$out),$cmac);
+&function_end("aesni_ccm64_encrypt_blocks");
+
+&function_begin("aesni_ccm64_decrypt_blocks");
+ &mov ($inp,&wparam(0));
+ &mov ($out,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ($key,&wparam(3));
+ &mov ($rounds_,&wparam(4));
+ &mov ($rounds,&wparam(5));
+ &mov ($key_,"esp");
+ &sub ("esp",60);
+ &and ("esp",-16); # align stack
+ &mov (&DWP(48,"esp"),$key_);
+
+ &movdqu ($ivec,&QWP(0,$rounds_)); # load ivec
+ &movdqu ($cmac,&QWP(0,$rounds)); # load cmac
+ &mov ($rounds,&DWP(240,$key));
+
+ # compose byte-swap control mask for pshufb on stack
+ &mov (&DWP(0,"esp"),0x0c0d0e0f);
+ &mov (&DWP(4,"esp"),0x08090a0b);
+ &mov (&DWP(8,"esp"),0x04050607);
+ &mov (&DWP(12,"esp"),0x00010203);
+
+ # compose counter increment vector on stack
+ &mov ($rounds_,1);
+ &xor ($key_,$key_);
+ &mov (&DWP(16,"esp"),$rounds_);
+ &mov (&DWP(20,"esp"),$key_);
+ &mov (&DWP(24,"esp"),$key_);
+ &mov (&DWP(28,"esp"),$key_);
+
+ &movdqa ($inout3,&QWP(0,"esp")); # bswap mask
+ &movdqa ($inout0,$ivec);
+
+ &mov ($key_,$key);
+ &mov ($rounds_,$rounds);
+
+ &pshufb ($ivec,$inout3);
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+ &movups ($in0,&QWP(0,$inp)); # load inp
+ &paddq ($ivec,&QWP(16,"esp"));
+ &lea ($inp,&QWP(16,$inp));
+ &jmp (&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_outer",16);
+ &xorps ($in0,$inout0); # inp ^= E(ivec)
+ &movdqa ($inout0,$ivec);
+ &mov ($rounds,$rounds_);
+ &movups (&QWP(0,$out),$in0); # save output
+ &lea ($out,&DWP(16,$out));
+ &pshufb ($inout0,$inout3);
+
+ &sub ($len,1);
+ &jz (&label("ccm64_dec_break"));
+
+ &$movekey ($rndkey0,&QWP(0,$key_));
+ &shr ($rounds,1);
+ &$movekey ($rndkey1,&QWP(16,$key_));
+ &xorps ($in0,$rndkey0);
+ &lea ($key,&DWP(32,$key_));
+ &xorps ($inout0,$rndkey0);
+ &xorps ($cmac,$in0); # cmac^=out
+ &$movekey ($rndkey0,&QWP(0,$key));
+
+&set_label("ccm64_dec2_loop");
+ &aesenc ($inout0,$rndkey1);
+ &dec ($rounds);
+ &aesenc ($cmac,$rndkey1);
+ &$movekey ($rndkey1,&QWP(16,$key));
+ &aesenc ($inout0,$rndkey0);
+ &lea ($key,&DWP(32,$key));
+ &aesenc ($cmac,$rndkey0);
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &jnz (&label("ccm64_dec2_loop"));
+ &movups ($in0,&QWP(0,$inp)); # load inp
+ &paddq ($ivec,&QWP(16,"esp"));
+ &aesenc ($inout0,$rndkey1);
+ &aesenc ($cmac,$rndkey1);
+ &lea ($inp,&QWP(16,$inp));
+ &aesenclast ($inout0,$rndkey0);
+ &aesenclast ($cmac,$rndkey0);
+ &jmp (&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_break",16);
+ &mov ($key,$key_);
+ if ($inline)
+ { &aesni_inline_generate1("enc",$cmac,$in0); }
+ else
+ { &call ("_aesni_encrypt1",$cmac); }
+
+ &mov ("esp",&DWP(48,"esp"));
+ &mov ($out,&wparam(5));
+ &movups (&QWP(0,$out),$cmac);
+&function_end("aesni_ccm64_decrypt_blocks");
+}
+
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+# size_t blocks, const AES_KEY *key,
+# const char *ivec);
+#
+# Handles only complete blocks, operates on a 32-bit counter and
+# does not update *ivec! (see engine/eng_aesni.c for details)
+#
+# stack layout:
+# 0 pshufb mask
+# 16 vector addend: 0,6,6,6
+# 32 counter-less ivec
+# 48 1st triplet of counter vector
+# 64 2nd triplet of counter vector
+# 80 saved %esp
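+#
+# Counter scheme in brief (assuming an initial 32-bit counter n): the
+# 1st triplet carries n,n+1,n+2 and the 2nd n+3,n+4,n+5; after every
+# 6-block iteration both triplets are bumped by the 6,6,6 addend above.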
+
+&function_begin("aesni_ctr32_encrypt_blocks");
+ &mov ($inp,&wparam(0));
+ &mov ($out,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ($key,&wparam(3));
+ &mov ($rounds_,&wparam(4));
+ &mov ($key_,"esp");
+ &sub ("esp",88);
+ &and ("esp",-16); # align stack
+ &mov (&DWP(80,"esp"),$key_);
+
+ &cmp ($len,1);
+ &je (&label("ctr32_one_shortcut"));
+
+ &movdqu ($inout5,&QWP(0,$rounds_)); # load ivec
+
+ # compose byte-swap control mask for pshufb on stack
+ &mov (&DWP(0,"esp"),0x0c0d0e0f);
+ &mov (&DWP(4,"esp"),0x08090a0b);
+ &mov (&DWP(8,"esp"),0x04050607);
+ &mov (&DWP(12,"esp"),0x00010203);
+
+ # compose counter increment vector on stack
+ &mov ($rounds,6);
+ &xor ($key_,$key_);
+ &mov (&DWP(16,"esp"),$rounds);
+ &mov (&DWP(20,"esp"),$rounds);
+ &mov (&DWP(24,"esp"),$rounds);
+ &mov (&DWP(28,"esp"),$key_);
+
+ &pextrd ($rounds_,$inout5,3); # pull 32-bit counter
+ &pinsrd ($inout5,$key_,3); # wipe 32-bit counter
+
+ &mov ($rounds,&DWP(240,$key)); # key->rounds
+
+ # compose 2 vectors of 3x32-bit counters
+ &bswap ($rounds_);
+ &pxor ($rndkey1,$rndkey1);
+ &pxor ($rndkey0,$rndkey0);
+ &movdqa ($inout0,&QWP(0,"esp")); # load byte-swap mask
+ &pinsrd ($rndkey1,$rounds_,0);
+ &lea ($key_,&DWP(3,$rounds_));
+ &pinsrd ($rndkey0,$key_,0);
+ &inc ($rounds_);
+ &pinsrd ($rndkey1,$rounds_,1);
+ &inc ($key_);
+ &pinsrd ($rndkey0,$key_,1);
+ &inc ($rounds_);
+ &pinsrd ($rndkey1,$rounds_,2);
+ &inc ($key_);
+ &pinsrd ($rndkey0,$key_,2);
+ &movdqa (&QWP(48,"esp"),$rndkey1); # save 1st triplet
+ &pshufb ($rndkey1,$inout0); # byte swap
+ &movdqa (&QWP(64,"esp"),$rndkey0); # save 2nd triplet
+ &pshufb ($rndkey0,$inout0); # byte swap
+
+ &pshufd ($inout0,$rndkey1,3<<6); # place counter to upper dword
+ &pshufd ($inout1,$rndkey1,2<<6);
+ &cmp ($len,6);
+ &jb (&label("ctr32_tail"));
+ &movdqa (&QWP(32,"esp"),$inout5); # save counter-less ivec
+ &shr ($rounds,1);
+ &mov ($key_,$key); # backup $key
+ &mov ($rounds_,$rounds); # backup $rounds
+ &sub ($len,6);
+ &jmp (&label("ctr32_loop6"));
+
+&set_label("ctr32_loop6",16);
+ &pshufd ($inout2,$rndkey1,1<<6);
+ &movdqa ($rndkey1,&QWP(32,"esp")); # pull counter-less ivec
+ &pshufd ($inout3,$rndkey0,3<<6);
+ &por ($inout0,$rndkey1); # merge counter-less ivec
+ &pshufd ($inout4,$rndkey0,2<<6);
+ &por ($inout1,$rndkey1);
+ &pshufd ($inout5,$rndkey0,1<<6);
+ &por ($inout2,$rndkey1);
+ &por ($inout3,$rndkey1);
+ &por ($inout4,$rndkey1);
+ &por ($inout5,$rndkey1);
+
+ # inlining _aesni_encrypt6's prologue gives ~4% improvement...
+ &$movekey ($rndkey0,&QWP(0,$key_));
+ &$movekey ($rndkey1,&QWP(16,$key_));
+ &lea ($key,&DWP(32,$key_));
+ &dec ($rounds);
+ &pxor ($inout0,$rndkey0);
+ &pxor ($inout1,$rndkey0);
+ &aesenc ($inout0,$rndkey1);
+ &pxor ($inout2,$rndkey0);
+ &aesenc ($inout1,$rndkey1);
+ &pxor ($inout3,$rndkey0);
+ &aesenc ($inout2,$rndkey1);
+ &pxor ($inout4,$rndkey0);
+ &aesenc ($inout3,$rndkey1);
+ &pxor ($inout5,$rndkey0);
+ &aesenc ($inout4,$rndkey1);
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &aesenc ($inout5,$rndkey1);
+
+ &call (&label("_aesni_encrypt6_enter"));
+
+ &movups ($rndkey1,&QWP(0,$inp));
+ &movups ($rndkey0,&QWP(0x10,$inp));
+ &xorps ($inout0,$rndkey1);
+ &movups ($rndkey1,&QWP(0x20,$inp));
+ &xorps ($inout1,$rndkey0);
+ &movups (&QWP(0,$out),$inout0);
+ &movdqa ($rndkey0,&QWP(16,"esp")); # load increment
+ &xorps ($inout2,$rndkey1);
+ &movdqa ($rndkey1,&QWP(48,"esp")); # load 1st triplet
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+
+ &paddd ($rndkey1,$rndkey0); # 1st triplet increment
+ &paddd ($rndkey0,&QWP(64,"esp")); # 2nd triplet increment
+ &movdqa ($inout0,&QWP(0,"esp")); # load byte swap mask
+
+ &movups ($inout1,&QWP(0x30,$inp));
+ &movups ($inout2,&QWP(0x40,$inp));
+ &xorps ($inout3,$inout1);
+ &movups ($inout1,&QWP(0x50,$inp));
+ &lea ($inp,&DWP(0x60,$inp));
+ &movdqa (&QWP(48,"esp"),$rndkey1); # save 1st triplet
+ &pshufb ($rndkey1,$inout0); # byte swap
+ &xorps ($inout4,$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &xorps ($inout5,$inout1);
+ &movdqa (&QWP(64,"esp"),$rndkey0); # save 2nd triplet
+ &pshufb ($rndkey0,$inout0); # byte swap
+ &movups (&QWP(0x40,$out),$inout4);
+ &pshufd ($inout0,$rndkey1,3<<6);
+ &movups (&QWP(0x50,$out),$inout5);
+ &lea ($out,&DWP(0x60,$out));
+
+ &mov ($rounds,$rounds_);
+ &pshufd ($inout1,$rndkey1,2<<6);
+ &sub ($len,6);
+ &jnc (&label("ctr32_loop6"));
+
+ &add ($len,6);
+ &jz (&label("ctr32_ret"));
+ &mov ($key,$key_);
+ &lea ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
+	&movdqa	($inout5,&QWP(32,"esp"));	# pull counter-less ivec
+
+&set_label("ctr32_tail");
+ &por ($inout0,$inout5);
+ &cmp ($len,2);
+ &jb (&label("ctr32_one"));
+
+ &pshufd ($inout2,$rndkey1,1<<6);
+ &por ($inout1,$inout5);
+ &je (&label("ctr32_two"));
+
+ &pshufd ($inout3,$rndkey0,3<<6);
+ &por ($inout2,$inout5);
+ &cmp ($len,4);
+ &jb (&label("ctr32_three"));
+
+ &pshufd ($inout4,$rndkey0,2<<6);
+ &por ($inout3,$inout5);
+ &je (&label("ctr32_four"));
+
+ &por ($inout4,$inout5);
+ &call ("_aesni_encrypt6");
+ &movups ($rndkey1,&QWP(0,$inp));
+ &movups ($rndkey0,&QWP(0x10,$inp));
+ &xorps ($inout0,$rndkey1);
+ &movups ($rndkey1,&QWP(0x20,$inp));
+ &xorps ($inout1,$rndkey0);
+ &movups ($rndkey0,&QWP(0x30,$inp));
+ &xorps ($inout2,$rndkey1);
+ &movups ($rndkey1,&QWP(0x40,$inp));
+ &xorps ($inout3,$rndkey0);
+ &movups (&QWP(0,$out),$inout0);
+ &xorps ($inout4,$rndkey1);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &movups (&QWP(0x40,$out),$inout4);
+ &jmp (&label("ctr32_ret"));
+
+&set_label("ctr32_one_shortcut",16);
+ &movups ($inout0,&QWP(0,$rounds_)); # load ivec
+ &mov ($rounds,&DWP(240,$key));
+
+&set_label("ctr32_one");
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+ &movups ($in0,&QWP(0,$inp));
+ &xorps ($in0,$inout0);
+ &movups (&QWP(0,$out),$in0);
+ &jmp (&label("ctr32_ret"));
+
+&set_label("ctr32_two",16);
+ &call ("_aesni_encrypt3");
+ &movups ($inout3,&QWP(0,$inp));
+ &movups ($inout4,&QWP(0x10,$inp));
+ &xorps ($inout0,$inout3);
+ &xorps ($inout1,$inout4);
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &jmp (&label("ctr32_ret"));
+
+&set_label("ctr32_three",16);
+ &call ("_aesni_encrypt3");
+ &movups ($inout3,&QWP(0,$inp));
+ &movups ($inout4,&QWP(0x10,$inp));
+ &xorps ($inout0,$inout3);
+ &movups ($inout5,&QWP(0x20,$inp));
+ &xorps ($inout1,$inout4);
+ &movups (&QWP(0,$out),$inout0);
+ &xorps ($inout2,$inout5);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &jmp (&label("ctr32_ret"));
+
+&set_label("ctr32_four",16);
+ &call ("_aesni_encrypt4");
+ &movups ($inout4,&QWP(0,$inp));
+ &movups ($inout5,&QWP(0x10,$inp));
+ &movups ($rndkey1,&QWP(0x20,$inp));
+ &xorps ($inout0,$inout4);
+ &movups ($rndkey0,&QWP(0x30,$inp));
+ &xorps ($inout1,$inout5);
+ &movups (&QWP(0,$out),$inout0);
+ &xorps ($inout2,$rndkey1);
+ &movups (&QWP(0x10,$out),$inout1);
+ &xorps ($inout3,$rndkey0);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+
+&set_label("ctr32_ret");
+ &mov ("esp",&DWP(80,"esp"));
+&function_end("aesni_ctr32_encrypt_blocks");
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#			const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
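+# The recurring pshufd/pcmpgtd/paddq/pand/pxor sequence below multiplies
+# the tweak by x in GF(2^128): pcmpgtd broadcasts the sign bits of the
+# two 64-bit halves, paddq then shifts each half left by one bit, and
+# pand against the 0x0...010...87 constant turns the broadcast bits into
+# the inter-half carry and the 0x87 reduction residue, both folded back
+# in with pxor.
+#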
+{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
+
+&function_begin("aesni_xts_encrypt");
+ &mov ($key,&wparam(4)); # key2
+ &mov ($inp,&wparam(5)); # clear-text tweak
+
+ &mov ($rounds,&DWP(240,$key)); # key2->rounds
+ &movups ($inout0,&QWP(0,$inp));
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+
+ &mov ($inp,&wparam(0));
+ &mov ($out,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ($key,&wparam(3)); # key1
+
+ &mov ($key_,"esp");
+ &sub ("esp",16*7+8);
+ &mov ($rounds,&DWP(240,$key)); # key1->rounds
+ &and ("esp",-16); # align stack
+
+ &mov (&DWP(16*6+0,"esp"),0x87); # compose the magic constant
+ &mov (&DWP(16*6+4,"esp"),0);
+ &mov (&DWP(16*6+8,"esp"),1);
+ &mov (&DWP(16*6+12,"esp"),0);
+ &mov (&DWP(16*7+0,"esp"),$len); # save original $len
+ &mov (&DWP(16*7+4,"esp"),$key_); # save original %esp
+
+ &movdqa ($tweak,$inout0);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($twmask,&QWP(6*16,"esp")); # 0x0...010...87
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+
+ &and ($len,-16);
+ &mov ($key_,$key); # backup $key
+ &mov ($rounds_,$rounds); # backup $rounds
+ &sub ($len,16*6);
+ &jc (&label("xts_enc_short"));
+
+ &shr ($rounds,1);
+ &mov ($rounds_,$rounds);
+ &jmp (&label("xts_enc_loop6"));
+
+&set_label("xts_enc_loop6",16);
+ for ($i=0;$i<4;$i++) {
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa (&QWP(16*$i,"esp"),$tweak);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ }
+ &pshufd ($inout5,$twtmp,0x13);
+ &movdqa (&QWP(16*$i++,"esp"),$tweak);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &$movekey ($rndkey0,&QWP(0,$key_));
+ &pand ($inout5,$twmask); # isolate carry and residue
+ &movups ($inout0,&QWP(0,$inp)); # load input
+ &pxor ($inout5,$tweak);
+
+ # inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+ &movdqu ($inout1,&QWP(16*1,$inp));
+ &xorps ($inout0,$rndkey0); # input^=rndkey[0]
+ &movdqu ($inout2,&QWP(16*2,$inp));
+ &pxor ($inout1,$rndkey0);
+ &movdqu ($inout3,&QWP(16*3,$inp));
+ &pxor ($inout2,$rndkey0);
+ &movdqu ($inout4,&QWP(16*4,$inp));
+ &pxor ($inout3,$rndkey0);
+ &movdqu ($rndkey1,&QWP(16*5,$inp));
+ &pxor ($inout4,$rndkey0);
+ &lea ($inp,&DWP(16*6,$inp));
+ &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
+ &movdqa (&QWP(16*$i,"esp"),$inout5); # save last tweak
+ &pxor ($inout5,$rndkey1);
+
+ &$movekey ($rndkey1,&QWP(16,$key_));
+ &lea ($key,&DWP(32,$key_));
+ &pxor ($inout1,&QWP(16*1,"esp"));
+ &aesenc ($inout0,$rndkey1);
+ &pxor ($inout2,&QWP(16*2,"esp"));
+ &aesenc ($inout1,$rndkey1);
+ &pxor ($inout3,&QWP(16*3,"esp"));
+ &dec ($rounds);
+ &aesenc ($inout2,$rndkey1);
+ &pxor ($inout4,&QWP(16*4,"esp"));
+ &aesenc ($inout3,$rndkey1);
+ &pxor ($inout5,$rndkey0);
+ &aesenc ($inout4,$rndkey1);
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &aesenc ($inout5,$rndkey1);
+ &call (&label("_aesni_encrypt6_enter"));
+
+ &movdqa ($tweak,&QWP(16*5,"esp")); # last tweak
+ &pxor ($twtmp,$twtmp);
+ &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
+ &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &xorps ($inout2,&QWP(16*2,"esp"));
+ &movups (&QWP(16*1,$out),$inout1);
+ &xorps ($inout3,&QWP(16*3,"esp"));
+ &movups (&QWP(16*2,$out),$inout2);
+ &xorps ($inout4,&QWP(16*4,"esp"));
+ &movups (&QWP(16*3,$out),$inout3);
+ &xorps ($inout5,$tweak);
+ &movups (&QWP(16*4,$out),$inout4);
+ &pshufd ($twres,$twtmp,0x13);
+ &movups (&QWP(16*5,$out),$inout5);
+ &lea ($out,&DWP(16*6,$out));
+ &movdqa ($twmask,&QWP(16*6,"esp")); # 0x0...010...87
+
+ &pxor ($twtmp,$twtmp);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &mov ($rounds,$rounds_); # restore $rounds
+ &pxor ($tweak,$twres);
+
+ &sub ($len,16*6);
+ &jnc (&label("xts_enc_loop6"));
+
+ &lea ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds_,$rounds);
+
+&set_label("xts_enc_short");
+ &add ($len,16*6);
+ &jz (&label("xts_enc_done6x"));
+
+ &movdqa ($inout3,$tweak); # put aside previous tweak
+ &cmp ($len,0x20);
+ &jb (&label("xts_enc_one"));
+
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ &je (&label("xts_enc_two"));
+
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($inout4,$tweak); # put aside previous tweak
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ &cmp ($len,0x40);
+ &jb (&label("xts_enc_three"));
+
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($inout5,$tweak); # put aside previous tweak
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ &movdqa (&QWP(16*0,"esp"),$inout3);
+ &movdqa (&QWP(16*1,"esp"),$inout4);
+ &je (&label("xts_enc_four"));
+
+ &movdqa (&QWP(16*2,"esp"),$inout5);
+ &pshufd ($inout5,$twtmp,0x13);
+ &movdqa (&QWP(16*3,"esp"),$tweak);
+ &paddq ($tweak,$tweak); # &psllq($inout0,1);
+ &pand ($inout5,$twmask); # isolate carry and residue
+ &pxor ($inout5,$tweak);
+
+ &movdqu ($inout0,&QWP(16*0,$inp)); # load input
+ &movdqu ($inout1,&QWP(16*1,$inp));
+ &movdqu ($inout2,&QWP(16*2,$inp));
+ &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
+ &movdqu ($inout3,&QWP(16*3,$inp));
+ &pxor ($inout1,&QWP(16*1,"esp"));
+ &movdqu ($inout4,&QWP(16*4,$inp));
+ &pxor ($inout2,&QWP(16*2,"esp"));
+ &lea ($inp,&DWP(16*5,$inp));
+ &pxor ($inout3,&QWP(16*3,"esp"));
+ &movdqa (&QWP(16*4,"esp"),$inout5); # save last tweak
+ &pxor ($inout4,$inout5);
+
+ &call ("_aesni_encrypt6");
+
+ &movaps ($tweak,&QWP(16*4,"esp")); # last tweak
+ &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &xorps ($inout2,&QWP(16*2,"esp"));
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &xorps ($inout3,&QWP(16*3,"esp"));
+ &movups (&QWP(16*1,$out),$inout1);
+ &xorps ($inout4,$tweak);
+ &movups (&QWP(16*2,$out),$inout2);
+ &movups (&QWP(16*3,$out),$inout3);
+ &movups (&QWP(16*4,$out),$inout4);
+ &lea ($out,&DWP(16*5,$out));
+ &jmp (&label("xts_enc_done"));
+
+&set_label("xts_enc_one",16);
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &lea ($inp,&DWP(16*1,$inp));
+ &xorps ($inout0,$inout3); # input^=tweak
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+ &xorps ($inout0,$inout3); # output^=tweak
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &lea ($out,&DWP(16*1,$out));
+
+ &movdqa ($tweak,$inout3); # last tweak
+ &jmp (&label("xts_enc_done"));
+
+&set_label("xts_enc_two",16);
+ &movaps ($inout4,$tweak); # put aside last tweak
+
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &movups ($inout1,&QWP(16*1,$inp));
+ &lea ($inp,&DWP(16*2,$inp));
+ &xorps ($inout0,$inout3); # input^=tweak
+ &xorps ($inout1,$inout4);
+ &xorps ($inout2,$inout2);
+
+ &call ("_aesni_encrypt3");
+
+ &xorps ($inout0,$inout3); # output^=tweak
+ &xorps ($inout1,$inout4);
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &movups (&QWP(16*1,$out),$inout1);
+ &lea ($out,&DWP(16*2,$out));
+
+ &movdqa ($tweak,$inout4); # last tweak
+ &jmp (&label("xts_enc_done"));
+
+&set_label("xts_enc_three",16);
+ &movaps ($inout5,$tweak); # put aside last tweak
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &movups ($inout1,&QWP(16*1,$inp));
+ &movups ($inout2,&QWP(16*2,$inp));
+ &lea ($inp,&DWP(16*3,$inp));
+ &xorps ($inout0,$inout3); # input^=tweak
+ &xorps ($inout1,$inout4);
+ &xorps ($inout2,$inout5);
+
+ &call ("_aesni_encrypt3");
+
+ &xorps ($inout0,$inout3); # output^=tweak
+ &xorps ($inout1,$inout4);
+ &xorps ($inout2,$inout5);
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &movups (&QWP(16*1,$out),$inout1);
+ &movups (&QWP(16*2,$out),$inout2);
+ &lea ($out,&DWP(16*3,$out));
+
+ &movdqa ($tweak,$inout5); # last tweak
+ &jmp (&label("xts_enc_done"));
+
+&set_label("xts_enc_four",16);
+ &movaps ($inout4,$tweak); # put aside last tweak
+
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &movups ($inout1,&QWP(16*1,$inp));
+ &movups ($inout2,&QWP(16*2,$inp));
+ &xorps ($inout0,&QWP(16*0,"esp")); # input^=tweak
+ &movups ($inout3,&QWP(16*3,$inp));
+ &lea ($inp,&DWP(16*4,$inp));
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &xorps ($inout2,$inout5);
+ &xorps ($inout3,$inout4);
+
+ &call ("_aesni_encrypt4");
+
+ &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &xorps ($inout2,$inout5);
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &xorps ($inout3,$inout4);
+ &movups (&QWP(16*1,$out),$inout1);
+ &movups (&QWP(16*2,$out),$inout2);
+ &movups (&QWP(16*3,$out),$inout3);
+ &lea ($out,&DWP(16*4,$out));
+
+ &movdqa ($tweak,$inout4); # last tweak
+ &jmp (&label("xts_enc_done"));
+
+&set_label("xts_enc_done6x",16); # $tweak is pre-calculated
+ &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
+ &and ($len,15);
+ &jz (&label("xts_enc_ret"));
+ &movdqa ($inout3,$tweak);
+ &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
+ &jmp (&label("xts_enc_steal"));
+
+&set_label("xts_enc_done",16);
+ &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
+ &pxor ($twtmp,$twtmp);
+ &and ($len,15);
+ &jz (&label("xts_enc_ret"));
+
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
+ &pshufd ($inout3,$twtmp,0x13);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($inout3,&QWP(16*6,"esp")); # isolate carry and residue
+ &pxor ($inout3,$tweak);
+
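+# Ciphertext stealing: the trailing $len%16 plaintext bytes displace the
+# leading bytes of the previous ciphertext block (which migrate to the
+# very end of the output), after which the patched block is re-encrypted
+# under the final tweak in $inout3.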
+&set_label("xts_enc_steal");
+ &movz ($rounds,&BP(0,$inp));
+ &movz ($key,&BP(-16,$out));
+ &lea ($inp,&DWP(1,$inp));
+ &mov (&BP(-16,$out),&LB($rounds));
+ &mov (&BP(0,$out),&LB($key));
+ &lea ($out,&DWP(1,$out));
+ &sub ($len,1);
+ &jnz (&label("xts_enc_steal"));
+
+ &sub ($out,&DWP(16*7+0,"esp")); # rewind $out
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds,$rounds_); # restore $rounds
+
+ &movups ($inout0,&QWP(-16,$out)); # load input
+ &xorps ($inout0,$inout3); # input^=tweak
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+ &xorps ($inout0,$inout3); # output^=tweak
+ &movups (&QWP(-16,$out),$inout0); # write output
+
+&set_label("xts_enc_ret");
+ &mov ("esp",&DWP(16*7+4,"esp")); # restore %esp
+&function_end("aesni_xts_encrypt");
+
+&function_begin("aesni_xts_decrypt");
+ &mov ($key,&wparam(4)); # key2
+ &mov ($inp,&wparam(5)); # clear-text tweak
+
+ &mov ($rounds,&DWP(240,$key)); # key2->rounds
+ &movups ($inout0,&QWP(0,$inp));
+ if ($inline)
+ { &aesni_inline_generate1("enc"); }
+ else
+ { &call ("_aesni_encrypt1"); }
+
+ &mov ($inp,&wparam(0));
+ &mov ($out,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ($key,&wparam(3)); # key1
+
+ &mov ($key_,"esp");
+ &sub ("esp",16*7+8);
+ &and ("esp",-16); # align stack
+
+ &xor ($rounds_,$rounds_); # if(len%16) len-=16;
+ &test ($len,15);
+ &setnz (&LB($rounds_));
+ &shl ($rounds_,4);
+ &sub ($len,$rounds_);
+
+ &mov (&DWP(16*6+0,"esp"),0x87); # compose the magic constant
+ &mov (&DWP(16*6+4,"esp"),0);
+ &mov (&DWP(16*6+8,"esp"),1);
+ &mov (&DWP(16*6+12,"esp"),0);
+ &mov (&DWP(16*7+0,"esp"),$len); # save original $len
+ &mov (&DWP(16*7+4,"esp"),$key_); # save original %esp
+
+ &mov ($rounds,&DWP(240,$key)); # key1->rounds
+ &mov ($key_,$key); # backup $key
+ &mov ($rounds_,$rounds); # backup $rounds
+
+ &movdqa ($tweak,$inout0);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($twmask,&QWP(6*16,"esp")); # 0x0...010...87
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+
+ &and ($len,-16);
+ &sub ($len,16*6);
+ &jc (&label("xts_dec_short"));
+
+ &shr ($rounds,1);
+ &mov ($rounds_,$rounds);
+ &jmp (&label("xts_dec_loop6"));
+
+&set_label("xts_dec_loop6",16);
+ for ($i=0;$i<4;$i++) {
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa (&QWP(16*$i,"esp"),$tweak);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ }
+ &pshufd ($inout5,$twtmp,0x13);
+ &movdqa (&QWP(16*$i++,"esp"),$tweak);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &$movekey ($rndkey0,&QWP(0,$key_));
+ &pand ($inout5,$twmask); # isolate carry and residue
+ &movups ($inout0,&QWP(0,$inp)); # load input
+ &pxor ($inout5,$tweak);
+
+ # inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+ &movdqu ($inout1,&QWP(16*1,$inp));
+ &xorps ($inout0,$rndkey0); # input^=rndkey[0]
+ &movdqu ($inout2,&QWP(16*2,$inp));
+ &pxor ($inout1,$rndkey0);
+ &movdqu ($inout3,&QWP(16*3,$inp));
+ &pxor ($inout2,$rndkey0);
+ &movdqu ($inout4,&QWP(16*4,$inp));
+ &pxor ($inout3,$rndkey0);
+ &movdqu ($rndkey1,&QWP(16*5,$inp));
+ &pxor ($inout4,$rndkey0);
+ &lea ($inp,&DWP(16*6,$inp));
+ &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
+ &movdqa (&QWP(16*$i,"esp"),$inout5); # save last tweak
+ &pxor ($inout5,$rndkey1);
+
+ &$movekey ($rndkey1,&QWP(16,$key_));
+ &lea ($key,&DWP(32,$key_));
+ &pxor ($inout1,&QWP(16*1,"esp"));
+ &aesdec ($inout0,$rndkey1);
+ &pxor ($inout2,&QWP(16*2,"esp"));
+ &aesdec ($inout1,$rndkey1);
+ &pxor ($inout3,&QWP(16*3,"esp"));
+ &dec ($rounds);
+ &aesdec ($inout2,$rndkey1);
+ &pxor ($inout4,&QWP(16*4,"esp"));
+ &aesdec ($inout3,$rndkey1);
+ &pxor ($inout5,$rndkey0);
+ &aesdec ($inout4,$rndkey1);
+ &$movekey ($rndkey0,&QWP(0,$key));
+ &aesdec ($inout5,$rndkey1);
+ &call (&label("_aesni_decrypt6_enter"));
+
+ &movdqa ($tweak,&QWP(16*5,"esp")); # last tweak
+ &pxor ($twtmp,$twtmp);
+ &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
+ &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &xorps ($inout2,&QWP(16*2,"esp"));
+ &movups (&QWP(16*1,$out),$inout1);
+ &xorps ($inout3,&QWP(16*3,"esp"));
+ &movups (&QWP(16*2,$out),$inout2);
+ &xorps ($inout4,&QWP(16*4,"esp"));
+ &movups (&QWP(16*3,$out),$inout3);
+ &xorps ($inout5,$tweak);
+ &movups (&QWP(16*4,$out),$inout4);
+ &pshufd ($twres,$twtmp,0x13);
+ &movups (&QWP(16*5,$out),$inout5);
+ &lea ($out,&DWP(16*6,$out));
+ &movdqa ($twmask,&QWP(16*6,"esp")); # 0x0...010...87
+
+ &pxor ($twtmp,$twtmp);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &mov ($rounds,$rounds_); # restore $rounds
+ &pxor ($tweak,$twres);
+
+ &sub ($len,16*6);
+ &jnc (&label("xts_dec_loop6"));
+
+ &lea ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds_,$rounds);
+
+&set_label("xts_dec_short");
+ &add ($len,16*6);
+ &jz (&label("xts_dec_done6x"));
+
+ &movdqa ($inout3,$tweak); # put aside previous tweak
+ &cmp ($len,0x20);
+ &jb (&label("xts_dec_one"));
+
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ &je (&label("xts_dec_two"));
+
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($inout4,$tweak); # put aside previous tweak
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ &cmp ($len,0x40);
+ &jb (&label("xts_dec_three"));
+
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($inout5,$tweak); # put aside previous tweak
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+ &movdqa (&QWP(16*0,"esp"),$inout3);
+ &movdqa (&QWP(16*1,"esp"),$inout4);
+ &je (&label("xts_dec_four"));
+
+ &movdqa (&QWP(16*2,"esp"),$inout5);
+ &pshufd ($inout5,$twtmp,0x13);
+ &movdqa (&QWP(16*3,"esp"),$tweak);
+ &paddq ($tweak,$tweak); # &psllq($inout0,1);
+ &pand ($inout5,$twmask); # isolate carry and residue
+ &pxor ($inout5,$tweak);
+
+ &movdqu ($inout0,&QWP(16*0,$inp)); # load input
+ &movdqu ($inout1,&QWP(16*1,$inp));
+ &movdqu ($inout2,&QWP(16*2,$inp));
+ &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
+ &movdqu ($inout3,&QWP(16*3,$inp));
+ &pxor ($inout1,&QWP(16*1,"esp"));
+ &movdqu ($inout4,&QWP(16*4,$inp));
+ &pxor ($inout2,&QWP(16*2,"esp"));
+ &lea ($inp,&DWP(16*5,$inp));
+ &pxor ($inout3,&QWP(16*3,"esp"));
+ &movdqa (&QWP(16*4,"esp"),$inout5); # save last tweak
+ &pxor ($inout4,$inout5);
+
+ &call ("_aesni_decrypt6");
+
+ &movaps ($tweak,&QWP(16*4,"esp")); # last tweak
+ &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &xorps ($inout2,&QWP(16*2,"esp"));
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &xorps ($inout3,&QWP(16*3,"esp"));
+ &movups (&QWP(16*1,$out),$inout1);
+ &xorps ($inout4,$tweak);
+ &movups (&QWP(16*2,$out),$inout2);
+ &movups (&QWP(16*3,$out),$inout3);
+ &movups (&QWP(16*4,$out),$inout4);
+ &lea ($out,&DWP(16*5,$out));
+ &jmp (&label("xts_dec_done"));
+
+&set_label("xts_dec_one",16);
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &lea ($inp,&DWP(16*1,$inp));
+ &xorps ($inout0,$inout3); # input^=tweak
+ if ($inline)
+ { &aesni_inline_generate1("dec"); }
+ else
+ { &call ("_aesni_decrypt1"); }
+ &xorps ($inout0,$inout3); # output^=tweak
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &lea ($out,&DWP(16*1,$out));
+
+ &movdqa ($tweak,$inout3); # last tweak
+ &jmp (&label("xts_dec_done"));
+
+&set_label("xts_dec_two",16);
+ &movaps ($inout4,$tweak); # put aside last tweak
+
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &movups ($inout1,&QWP(16*1,$inp));
+ &lea ($inp,&DWP(16*2,$inp));
+ &xorps ($inout0,$inout3); # input^=tweak
+ &xorps ($inout1,$inout4);
+
+ &call ("_aesni_decrypt3");
+
+ &xorps ($inout0,$inout3); # output^=tweak
+ &xorps ($inout1,$inout4);
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &movups (&QWP(16*1,$out),$inout1);
+ &lea ($out,&DWP(16*2,$out));
+
+ &movdqa ($tweak,$inout4); # last tweak
+ &jmp (&label("xts_dec_done"));
+
+&set_label("xts_dec_three",16);
+ &movaps ($inout5,$tweak); # put aside last tweak
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &movups ($inout1,&QWP(16*1,$inp));
+ &movups ($inout2,&QWP(16*2,$inp));
+ &lea ($inp,&DWP(16*3,$inp));
+ &xorps ($inout0,$inout3); # input^=tweak
+ &xorps ($inout1,$inout4);
+ &xorps ($inout2,$inout5);
+
+ &call ("_aesni_decrypt3");
+
+ &xorps ($inout0,$inout3); # output^=tweak
+ &xorps ($inout1,$inout4);
+ &xorps ($inout2,$inout5);
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &movups (&QWP(16*1,$out),$inout1);
+ &movups (&QWP(16*2,$out),$inout2);
+ &lea ($out,&DWP(16*3,$out));
+
+ &movdqa ($tweak,$inout5); # last tweak
+ &jmp (&label("xts_dec_done"));
+
+&set_label("xts_dec_four",16);
+ &movaps ($inout4,$tweak); # put aside last tweak
+
+ &movups ($inout0,&QWP(16*0,$inp)); # load input
+ &movups ($inout1,&QWP(16*1,$inp));
+ &movups ($inout2,&QWP(16*2,$inp));
+ &xorps ($inout0,&QWP(16*0,"esp")); # input^=tweak
+ &movups ($inout3,&QWP(16*3,$inp));
+ &lea ($inp,&DWP(16*4,$inp));
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &xorps ($inout2,$inout5);
+ &xorps ($inout3,$inout4);
+
+ &call ("_aesni_decrypt4");
+
+ &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
+ &xorps ($inout1,&QWP(16*1,"esp"));
+ &xorps ($inout2,$inout5);
+ &movups (&QWP(16*0,$out),$inout0); # write output
+ &xorps ($inout3,$inout4);
+ &movups (&QWP(16*1,$out),$inout1);
+ &movups (&QWP(16*2,$out),$inout2);
+ &movups (&QWP(16*3,$out),$inout3);
+ &lea ($out,&DWP(16*4,$out));
+
+ &movdqa ($tweak,$inout4); # last tweak
+ &jmp (&label("xts_dec_done"));
+
+&set_label("xts_dec_done6x",16); # $tweak is pre-calculated
+ &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
+ &and ($len,15);
+ &jz (&label("xts_dec_ret"));
+ &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
+ &jmp (&label("xts_dec_only_one_more"));
+
+&set_label("xts_dec_done",16);
+ &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
+ &pxor ($twtmp,$twtmp);
+ &and ($len,15);
+ &jz (&label("xts_dec_ret"));
+
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
+ &pshufd ($twres,$twtmp,0x13);
+ &pxor ($twtmp,$twtmp);
+ &movdqa ($twmask,&QWP(16*6,"esp"));
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($twres,$twmask); # isolate carry and residue
+ &pcmpgtd($twtmp,$tweak); # broadcast upper bits
+ &pxor ($tweak,$twres);
+
+&set_label("xts_dec_only_one_more");
+ &pshufd ($inout3,$twtmp,0x13);
+ &movdqa ($inout4,$tweak); # put aside previous tweak
+ &paddq ($tweak,$tweak); # &psllq($tweak,1);
+ &pand ($inout3,$twmask); # isolate carry and residue
+ &pxor ($inout3,$tweak);
+
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds,$rounds_); # restore $rounds
+
+ &movups ($inout0,&QWP(0,$inp)); # load input
+ &xorps ($inout0,$inout3); # input^=tweak
+ if ($inline)
+ { &aesni_inline_generate1("dec"); }
+ else
+ { &call ("_aesni_decrypt1"); }
+ &xorps ($inout0,$inout3); # output^=tweak
+ &movups (&QWP(0,$out),$inout0); # write output
+
+&set_label("xts_dec_steal");
+ &movz ($rounds,&BP(16,$inp));
+ &movz ($key,&BP(0,$out));
+ &lea ($inp,&DWP(1,$inp));
+ &mov (&BP(0,$out),&LB($rounds));
+ &mov (&BP(16,$out),&LB($key));
+ &lea ($out,&DWP(1,$out));
+ &sub ($len,1);
+ &jnz (&label("xts_dec_steal"));
+
+ &sub ($out,&DWP(16*7+0,"esp")); # rewind $out
+ &mov ($key,$key_); # restore $key
+ &mov ($rounds,$rounds_); # restore $rounds
+
+ &movups ($inout0,&QWP(0,$out)); # load input
+ &xorps ($inout0,$inout4); # input^=tweak
+ if ($inline)
+ { &aesni_inline_generate1("dec"); }
+ else
+ { &call ("_aesni_decrypt1"); }
+ &xorps ($inout0,$inout4); # output^=tweak
+ &movups (&QWP(0,$out),$inout0); # write output
+
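+# The byte-swap loop above, bracketed by the two single-block calls,
+# performs standard XTS ciphertext stealing on the decrypt side. A
+# minimal plain-Perl model, never called, where dec() stands for a
+# hypothetical single-block decrypt that already applies the given tweak:
+sub _xts_dec_steal_model {
+	my ($c_last,$c_tail,$tw_prev,$tw_next) = @_;
+	my $pp = dec($c_last,$tw_next);		# last full block, *next* tweak
+	my $n = length($c_tail);
+	my $p_tail = substr($pp,0,$n);		# final partial plaintext
+	my $grafted = $c_tail.substr($pp,$n);	# graft tail ciphertext on
+	return (dec($grafted,$tw_prev),$p_tail);
+}
+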
+&set_label("xts_dec_ret");
+ &mov ("esp",&DWP(16*7+4,"esp")); # restore %esp
+&function_end("aesni_xts_decrypt");
+}
+}
+
+######################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+# size_t length, const AES_KEY *key,
+#			unsigned char *ivp, const int enc);
+&function_begin("${PREFIX}_cbc_encrypt");
+ &mov ($inp,&wparam(0));
+ &mov ($rounds_,"esp");
+ &mov ($out,&wparam(1));
+ &sub ($rounds_,24);
+ &mov ($len,&wparam(2));
+ &and ($rounds_,-16);
+ &mov ($key,&wparam(3));
+ &mov ($key_,&wparam(4));
+ &test ($len,$len);
+ &jz (&label("cbc_abort"));
+
+ &cmp (&wparam(5),0);
+ &xchg ($rounds_,"esp"); # alloca
+ &movups ($ivec,&QWP(0,$key_)); # load IV
+ &mov ($rounds,&DWP(240,$key));
+ &mov ($key_,$key); # backup $key
+ &mov (&DWP(16,"esp"),$rounds_); # save original %esp
+ &mov ($rounds_,$rounds); # backup $rounds
+ &je (&label("cbc_decrypt"));
+
+ &movaps ($inout0,$ivec);
+ &cmp ($len,16);
+ &jb (&label("cbc_enc_tail"));
+ &sub ($len,16);
+ &jmp (&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+ &movups ($ivec,&QWP(0,$inp)); # input actually
+ &lea ($inp,&DWP(16,$inp));
+ if ($inline)
+ { &aesni_inline_generate1("enc",$inout0,$ivec); }
+ else
+ { &xorps($inout0,$ivec); &call("_aesni_encrypt1"); }
+ &mov ($rounds,$rounds_); # restore $rounds
+ &mov ($key,$key_); # restore $key
+ &movups (&QWP(0,$out),$inout0); # store output
+ &lea ($out,&DWP(16,$out));
+ &sub ($len,16);
+ &jnc (&label("cbc_enc_loop"));
+ &add ($len,16);
+ &jnz (&label("cbc_enc_tail"));
+ &movaps ($ivec,$inout0);
+ &jmp (&label("cbc_ret"));
+
+&set_label("cbc_enc_tail");
+ &mov ("ecx",$len); # zaps $rounds
+ &data_word(0xA4F3F689); # rep movsb
+ &mov ("ecx",16); # zero tail
+ &sub ("ecx",$len);
+ &xor ("eax","eax"); # zaps $len
+ &data_word(0xAAF3F689); # rep stosb
+ &lea ($out,&DWP(-16,$out)); # rewind $out by 1 block
+ &mov ($rounds,$rounds_); # restore $rounds
+ &mov ($inp,$out); # $inp and $out are the same
+ &mov ($key,$key_); # restore $key
+ &jmp (&label("cbc_enc_loop"));
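+
+# The rep movsb/rep stosb pair above implements the legacy zero-padding
+# of a trailing partial block: the tail bytes are copied out, the rest
+# of the block is zero-filled, and the padded block is then sent back
+# through the regular loop. A minimal plain-Perl model of the padding,
+# never called:
+sub _cbc_enc_pad_model {
+	my ($tail) = @_;			# 1..15 trailing input bytes
+	return $tail."\0" x (16-length($tail));	# zero-fill to a full block
+}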
+######################################################################
+&set_label("cbc_decrypt",16);
+ &cmp ($len,0x50);
+ &jbe (&label("cbc_dec_tail"));
+ &movaps (&QWP(0,"esp"),$ivec); # save IV
+ &sub ($len,0x50);
+ &jmp (&label("cbc_dec_loop6_enter"));
+
+&set_label("cbc_dec_loop6",16);
+ &movaps (&QWP(0,"esp"),$rndkey0); # save IV
+ &movups (&QWP(0,$out),$inout5);
+ &lea ($out,&DWP(0x10,$out));
+&set_label("cbc_dec_loop6_enter");
+ &movdqu ($inout0,&QWP(0,$inp));
+ &movdqu ($inout1,&QWP(0x10,$inp));
+ &movdqu ($inout2,&QWP(0x20,$inp));
+ &movdqu ($inout3,&QWP(0x30,$inp));
+ &movdqu ($inout4,&QWP(0x40,$inp));
+ &movdqu ($inout5,&QWP(0x50,$inp));
+
+ &call ("_aesni_decrypt6");
+
+ &movups ($rndkey1,&QWP(0,$inp));
+ &movups ($rndkey0,&QWP(0x10,$inp));
+ &xorps ($inout0,&QWP(0,"esp")); # ^=IV
+ &xorps ($inout1,$rndkey1);
+ &movups ($rndkey1,&QWP(0x20,$inp));
+ &xorps ($inout2,$rndkey0);
+ &movups ($rndkey0,&QWP(0x30,$inp));
+ &xorps ($inout3,$rndkey1);
+ &movups ($rndkey1,&QWP(0x40,$inp));
+ &xorps ($inout4,$rndkey0);
+ &movups ($rndkey0,&QWP(0x50,$inp)); # IV
+ &xorps ($inout5,$rndkey1);
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &lea ($inp,&DWP(0x60,$inp));
+ &movups (&QWP(0x20,$out),$inout2);
+	&mov	($rounds,$rounds_);		# restore $rounds
+ &movups (&QWP(0x30,$out),$inout3);
+ &mov ($key,$key_); # restore $key
+ &movups (&QWP(0x40,$out),$inout4);
+ &lea ($out,&DWP(0x50,$out));
+ &sub ($len,0x60);
+ &ja (&label("cbc_dec_loop6"));
+
+ &movaps ($inout0,$inout5);
+ &movaps ($ivec,$rndkey0);
+ &add ($len,0x50);
+ &jle (&label("cbc_dec_tail_collected"));
+ &movups (&QWP(0,$out),$inout0);
+ &lea ($out,&DWP(0x10,$out));
+&set_label("cbc_dec_tail");
+ &movups ($inout0,&QWP(0,$inp));
+ &movaps ($in0,$inout0);
+ &cmp ($len,0x10);
+ &jbe (&label("cbc_dec_one"));
+
+ &movups ($inout1,&QWP(0x10,$inp));
+ &movaps ($in1,$inout1);
+ &cmp ($len,0x20);
+ &jbe (&label("cbc_dec_two"));
+
+ &movups ($inout2,&QWP(0x20,$inp));
+ &cmp ($len,0x30);
+ &jbe (&label("cbc_dec_three"));
+
+ &movups ($inout3,&QWP(0x30,$inp));
+ &cmp ($len,0x40);
+ &jbe (&label("cbc_dec_four"));
+
+ &movups ($inout4,&QWP(0x40,$inp));
+ &movaps (&QWP(0,"esp"),$ivec); # save IV
+ &movups ($inout0,&QWP(0,$inp));
+ &xorps ($inout5,$inout5);
+ &call ("_aesni_decrypt6");
+ &movups ($rndkey1,&QWP(0,$inp));
+ &movups ($rndkey0,&QWP(0x10,$inp));
+ &xorps ($inout0,&QWP(0,"esp")); # ^= IV
+ &xorps ($inout1,$rndkey1);
+ &movups ($rndkey1,&QWP(0x20,$inp));
+ &xorps ($inout2,$rndkey0);
+ &movups ($rndkey0,&QWP(0x30,$inp));
+ &xorps ($inout3,$rndkey1);
+ &movups ($ivec,&QWP(0x40,$inp)); # IV
+ &xorps ($inout4,$rndkey0);
+ &movups (&QWP(0,$out),$inout0);
+ &movups (&QWP(0x10,$out),$inout1);
+ &movups (&QWP(0x20,$out),$inout2);
+ &movups (&QWP(0x30,$out),$inout3);
+ &lea ($out,&DWP(0x40,$out));
+ &movaps ($inout0,$inout4);
+ &sub ($len,0x50);
+ &jmp (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_one",16);
+ if ($inline)
+ { &aesni_inline_generate1("dec"); }
+ else
+ { &call ("_aesni_decrypt1"); }
+ &xorps ($inout0,$ivec);
+ &movaps ($ivec,$in0);
+ &sub ($len,0x10);
+ &jmp (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_two",16);
+ &xorps ($inout2,$inout2);
+ &call ("_aesni_decrypt3");
+ &xorps ($inout0,$ivec);
+ &xorps ($inout1,$in0);
+ &movups (&QWP(0,$out),$inout0);
+ &movaps ($inout0,$inout1);
+ &lea ($out,&DWP(0x10,$out));
+ &movaps ($ivec,$in1);
+ &sub ($len,0x20);
+ &jmp (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_three",16);
+ &call ("_aesni_decrypt3");
+ &xorps ($inout0,$ivec);
+ &xorps ($inout1,$in0);
+ &xorps ($inout2,$in1);
+ &movups (&QWP(0,$out),$inout0);
+ &movaps ($inout0,$inout2);
+ &movups (&QWP(0x10,$out),$inout1);
+ &lea ($out,&DWP(0x20,$out));
+ &movups ($ivec,&QWP(0x20,$inp));
+ &sub ($len,0x30);
+ &jmp (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_four",16);
+ &call ("_aesni_decrypt4");
+ &movups ($rndkey1,&QWP(0x10,$inp));
+ &movups ($rndkey0,&QWP(0x20,$inp));
+ &xorps ($inout0,$ivec);
+ &movups ($ivec,&QWP(0x30,$inp));
+ &xorps ($inout1,$in0);
+ &movups (&QWP(0,$out),$inout0);
+ &xorps ($inout2,$rndkey1);
+ &movups (&QWP(0x10,$out),$inout1);
+ &xorps ($inout3,$rndkey0);
+ &movups (&QWP(0x20,$out),$inout2);
+ &lea ($out,&DWP(0x30,$out));
+ &movaps ($inout0,$inout3);
+ &sub ($len,0x40);
+
+&set_label("cbc_dec_tail_collected");
+ &and ($len,15);
+ &jnz (&label("cbc_dec_tail_partial"));
+ &movups (&QWP(0,$out),$inout0);
+ &jmp (&label("cbc_ret"));
+
+&set_label("cbc_dec_tail_partial",16);
+ &movaps (&QWP(0,"esp"),$inout0);
+ &mov ("ecx",16);
+ &mov ($inp,"esp");
+ &sub ("ecx",$len);
+ &data_word(0xA4F3F689); # rep movsb
+
+&set_label("cbc_ret");
+ &mov ("esp",&DWP(16,"esp")); # pull original %esp
+ &mov ($key_,&wparam(4));
+ &movups (&QWP(0,$key_),$ivec); # output IV
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
+
+######################################################################
+# Mechanical port from aesni-x86_64.pl.
+#
+# _aesni_set_encrypt_key is private interface,
+# input:
+# "eax" const unsigned char *userKey
+# $rounds int bits
+# $key AES_KEY *key
+# output:
+# "eax" return code
+#	$rounds	rounds
+
+&function_begin_B("_aesni_set_encrypt_key");
+ &test ("eax","eax");
+ &jz (&label("bad_pointer"));
+ &test ($key,$key);
+ &jz (&label("bad_pointer"));
+
+ &movups ("xmm0",&QWP(0,"eax")); # pull first 128 bits of *userKey
+ &xorps ("xmm4","xmm4"); # low dword of xmm4 is assumed 0
+ &lea ($key,&DWP(16,$key));
+ &cmp ($rounds,256);
+ &je (&label("14rounds"));
+ &cmp ($rounds,192);
+ &je (&label("12rounds"));
+ &cmp ($rounds,128);
+ &jne (&label("bad_keybits"));
+
+&set_label("10rounds",16);
+ &mov ($rounds,9);
+ &$movekey (&QWP(-16,$key),"xmm0"); # round 0
+ &aeskeygenassist("xmm1","xmm0",0x01); # round 1
+ &call (&label("key_128_cold"));
+ &aeskeygenassist("xmm1","xmm0",0x2); # round 2
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x04); # round 3
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x08); # round 4
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x10); # round 5
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x20); # round 6
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x40); # round 7
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x80); # round 8
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x1b); # round 9
+ &call (&label("key_128"));
+ &aeskeygenassist("xmm1","xmm0",0x36); # round 10
+ &call (&label("key_128"));
+ &$movekey (&QWP(0,$key),"xmm0");
+ &mov (&DWP(80,$key),$rounds);
+ &xor ("eax","eax");
+ &ret();
+
+&set_label("key_128",16);
+ &$movekey (&QWP(0,$key),"xmm0");
+ &lea ($key,&DWP(16,$key));
+&set_label("key_128_cold");
+ &shufps ("xmm4","xmm0",0b00010000);
+ &xorps ("xmm0","xmm4");
+ &shufps ("xmm4","xmm0",0b10001100);
+ &xorps ("xmm0","xmm4");
+ &shufps ("xmm1","xmm1",0b11111111); # critical path
+ &xorps ("xmm0","xmm1");
+ &ret();
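+
+# In FIPS-197 terms the shufps/xorps ladder above computes one AES-128
+# key-expansion round: aeskeygenassist leaves SubWord(RotWord(w3))^rcon
+# in the top dword of xmm1, and the two shufps/xorps pairs realize the
+# running XOR over the four words. A minimal Perl sketch, never called;
+# subword() and rotword() are hypothetical helpers, not part of this file:
+sub _key128_round_model {
+	my ($rcon,@w) = @_;			# four 32-bit words of previous round key
+	my $t = subword(rotword($w[3]))^$rcon;
+	my @n = ($w[0]^$t);
+	push(@n,$w[$_]^$n[$_-1]) for (1..3);	# running XOR
+	return @n;
+}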
+
+&set_label("12rounds",16);
+ &movq ("xmm2",&QWP(16,"eax")); # remaining 1/3 of *userKey
+ &mov ($rounds,11);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+ &aeskeygenassist("xmm1","xmm2",0x01); # round 1,2
+ &call (&label("key_192a_cold"));
+ &aeskeygenassist("xmm1","xmm2",0x02); # round 2,3
+ &call (&label("key_192b"));
+ &aeskeygenassist("xmm1","xmm2",0x04); # round 4,5
+ &call (&label("key_192a"));
+ &aeskeygenassist("xmm1","xmm2",0x08); # round 5,6
+ &call (&label("key_192b"));
+ &aeskeygenassist("xmm1","xmm2",0x10); # round 7,8
+ &call (&label("key_192a"));
+ &aeskeygenassist("xmm1","xmm2",0x20); # round 8,9
+ &call (&label("key_192b"));
+ &aeskeygenassist("xmm1","xmm2",0x40); # round 10,11
+ &call (&label("key_192a"));
+ &aeskeygenassist("xmm1","xmm2",0x80); # round 11,12
+ &call (&label("key_192b"));
+ &$movekey (&QWP(0,$key),"xmm0");
+ &mov (&DWP(48,$key),$rounds);
+ &xor ("eax","eax");
+ &ret();
+
+&set_label("key_192a",16);
+ &$movekey (&QWP(0,$key),"xmm0");
+ &lea ($key,&DWP(16,$key));
+&set_label("key_192a_cold",16);
+ &movaps ("xmm5","xmm2");
+&set_label("key_192b_warm");
+ &shufps ("xmm4","xmm0",0b00010000);
+ &movdqa ("xmm3","xmm2");
+ &xorps ("xmm0","xmm4");
+ &shufps ("xmm4","xmm0",0b10001100);
+ &pslldq ("xmm3",4);
+ &xorps ("xmm0","xmm4");
+ &pshufd ("xmm1","xmm1",0b01010101); # critical path
+ &pxor ("xmm2","xmm3");
+ &pxor ("xmm0","xmm1");
+ &pshufd ("xmm3","xmm0",0b11111111);
+ &pxor ("xmm2","xmm3");
+ &ret();
+
+&set_label("key_192b",16);
+ &movaps ("xmm3","xmm0");
+ &shufps ("xmm5","xmm0",0b01000100);
+ &$movekey (&QWP(0,$key),"xmm5");
+ &shufps ("xmm3","xmm2",0b01001110);
+ &$movekey (&QWP(16,$key),"xmm3");
+ &lea ($key,&DWP(32,$key));
+ &jmp (&label("key_192b_warm"));
+
+&set_label("14rounds",16);
+ &movups ("xmm2",&QWP(16,"eax")); # remaining half of *userKey
+ &mov ($rounds,13);
+ &lea ($key,&DWP(16,$key));
+ &$movekey (&QWP(-32,$key),"xmm0"); # round 0
+ &$movekey (&QWP(-16,$key),"xmm2"); # round 1
+ &aeskeygenassist("xmm1","xmm2",0x01); # round 2
+ &call (&label("key_256a_cold"));
+ &aeskeygenassist("xmm1","xmm0",0x01); # round 3
+ &call (&label("key_256b"));
+ &aeskeygenassist("xmm1","xmm2",0x02); # round 4
+ &call (&label("key_256a"));
+ &aeskeygenassist("xmm1","xmm0",0x02); # round 5
+ &call (&label("key_256b"));
+ &aeskeygenassist("xmm1","xmm2",0x04); # round 6
+ &call (&label("key_256a"));
+ &aeskeygenassist("xmm1","xmm0",0x04); # round 7
+ &call (&label("key_256b"));
+ &aeskeygenassist("xmm1","xmm2",0x08); # round 8
+ &call (&label("key_256a"));
+ &aeskeygenassist("xmm1","xmm0",0x08); # round 9
+ &call (&label("key_256b"));
+ &aeskeygenassist("xmm1","xmm2",0x10); # round 10
+ &call (&label("key_256a"));
+ &aeskeygenassist("xmm1","xmm0",0x10); # round 11
+ &call (&label("key_256b"));
+ &aeskeygenassist("xmm1","xmm2",0x20); # round 12
+ &call (&label("key_256a"));
+ &aeskeygenassist("xmm1","xmm0",0x20); # round 13
+ &call (&label("key_256b"));
+ &aeskeygenassist("xmm1","xmm2",0x40); # round 14
+ &call (&label("key_256a"));
+ &$movekey (&QWP(0,$key),"xmm0");
+ &mov (&DWP(16,$key),$rounds);
+ &xor ("eax","eax");
+ &ret();
+
+&set_label("key_256a",16);
+ &$movekey (&QWP(0,$key),"xmm2");
+ &lea ($key,&DWP(16,$key));
+&set_label("key_256a_cold");
+ &shufps ("xmm4","xmm0",0b00010000);
+ &xorps ("xmm0","xmm4");
+ &shufps ("xmm4","xmm0",0b10001100);
+ &xorps ("xmm0","xmm4");
+ &shufps ("xmm1","xmm1",0b11111111); # critical path
+ &xorps ("xmm0","xmm1");
+ &ret();
+
+&set_label("key_256b",16);
+ &$movekey (&QWP(0,$key),"xmm0");
+ &lea ($key,&DWP(16,$key));
+
+ &shufps ("xmm4","xmm2",0b00010000);
+ &xorps ("xmm2","xmm4");
+ &shufps ("xmm4","xmm2",0b10001100);
+ &xorps ("xmm2","xmm4");
+ &shufps ("xmm1","xmm1",0b10101010); # critical path
+ &xorps ("xmm2","xmm1");
+ &ret();
+
+&set_label("bad_pointer",4);
+ &mov ("eax",-1);
+ &ret ();
+&set_label("bad_keybits",4);
+ &mov ("eax",-2);
+ &ret ();
+&function_end_B("_aesni_set_encrypt_key");
+
+# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
+# AES_KEY *key)
+&function_begin_B("${PREFIX}_set_encrypt_key");
+ &mov ("eax",&wparam(0));
+ &mov ($rounds,&wparam(1));
+ &mov ($key,&wparam(2));
+ &call ("_aesni_set_encrypt_key");
+ &ret ();
+&function_end_B("${PREFIX}_set_encrypt_key");
+
+# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
+# AES_KEY *key)
+&function_begin_B("${PREFIX}_set_decrypt_key");
+ &mov ("eax",&wparam(0));
+ &mov ($rounds,&wparam(1));
+ &mov ($key,&wparam(2));
+ &call ("_aesni_set_encrypt_key");
+ &mov ($key,&wparam(2));
+	&shl	($rounds,4);	# rounds-1 after _aesni_set_encrypt_key
+ &test ("eax","eax");
+ &jnz (&label("dec_key_ret"));
+ &lea ("eax",&DWP(16,$key,$rounds)); # end of key schedule
+
+ &$movekey ("xmm0",&QWP(0,$key)); # just swap
+ &$movekey ("xmm1",&QWP(0,"eax"));
+ &$movekey (&QWP(0,"eax"),"xmm0");
+ &$movekey (&QWP(0,$key),"xmm1");
+ &lea ($key,&DWP(16,$key));
+ &lea ("eax",&DWP(-16,"eax"));
+
+&set_label("dec_key_inverse");
+ &$movekey ("xmm0",&QWP(0,$key)); # swap and inverse
+ &$movekey ("xmm1",&QWP(0,"eax"));
+ &aesimc ("xmm0","xmm0");
+ &aesimc ("xmm1","xmm1");
+ &lea ($key,&DWP(16,$key));
+ &lea ("eax",&DWP(-16,"eax"));
+ &$movekey (&QWP(16,"eax"),"xmm0");
+ &$movekey (&QWP(-16,$key),"xmm1");
+ &cmp ("eax",$key);
+ &ja (&label("dec_key_inverse"));
+
+ &$movekey ("xmm0",&QWP(0,$key)); # inverse middle
+ &aesimc ("xmm0","xmm0");
+ &$movekey (&QWP(0,$key),"xmm0");
+
+ &xor ("eax","eax"); # return success
+&set_label("dec_key_ret");
+ &ret ();
+&function_end_B("${PREFIX}_set_decrypt_key");
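+
+# The swap and aesimc loops above implement the usual equivalent-inverse-
+# cipher conversion: round keys in reverse order, with InvMixColumns
+# applied to every key except the first and last. A minimal Perl model,
+# never called; inv_mix_columns() is a hypothetical helper:
+sub _dec_schedule_model {
+	my @dec = reverse @_;			# @_ = encryption keys, round 0 first
+	$dec[$_] = inv_mix_columns($dec[$_]) for (1..$#dec-1);
+	return @dec;
+}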
+&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/openssl/crypto/aes/asm/aesni-x86_64.pl b/openssl/crypto/aes/asm/aesni-x86_64.pl
new file mode 100644
index 000000000..499f3b3f4
--- /dev/null
+++ b/openssl/crypto/aes/asm/aesni-x86_64.pl
@@ -0,0 +1,3068 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for the Intel AES-NI extension. In
+# OpenSSL context it's used with the Intel engine, but can also be used
+# as a drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below
+# for details].
+#
+# Performance.
+#
+# Given aes(enc|dec) instructions' latency, asymptotic performance for
+# non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
+# processed with a 128-bit key. And given their throughput, asymptotic
+# performance for parallelizable modes is 1.25 cycles per byte. Being
+# an asymptotic limit, it's not something you commonly achieve in reality,
+# but how close does one get? Below are results collected for
+# different modes and block sizes. Pairs of numbers are for en-/
+# decryption.
+#
+# 16-byte 64-byte 256-byte 1-KB 8-KB
+# ECB 4.25/4.25 1.38/1.38 1.28/1.28 1.26/1.26 1.26/1.26
+# CTR 5.42/5.42 1.92/1.92 1.44/1.44 1.28/1.28 1.26/1.26
+# CBC 4.38/4.43 4.15/1.43 4.07/1.32 4.07/1.29 4.06/1.28
+# CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07
+# OFB 5.42/5.42 4.64/4.64 4.44/4.44 4.39/4.39 4.38/4.38
+# CFB 5.73/5.85 5.56/5.62 5.48/5.56 5.47/5.55 5.47/5.55
+#
+# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
+# that the otherwise-used 'openssl speed -evp aes-128-??? -engine aesni
+# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
+# The results were collected with specially crafted speed.c benchmark
+# in order to compare them with results reported in "Intel Advanced
+# Encryption Standard (AES) New Instruction Set" White Paper Revision
+# 3.0 dated May 2010. All above results are consistently better. This
+# module also provides better performance for block sizes smaller than
+# 128 bytes at points *not* represented in the above table.
+#
+# Looking at the results for 8-KB buffer.
+#
+# CFB and OFB results are far from the limit, because implementation
+# uses "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
+# single-block aesni_encrypt, which is not the most optimal way to go.
+# CBC encrypt result is unexpectedly high and there is no documented
+# explanation for it. Seemingly there is a small penalty for feeding
+# the result back to the AES unit the way it's done in CBC mode. There is
+# nothing one can do and the result appears optimal. CCM result is
+# identical to CBC, because CBC-MAC is essentially CBC encrypt without
+# saving output. CCM CTR "stays invisible," because it's neatly
+# interleaved with CBC-MAC. This provides ~30% improvement over a
+# "straightforward" CCM implementation with CTR and CBC-MAC performed
+# disjointly. Parallelizable modes practically achieve the theoretical
+# limit.
+#
+# Looking at how results vary with buffer size.
+#
+# Curves are practically saturated at 1-KB buffer size. In most cases
+# "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
+# The CTR curve doesn't follow this pattern and is the slowest-changing
+# one, with the "256-byte" result being 87% of the "8-KB" one. This is
+# because the overhead in CTR mode is the most computationally
+# intensive. Small-block CCM
+# decrypt is slower than encrypt, because first CTR and last CBC-MAC
+# iterations can't be interleaved.
+#
+# Results for 192- and 256-bit keys.
+#
+# EVP-free results were observed to scale perfectly with number of
+# rounds for larger block sizes, i.e. 192-bit result being 10/12 times
+# lower and 256-bit one - 10/14. Well, in CBC encrypt case differences
+# are a tad smaller, because the above mentioned penalty biases all
+# results by same constant value. In similar way function call
+# overhead affects small-block performance, as well as OFB and CFB
+# results. Differences are not large, most common coefficients are
+# 10/11.7 and 10/13.4 (as opposed to 10/12.0 and 10/14.0), but one
+# observes even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
+
+# January 2011
+#
+# While the Westmere processor features a 6-cycle latency for aes[enc|dec]
+# instructions, which can be scheduled every second cycle, Sandy
+# Bridge spends 8 cycles per instruction, but it can schedule them
+# every cycle. This means that code targeting Westmere would perform
+# suboptimally on Sandy Bridge. Therefore this update.
+#
+# In addition, non-parallelizable CBC encrypt (as well as CCM) is
+# optimized. Relative improvement might appear modest, 8% on Westmere,
+# but in absolute terms it's 3.77 cycles per byte encrypted with
+# 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
+# should be compared to asymptotic limits of 3.75 for Westmere and
+# 5.00 for Sandy Bridge. Actually, the fact that they get this close
+# to asymptotic limits is quite amazing. Indeed, the limit is
+# calculated as latency times number of rounds, 10 for 128-bit key,
+# and divided by 16, the number of bytes in block, or in other words
+# it accounts *solely* for aesenc instructions. But there are extra
+# instructions, and numbers so close to the asymptotic limits mean
+# that it's as if it takes as little as *one* additional cycle to
+# execute all of them. How is it possible? It is possible thanks to
+# out-of-order execution logic, which manages to overlap post-
+# processing of previous block, things like saving the output, with
+# actual encryption of current block, as well as pre-processing of
+# current block, things like fetching input and xor-ing it with
+# 0-round element of the key schedule, with actual encryption of
+# previous block. Keep this in mind...
+#
+# For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
+# performance is achieved by interleaving instructions working on
+# independent blocks. In which case asymptotic limit for such modes
+# can be obtained by dividing above mentioned numbers by AES
+# instructions' interleave factor. Westmere can execute at most 3
+# instructions at a time, meaning that optimal interleave factor is 3,
+# and that's where the "magic" number of 1.25 comes from. "Optimal
+# interleave factor" means that a further increase of the interleave factor
+# does not improve performance. The formula has proven to reflect reality
+# pretty well on Westmere... Sandy Bridge on the other hand can
+# execute up to 8 AES instructions at a time, so how does varying
+# interleave factor affect the performance? Here is table for ECB
+# (numbers are cycles per byte processed with 128-bit key):
+#
+# instruction interleave factor 3x 6x 8x
+# theoretical asymptotic limit 1.67 0.83 0.625
+# measured performance for 8KB block 1.05 0.86 0.84
+#
+# "as if" interleave factor 4.7x 5.8x 6.0x
+#
+# Further data for other parallelizable modes:
+#
+# CBC decrypt 1.16 0.93 0.93
+# CTR 1.14 0.91 n/a
+#
+# Well, given 3x column it's probably inappropriate to call the limit
+# asymptotic, if it can be surpassed, isn't it? What happens there?
+# Rewind to CBC paragraph for the answer. Yes, out-of-order execution
+# magic is responsible for this. The processor overlaps not only the
+# additional instructions with AES ones, but even AES instructions
+# processing adjacent triplets of independent blocks. In the 6x case the
+# additional instructions still claim a disproportionately small amount
+# of additional cycles, but in the 8x case the number of instructions must
+# be a tad too high for the out-of-order logic to cope with, and the AES
+# unit remains underutilized... As you can see, 8x interleave is hardly
+# justifiable, so there is no need to feel bad that 32-bit aesni-x86.pl
+# utilizes 6x interleave because of limited register bank capacity.
+#
+# Higher interleave factors do have a negative impact on Westmere
+# performance. While for ECB mode it's a negligible ~1.5%, other
+# parallelizable modes perform ~5% worse, which is outweighed by the
+# ~25% improvement on Sandy Bridge. To balance the regression on
+# Westmere, CTR mode was implemented with a 6x aesenc interleave factor.
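+
+# All limits quoted above follow from one formula, latency*rounds/(16*k),
+# where k is the interleave factor and k=1 is the serial case. A minimal
+# Perl sketch, left uncalled since STDOUT is piped to the xlate script:
+sub _cpb_limit_model {
+	my ($latency,$rounds,$k) = @_;		# cycles, rounds, interleave
+	return $latency*$rounds/(16*$k);	# cycles per byte
+}
+# _cpb_limit_model(6,10,1)=3.75 and _cpb_limit_model(6,10,3)=1.25 match
+# the Westmere figures above; _cpb_limit_model(8,10,1)=5.00 and
+# _cpb_limit_model(8,10,8)=0.625 match the Sandy Bridge ones.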
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.33 cycles processing
+# one byte out of 8KB with 128-bit key, Sandy Bridge - 0.97. Just like
+# in CTR mode AES instruction interleave factor was chosen to be 6x.
+
+$PREFIX="aesni"; # if $PREFIX is set to "AES", the script
+ # generates drop-in replacement for
+ # crypto/aes/asm/aes-x86_64.pl:-)
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
+@_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+
+$code=".text\n";
+
+$rounds="%eax"; # input to and changed by aesni_[en|de]cryptN !!!
+# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
+$inp="%rdi";
+$out="%rsi";
+$len="%rdx";
+$key="%rcx"; # input to and changed by aesni_[en|de]cryptN !!!
+$ivp="%r8"; # cbc, ctr, ...
+
+$rnds_="%r10d"; # backup copy for $rounds
+$key_="%r11"; # backup copy for $key
+
+# %xmm register layout
+$rndkey0="%xmm0"; $rndkey1="%xmm1";
+$inout0="%xmm2"; $inout1="%xmm3";
+$inout2="%xmm4"; $inout3="%xmm5";
+$inout4="%xmm6"; $inout5="%xmm7";
+$inout6="%xmm8"; $inout7="%xmm9";
+
+$in2="%xmm6"; $in1="%xmm7"; # used in CBC decrypt, CTR, ...
+$in0="%xmm8"; $iv="%xmm9";
+
+# Inline version of internal aesni_[en|de]crypt1.
+#
+# Why a folded loop? Because aes[enc|dec] is slow enough to accommodate
+# the cycles which take care of the loop variables...
+{ my $sn;
+sub aesni_generate1 {
+my ($p,$key,$rounds,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
+++$sn;
+$code.=<<___;
+ $movkey ($key),$rndkey0
+ $movkey 16($key),$rndkey1
+___
+$code.=<<___ if (defined($ivec));
+ xorps $rndkey0,$ivec
+ lea 32($key),$key
+ xorps $ivec,$inout
+___
+$code.=<<___ if (!defined($ivec));
+ lea 32($key),$key
+ xorps $rndkey0,$inout
+___
+$code.=<<___;
+.Loop_${p}1_$sn:
+ aes${p} $rndkey1,$inout
+ dec $rounds
+ $movkey ($key),$rndkey1
+ lea 16($key),$key
+ jnz .Loop_${p}1_$sn # loop body is 16 bytes
+ aes${p}last $rndkey1,$inout
+___
+}}
+# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
+#
+{ my ($inp,$out,$key) = @_4args;
+
+$code.=<<___;
+.globl ${PREFIX}_encrypt
+.type ${PREFIX}_encrypt,\@abi-omnipotent
+.align 16
+${PREFIX}_encrypt:
+ movups ($inp),$inout0 # load input
+ mov 240($key),$rounds # key->rounds
+___
+ &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+ movups $inout0,($out) # output
+ ret
+.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl ${PREFIX}_decrypt
+.type ${PREFIX}_decrypt,\@abi-omnipotent
+.align 16
+${PREFIX}_decrypt:
+ movups ($inp),$inout0 # load input
+ mov 240($key),$rounds # key->rounds
+___
+ &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+ movups $inout0,($out) # output
+ ret
+.size ${PREFIX}_decrypt, .-${PREFIX}_decrypt
+___
+}
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even though
+# aes[enc|dec] latency was originally 6, it could be scheduled only
+# every *2nd* cycle. Thus 3x interleave was the one providing optimal
+# utilization, i.e. when the subroutine's throughput is virtually the
+# same as that of the non-interleaved subroutine [for up to 3 input
+# blocks]. This is why it makes no sense to implement a 2x subroutine.
+# aes[enc|dec] latency in the next processor generation is 8, but the
+# instructions can be scheduled every cycle. Optimal interleave for the
+# new processor is therefore 8x...
+sub aesni_generate3 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-2] is cipher/clear text...
+$code.=<<___;
+.type _aesni_${dir}rypt3,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt3:
+ $movkey ($key),$rndkey0
+ shr \$1,$rounds
+ $movkey 16($key),$rndkey1
+ lea 32($key),$key
+ xorps $rndkey0,$inout0
+ xorps $rndkey0,$inout1
+ xorps $rndkey0,$inout2
+ $movkey ($key),$rndkey0
+
+.L${dir}_loop3:
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ dec $rounds
+ aes${dir} $rndkey1,$inout2
+ $movkey 16($key),$rndkey1
+ aes${dir} $rndkey0,$inout0
+ aes${dir} $rndkey0,$inout1
+ lea 32($key),$key
+ aes${dir} $rndkey0,$inout2
+ $movkey ($key),$rndkey0
+ jnz .L${dir}_loop3
+
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ aes${dir} $rndkey1,$inout2
+ aes${dir}last $rndkey0,$inout0
+ aes${dir}last $rndkey0,$inout1
+ aes${dir}last $rndkey0,$inout2
+ ret
+.size _aesni_${dir}rypt3,.-_aesni_${dir}rypt3
+___
+}
+# 4x interleave is implemented to improve small-block performance, most
+# notably [and naturally] the 4-block case by ~30%. One can argue that
+# 5x should have been implemented as well, but the improvement would be
+# <20%, so it's not worth it...
+sub aesni_generate4 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-3] is cipher/clear text...
+$code.=<<___;
+.type _aesni_${dir}rypt4,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt4:
+ $movkey ($key),$rndkey0
+ shr \$1,$rounds
+ $movkey 16($key),$rndkey1
+ lea 32($key),$key
+ xorps $rndkey0,$inout0
+ xorps $rndkey0,$inout1
+ xorps $rndkey0,$inout2
+ xorps $rndkey0,$inout3
+ $movkey ($key),$rndkey0
+
+.L${dir}_loop4:
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ dec $rounds
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ $movkey 16($key),$rndkey1
+ aes${dir} $rndkey0,$inout0
+ aes${dir} $rndkey0,$inout1
+ lea 32($key),$key
+ aes${dir} $rndkey0,$inout2
+ aes${dir} $rndkey0,$inout3
+ $movkey ($key),$rndkey0
+ jnz .L${dir}_loop4
+
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ aes${dir}last $rndkey0,$inout0
+ aes${dir}last $rndkey0,$inout1
+ aes${dir}last $rndkey0,$inout2
+ aes${dir}last $rndkey0,$inout3
+ ret
+.size _aesni_${dir}rypt4,.-_aesni_${dir}rypt4
+___
+}
+sub aesni_generate6 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-5] is cipher/clear text...
+$code.=<<___;
+.type _aesni_${dir}rypt6,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt6:
+ $movkey ($key),$rndkey0
+ shr \$1,$rounds
+ $movkey 16($key),$rndkey1
+ lea 32($key),$key
+ xorps $rndkey0,$inout0
+ pxor $rndkey0,$inout1
+ aes${dir} $rndkey1,$inout0
+ pxor $rndkey0,$inout2
+ aes${dir} $rndkey1,$inout1
+ pxor $rndkey0,$inout3
+ aes${dir} $rndkey1,$inout2
+ pxor $rndkey0,$inout4
+ aes${dir} $rndkey1,$inout3
+ pxor $rndkey0,$inout5
+ dec $rounds
+ aes${dir} $rndkey1,$inout4
+ $movkey ($key),$rndkey0
+ aes${dir} $rndkey1,$inout5
+ jmp .L${dir}_loop6_enter
+.align 16
+.L${dir}_loop6:
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ dec $rounds
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ aes${dir} $rndkey1,$inout4
+ aes${dir} $rndkey1,$inout5
+.L${dir}_loop6_enter: # happens to be 16-byte aligned
+ $movkey 16($key),$rndkey1
+ aes${dir} $rndkey0,$inout0
+ aes${dir} $rndkey0,$inout1
+ lea 32($key),$key
+ aes${dir} $rndkey0,$inout2
+ aes${dir} $rndkey0,$inout3
+ aes${dir} $rndkey0,$inout4
+ aes${dir} $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ jnz .L${dir}_loop6
+
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ aes${dir} $rndkey1,$inout4
+ aes${dir} $rndkey1,$inout5
+ aes${dir}last $rndkey0,$inout0
+ aes${dir}last $rndkey0,$inout1
+ aes${dir}last $rndkey0,$inout2
+ aes${dir}last $rndkey0,$inout3
+ aes${dir}last $rndkey0,$inout4
+ aes${dir}last $rndkey0,$inout5
+ ret
+.size _aesni_${dir}rypt6,.-_aesni_${dir}rypt6
+___
+}
+sub aesni_generate8 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-7] is cipher/clear text...
+$code.=<<___;
+.type _aesni_${dir}rypt8,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt8:
+ $movkey ($key),$rndkey0
+ shr \$1,$rounds
+ $movkey 16($key),$rndkey1
+ lea 32($key),$key
+ xorps $rndkey0,$inout0
+ xorps $rndkey0,$inout1
+ aes${dir} $rndkey1,$inout0
+ pxor $rndkey0,$inout2
+ aes${dir} $rndkey1,$inout1
+ pxor $rndkey0,$inout3
+ aes${dir} $rndkey1,$inout2
+ pxor $rndkey0,$inout4
+ aes${dir} $rndkey1,$inout3
+ pxor $rndkey0,$inout5
+ dec $rounds
+ aes${dir} $rndkey1,$inout4
+ pxor $rndkey0,$inout6
+ aes${dir} $rndkey1,$inout5
+ pxor $rndkey0,$inout7
+ $movkey ($key),$rndkey0
+ aes${dir} $rndkey1,$inout6
+ aes${dir} $rndkey1,$inout7
+ $movkey 16($key),$rndkey1
+ jmp .L${dir}_loop8_enter
+.align 16
+.L${dir}_loop8:
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ dec $rounds
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ aes${dir} $rndkey1,$inout4
+ aes${dir} $rndkey1,$inout5
+ aes${dir} $rndkey1,$inout6
+ aes${dir} $rndkey1,$inout7
+ $movkey 16($key),$rndkey1
+.L${dir}_loop8_enter: # happens to be 16-byte aligned
+ aes${dir} $rndkey0,$inout0
+ aes${dir} $rndkey0,$inout1
+ lea 32($key),$key
+ aes${dir} $rndkey0,$inout2
+ aes${dir} $rndkey0,$inout3
+ aes${dir} $rndkey0,$inout4
+ aes${dir} $rndkey0,$inout5
+ aes${dir} $rndkey0,$inout6
+ aes${dir} $rndkey0,$inout7
+ $movkey ($key),$rndkey0
+ jnz .L${dir}_loop8
+
+ aes${dir} $rndkey1,$inout0
+ aes${dir} $rndkey1,$inout1
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ aes${dir} $rndkey1,$inout4
+ aes${dir} $rndkey1,$inout5
+ aes${dir} $rndkey1,$inout6
+ aes${dir} $rndkey1,$inout7
+ aes${dir}last $rndkey0,$inout0
+ aes${dir}last $rndkey0,$inout1
+ aes${dir}last $rndkey0,$inout2
+ aes${dir}last $rndkey0,$inout3
+ aes${dir}last $rndkey0,$inout4
+ aes${dir}last $rndkey0,$inout5
+ aes${dir}last $rndkey0,$inout6
+ aes${dir}last $rndkey0,$inout7
+ ret
+.size _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
+___
+}
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+&aesni_generate8("enc") if ($PREFIX eq "aesni");
+&aesni_generate8("dec");
+
+if ($PREFIX eq "aesni") {
+########################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+# size_t length, const AES_KEY *key,
+# int enc);
+$code.=<<___;
+.globl aesni_ecb_encrypt
+.type aesni_ecb_encrypt,\@function,5
+.align 16
+aesni_ecb_encrypt:
+ and \$-16,$len
+ jz .Lecb_ret
+
+ mov 240($key),$rounds # key->rounds
+ $movkey ($key),$rndkey0
+ mov $key,$key_ # backup $key
+ mov $rounds,$rnds_ # backup $rounds
+ test %r8d,%r8d # 5th argument
+ jz .Lecb_decrypt
+#--------------------------- ECB ENCRYPT ------------------------------#
+ cmp \$0x80,$len
+ jb .Lecb_enc_tail
+
+ movdqu ($inp),$inout0
+ movdqu 0x10($inp),$inout1
+ movdqu 0x20($inp),$inout2
+ movdqu 0x30($inp),$inout3
+ movdqu 0x40($inp),$inout4
+ movdqu 0x50($inp),$inout5
+ movdqu 0x60($inp),$inout6
+ movdqu 0x70($inp),$inout7
+ lea 0x80($inp),$inp
+ sub \$0x80,$len
+ jmp .Lecb_enc_loop8_enter
+.align 16
+.Lecb_enc_loop8:
+ movups $inout0,($out)
+ mov $key_,$key # restore $key
+ movdqu ($inp),$inout0
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout1,0x10($out)
+ movdqu 0x10($inp),$inout1
+ movups $inout2,0x20($out)
+ movdqu 0x20($inp),$inout2
+ movups $inout3,0x30($out)
+ movdqu 0x30($inp),$inout3
+ movups $inout4,0x40($out)
+ movdqu 0x40($inp),$inout4
+ movups $inout5,0x50($out)
+ movdqu 0x50($inp),$inout5
+ movups $inout6,0x60($out)
+ movdqu 0x60($inp),$inout6
+ movups $inout7,0x70($out)
+ lea 0x80($out),$out
+ movdqu 0x70($inp),$inout7
+ lea 0x80($inp),$inp
+.Lecb_enc_loop8_enter:
+
+ call _aesni_encrypt8
+
+ sub \$0x80,$len
+ jnc .Lecb_enc_loop8
+
+ movups $inout0,($out)
+ mov $key_,$key # restore $key
+ movups $inout1,0x10($out)
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+ movups $inout6,0x60($out)
+ movups $inout7,0x70($out)
+ lea 0x80($out),$out
+ add \$0x80,$len
+ jz .Lecb_ret
+
+.Lecb_enc_tail:
+ movups ($inp),$inout0
+ cmp \$0x20,$len
+ jb .Lecb_enc_one
+ movups 0x10($inp),$inout1
+ je .Lecb_enc_two
+ movups 0x20($inp),$inout2
+ cmp \$0x40,$len
+ jb .Lecb_enc_three
+ movups 0x30($inp),$inout3
+ je .Lecb_enc_four
+ movups 0x40($inp),$inout4
+ cmp \$0x60,$len
+ jb .Lecb_enc_five
+ movups 0x50($inp),$inout5
+ je .Lecb_enc_six
+ movdqu 0x60($inp),$inout6
+ call _aesni_encrypt8
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+ movups $inout6,0x60($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_enc_one:
+___
+ &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+ movups $inout0,($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_enc_two:
+ xorps $inout2,$inout2
+ call _aesni_encrypt3
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_enc_three:
+ call _aesni_encrypt3
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_enc_four:
+ call _aesni_encrypt4
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_enc_five:
+ xorps $inout5,$inout5
+ call _aesni_encrypt6
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_enc_six:
+ call _aesni_encrypt6
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+ jmp .Lecb_ret
+#--------------------------- ECB DECRYPT ------------------------------#
+.align 16
+.Lecb_decrypt:
+ cmp \$0x80,$len
+ jb .Lecb_dec_tail
+
+ movdqu ($inp),$inout0
+ movdqu 0x10($inp),$inout1
+ movdqu 0x20($inp),$inout2
+ movdqu 0x30($inp),$inout3
+ movdqu 0x40($inp),$inout4
+ movdqu 0x50($inp),$inout5
+ movdqu 0x60($inp),$inout6
+ movdqu 0x70($inp),$inout7
+ lea 0x80($inp),$inp
+ sub \$0x80,$len
+ jmp .Lecb_dec_loop8_enter
+.align 16
+.Lecb_dec_loop8:
+ movups $inout0,($out)
+ mov $key_,$key # restore $key
+ movdqu ($inp),$inout0
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout1,0x10($out)
+ movdqu 0x10($inp),$inout1
+ movups $inout2,0x20($out)
+ movdqu 0x20($inp),$inout2
+ movups $inout3,0x30($out)
+ movdqu 0x30($inp),$inout3
+ movups $inout4,0x40($out)
+ movdqu 0x40($inp),$inout4
+ movups $inout5,0x50($out)
+ movdqu 0x50($inp),$inout5
+ movups $inout6,0x60($out)
+ movdqu 0x60($inp),$inout6
+ movups $inout7,0x70($out)
+ lea 0x80($out),$out
+ movdqu 0x70($inp),$inout7
+ lea 0x80($inp),$inp
+.Lecb_dec_loop8_enter:
+
+ call _aesni_decrypt8
+
+ $movkey ($key_),$rndkey0
+ sub \$0x80,$len
+ jnc .Lecb_dec_loop8
+
+ movups $inout0,($out)
+ mov $key_,$key # restore $key
+ movups $inout1,0x10($out)
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+ movups $inout6,0x60($out)
+ movups $inout7,0x70($out)
+ lea 0x80($out),$out
+ add \$0x80,$len
+ jz .Lecb_ret
+
+.Lecb_dec_tail:
+ movups ($inp),$inout0
+ cmp \$0x20,$len
+ jb .Lecb_dec_one
+ movups 0x10($inp),$inout1
+ je .Lecb_dec_two
+ movups 0x20($inp),$inout2
+ cmp \$0x40,$len
+ jb .Lecb_dec_three
+ movups 0x30($inp),$inout3
+ je .Lecb_dec_four
+ movups 0x40($inp),$inout4
+ cmp \$0x60,$len
+ jb .Lecb_dec_five
+ movups 0x50($inp),$inout5
+ je .Lecb_dec_six
+ movups 0x60($inp),$inout6
+ $movkey ($key),$rndkey0
+ call _aesni_decrypt8
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+ movups $inout6,0x60($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_dec_one:
+___
+ &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+ movups $inout0,($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_dec_two:
+ xorps $inout2,$inout2
+ call _aesni_decrypt3
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_dec_three:
+ call _aesni_decrypt3
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_dec_four:
+ call _aesni_decrypt4
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_dec_five:
+ xorps $inout5,$inout5
+ call _aesni_decrypt6
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ jmp .Lecb_ret
+.align 16
+.Lecb_dec_six:
+ call _aesni_decrypt6
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+
+.Lecb_ret:
+ ret
+.size aesni_ecb_encrypt,.-aesni_ecb_encrypt
+___
+
+{
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+# size_t blocks, const AES_KEY *key,
+# const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on 64-bit counter and
+# does not update *ivec! Nor does it finalize CMAC value
+# (see engine/eng_aesni.c for details)
+#
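+# One step of the interleaved computation below can be modelled in plain
+# Perl; aes() stands for a hypothetical single-block encrypt under the
+# key, and ^ is Perl's bitwise string XOR. CBC-MAC always runs over
+# plaintext, hence the enc/dec asymmetry. A sketch only, never called:
+#
+sub _ccm64_step_model {
+	my ($dir,$in,$ctr,$cmac) = @_;	# $ctr carries the 64-bit counter
+	my $out = $in ^ aes($ctr);	# CTR leg
+	$cmac = aes($cmac ^ ($dir eq "enc" ? $in : $out));	# CBC-MAC leg
+	return ($out,$cmac);
+}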
+{
+my $cmac="%r9"; # 6th argument
+
+my $increment="%xmm6";
+my $bswap_mask="%xmm7";
+
+$code.=<<___;
+.globl aesni_ccm64_encrypt_blocks
+.type aesni_ccm64_encrypt_blocks,\@function,6
+.align 16
+aesni_ccm64_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+ lea -0x58(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+.Lccm64_enc_body:
+___
+$code.=<<___;
+ mov 240($key),$rounds # key->rounds
+ movdqu ($ivp),$iv
+ movdqa .Lincrement64(%rip),$increment
+ movdqa .Lbswap_mask(%rip),$bswap_mask
+
+ shr \$1,$rounds
+ lea 0($key),$key_
+ movdqu ($cmac),$inout1
+ movdqa $iv,$inout0
+ mov $rounds,$rnds_
+ pshufb $bswap_mask,$iv
+ jmp .Lccm64_enc_outer
+.align 16
+.Lccm64_enc_outer:
+ $movkey ($key_),$rndkey0
+ mov $rnds_,$rounds
+ movups ($inp),$in0 # load inp
+
+ xorps $rndkey0,$inout0 # counter
+ $movkey 16($key_),$rndkey1
+ xorps $in0,$rndkey0
+ lea 32($key_),$key
+ xorps $rndkey0,$inout1 # cmac^=inp
+ $movkey ($key),$rndkey0
+
+.Lccm64_enc2_loop:
+ aesenc $rndkey1,$inout0
+ dec $rounds
+ aesenc $rndkey1,$inout1
+ $movkey 16($key),$rndkey1
+ aesenc $rndkey0,$inout0
+ lea 32($key),$key
+ aesenc $rndkey0,$inout1
+ $movkey 0($key),$rndkey0
+ jnz .Lccm64_enc2_loop
+ aesenc $rndkey1,$inout0
+ aesenc $rndkey1,$inout1
+ paddq $increment,$iv
+ aesenclast $rndkey0,$inout0
+ aesenclast $rndkey0,$inout1
+
+ dec $len
+ lea 16($inp),$inp
+ xorps $inout0,$in0 # inp ^= E(iv)
+ movdqa $iv,$inout0
+ movups $in0,($out) # save output
+ lea 16($out),$out
+ pshufb $bswap_mask,$inout0
+ jnz .Lccm64_enc_outer
+
+ movups $inout1,($cmac)
+___
+$code.=<<___ if ($win64);
+ movaps (%rsp),%xmm6
+ movaps 0x10(%rsp),%xmm7
+ movaps 0x20(%rsp),%xmm8
+ movaps 0x30(%rsp),%xmm9
+ lea 0x58(%rsp),%rsp
+.Lccm64_enc_ret:
+___
+$code.=<<___;
+ ret
+.size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
+___
+######################################################################
+$code.=<<___;
+.globl aesni_ccm64_decrypt_blocks
+.type aesni_ccm64_decrypt_blocks,\@function,6
+.align 16
+aesni_ccm64_decrypt_blocks:
+___
+$code.=<<___ if ($win64);
+ lea -0x58(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+.Lccm64_dec_body:
+___
+$code.=<<___;
+ mov 240($key),$rounds # key->rounds
+ movups ($ivp),$iv
+ movdqu ($cmac),$inout1
+ movdqa .Lincrement64(%rip),$increment
+ movdqa .Lbswap_mask(%rip),$bswap_mask
+
+ movaps $iv,$inout0
+ mov $rounds,$rnds_
+ mov $key,$key_
+ pshufb $bswap_mask,$iv
+___
+ &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+ movups ($inp),$in0 # load inp
+ paddq $increment,$iv
+ lea 16($inp),$inp
+ jmp .Lccm64_dec_outer
+.align 16
+.Lccm64_dec_outer:
+ xorps $inout0,$in0 # inp ^= E(iv)
+ movdqa $iv,$inout0
+ mov $rnds_,$rounds
+ movups $in0,($out) # save output
+ lea 16($out),$out
+ pshufb $bswap_mask,$inout0
+
+ sub \$1,$len
+ jz .Lccm64_dec_break
+
+ $movkey ($key_),$rndkey0
+ shr \$1,$rounds
+ $movkey 16($key_),$rndkey1
+ xorps $rndkey0,$in0
+ lea 32($key_),$key
+ xorps $rndkey0,$inout0
+ xorps $in0,$inout1 # cmac^=out
+ $movkey ($key),$rndkey0
+
+.Lccm64_dec2_loop:
+ aesenc $rndkey1,$inout0
+ dec $rounds
+ aesenc $rndkey1,$inout1
+ $movkey 16($key),$rndkey1
+ aesenc $rndkey0,$inout0
+ lea 32($key),$key
+ aesenc $rndkey0,$inout1
+ $movkey 0($key),$rndkey0
+ jnz .Lccm64_dec2_loop
+ movups ($inp),$in0 # load inp
+ paddq $increment,$iv
+ aesenc $rndkey1,$inout0
+ aesenc $rndkey1,$inout1
+ lea 16($inp),$inp
+ aesenclast $rndkey0,$inout0
+ aesenclast $rndkey0,$inout1
+ jmp .Lccm64_dec_outer
+
+.align 16
+.Lccm64_dec_break:
+ #xorps $in0,$inout1 # cmac^=out
+___
+ &aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
+$code.=<<___;
+ movups $inout1,($cmac)
+___
+$code.=<<___ if ($win64);
+ movaps (%rsp),%xmm6
+ movaps 0x10(%rsp),%xmm7
+ movaps 0x20(%rsp),%xmm8
+ movaps 0x30(%rsp),%xmm9
+ lea 0x58(%rsp),%rsp
+.Lccm64_dec_ret:
+___
+$code.=<<___;
+ ret
+.size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
+___
+}
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+# size_t blocks, const AES_KEY *key,
+# const char *ivec);
+#
+# Handles only complete blocks, operates on 32-bit counter and
+# does not update *ivec! (see engine/eng_aesni.c for details)
+#
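+# The counter handling below amounts to this plain-Perl model: the last
+# 4 bytes of *ivec act as a big-endian counter wrapping modulo 2^32, the
+# first 12 bytes are carried over verbatim, and *ivec is never written
+# back. A sketch only, never called:
+#
+sub _ctr32_block_iv_model {
+	my ($ivec,$i) = @_;				# 16-byte IV, block index
+	my $ctr = unpack("N",substr($ivec,12,4));	# 32-bit big-endian counter
+	return substr($ivec,0,12).pack("N",($ctr+$i)&0xffffffff);
+}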
+{
+my $reserved = $win64?0:-0x28;
+my ($in0,$in1,$in2,$in3)=map("%xmm$_",(8..11));
+my ($iv0,$iv1,$ivec)=("%xmm12","%xmm13","%xmm14");
+my $bswap_mask="%xmm15";
+
+$code.=<<___;
+.globl aesni_ctr32_encrypt_blocks
+.type aesni_ctr32_encrypt_blocks,\@function,5
+.align 16
+aesni_ctr32_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+ lea -0xc8(%rsp),%rsp
+ movaps %xmm6,0x20(%rsp)
+ movaps %xmm7,0x30(%rsp)
+ movaps %xmm8,0x40(%rsp)
+ movaps %xmm9,0x50(%rsp)
+ movaps %xmm10,0x60(%rsp)
+ movaps %xmm11,0x70(%rsp)
+ movaps %xmm12,0x80(%rsp)
+ movaps %xmm13,0x90(%rsp)
+ movaps %xmm14,0xa0(%rsp)
+ movaps %xmm15,0xb0(%rsp)
+.Lctr32_body:
+___
+$code.=<<___;
+ cmp \$1,$len
+ je .Lctr32_one_shortcut
+
+ movdqu ($ivp),$ivec
+ movdqa .Lbswap_mask(%rip),$bswap_mask
+ xor $rounds,$rounds
+ pextrd \$3,$ivec,$rnds_ # pull 32-bit counter
+ pinsrd \$3,$rounds,$ivec # wipe 32-bit counter
+
+ mov 240($key),$rounds # key->rounds
+ bswap $rnds_
+ pxor $iv0,$iv0 # vector of 3 32-bit counters
+ pxor $iv1,$iv1 # vector of 3 32-bit counters
+ pinsrd \$0,$rnds_,$iv0
+ lea 3($rnds_),$key_
+ pinsrd \$0,$key_,$iv1
+ inc $rnds_
+ pinsrd \$1,$rnds_,$iv0
+ inc $key_
+ pinsrd \$1,$key_,$iv1
+ inc $rnds_
+ pinsrd \$2,$rnds_,$iv0
+ inc $key_
+ pinsrd \$2,$key_,$iv1
+ movdqa $iv0,$reserved(%rsp)
+ pshufb $bswap_mask,$iv0
+ movdqa $iv1,`$reserved+0x10`(%rsp)
+ pshufb $bswap_mask,$iv1
+
+ pshufd \$`3<<6`,$iv0,$inout0 # place counter to upper dword
+ pshufd \$`2<<6`,$iv0,$inout1
+ pshufd \$`1<<6`,$iv0,$inout2
+ cmp \$6,$len
+ jb .Lctr32_tail
+ shr \$1,$rounds
+ mov $key,$key_ # backup $key
+ mov $rounds,$rnds_ # backup $rounds
+ sub \$6,$len
+ jmp .Lctr32_loop6
+
+.align 16
+.Lctr32_loop6:
+ pshufd \$`3<<6`,$iv1,$inout3
+ por $ivec,$inout0 # merge counter-less ivec
+ $movkey ($key_),$rndkey0
+ pshufd \$`2<<6`,$iv1,$inout4
+ por $ivec,$inout1
+ $movkey 16($key_),$rndkey1
+ pshufd \$`1<<6`,$iv1,$inout5
+ por $ivec,$inout2
+ por $ivec,$inout3
+ xorps $rndkey0,$inout0
+ por $ivec,$inout4
+ por $ivec,$inout5
+
+ # inline _aesni_encrypt6 and interleave last rounds
+ # with own code...
+
+ pxor $rndkey0,$inout1
+ aesenc $rndkey1,$inout0
+ lea 32($key_),$key
+ pxor $rndkey0,$inout2
+ aesenc $rndkey1,$inout1
+ movdqa .Lincrement32(%rip),$iv1
+ pxor $rndkey0,$inout3
+ aesenc $rndkey1,$inout2
+ movdqa $reserved(%rsp),$iv0
+ pxor $rndkey0,$inout4
+ aesenc $rndkey1,$inout3
+ pxor $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ dec $rounds
+ aesenc $rndkey1,$inout4
+ aesenc $rndkey1,$inout5
+ jmp .Lctr32_enc_loop6_enter
+.align 16
+.Lctr32_enc_loop6:
+ aesenc $rndkey1,$inout0
+ aesenc $rndkey1,$inout1
+ dec $rounds
+ aesenc $rndkey1,$inout2
+ aesenc $rndkey1,$inout3
+ aesenc $rndkey1,$inout4
+ aesenc $rndkey1,$inout5
+.Lctr32_enc_loop6_enter:
+ $movkey 16($key),$rndkey1
+ aesenc $rndkey0,$inout0
+ aesenc $rndkey0,$inout1
+ lea 32($key),$key
+ aesenc $rndkey0,$inout2
+ aesenc $rndkey0,$inout3
+ aesenc $rndkey0,$inout4
+ aesenc $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ jnz .Lctr32_enc_loop6
+
+ aesenc $rndkey1,$inout0
+ paddd $iv1,$iv0 # increment counter vector
+ aesenc $rndkey1,$inout1
+ paddd `$reserved+0x10`(%rsp),$iv1
+ aesenc $rndkey1,$inout2
+ movdqa $iv0,$reserved(%rsp) # save counter vector
+ aesenc $rndkey1,$inout3
+ movdqa $iv1,`$reserved+0x10`(%rsp)
+ aesenc $rndkey1,$inout4
+ pshufb $bswap_mask,$iv0 # byte swap
+ aesenc $rndkey1,$inout5
+ pshufb $bswap_mask,$iv1
+
+ aesenclast $rndkey0,$inout0
+ movups ($inp),$in0 # load input
+ aesenclast $rndkey0,$inout1
+ movups 0x10($inp),$in1
+ aesenclast $rndkey0,$inout2
+ movups 0x20($inp),$in2
+ aesenclast $rndkey0,$inout3
+ movups 0x30($inp),$in3
+ aesenclast $rndkey0,$inout4
+ movups 0x40($inp),$rndkey1
+ aesenclast $rndkey0,$inout5
+ movups 0x50($inp),$rndkey0
+ lea 0x60($inp),$inp
+
+ xorps $inout0,$in0 # xor
+ pshufd \$`3<<6`,$iv0,$inout0
+ xorps $inout1,$in1
+ pshufd \$`2<<6`,$iv0,$inout1
+ movups $in0,($out) # store output
+ xorps $inout2,$in2
+ pshufd \$`1<<6`,$iv0,$inout2
+ movups $in1,0x10($out)
+ xorps $inout3,$in3
+ movups $in2,0x20($out)
+ xorps $inout4,$rndkey1
+ movups $in3,0x30($out)
+ xorps $inout5,$rndkey0
+ movups $rndkey1,0x40($out)
+ movups $rndkey0,0x50($out)
+ lea 0x60($out),$out
+ mov $rnds_,$rounds
+ sub \$6,$len
+ jnc .Lctr32_loop6
+
+ add \$6,$len
+ jz .Lctr32_done
+ mov $key_,$key # restore $key
+ lea 1($rounds,$rounds),$rounds # restore original value
+
+.Lctr32_tail:
+ por $ivec,$inout0
+ movups ($inp),$in0
+ cmp \$2,$len
+ jb .Lctr32_one
+
+ por $ivec,$inout1
+ movups 0x10($inp),$in1
+ je .Lctr32_two
+
+ pshufd \$`3<<6`,$iv1,$inout3
+ por $ivec,$inout2
+ movups 0x20($inp),$in2
+ cmp \$4,$len
+ jb .Lctr32_three
+
+ pshufd \$`2<<6`,$iv1,$inout4
+ por $ivec,$inout3
+ movups 0x30($inp),$in3
+ je .Lctr32_four
+
+ por $ivec,$inout4
+ xorps $inout5,$inout5
+
+ call _aesni_encrypt6
+
+ movups 0x40($inp),$rndkey1
+ xorps $inout0,$in0
+ xorps $inout1,$in1
+ movups $in0,($out)
+ xorps $inout2,$in2
+ movups $in1,0x10($out)
+ xorps $inout3,$in3
+ movups $in2,0x20($out)
+ xorps $inout4,$rndkey1
+ movups $in3,0x30($out)
+ movups $rndkey1,0x40($out)
+ jmp .Lctr32_done
+
+.align 16
+.Lctr32_one_shortcut:
+ movups ($ivp),$inout0
+ movups ($inp),$in0
+ mov 240($key),$rounds # key->rounds
+.Lctr32_one:
+___
+ &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+ xorps $inout0,$in0
+ movups $in0,($out)
+ jmp .Lctr32_done
+
+.align 16
+.Lctr32_two:
+ xorps $inout2,$inout2
+ call _aesni_encrypt3
+ xorps $inout0,$in0
+ xorps $inout1,$in1
+ movups $in0,($out)
+ movups $in1,0x10($out)
+ jmp .Lctr32_done
+
+.align 16
+.Lctr32_three:
+ call _aesni_encrypt3
+ xorps $inout0,$in0
+ xorps $inout1,$in1
+ movups $in0,($out)
+ xorps $inout2,$in2
+ movups $in1,0x10($out)
+ movups $in2,0x20($out)
+ jmp .Lctr32_done
+
+.align 16
+.Lctr32_four:
+ call _aesni_encrypt4
+ xorps $inout0,$in0
+ xorps $inout1,$in1
+ movups $in0,($out)
+ xorps $inout2,$in2
+ movups $in1,0x10($out)
+ xorps $inout3,$in3
+ movups $in2,0x20($out)
+ movups $in3,0x30($out)
+
+.Lctr32_done:
+___
+$code.=<<___ if ($win64);
+ movaps 0x20(%rsp),%xmm6
+ movaps 0x30(%rsp),%xmm7
+ movaps 0x40(%rsp),%xmm8
+ movaps 0x50(%rsp),%xmm9
+ movaps 0x60(%rsp),%xmm10
+ movaps 0x70(%rsp),%xmm11
+ movaps 0x80(%rsp),%xmm12
+ movaps 0x90(%rsp),%xmm13
+ movaps 0xa0(%rsp),%xmm14
+ movaps 0xb0(%rsp),%xmm15
+ lea 0xc8(%rsp),%rsp
+.Lctr32_ret:
+___
+$code.=<<___;
+ ret
+.size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
+___
+}
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#			const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
+{
+my @tweak=map("%xmm$_",(10..15));
+my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
+my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
+my $frame_size = 0x68 + ($win64?160:0);
+
+$code.=<<___;
+.globl aesni_xts_encrypt
+.type aesni_xts_encrypt,\@function,6
+.align 16
+aesni_xts_encrypt:
+ lea -$frame_size(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,0x60(%rsp)
+ movaps %xmm7,0x70(%rsp)
+ movaps %xmm8,0x80(%rsp)
+ movaps %xmm9,0x90(%rsp)
+ movaps %xmm10,0xa0(%rsp)
+ movaps %xmm11,0xb0(%rsp)
+ movaps %xmm12,0xc0(%rsp)
+ movaps %xmm13,0xd0(%rsp)
+ movaps %xmm14,0xe0(%rsp)
+ movaps %xmm15,0xf0(%rsp)
+.Lxts_enc_body:
+___
+$code.=<<___;
+ movups ($ivp),@tweak[5] # load clear-text tweak
+ mov 240(%r8),$rounds # key2->rounds
+ mov 240($key),$rnds_ # key1->rounds
+___
+ # generate the tweak
+ &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+$code.=<<___;
+ mov $key,$key_ # backup $key
+ mov $rnds_,$rounds # backup $rounds
+ mov $len,$len_ # backup $len
+ and \$-16,$len
+
+ movdqa .Lxts_magic(%rip),$twmask
+ pxor $twtmp,$twtmp
+ pcmpgtd @tweak[5],$twtmp # broadcast upper bits
+___
+ for ($i=0;$i<4;$i++) {
+ $code.=<<___;
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[$i]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ pand $twmask,$twres # isolate carry and residue
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ pxor $twres,@tweak[5]
+___
+ }
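+
+# The pshufd/pand/pxor sequence above is doubling in GF(2^128) with the
+# x^128+x^7+x^2+x+1 reduction polynomial, hence the 0x87 in .Lxts_magic.
+# A minimal plain-Perl model on two 64-bit halves, never called and
+# assuming a perl built with 64-bit integers:
+sub _xts_next_tweak_model {
+	my ($lo,$hi) = @_;		# little-endian 128-bit tweak
+	my $carry = ($hi>>63)&1;	# bit shifted out of x^127
+	$hi = (($hi<<1)&0xffffffffffffffff)|(($lo>>63)&1);
+	$lo = (($lo<<1)&0xffffffffffffffff)^($carry?0x87:0);
+	return ($lo,$hi);
+}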
+$code.=<<___;
+ sub \$16*6,$len
+ jc .Lxts_enc_short
+
+ shr \$1,$rounds
+ sub \$1,$rounds
+ mov $rounds,$rnds_
+ jmp .Lxts_enc_grandloop
+
+.align 16
+.Lxts_enc_grandloop:
+ pshufd \$0x13,$twtmp,$twres
+ movdqa @tweak[5],@tweak[4]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ movdqu `16*0`($inp),$inout0 # load input
+ pand $twmask,$twres # isolate carry and residue
+ movdqu `16*1`($inp),$inout1
+ pxor $twres,@tweak[5]
+
+ movdqu `16*2`($inp),$inout2
+ pxor @tweak[0],$inout0 # input^=tweak
+ movdqu `16*3`($inp),$inout3
+ pxor @tweak[1],$inout1
+ movdqu `16*4`($inp),$inout4
+ pxor @tweak[2],$inout2
+ movdqu `16*5`($inp),$inout5
+ lea `16*6`($inp),$inp
+ pxor @tweak[3],$inout3
+ $movkey ($key_),$rndkey0
+ pxor @tweak[4],$inout4
+ pxor @tweak[5],$inout5
+
+ # inline _aesni_encrypt6 and interleave first and last rounds
+ # with own code...
+ $movkey 16($key_),$rndkey1
+ pxor $rndkey0,$inout0
+ pxor $rndkey0,$inout1
+ movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks
+ aesenc $rndkey1,$inout0
+ lea 32($key_),$key
+ pxor $rndkey0,$inout2
+ movdqa @tweak[1],`16*1`(%rsp)
+ aesenc $rndkey1,$inout1
+ pxor $rndkey0,$inout3
+ movdqa @tweak[2],`16*2`(%rsp)
+ aesenc $rndkey1,$inout2
+ pxor $rndkey0,$inout4
+ movdqa @tweak[3],`16*3`(%rsp)
+ aesenc $rndkey1,$inout3
+ pxor $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ dec $rounds
+ movdqa @tweak[4],`16*4`(%rsp)
+ aesenc $rndkey1,$inout4
+ movdqa @tweak[5],`16*5`(%rsp)
+ aesenc $rndkey1,$inout5
+ pxor $twtmp,$twtmp
+ pcmpgtd @tweak[5],$twtmp
+ jmp .Lxts_enc_loop6_enter
+
+.align 16
+.Lxts_enc_loop6:
+ aesenc $rndkey1,$inout0
+ aesenc $rndkey1,$inout1
+ dec $rounds
+ aesenc $rndkey1,$inout2
+ aesenc $rndkey1,$inout3
+ aesenc $rndkey1,$inout4
+ aesenc $rndkey1,$inout5
+.Lxts_enc_loop6_enter:
+ $movkey 16($key),$rndkey1
+ aesenc $rndkey0,$inout0
+ aesenc $rndkey0,$inout1
+ lea 32($key),$key
+ aesenc $rndkey0,$inout2
+ aesenc $rndkey0,$inout3
+ aesenc $rndkey0,$inout4
+ aesenc $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ jnz .Lxts_enc_loop6
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesenc $rndkey1,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesenc $rndkey1,$inout1
+ pcmpgtd @tweak[5],$twtmp # broadcast upper bits
+ aesenc $rndkey1,$inout2
+ pxor $twres,@tweak[5]
+ aesenc $rndkey1,$inout3
+ aesenc $rndkey1,$inout4
+ aesenc $rndkey1,$inout5
+ $movkey 16($key),$rndkey1
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[0]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesenc $rndkey0,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesenc $rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ aesenc $rndkey0,$inout2
+ pxor $twres,@tweak[5]
+ aesenc $rndkey0,$inout3
+ aesenc $rndkey0,$inout4
+ aesenc $rndkey0,$inout5
+ $movkey 32($key),$rndkey0
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[1]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesenc $rndkey1,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesenc $rndkey1,$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ aesenc $rndkey1,$inout2
+ pxor $twres,@tweak[5]
+ aesenc $rndkey1,$inout3
+ aesenc $rndkey1,$inout4
+ aesenc $rndkey1,$inout5
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[2]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesenclast $rndkey0,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesenclast $rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ aesenclast $rndkey0,$inout2
+ pxor $twres,@tweak[5]
+ aesenclast $rndkey0,$inout3
+ aesenclast $rndkey0,$inout4
+ aesenclast $rndkey0,$inout5
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[3]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ xorps `16*0`(%rsp),$inout0 # output^=tweak
+ pand $twmask,$twres # isolate carry and residue
+ xorps `16*1`(%rsp),$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ pxor $twres,@tweak[5]
+
+ xorps `16*2`(%rsp),$inout2
+ movups $inout0,`16*0`($out) # write output
+ xorps `16*3`(%rsp),$inout3
+ movups $inout1,`16*1`($out)
+ xorps `16*4`(%rsp),$inout4
+ movups $inout2,`16*2`($out)
+ xorps `16*5`(%rsp),$inout5
+ movups $inout3,`16*3`($out)
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout4,`16*4`($out)
+ movups $inout5,`16*5`($out)
+ lea `16*6`($out),$out
+ sub \$16*6,$len
+ jnc .Lxts_enc_grandloop
+
+ lea 3($rounds,$rounds),$rounds # restore original value
+ mov $key_,$key # restore $key
+ mov $rounds,$rnds_ # backup $rounds
+
+.Lxts_enc_short:
+ add \$16*6,$len
+ jz .Lxts_enc_done
+
+ cmp \$0x20,$len
+ jb .Lxts_enc_one
+ je .Lxts_enc_two
+
+ cmp \$0x40,$len
+ jb .Lxts_enc_three
+ je .Lxts_enc_four
+
+ pshufd \$0x13,$twtmp,$twres
+ movdqa @tweak[5],@tweak[4]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ movdqu ($inp),$inout0
+ pand $twmask,$twres # isolate carry and residue
+ movdqu 16*1($inp),$inout1
+ pxor $twres,@tweak[5]
+
+ movdqu 16*2($inp),$inout2
+ pxor @tweak[0],$inout0
+ movdqu 16*3($inp),$inout3
+ pxor @tweak[1],$inout1
+ movdqu 16*4($inp),$inout4
+ lea 16*5($inp),$inp
+ pxor @tweak[2],$inout2
+ pxor @tweak[3],$inout3
+ pxor @tweak[4],$inout4
+
+ call _aesni_encrypt6
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[5],@tweak[0]
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+ movdqu $inout0,($out)
+ xorps @tweak[3],$inout3
+ movdqu $inout1,16*1($out)
+ xorps @tweak[4],$inout4
+ movdqu $inout2,16*2($out)
+ movdqu $inout3,16*3($out)
+ movdqu $inout4,16*4($out)
+ lea 16*5($out),$out
+ jmp .Lxts_enc_done
+
+.align 16
+.Lxts_enc_one:
+ movups ($inp),$inout0
+ lea 16*1($inp),$inp
+ xorps @tweak[0],$inout0
+___
+ &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+ xorps @tweak[0],$inout0
+ movdqa @tweak[1],@tweak[0]
+ movups $inout0,($out)
+ lea 16*1($out),$out
+ jmp .Lxts_enc_done
+
+.align 16
+.Lxts_enc_two:
+ movups ($inp),$inout0
+ movups 16($inp),$inout1
+ lea 32($inp),$inp
+ xorps @tweak[0],$inout0
+ xorps @tweak[1],$inout1
+
+ call _aesni_encrypt3
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[2],@tweak[0]
+ xorps @tweak[1],$inout1
+ movups $inout0,($out)
+ movups $inout1,16*1($out)
+ lea 16*2($out),$out
+ jmp .Lxts_enc_done
+
+.align 16
+.Lxts_enc_three:
+ movups ($inp),$inout0
+ movups 16*1($inp),$inout1
+ movups 16*2($inp),$inout2
+ lea 16*3($inp),$inp
+ xorps @tweak[0],$inout0
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+
+ call _aesni_encrypt3
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[3],@tweak[0]
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+ movups $inout0,($out)
+ movups $inout1,16*1($out)
+ movups $inout2,16*2($out)
+ lea 16*3($out),$out
+ jmp .Lxts_enc_done
+
+.align 16
+.Lxts_enc_four:
+ movups ($inp),$inout0
+ movups 16*1($inp),$inout1
+ movups 16*2($inp),$inout2
+ xorps @tweak[0],$inout0
+ movups 16*3($inp),$inout3
+ lea 16*4($inp),$inp
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+ xorps @tweak[3],$inout3
+
+ call _aesni_encrypt4
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[5],@tweak[0]
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+ movups $inout0,($out)
+ xorps @tweak[3],$inout3
+ movups $inout1,16*1($out)
+ movups $inout2,16*2($out)
+ movups $inout3,16*3($out)
+ lea 16*4($out),$out
+ jmp .Lxts_enc_done
+
+.align 16
+.Lxts_enc_done:
+ and \$15,$len_
+ jz .Lxts_enc_ret
+ mov $len_,$len
+
+.Lxts_enc_steal:
+ movzb ($inp),%eax # borrow $rounds ...
+ movzb -16($out),%ecx # ... and $key
+ lea 1($inp),$inp
+ mov %al,-16($out)
+ mov %cl,0($out)
+ lea 1($out),$out
+ sub \$1,$len
+ jnz .Lxts_enc_steal
+
+ sub $len_,$out # rewind $out
+ mov $key_,$key # restore $key
+ mov $rnds_,$rounds # restore $rounds
+
+ movups -16($out),$inout0
+ xorps @tweak[0],$inout0
+___
+ &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+ xorps @tweak[0],$inout0
+ movups $inout0,-16($out)
+
+.Lxts_enc_ret:
+___
+$code.=<<___ if ($win64);
+ movaps 0x60(%rsp),%xmm6
+ movaps 0x70(%rsp),%xmm7
+ movaps 0x80(%rsp),%xmm8
+ movaps 0x90(%rsp),%xmm9
+ movaps 0xa0(%rsp),%xmm10
+ movaps 0xb0(%rsp),%xmm11
+ movaps 0xc0(%rsp),%xmm12
+ movaps 0xd0(%rsp),%xmm13
+ movaps 0xe0(%rsp),%xmm14
+ movaps 0xf0(%rsp),%xmm15
+___
+$code.=<<___;
+ lea $frame_size(%rsp),%rsp
+.Lxts_enc_epilogue:
+ ret
+.size aesni_xts_encrypt,.-aesni_xts_encrypt
+___
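+
+# [Editorial sketch] .Lxts_enc_steal above implements ciphertext
+# stealing for a trailing partial block: the first len%16 bytes of the
+# last full ciphertext block are emitted as the final partial output,
+# and the tail plaintext bytes take their place before that block is
+# encrypted once more. Byte-level illustration (not called):
+sub xts_steal_sketch {
+	my ($last_ct,$tail)=@_;			# 16-byte block, 1..15-byte tail
+	my $n = length($tail);
+	my $partial_out = substr($last_ct,0,$n);	# final output bytes
+	my $reencrypt_me = $tail.substr($last_ct,$n);	# new last full block
+	return ($reencrypt_me,$partial_out);
+}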
+
+$code.=<<___;
+.globl aesni_xts_decrypt
+.type aesni_xts_decrypt,\@function,6
+.align 16
+aesni_xts_decrypt:
+ lea -$frame_size(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,0x60(%rsp)
+ movaps %xmm7,0x70(%rsp)
+ movaps %xmm8,0x80(%rsp)
+ movaps %xmm9,0x90(%rsp)
+ movaps %xmm10,0xa0(%rsp)
+ movaps %xmm11,0xb0(%rsp)
+ movaps %xmm12,0xc0(%rsp)
+ movaps %xmm13,0xd0(%rsp)
+ movaps %xmm14,0xe0(%rsp)
+ movaps %xmm15,0xf0(%rsp)
+.Lxts_dec_body:
+___
+$code.=<<___;
+ movups ($ivp),@tweak[5] # load clear-text tweak
+ mov 240($key2),$rounds # key2->rounds
+ mov 240($key),$rnds_ # key1->rounds
+___
+ # generate the tweak
+ &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+$code.=<<___;
+ xor %eax,%eax # if ($len%16) len-=16;
+ test \$15,$len
+ setnz %al
+ shl \$4,%rax
+ sub %rax,$len
+
+ mov $key,$key_ # backup $key
+ mov $rnds_,$rounds # backup $rounds
+ mov $len,$len_ # backup $len
+ and \$-16,$len
+
+ movdqa .Lxts_magic(%rip),$twmask
+ pxor $twtmp,$twtmp
+ pcmpgtd @tweak[5],$twtmp # broadcast upper bits
+___
+ for ($i=0;$i<4;$i++) {
+ $code.=<<___;
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[$i]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ pand $twmask,$twres # isolate carry and residue
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ pxor $twres,@tweak[5]
+___
+ }
+$code.=<<___;
+ sub \$16*6,$len
+ jc .Lxts_dec_short
+
+ shr \$1,$rounds
+ sub \$1,$rounds
+ mov $rounds,$rnds_
+ jmp .Lxts_dec_grandloop
+
+.align 16
+.Lxts_dec_grandloop:
+ pshufd \$0x13,$twtmp,$twres
+ movdqa @tweak[5],@tweak[4]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ movdqu `16*0`($inp),$inout0 # load input
+ pand $twmask,$twres # isolate carry and residue
+ movdqu `16*1`($inp),$inout1
+ pxor $twres,@tweak[5]
+
+ movdqu `16*2`($inp),$inout2
+ pxor @tweak[0],$inout0 # input^=tweak
+ movdqu `16*3`($inp),$inout3
+ pxor @tweak[1],$inout1
+ movdqu `16*4`($inp),$inout4
+ pxor @tweak[2],$inout2
+ movdqu `16*5`($inp),$inout5
+ lea `16*6`($inp),$inp
+ pxor @tweak[3],$inout3
+ $movkey ($key_),$rndkey0
+ pxor @tweak[4],$inout4
+ pxor @tweak[5],$inout5
+
+ # inline _aesni_decrypt6 and interleave first and last rounds
+ # with own code...
+ $movkey 16($key_),$rndkey1
+ pxor $rndkey0,$inout0
+ pxor $rndkey0,$inout1
+ movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks
+ aesdec $rndkey1,$inout0
+ lea 32($key_),$key
+ pxor $rndkey0,$inout2
+ movdqa @tweak[1],`16*1`(%rsp)
+ aesdec $rndkey1,$inout1
+ pxor $rndkey0,$inout3
+ movdqa @tweak[2],`16*2`(%rsp)
+ aesdec $rndkey1,$inout2
+ pxor $rndkey0,$inout4
+ movdqa @tweak[3],`16*3`(%rsp)
+ aesdec $rndkey1,$inout3
+ pxor $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ dec $rounds
+ movdqa @tweak[4],`16*4`(%rsp)
+ aesdec $rndkey1,$inout4
+ movdqa @tweak[5],`16*5`(%rsp)
+ aesdec $rndkey1,$inout5
+ pxor $twtmp,$twtmp
+ pcmpgtd @tweak[5],$twtmp
+ jmp .Lxts_dec_loop6_enter
+
+.align 16
+.Lxts_dec_loop6:
+ aesdec $rndkey1,$inout0
+ aesdec $rndkey1,$inout1
+ dec $rounds
+ aesdec $rndkey1,$inout2
+ aesdec $rndkey1,$inout3
+ aesdec $rndkey1,$inout4
+ aesdec $rndkey1,$inout5
+.Lxts_dec_loop6_enter:
+ $movkey 16($key),$rndkey1
+ aesdec $rndkey0,$inout0
+ aesdec $rndkey0,$inout1
+ lea 32($key),$key
+ aesdec $rndkey0,$inout2
+ aesdec $rndkey0,$inout3
+ aesdec $rndkey0,$inout4
+ aesdec $rndkey0,$inout5
+ $movkey ($key),$rndkey0
+ jnz .Lxts_dec_loop6
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesdec $rndkey1,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesdec $rndkey1,$inout1
+ pcmpgtd @tweak[5],$twtmp # broadcast upper bits
+ aesdec $rndkey1,$inout2
+ pxor $twres,@tweak[5]
+ aesdec $rndkey1,$inout3
+ aesdec $rndkey1,$inout4
+ aesdec $rndkey1,$inout5
+ $movkey 16($key),$rndkey1
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[0]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesdec $rndkey0,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesdec $rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ aesdec $rndkey0,$inout2
+ pxor $twres,@tweak[5]
+ aesdec $rndkey0,$inout3
+ aesdec $rndkey0,$inout4
+ aesdec $rndkey0,$inout5
+ $movkey 32($key),$rndkey0
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[1]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesdec $rndkey1,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesdec $rndkey1,$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ aesdec $rndkey1,$inout2
+ pxor $twres,@tweak[5]
+ aesdec $rndkey1,$inout3
+ aesdec $rndkey1,$inout4
+ aesdec $rndkey1,$inout5
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[2]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ aesdeclast $rndkey0,$inout0
+ pand $twmask,$twres # isolate carry and residue
+ aesdeclast $rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ aesdeclast $rndkey0,$inout2
+ pxor $twres,@tweak[5]
+ aesdeclast $rndkey0,$inout3
+ aesdeclast $rndkey0,$inout4
+ aesdeclast $rndkey0,$inout5
+
+ pshufd \$0x13,$twtmp,$twres
+ pxor $twtmp,$twtmp
+ movdqa @tweak[5],@tweak[3]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ xorps `16*0`(%rsp),$inout0 # output^=tweak
+ pand $twmask,$twres # isolate carry and residue
+ xorps `16*1`(%rsp),$inout1
+	pcmpgtd	@tweak[5],$twtmp	# broadcast upper bits
+ pxor $twres,@tweak[5]
+
+ xorps `16*2`(%rsp),$inout2
+ movups $inout0,`16*0`($out) # write output
+ xorps `16*3`(%rsp),$inout3
+ movups $inout1,`16*1`($out)
+ xorps `16*4`(%rsp),$inout4
+ movups $inout2,`16*2`($out)
+ xorps `16*5`(%rsp),$inout5
+ movups $inout3,`16*3`($out)
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout4,`16*4`($out)
+ movups $inout5,`16*5`($out)
+ lea `16*6`($out),$out
+ sub \$16*6,$len
+ jnc .Lxts_dec_grandloop
+
+ lea 3($rounds,$rounds),$rounds # restore original value
+ mov $key_,$key # restore $key
+ mov $rounds,$rnds_ # backup $rounds
+
+.Lxts_dec_short:
+ add \$16*6,$len
+ jz .Lxts_dec_done
+
+ cmp \$0x20,$len
+ jb .Lxts_dec_one
+ je .Lxts_dec_two
+
+ cmp \$0x40,$len
+ jb .Lxts_dec_three
+ je .Lxts_dec_four
+
+ pshufd \$0x13,$twtmp,$twres
+ movdqa @tweak[5],@tweak[4]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ movdqu ($inp),$inout0
+ pand $twmask,$twres # isolate carry and residue
+ movdqu 16*1($inp),$inout1
+ pxor $twres,@tweak[5]
+
+ movdqu 16*2($inp),$inout2
+ pxor @tweak[0],$inout0
+ movdqu 16*3($inp),$inout3
+ pxor @tweak[1],$inout1
+ movdqu 16*4($inp),$inout4
+ lea 16*5($inp),$inp
+ pxor @tweak[2],$inout2
+ pxor @tweak[3],$inout3
+ pxor @tweak[4],$inout4
+
+ call _aesni_decrypt6
+
+ xorps @tweak[0],$inout0
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+ movdqu $inout0,($out)
+ xorps @tweak[3],$inout3
+ movdqu $inout1,16*1($out)
+ xorps @tweak[4],$inout4
+ movdqu $inout2,16*2($out)
+ pxor $twtmp,$twtmp
+ movdqu $inout3,16*3($out)
+ pcmpgtd @tweak[5],$twtmp
+ movdqu $inout4,16*4($out)
+ lea 16*5($out),$out
+ pshufd \$0x13,$twtmp,@tweak[1] # $twres
+ and \$15,$len_
+ jz .Lxts_dec_ret
+
+ movdqa @tweak[5],@tweak[0]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ pand $twmask,@tweak[1] # isolate carry and residue
+ pxor @tweak[5],@tweak[1]
+ jmp .Lxts_dec_done2
+
+.align 16
+.Lxts_dec_one:
+ movups ($inp),$inout0
+ lea 16*1($inp),$inp
+ xorps @tweak[0],$inout0
+___
+ &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+ xorps @tweak[0],$inout0
+ movdqa @tweak[1],@tweak[0]
+ movups $inout0,($out)
+ movdqa @tweak[2],@tweak[1]
+ lea 16*1($out),$out
+ jmp .Lxts_dec_done
+
+.align 16
+.Lxts_dec_two:
+ movups ($inp),$inout0
+ movups 16($inp),$inout1
+ lea 32($inp),$inp
+ xorps @tweak[0],$inout0
+ xorps @tweak[1],$inout1
+
+ call _aesni_decrypt3
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[2],@tweak[0]
+ xorps @tweak[1],$inout1
+ movdqa @tweak[3],@tweak[1]
+ movups $inout0,($out)
+ movups $inout1,16*1($out)
+ lea 16*2($out),$out
+ jmp .Lxts_dec_done
+
+.align 16
+.Lxts_dec_three:
+ movups ($inp),$inout0
+ movups 16*1($inp),$inout1
+ movups 16*2($inp),$inout2
+ lea 16*3($inp),$inp
+ xorps @tweak[0],$inout0
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+
+ call _aesni_decrypt3
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[3],@tweak[0]
+ xorps @tweak[1],$inout1
+ movdqa @tweak[5],@tweak[1]
+ xorps @tweak[2],$inout2
+ movups $inout0,($out)
+ movups $inout1,16*1($out)
+ movups $inout2,16*2($out)
+ lea 16*3($out),$out
+ jmp .Lxts_dec_done
+
+.align 16
+.Lxts_dec_four:
+ pshufd \$0x13,$twtmp,$twres
+ movdqa @tweak[5],@tweak[4]
+ paddq @tweak[5],@tweak[5] # psllq 1,$tweak
+ movups ($inp),$inout0
+ pand $twmask,$twres # isolate carry and residue
+ movups 16*1($inp),$inout1
+ pxor $twres,@tweak[5]
+
+ movups 16*2($inp),$inout2
+ xorps @tweak[0],$inout0
+ movups 16*3($inp),$inout3
+ lea 16*4($inp),$inp
+ xorps @tweak[1],$inout1
+ xorps @tweak[2],$inout2
+ xorps @tweak[3],$inout3
+
+ call _aesni_decrypt4
+
+ xorps @tweak[0],$inout0
+ movdqa @tweak[4],@tweak[0]
+ xorps @tweak[1],$inout1
+ movdqa @tweak[5],@tweak[1]
+ xorps @tweak[2],$inout2
+ movups $inout0,($out)
+ xorps @tweak[3],$inout3
+ movups $inout1,16*1($out)
+ movups $inout2,16*2($out)
+ movups $inout3,16*3($out)
+ lea 16*4($out),$out
+ jmp .Lxts_dec_done
+
+.align 16
+.Lxts_dec_done:
+ and \$15,$len_
+ jz .Lxts_dec_ret
+.Lxts_dec_done2:
+ mov $len_,$len
+ mov $key_,$key # restore $key
+ mov $rnds_,$rounds # restore $rounds
+
+ movups ($inp),$inout0
+ xorps @tweak[1],$inout0
+___
+ &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+ xorps @tweak[1],$inout0
+ movups $inout0,($out)
+
+.Lxts_dec_steal:
+ movzb 16($inp),%eax # borrow $rounds ...
+ movzb ($out),%ecx # ... and $key
+ lea 1($inp),$inp
+ mov %al,($out)
+ mov %cl,16($out)
+ lea 1($out),$out
+ sub \$1,$len
+ jnz .Lxts_dec_steal
+
+ sub $len_,$out # rewind $out
+ mov $key_,$key # restore $key
+ mov $rnds_,$rounds # restore $rounds
+
+ movups ($out),$inout0
+ xorps @tweak[0],$inout0
+___
+ &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+ xorps @tweak[0],$inout0
+ movups $inout0,($out)
+
+.Lxts_dec_ret:
+___
+$code.=<<___ if ($win64);
+ movaps 0x60(%rsp),%xmm6
+ movaps 0x70(%rsp),%xmm7
+ movaps 0x80(%rsp),%xmm8
+ movaps 0x90(%rsp),%xmm9
+ movaps 0xa0(%rsp),%xmm10
+ movaps 0xb0(%rsp),%xmm11
+ movaps 0xc0(%rsp),%xmm12
+ movaps 0xd0(%rsp),%xmm13
+ movaps 0xe0(%rsp),%xmm14
+ movaps 0xf0(%rsp),%xmm15
+___
+$code.=<<___;
+ lea $frame_size(%rsp),%rsp
+.Lxts_dec_epilogue:
+ ret
+.size aesni_xts_decrypt,.-aesni_xts_decrypt
+___
+} }}
+
+########################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+# size_t length, const AES_KEY *key,
+# unsigned char *ivp,const int enc);
+{
+my $reserved = $win64?0x40:-0x18; # used in decrypt
+$code.=<<___;
+.globl ${PREFIX}_cbc_encrypt
+.type ${PREFIX}_cbc_encrypt,\@function,6
+.align 16
+${PREFIX}_cbc_encrypt:
+ test $len,$len # check length
+ jz .Lcbc_ret
+
+ mov 240($key),$rnds_ # key->rounds
+ mov $key,$key_ # backup $key
+ test %r9d,%r9d # 6th argument
+ jz .Lcbc_decrypt
+#--------------------------- CBC ENCRYPT ------------------------------#
+ movups ($ivp),$inout0 # load iv as initial state
+ mov $rnds_,$rounds
+ cmp \$16,$len
+ jb .Lcbc_enc_tail
+ sub \$16,$len
+ jmp .Lcbc_enc_loop
+.align 16
+.Lcbc_enc_loop:
+ movups ($inp),$inout1 # load input
+ lea 16($inp),$inp
+ #xorps $inout1,$inout0
+___
+ &aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
+$code.=<<___;
+ mov $rnds_,$rounds # restore $rounds
+ mov $key_,$key # restore $key
+ movups $inout0,0($out) # store output
+ lea 16($out),$out
+ sub \$16,$len
+ jnc .Lcbc_enc_loop
+ add \$16,$len
+ jnz .Lcbc_enc_tail
+ movups $inout0,($ivp)
+ jmp .Lcbc_ret
+
+.Lcbc_enc_tail:
+ mov $len,%rcx # zaps $key
+ xchg $inp,$out # $inp is %rsi and $out is %rdi now
+ .long 0x9066A4F3 # rep movsb
+ mov \$16,%ecx # zero tail
+ sub $len,%rcx
+ xor %eax,%eax
+ .long 0x9066AAF3 # rep stosb
+ lea -16(%rdi),%rdi # rewind $out by 1 block
+ mov $rnds_,$rounds # restore $rounds
+ mov %rdi,%rsi # $inp and $out are the same
+ mov $key_,$key # restore $key
+ xor $len,$len # len=16
+ jmp .Lcbc_enc_loop # one more spin
+ #--------------------------- CBC DECRYPT ------------------------------#
+.align 16
+.Lcbc_decrypt:
+___
+$code.=<<___ if ($win64);
+ lea -0x58(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ movaps %xmm8,0x20(%rsp)
+ movaps %xmm9,0x30(%rsp)
+.Lcbc_decrypt_body:
+___
+$code.=<<___;
+ movups ($ivp),$iv
+ mov $rnds_,$rounds
+ cmp \$0x70,$len
+ jbe .Lcbc_dec_tail
+ shr \$1,$rnds_
+ sub \$0x70,$len
+ mov $rnds_,$rounds
+ movaps $iv,$reserved(%rsp)
+ jmp .Lcbc_dec_loop8_enter
+.align 16
+.Lcbc_dec_loop8:
+ movaps $rndkey0,$reserved(%rsp) # save IV
+ movups $inout7,($out)
+ lea 0x10($out),$out
+.Lcbc_dec_loop8_enter:
+ $movkey ($key),$rndkey0
+ movups ($inp),$inout0 # load input
+ movups 0x10($inp),$inout1
+ $movkey 16($key),$rndkey1
+
+ lea 32($key),$key
+ movdqu 0x20($inp),$inout2
+ xorps $rndkey0,$inout0
+ movdqu 0x30($inp),$inout3
+ xorps $rndkey0,$inout1
+ movdqu 0x40($inp),$inout4
+ aesdec $rndkey1,$inout0
+ pxor $rndkey0,$inout2
+ movdqu 0x50($inp),$inout5
+ aesdec $rndkey1,$inout1
+ pxor $rndkey0,$inout3
+ movdqu 0x60($inp),$inout6
+ aesdec $rndkey1,$inout2
+ pxor $rndkey0,$inout4
+ movdqu 0x70($inp),$inout7
+ aesdec $rndkey1,$inout3
+ pxor $rndkey0,$inout5
+ dec $rounds
+ aesdec $rndkey1,$inout4
+ pxor $rndkey0,$inout6
+ aesdec $rndkey1,$inout5
+ pxor $rndkey0,$inout7
+ $movkey ($key),$rndkey0
+ aesdec $rndkey1,$inout6
+ aesdec $rndkey1,$inout7
+ $movkey 16($key),$rndkey1
+
+ call .Ldec_loop8_enter
+
+ movups ($inp),$rndkey1 # re-load input
+ movups 0x10($inp),$rndkey0
+ xorps $reserved(%rsp),$inout0 # ^= IV
+ xorps $rndkey1,$inout1
+ movups 0x20($inp),$rndkey1
+ xorps $rndkey0,$inout2
+ movups 0x30($inp),$rndkey0
+ xorps $rndkey1,$inout3
+ movups 0x40($inp),$rndkey1
+ xorps $rndkey0,$inout4
+ movups 0x50($inp),$rndkey0
+ xorps $rndkey1,$inout5
+ movups 0x60($inp),$rndkey1
+ xorps $rndkey0,$inout6
+ movups 0x70($inp),$rndkey0 # IV
+ xorps $rndkey1,$inout7
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ mov $rnds_,$rounds # restore $rounds
+ movups $inout4,0x40($out)
+ mov $key_,$key # restore $key
+ movups $inout5,0x50($out)
+ lea 0x80($inp),$inp
+ movups $inout6,0x60($out)
+ lea 0x70($out),$out
+ sub \$0x80,$len
+ ja .Lcbc_dec_loop8
+
+ movaps $inout7,$inout0
+ movaps $rndkey0,$iv
+ add \$0x70,$len
+ jle .Lcbc_dec_tail_collected
+ movups $inout0,($out)
+ lea 1($rnds_,$rnds_),$rounds
+ lea 0x10($out),$out
+.Lcbc_dec_tail:
+ movups ($inp),$inout0
+ movaps $inout0,$in0
+ cmp \$0x10,$len
+ jbe .Lcbc_dec_one
+
+ movups 0x10($inp),$inout1
+ movaps $inout1,$in1
+ cmp \$0x20,$len
+ jbe .Lcbc_dec_two
+
+ movups 0x20($inp),$inout2
+ movaps $inout2,$in2
+ cmp \$0x30,$len
+ jbe .Lcbc_dec_three
+
+ movups 0x30($inp),$inout3
+ cmp \$0x40,$len
+ jbe .Lcbc_dec_four
+
+ movups 0x40($inp),$inout4
+ cmp \$0x50,$len
+ jbe .Lcbc_dec_five
+
+ movups 0x50($inp),$inout5
+ cmp \$0x60,$len
+ jbe .Lcbc_dec_six
+
+ movups 0x60($inp),$inout6
+ movaps $iv,$reserved(%rsp) # save IV
+ call _aesni_decrypt8
+ movups ($inp),$rndkey1
+ movups 0x10($inp),$rndkey0
+ xorps $reserved(%rsp),$inout0 # ^= IV
+ xorps $rndkey1,$inout1
+ movups 0x20($inp),$rndkey1
+ xorps $rndkey0,$inout2
+ movups 0x30($inp),$rndkey0
+ xorps $rndkey1,$inout3
+ movups 0x40($inp),$rndkey1
+ xorps $rndkey0,$inout4
+ movups 0x50($inp),$rndkey0
+ xorps $rndkey1,$inout5
+ movups 0x60($inp),$iv # IV
+ xorps $rndkey0,$inout6
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ movups $inout5,0x50($out)
+ lea 0x60($out),$out
+ movaps $inout6,$inout0
+ sub \$0x70,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_one:
+___
+ &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+ xorps $iv,$inout0
+ movaps $in0,$iv
+ sub \$0x10,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_two:
+ xorps $inout2,$inout2
+ call _aesni_decrypt3
+ xorps $iv,$inout0
+ xorps $in0,$inout1
+ movups $inout0,($out)
+ movaps $in1,$iv
+ movaps $inout1,$inout0
+ lea 0x10($out),$out
+ sub \$0x20,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_three:
+ call _aesni_decrypt3
+ xorps $iv,$inout0
+ xorps $in0,$inout1
+ movups $inout0,($out)
+ xorps $in1,$inout2
+ movups $inout1,0x10($out)
+ movaps $in2,$iv
+ movaps $inout2,$inout0
+ lea 0x20($out),$out
+ sub \$0x30,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_four:
+ call _aesni_decrypt4
+ xorps $iv,$inout0
+ movups 0x30($inp),$iv
+ xorps $in0,$inout1
+ movups $inout0,($out)
+ xorps $in1,$inout2
+ movups $inout1,0x10($out)
+ xorps $in2,$inout3
+ movups $inout2,0x20($out)
+ movaps $inout3,$inout0
+ lea 0x30($out),$out
+ sub \$0x40,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_five:
+ xorps $inout5,$inout5
+ call _aesni_decrypt6
+ movups 0x10($inp),$rndkey1
+ movups 0x20($inp),$rndkey0
+ xorps $iv,$inout0
+ xorps $in0,$inout1
+ xorps $rndkey1,$inout2
+ movups 0x30($inp),$rndkey1
+ xorps $rndkey0,$inout3
+ movups 0x40($inp),$iv
+ xorps $rndkey1,$inout4
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ lea 0x40($out),$out
+ movaps $inout4,$inout0
+ sub \$0x50,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_six:
+ call _aesni_decrypt6
+ movups 0x10($inp),$rndkey1
+ movups 0x20($inp),$rndkey0
+ xorps $iv,$inout0
+ xorps $in0,$inout1
+ xorps $rndkey1,$inout2
+ movups 0x30($inp),$rndkey1
+ xorps $rndkey0,$inout3
+ movups 0x40($inp),$rndkey0
+ xorps $rndkey1,$inout4
+ movups 0x50($inp),$iv
+ xorps $rndkey0,$inout5
+ movups $inout0,($out)
+ movups $inout1,0x10($out)
+ movups $inout2,0x20($out)
+ movups $inout3,0x30($out)
+ movups $inout4,0x40($out)
+ lea 0x50($out),$out
+ movaps $inout5,$inout0
+ sub \$0x60,$len
+ jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_tail_collected:
+ and \$15,$len
+ movups $iv,($ivp)
+ jnz .Lcbc_dec_tail_partial
+ movups $inout0,($out)
+ jmp .Lcbc_dec_ret
+.align 16
+.Lcbc_dec_tail_partial:
+ movaps $inout0,$reserved(%rsp)
+ mov \$16,%rcx
+ mov $out,%rdi
+ sub $len,%rcx
+ lea $reserved(%rsp),%rsi
+ .long 0x9066A4F3 # rep movsb
+
+.Lcbc_dec_ret:
+___
+$code.=<<___ if ($win64);
+ movaps (%rsp),%xmm6
+ movaps 0x10(%rsp),%xmm7
+ movaps 0x20(%rsp),%xmm8
+ movaps 0x30(%rsp),%xmm9
+ lea 0x58(%rsp),%rsp
+___
+$code.=<<___;
+.Lcbc_ret:
+ ret
+.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+}
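+
+# [Editorial sketch] .Lcbc_enc_loop above realizes the textbook CBC
+# recurrence C[i] = E_K(P[i] ^ C[i-1]) with C[-1] = IV, the chaining
+# xor being folded into aesni_generate1()'s first round. Reference
+# model, assuming an opaque block-cipher callback (not called):
+sub cbc_encrypt_sketch {
+	my ($encrypt_block,$iv,@blocks)=@_;	# 16-byte strings throughout
+	my @ct;
+	my $prev = $iv;
+	foreach my $p (@blocks) {
+		$prev = $encrypt_block->($p ^ $prev);	# chain ciphertext
+		push @ct,$prev;
+	}
+	return @ct;
+}
+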
+# int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
+# int bits, AES_KEY *key)
+{ my ($inp,$bits,$key) = @_4args;
+ $bits =~ s/%r/%e/;
+
+$code.=<<___;
+.globl ${PREFIX}_set_decrypt_key
+.type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
+.align 16
+${PREFIX}_set_decrypt_key:
+ .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
+ call __aesni_set_encrypt_key
+ shl \$4,$bits # rounds-1 after _aesni_set_encrypt_key
+ test %eax,%eax
+ jnz .Ldec_key_ret
+ lea 16($key,$bits),$inp # points at the end of key schedule
+
+ $movkey ($key),%xmm0 # just swap
+ $movkey ($inp),%xmm1
+ $movkey %xmm0,($inp)
+ $movkey %xmm1,($key)
+ lea 16($key),$key
+ lea -16($inp),$inp
+
+.Ldec_key_inverse:
+ $movkey ($key),%xmm0 # swap and inverse
+ $movkey ($inp),%xmm1
+ aesimc %xmm0,%xmm0
+ aesimc %xmm1,%xmm1
+ lea 16($key),$key
+ lea -16($inp),$inp
+ $movkey %xmm0,16($inp)
+ $movkey %xmm1,-16($key)
+ cmp $key,$inp
+ ja .Ldec_key_inverse
+
+ $movkey ($key),%xmm0 # inverse middle
+ aesimc %xmm0,%xmm0
+ $movkey %xmm0,($inp)
+.Ldec_key_ret:
+ add \$8,%rsp
+ ret
+.LSEH_end_set_decrypt_key:
+.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+___
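+
+# [Editorial note] The routine above builds the "equivalent inverse
+# cipher" schedule: the encryption round keys in reverse order, with
+# aesimc (InvMixColumns) applied to every round key except the first
+# and the last, which are merely swapped.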
+
+# This is based on a submission by
+#
+#	Huang Ying <ying.huang@intel.com>
+#	Vinodh Gopal <vinodh.gopal@intel.com>
+#	Kahraman Akdemir
+#
+# Aggressively optimized with respect to aeskeygenassist's critical
+# path; the working set is contained in %xmm0-5 to meet the Win64 ABI
+# requirement.
+#
+$code.=<<___;
+.globl ${PREFIX}_set_encrypt_key
+.type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
+.align 16
+${PREFIX}_set_encrypt_key:
+__aesni_set_encrypt_key:
+ .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
+ mov \$-1,%rax
+ test $inp,$inp
+ jz .Lenc_key_ret
+ test $key,$key
+ jz .Lenc_key_ret
+
+ movups ($inp),%xmm0 # pull first 128 bits of *userKey
+ xorps %xmm4,%xmm4 # low dword of xmm4 is assumed 0
+ lea 16($key),%rax
+ cmp \$256,$bits
+ je .L14rounds
+ cmp \$192,$bits
+ je .L12rounds
+ cmp \$128,$bits
+ jne .Lbad_keybits
+
+.L10rounds:
+ mov \$9,$bits # 10 rounds for 128-bit key
+ $movkey %xmm0,($key) # round 0
+ aeskeygenassist \$0x1,%xmm0,%xmm1 # round 1
+ call .Lkey_expansion_128_cold
+ aeskeygenassist \$0x2,%xmm0,%xmm1 # round 2
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x4,%xmm0,%xmm1 # round 3
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x8,%xmm0,%xmm1 # round 4
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x10,%xmm0,%xmm1 # round 5
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x20,%xmm0,%xmm1 # round 6
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x40,%xmm0,%xmm1 # round 7
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x80,%xmm0,%xmm1 # round 8
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x1b,%xmm0,%xmm1 # round 9
+ call .Lkey_expansion_128
+ aeskeygenassist \$0x36,%xmm0,%xmm1 # round 10
+ call .Lkey_expansion_128
+ $movkey %xmm0,(%rax)
+ mov $bits,80(%rax) # 240(%rdx)
+ xor %eax,%eax
+ jmp .Lenc_key_ret
+
+.align 16
+.L12rounds:
+ movq 16($inp),%xmm2 # remaining 1/3 of *userKey
+ mov \$11,$bits # 12 rounds for 192
+ $movkey %xmm0,($key) # round 0
+ aeskeygenassist \$0x1,%xmm2,%xmm1 # round 1,2
+ call .Lkey_expansion_192a_cold
+ aeskeygenassist \$0x2,%xmm2,%xmm1 # round 2,3
+ call .Lkey_expansion_192b
+ aeskeygenassist \$0x4,%xmm2,%xmm1 # round 4,5
+ call .Lkey_expansion_192a
+ aeskeygenassist \$0x8,%xmm2,%xmm1 # round 5,6
+ call .Lkey_expansion_192b
+ aeskeygenassist \$0x10,%xmm2,%xmm1 # round 7,8
+ call .Lkey_expansion_192a
+ aeskeygenassist \$0x20,%xmm2,%xmm1 # round 8,9
+ call .Lkey_expansion_192b
+ aeskeygenassist \$0x40,%xmm2,%xmm1 # round 10,11
+ call .Lkey_expansion_192a
+ aeskeygenassist \$0x80,%xmm2,%xmm1 # round 11,12
+ call .Lkey_expansion_192b
+ $movkey %xmm0,(%rax)
+ mov $bits,48(%rax) # 240(%rdx)
+	xor	%rax,%rax
+ jmp .Lenc_key_ret
+
+.align 16
+.L14rounds:
+	movups	16($inp),%xmm2			# remaining half of *userKey
+ mov \$13,$bits # 14 rounds for 256
+ lea 16(%rax),%rax
+ $movkey %xmm0,($key) # round 0
+ $movkey %xmm2,16($key) # round 1
+ aeskeygenassist \$0x1,%xmm2,%xmm1 # round 2
+ call .Lkey_expansion_256a_cold
+ aeskeygenassist \$0x1,%xmm0,%xmm1 # round 3
+ call .Lkey_expansion_256b
+ aeskeygenassist \$0x2,%xmm2,%xmm1 # round 4
+ call .Lkey_expansion_256a
+ aeskeygenassist \$0x2,%xmm0,%xmm1 # round 5
+ call .Lkey_expansion_256b
+ aeskeygenassist \$0x4,%xmm2,%xmm1 # round 6
+ call .Lkey_expansion_256a
+ aeskeygenassist \$0x4,%xmm0,%xmm1 # round 7
+ call .Lkey_expansion_256b
+ aeskeygenassist \$0x8,%xmm2,%xmm1 # round 8
+ call .Lkey_expansion_256a
+ aeskeygenassist \$0x8,%xmm0,%xmm1 # round 9
+ call .Lkey_expansion_256b
+ aeskeygenassist \$0x10,%xmm2,%xmm1 # round 10
+ call .Lkey_expansion_256a
+ aeskeygenassist \$0x10,%xmm0,%xmm1 # round 11
+ call .Lkey_expansion_256b
+ aeskeygenassist \$0x20,%xmm2,%xmm1 # round 12
+ call .Lkey_expansion_256a
+ aeskeygenassist \$0x20,%xmm0,%xmm1 # round 13
+ call .Lkey_expansion_256b
+ aeskeygenassist \$0x40,%xmm2,%xmm1 # round 14
+ call .Lkey_expansion_256a
+ $movkey %xmm0,(%rax)
+ mov $bits,16(%rax) # 240(%rdx)
+ xor %rax,%rax
+ jmp .Lenc_key_ret
+
+.align 16
+.Lbad_keybits:
+ mov \$-2,%rax
+.Lenc_key_ret:
+ add \$8,%rsp
+ ret
+.LSEH_end_set_encrypt_key:
+
+.align 16
+.Lkey_expansion_128:
+ $movkey %xmm0,(%rax)
+ lea 16(%rax),%rax
+.Lkey_expansion_128_cold:
+ shufps \$0b00010000,%xmm0,%xmm4
+ xorps %xmm4, %xmm0
+ shufps \$0b10001100,%xmm0,%xmm4
+ xorps %xmm4, %xmm0
+ shufps \$0b11111111,%xmm1,%xmm1 # critical path
+ xorps %xmm1,%xmm0
+ ret
+
+.align 16
+.Lkey_expansion_192a:
+ $movkey %xmm0,(%rax)
+ lea 16(%rax),%rax
+.Lkey_expansion_192a_cold:
+ movaps %xmm2, %xmm5
+.Lkey_expansion_192b_warm:
+ shufps \$0b00010000,%xmm0,%xmm4
+ movdqa %xmm2,%xmm3
+ xorps %xmm4,%xmm0
+ shufps \$0b10001100,%xmm0,%xmm4
+ pslldq \$4,%xmm3
+ xorps %xmm4,%xmm0
+ pshufd \$0b01010101,%xmm1,%xmm1 # critical path
+ pxor %xmm3,%xmm2
+ pxor %xmm1,%xmm0
+ pshufd \$0b11111111,%xmm0,%xmm3
+ pxor %xmm3,%xmm2
+ ret
+
+.align 16
+.Lkey_expansion_192b:
+ movaps %xmm0,%xmm3
+ shufps \$0b01000100,%xmm0,%xmm5
+ $movkey %xmm5,(%rax)
+ shufps \$0b01001110,%xmm2,%xmm3
+ $movkey %xmm3,16(%rax)
+ lea 32(%rax),%rax
+ jmp .Lkey_expansion_192b_warm
+
+.align 16
+.Lkey_expansion_256a:
+ $movkey %xmm2,(%rax)
+ lea 16(%rax),%rax
+.Lkey_expansion_256a_cold:
+ shufps \$0b00010000,%xmm0,%xmm4
+ xorps %xmm4,%xmm0
+ shufps \$0b10001100,%xmm0,%xmm4
+ xorps %xmm4,%xmm0
+ shufps \$0b11111111,%xmm1,%xmm1 # critical path
+ xorps %xmm1,%xmm0
+ ret
+
+.align 16
+.Lkey_expansion_256b:
+ $movkey %xmm0,(%rax)
+ lea 16(%rax),%rax
+
+ shufps \$0b00010000,%xmm2,%xmm4
+ xorps %xmm4,%xmm2
+ shufps \$0b10001100,%xmm2,%xmm4
+ xorps %xmm4,%xmm2
+ shufps \$0b10101010,%xmm1,%xmm1 # critical path
+ xorps %xmm1,%xmm2
+ ret
+.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+.size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
+___
+}
+
+$code.=<<___;
+.align 64
+.Lbswap_mask:
+ .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+.Lincrement32:
+ .long 6,6,6,0
+.Lincrement64:
+ .long 1,0,0,0
+.Lxts_magic:
+ .long 0x87,0,1,0
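+	# [editorial] low dword: 0x87 = x^7+x^2+x+1, the GF(2^128)
+	# reduction constant; third dword: carry mask moving bit 63
+	# into bit 64 when the tweak is doubled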
+
+.asciz "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
+.align 64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.type ecb_se_handler,\@abi-omnipotent
+.align 16
+ecb_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+
+ jmp .Lcommon_seh_tail
+.size ecb_se_handler,.-ecb_se_handler
+
+.type ccm64_se_handler,\@abi-omnipotent
+.align 16
+ccm64_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ lea 0(%rax),%rsi # %xmm save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$8,%ecx # 4*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0x58(%rax),%rax # adjust stack pointer
+
+ jmp .Lcommon_seh_tail
+.size ccm64_se_handler,.-ccm64_se_handler
+
+.type ctr32_se_handler,\@abi-omnipotent
+.align 16
+ctr32_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lctr32_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<"prologue" label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lctr32_ret(%rip),%r10
+ cmp %r10,%rbx
+ jae .Lcommon_seh_tail
+
+ lea 0x20(%rax),%rsi # %xmm save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0xc8(%rax),%rax # adjust stack pointer
+
+ jmp .Lcommon_seh_tail
+.size ctr32_se_handler,.-ctr32_se_handler
+
+.type xts_se_handler,\@abi-omnipotent
+.align 16
+xts_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue lable
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ lea 0x60(%rax),%rsi # %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+ mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0x68+160(%rax),%rax # adjust stack pointer
+
+ jmp .Lcommon_seh_tail
+.size xts_se_handler,.-xts_se_handler
+___
+$code.=<<___;
+.type cbc_se_handler,\@abi-omnipotent
+.align 16
+cbc_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lcbc_decrypt(%rip),%r10
+ cmp %r10,%rbx # context->Rip<"prologue" label
+ jb .Lcommon_seh_tail
+
+ lea .Lcbc_decrypt_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<cbc_decrypt_body
+ jb .Lrestore_cbc_rax
+
+ lea .Lcbc_ret(%rip),%r10
+ cmp %r10,%rbx # context->Rip>="epilogue" label
+ jae .Lcommon_seh_tail
+
+ lea 0(%rax),%rsi # top of stack
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$8,%ecx # 4*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0x58(%rax),%rax # adjust stack pointer
+ jmp .Lcommon_seh_tail
+
+.Lrestore_cbc_rax:
+ mov 120($context),%rax
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size cbc_se_handler,.-cbc_se_handler
+
+.section .pdata
+.align 4
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+ .rva .LSEH_begin_aesni_ecb_encrypt
+ .rva .LSEH_end_aesni_ecb_encrypt
+ .rva .LSEH_info_ecb
+
+ .rva .LSEH_begin_aesni_ccm64_encrypt_blocks
+ .rva .LSEH_end_aesni_ccm64_encrypt_blocks
+ .rva .LSEH_info_ccm64_enc
+
+ .rva .LSEH_begin_aesni_ccm64_decrypt_blocks
+ .rva .LSEH_end_aesni_ccm64_decrypt_blocks
+ .rva .LSEH_info_ccm64_dec
+
+ .rva .LSEH_begin_aesni_ctr32_encrypt_blocks
+ .rva .LSEH_end_aesni_ctr32_encrypt_blocks
+ .rva .LSEH_info_ctr32
+
+ .rva .LSEH_begin_aesni_xts_encrypt
+ .rva .LSEH_end_aesni_xts_encrypt
+ .rva .LSEH_info_xts_enc
+
+ .rva .LSEH_begin_aesni_xts_decrypt
+ .rva .LSEH_end_aesni_xts_decrypt
+ .rva .LSEH_info_xts_dec
+___
+$code.=<<___;
+ .rva .LSEH_begin_${PREFIX}_cbc_encrypt
+ .rva .LSEH_end_${PREFIX}_cbc_encrypt
+ .rva .LSEH_info_cbc
+
+ .rva ${PREFIX}_set_decrypt_key
+ .rva .LSEH_end_set_decrypt_key
+ .rva .LSEH_info_key
+
+ .rva ${PREFIX}_set_encrypt_key
+ .rva .LSEH_end_set_encrypt_key
+ .rva .LSEH_info_key
+.section .xdata
+.align 8
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.LSEH_info_ecb:
+ .byte 9,0,0,0
+ .rva ecb_se_handler
+.LSEH_info_ccm64_enc:
+ .byte 9,0,0,0
+ .rva ccm64_se_handler
+ .rva .Lccm64_enc_body,.Lccm64_enc_ret # HandlerData[]
+.LSEH_info_ccm64_dec:
+ .byte 9,0,0,0
+ .rva ccm64_se_handler
+ .rva .Lccm64_dec_body,.Lccm64_dec_ret # HandlerData[]
+.LSEH_info_ctr32:
+ .byte 9,0,0,0
+ .rva ctr32_se_handler
+.LSEH_info_xts_enc:
+ .byte 9,0,0,0
+ .rva xts_se_handler
+ .rva .Lxts_enc_body,.Lxts_enc_epilogue # HandlerData[]
+.LSEH_info_xts_dec:
+ .byte 9,0,0,0
+ .rva xts_se_handler
+ .rva .Lxts_dec_body,.Lxts_dec_epilogue # HandlerData[]
+___
+$code.=<<___;
+.LSEH_info_cbc:
+ .byte 9,0,0,0
+ .rva cbc_se_handler
+.LSEH_info_key:
+ .byte 0x01,0x04,0x01,0x00
+ .byte 0x04,0x02,0x00,0x00 # sub rsp,8
+___
+}
+
+sub rex {
+ local *opcode=shift;
+ my ($dst,$src)=@_;
+ my $rex=0;
+
+ $rex|=0x04 if($dst>=8);
+ $rex|=0x01 if($src>=8);
+ push @opcode,$rex|0x40 if($rex);
+}
+
+sub aesni {
+ my $line=shift;
+ my @opcode=(0x66);
+
+ if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ rex(\@opcode,$4,$3);
+ push @opcode,0x0f,0x3a,0xdf;
+ push @opcode,0xc0|($3&7)|(($4&7)<<3); # ModR/M
+ my $c=$2;
+ push @opcode,$c=~/^0/?oct($c):$c;
+ return ".byte\t".join(',',@opcode);
+ }
+ elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+ my %opcodelet = (
+ "aesimc" => 0xdb,
+ "aesenc" => 0xdc, "aesenclast" => 0xdd,
+ "aesdec" => 0xde, "aesdeclast" => 0xdf
+ );
+ return undef if (!defined($opcodelet{$1}));
+ rex(\@opcode,$3,$2);
+ push @opcode,0x0f,0x38,$opcodelet{$1};
+ push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ }
+ return $line;
+}
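+
+# [Editorial note] For example, the translation above turns
+# "aesenc %xmm9,%xmm0" into ".byte 0x66,0x41,0x0f,0x38,0xdc,0xc1":
+# 0x41 is the REX.B prefix selecting %xmm9 as the r/m operand, 0xdc is
+# the AESENC opcode byte, and 0xc1 is the ModR/M byte with reg=%xmm0
+# and r/m=%xmm9.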
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/openssl/crypto/aes/asm/bsaes-x86_64.pl b/openssl/crypto/aes/asm/bsaes-x86_64.pl
new file mode 100644
index 000000000..ff7e3afe8
--- /dev/null
+++ b/openssl/crypto/aes/asm/bsaes-x86_64.pl
@@ -0,0 +1,3004 @@
+#!/usr/bin/env perl
+
+###################################################################
+### AES-128 [originally in CTR mode] ###
+### bitsliced implementation for Intel Core 2 processors ###
+### requires support of SSE extensions up to SSSE3 ###
+### Author: Emilia Käsper and Peter Schwabe ###
+### Date: 2009-03-19 ###
+### Public domain ###
+### ###
+### See http://homes.esat.kuleuven.be/~ekasper/#software for ###
+### further information. ###
+###################################################################
+#
+# September 2011.
+#
+# Started as a transliteration of the original code to "perlasm", the
+# module has undergone the following changes:
+#
+# - code was made position-independent;
+# - rounds were folded into a loop resulting in >5x size reduction
+# from 12.5KB to 2.2KB;
+# - the above was possible thanks to a mixcolumns() modification that
+#	allows its output to be fed back to aesenc[last]; this was
+#	achieved at the cost of two additional inter-register moves;
+# - some instruction reordering and interleaving;
+# - this module doesn't implement a key setup subroutine; instead it
+#	relies on conversion of the "conventional" key schedule as
+#	returned by AES_set_encrypt_key (see discussion below);
+# - first and last round keys are treated differently, which made it
+#	possible to skip one shiftrows(), reduce the bit-sliced key
+#	schedule and speed up conversion by 22%;
+# - support for 192- and 256-bit keys was added;
+#
+# Resulting performance in CPU cycles spent to encrypt one byte out
+# of 4096-byte buffer with 128-bit key is:
+#
+# Emilia's this(*) difference
+#
+# Core 2 9.30 8.69 +7%
+# Nehalem(**) 7.63 6.98 +9%
+# Atom 17.1 17.4 -2%(***)
+#
+# (*)	The comparison is not completely fair, because "this" is ECB,
+#	i.e. no extra processing such as counter value calculation and
+#	xor-ing of the input, as in Emilia's CTR implementation, is
+#	performed. However, the CTR calculations account for no more
+#	than 1% of total time, so the comparison is *rather* fair.
+#
+# (**) Results were collected on Westmere, which is considered to
+# be equivalent to Nehalem for this code.
+#
+# (***)	The slowdown on Atom is rather strange per se, because the
+#	original implementation has a number of 9+-byte instructions,
+#	which are bad for the Atom front-end and which I eliminated
+#	completely. In an attempt to address the deterioration, sbox()
+#	was tested in the FP SIMD "domain" (movaps instead of movdqa,
+#	xorps instead of pxor, etc.). While this resulted in a nominal
+#	4% improvement on Atom, it hurt Westmere by more than a 2x
+#	factor.
+#
+# As for the key schedule conversion subroutine: the interface to
+# OpenSSL relies on per-invocation on-the-fly conversion. This
+# naturally has an impact on performance, especially for short inputs.
+# Conversion time in CPU cycles and its ratio to CPU cycles spent in
+# the 8x block function is:
+#
+# conversion conversion/8x block
+# Core 2 410 0.37
+# Nehalem 310 0.35
+# Atom 570 0.26
+#
+# The ratio values mean that 128-byte blocks will be processed
+# 21-27% slower, 256-byte blocks 12-16% slower, 384-byte blocks
+# 8-11% slower, etc. Keep in mind as well that input sizes not
+# divisible by 128 are *effectively* slower, especially the shortest
+# ones, e.g. consecutive 144-byte blocks are processed 44% slower than
+# one would expect, 272-byte ones 29%, 400-byte ones 22%, etc. Yet,
+# despite all these "shortcomings", it's still faster than the
+# ["hyper-threading-safe" code path in] aes-x86_64.pl on all lengths
+# above 64 bytes...
+#
+# October 2011.
+#
+# Add decryption procedure. Performance in CPU cycles spent to decrypt
+# one byte out of 4096-byte buffer with 128-bit key is:
+#
+# Core 2 11.0
+# Nehalem 9.16
+#
+# November 2011.
+#
+# Add bsaes_xts_[en|de]crypt. Less-than-80-bytes-block performance is
+# suboptimal, but XTS is meant to be used with larger blocks...
+#
+# <appro@openssl.org>
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+my ($inp,$out,$len,$key,$ivp)=("%rdi","%rsi","%rdx","%rcx");
+my @XMM=map("%xmm$_",(15,0..14)); # best on Atom, +10% over (0..15)
+my $ecb=0; # suppress unreferenced ECB subroutines, spare some space...
+
+{
+my ($key,$rounds,$const)=("%rax","%r10d","%r11");
+
+sub Sbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InBasisChange (@b);
+ &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
+ &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
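+
+# [Editorial note] Sbox() thus follows the usual bitsliced recipe:
+# change basis into a representation where inversion is cheap, invert
+# via Inv_GF256(), and change basis back, with the S-box's affine
+# transformation effectively absorbed into the two basis changes.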
+
+sub InBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ pxor @b[6], @b[5]
+ pxor @b[1], @b[2]
+ pxor @b[0], @b[3]
+ pxor @b[2], @b[6]
+ pxor @b[0], @b[5]
+
+ pxor @b[3], @b[6]
+ pxor @b[7], @b[3]
+ pxor @b[5], @b[7]
+ pxor @b[4], @b[3]
+ pxor @b[5], @b[4]
+ pxor @b[1], @b[3]
+
+ pxor @b[7], @b[2]
+ pxor @b[5], @b[1]
+___
+}
+
+sub OutBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ pxor @b[6], @b[0]
+ pxor @b[4], @b[1]
+ pxor @b[0], @b[2]
+ pxor @b[6], @b[4]
+ pxor @b[1], @b[6]
+
+ pxor @b[5], @b[1]
+ pxor @b[3], @b[5]
+ pxor @b[7], @b[3]
+ pxor @b[5], @b[7]
+ pxor @b[5], @b[2]
+
+ pxor @b[7], @b[4]
+___
+}
+
+sub InvSbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InvInBasisChange (@b);
+ &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
+ &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange { # OutBasisChange in reverse
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+ pxor @b[7], @b[4]
+
+ pxor @b[5], @b[7]
+ pxor @b[5], @b[2]
+ pxor @b[7], @b[3]
+ pxor @b[3], @b[5]
+ pxor @b[5], @b[1]
+
+ pxor @b[1], @b[6]
+ pxor @b[0], @b[2]
+ pxor @b[6], @b[4]
+ pxor @b[6], @b[0]
+ pxor @b[4], @b[1]
+___
+}
+
+sub InvOutBasisChange { # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+ pxor @b[5], @b[1]
+ pxor @b[7], @b[2]
+
+ pxor @b[1], @b[3]
+ pxor @b[5], @b[4]
+ pxor @b[5], @b[7]
+ pxor @b[4], @b[3]
+ pxor @b[0], @b[5]
+ pxor @b[7], @b[3]
+ pxor @b[2], @b[6]
+ pxor @b[1], @b[2]
+ pxor @b[3], @b[6]
+
+ pxor @b[0], @b[3]
+ pxor @b[6], @b[5]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+ movdqa $y0, $t0
+ pxor $y1, $t0
+ pand $x0, $t0
+ pxor $x1, $x0
+ pand $y0, $x1
+ pand $y1, $x0
+ pxor $x1, $x0
+ pxor $t0, $x1
+___
+}
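+
+# [Editorial sketch] Per bit position, Mul_GF4 is the familiar GF(2^2)
+# product c1 = (a0^a1)&(b0^b1) ^ a0&b0, c0 = a0&b0 ^ a1&b1 (up to the
+# bit-ordering convention of the bitsliced state). Scalar check on
+# 2-bit values (illustrative only, not called):
+sub gf4_mul_sketch {
+	my ($a,$b)=@_;				# elements of GF(2^2)
+	my ($a0,$a1)=($a&1,($a>>1)&1);
+	my ($b0,$b1)=($b&1,($b>>1)&1);
+	my $c0 = ($a0&$b0)^($a1&$b1);		# constant term
+	my $c1 = (($a0^$a1)&($b0^$b1))^($a0&$b0); # x term
+	return ($c1<<1)|$c0;
+}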
+
+sub Mul_GF4_N { # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+ movdqa $y0, $t0
+ pxor $y1, $t0
+ pand $x0, $t0
+ pxor $x1, $x0
+ pand $y0, $x1
+ pand $y1, $x0
+ pxor $x0, $x1
+ pxor $t0, $x0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+ $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+ movdqa $y0, $t0
+ movdqa $y2, $t1
+ pxor $y1, $t0
+ pxor $y3, $t1
+ pand $x0, $t0
+ pand $x2, $t1
+ pxor $x1, $x0
+ pxor $x3, $x2
+ pand $y0, $x1
+ pand $y2, $x3
+ pand $y1, $x0
+ pand $y3, $x2
+ pxor $x0, $x1
+ pxor $x3, $x2
+ pxor $t0, $x0
+ pxor $t1, $x3
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+ movdqa @x[0], @t[0]
+ movdqa @x[1], @t[1]
+___
+ &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2]);
+$code.=<<___;
+ pxor @x[2], @t[0]
+ pxor @x[3], @t[1]
+ pxor @y[2], @y[0]
+ pxor @y[3], @y[1]
+___
+ Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ pxor @t[0], @x[0]
+ pxor @t[0], @x[2]
+ pxor @t[1], @x[1]
+ pxor @t[1], @x[3]
+
+ movdqa @x[4], @t[0]
+ movdqa @x[5], @t[1]
+ pxor @x[6], @t[0]
+ pxor @x[7], @t[1]
+___
+ &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ pxor @y[2], @y[0]
+ pxor @y[3], @y[1]
+___
+ &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[3]);
+$code.=<<___;
+ pxor @t[0], @x[4]
+ pxor @t[0], @x[6]
+ pxor @t[1], @x[5]
+ pxor @t[1], @x[7]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+ movdqa @x[4], @t[3]
+ movdqa @x[5], @t[2]
+ movdqa @x[1], @t[1]
+ movdqa @x[7], @s[1]
+ movdqa @x[0], @s[0]
+
+ pxor @x[6], @t[3]
+ pxor @x[7], @t[2]
+ pxor @x[3], @t[1]
+ movdqa @t[3], @s[2]
+ pxor @x[6], @s[1]
+ movdqa @t[2], @t[0]
+ pxor @x[2], @s[0]
+ movdqa @t[3], @s[3]
+
+ por @t[1], @t[2]
+ por @s[0], @t[3]
+ pxor @t[0], @s[3]
+ pand @s[0], @s[2]
+ pxor @t[1], @s[0]
+ pand @t[1], @t[0]
+ pand @s[0], @s[3]
+ movdqa @x[3], @s[0]
+ pxor @x[2], @s[0]
+ pand @s[0], @s[1]
+ pxor @s[1], @t[3]
+ pxor @s[1], @t[2]
+ movdqa @x[4], @s[1]
+ movdqa @x[1], @s[0]
+ pxor @x[5], @s[1]
+ pxor @x[0], @s[0]
+ movdqa @s[1], @t[1]
+ pand @s[0], @s[1]
+ por @s[0], @t[1]
+ pxor @s[1], @t[0]
+ pxor @s[3], @t[3]
+ pxor @s[2], @t[2]
+ pxor @s[3], @t[1]
+ movdqa @x[7], @s[0]
+ pxor @s[2], @t[0]
+ movdqa @x[6], @s[1]
+ pxor @s[2], @t[1]
+ movdqa @x[5], @s[2]
+ pand @x[3], @s[0]
+ movdqa @x[4], @s[3]
+ pand @x[2], @s[1]
+ pand @x[1], @s[2]
+ por @x[0], @s[3]
+ pxor @s[0], @t[3]
+ pxor @s[1], @t[2]
+ pxor @s[2], @t[1]
+ pxor @s[3], @t[0]
+
+ #Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+ # new smaller inversion
+
+ movdqa @t[3], @s[0]
+ pand @t[1], @t[3]
+ pxor @t[2], @s[0]
+
+ movdqa @t[0], @s[2]
+ movdqa @s[0], @s[3]
+ pxor @t[3], @s[2]
+ pand @s[2], @s[3]
+
+ movdqa @t[1], @s[1]
+ pxor @t[2], @s[3]
+ pxor @t[0], @s[1]
+
+ pxor @t[2], @t[3]
+
+ pand @t[3], @s[1]
+
+ movdqa @s[2], @t[2]
+ pxor @t[0], @s[1]
+
+ pxor @s[1], @t[2]
+ pxor @s[1], @t[1]
+
+ pand @t[0], @t[2]
+
+ pxor @t[2], @s[2]
+ pxor @t[2], @t[1]
+
+ pand @s[3], @s[2]
+
+ pxor @s[0], @s[2]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+ &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my $mask=pop;
+$code.=<<___;
+ pxor 0x00($key),@x[0]
+ pxor 0x10($key),@x[1]
+ pshufb $mask,@x[0]
+ pxor 0x20($key),@x[2]
+ pshufb $mask,@x[1]
+ pxor 0x30($key),@x[3]
+ pshufb $mask,@x[2]
+ pxor 0x40($key),@x[4]
+ pshufb $mask,@x[3]
+ pxor 0x50($key),@x[5]
+ pshufb $mask,@x[4]
+ pxor 0x60($key),@x[6]
+ pshufb $mask,@x[5]
+ pxor 0x70($key),@x[7]
+ pshufb $mask,@x[6]
+ lea 0x80($key),$key
+ pshufb $mask,@x[7]
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+ pshufd \$0x93, @x[0], @t[0] # x0 <<< 32
+ pshufd \$0x93, @x[1], @t[1]
+ pxor @t[0], @x[0] # x0 ^ (x0 <<< 32)
+ pshufd \$0x93, @x[2], @t[2]
+ pxor @t[1], @x[1]
+ pshufd \$0x93, @x[3], @t[3]
+ pxor @t[2], @x[2]
+ pshufd \$0x93, @x[4], @t[4]
+ pxor @t[3], @x[3]
+ pshufd \$0x93, @x[5], @t[5]
+ pxor @t[4], @x[4]
+ pshufd \$0x93, @x[6], @t[6]
+ pxor @t[5], @x[5]
+ pshufd \$0x93, @x[7], @t[7]
+ pxor @t[6], @x[6]
+ pxor @t[7], @x[7]
+
+ pxor @x[0], @t[1]
+ pxor @x[7], @t[0]
+ pxor @x[7], @t[1]
+ pshufd \$0x4E, @x[0], @x[0] # (x0 ^ (x0 <<< 32)) <<< 64)
+ pxor @x[1], @t[2]
+ pshufd \$0x4E, @x[1], @x[1]
+ pxor @x[4], @t[5]
+ pxor @t[0], @x[0]
+ pxor @x[5], @t[6]
+ pxor @t[1], @x[1]
+ pxor @x[3], @t[4]
+ pshufd \$0x4E, @x[4], @t[0]
+ pxor @x[6], @t[7]
+ pshufd \$0x4E, @x[5], @t[1]
+ pxor @x[2], @t[3]
+ pshufd \$0x4E, @x[3], @x[4]
+ pxor @x[7], @t[3]
+ pshufd \$0x4E, @x[7], @x[5]
+ pxor @x[7], @t[4]
+ pshufd \$0x4E, @x[6], @x[3]
+ pxor @t[4], @t[0]
+ pshufd \$0x4E, @x[2], @x[6]
+ pxor @t[5], @t[1]
+
+ pxor @t[3], @x[4]
+ pxor @t[7], @x[5]
+ pxor @t[6], @x[3]
+ movdqa @t[0], @x[2]
+ pxor @t[2], @x[6]
+ movdqa @t[1], @x[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+ # multiplication by 0x0e
+ pshufd \$0x93, @x[7], @t[7]
+ movdqa @x[2], @t[2]
+ pxor @x[5], @x[7] # 7 5
+ pxor @x[5], @x[2] # 2 5
+ pshufd \$0x93, @x[0], @t[0]
+ movdqa @x[5], @t[5]
+ pxor @x[0], @x[5] # 5 0 [1]
+ pxor @x[1], @x[0] # 0 1
+ pshufd \$0x93, @x[1], @t[1]
+ pxor @x[2], @x[1] # 1 25
+ pxor @x[6], @x[0] # 01 6 [2]
+ pxor @x[3], @x[1] # 125 3 [4]
+ pshufd \$0x93, @x[3], @t[3]
+ pxor @x[0], @x[2] # 25 016 [3]
+ pxor @x[7], @x[3] # 3 75
+ pxor @x[6], @x[7] # 75 6 [0]
+ pshufd \$0x93, @x[6], @t[6]
+ movdqa @x[4], @t[4]
+ pxor @x[4], @x[6] # 6 4
+ pxor @x[3], @x[4] # 4 375 [6]
+ pxor @x[7], @x[3] # 375 756=36
+ pxor @t[5], @x[6] # 64 5 [7]
+ pxor @t[2], @x[3] # 36 2
+ pxor @t[4], @x[3] # 362 4 [5]
+ pshufd \$0x93, @t[5], @t[5]
+___
+ my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+ # multiplication by 0x0b
+ pxor @y[0], @y[1]
+ pxor @t[0], @y[0]
+ pxor @t[1], @y[1]
+ pshufd \$0x93, @t[2], @t[2]
+ pxor @t[5], @y[0]
+ pxor @t[6], @y[1]
+ pxor @t[7], @y[0]
+ pshufd \$0x93, @t[4], @t[4]
+ pxor @t[6], @t[7] # clobber t[7]
+ pxor @y[0], @y[1]
+
+ pxor @t[0], @y[3]
+ pshufd \$0x93, @t[0], @t[0]
+ pxor @t[1], @y[2]
+ pxor @t[1], @y[4]
+ pxor @t[2], @y[2]
+ pshufd \$0x93, @t[1], @t[1]
+ pxor @t[2], @y[3]
+ pxor @t[2], @y[5]
+ pxor @t[7], @y[2]
+ pshufd \$0x93, @t[2], @t[2]
+ pxor @t[3], @y[3]
+ pxor @t[3], @y[6]
+ pxor @t[3], @y[4]
+ pshufd \$0x93, @t[3], @t[3]
+ pxor @t[4], @y[7]
+ pxor @t[4], @y[5]
+ pxor @t[7], @y[7]
+ pxor @t[5], @y[3]
+ pxor @t[4], @y[4]
+ pxor @t[5], @t[7] # clobber t[7] even more
+
+ pxor @t[7], @y[5]
+ pshufd \$0x93, @t[4], @t[4]
+ pxor @t[7], @y[6]
+ pxor @t[7], @y[4]
+
+ pxor @t[5], @t[7]
+ pshufd \$0x93, @t[5], @t[5]
+ pxor @t[6], @t[7] # restore t[7]
+
+ # multiplication by 0x0d
+ pxor @y[7], @y[4]
+ pxor @t[4], @y[7]
+ pshufd \$0x93, @t[6], @t[6]
+ pxor @t[0], @y[2]
+ pxor @t[5], @y[7]
+ pxor @t[2], @y[2]
+ pshufd \$0x93, @t[7], @t[7]
+
+ pxor @y[1], @y[3]
+ pxor @t[1], @y[1]
+ pxor @t[0], @y[0]
+ pxor @t[0], @y[3]
+ pxor @t[5], @y[1]
+ pxor @t[5], @y[0]
+ pxor @t[7], @y[1]
+ pshufd \$0x93, @t[0], @t[0]
+ pxor @t[6], @y[0]
+ pxor @y[1], @y[3]
+ pxor @t[1], @y[4]
+ pshufd \$0x93, @t[1], @t[1]
+
+ pxor @t[7], @y[7]
+ pxor @t[2], @y[4]
+ pxor @t[2], @y[5]
+ pshufd \$0x93, @t[2], @t[2]
+ pxor @t[6], @y[2]
+ pxor @t[3], @t[6] # clobber t[6]
+ pxor @y[7], @y[4]
+ pxor @t[6], @y[3]
+
+ pxor @t[6], @y[6]
+ pxor @t[5], @y[5]
+ pxor @t[4], @y[6]
+ pshufd \$0x93, @t[4], @t[4]
+ pxor @t[6], @y[5]
+ pxor @t[7], @y[6]
+ pxor @t[3], @t[6] # restore t[6]
+
+ pshufd \$0x93, @t[5], @t[5]
+ pshufd \$0x93, @t[6], @t[6]
+ pshufd \$0x93, @t[7], @t[7]
+ pshufd \$0x93, @t[3], @t[3]
+
+ # multiplication by 0x09
+ pxor @y[1], @y[4]
+ pxor @y[1], @t[1] # t[1]=y[1]
+ pxor @t[5], @t[0] # clobber t[0]
+ pxor @t[5], @t[1]
+ pxor @t[0], @y[3]
+ pxor @y[0], @t[0] # t[0]=y[0]
+ pxor @t[6], @t[1]
+ pxor @t[7], @t[6] # clobber t[6]
+ pxor @t[1], @y[4]
+ pxor @t[4], @y[7]
+ pxor @y[4], @t[4] # t[4]=y[4]
+ pxor @t[3], @y[6]
+ pxor @y[3], @t[3] # t[3]=y[3]
+ pxor @t[2], @y[5]
+ pxor @y[2], @t[2] # t[2]=y[2]
+ pxor @t[7], @t[3]
+ pxor @y[5], @t[5] # t[5]=y[5]
+ pxor @t[6], @t[2]
+ pxor @t[6], @t[5]
+ pxor @y[6], @t[6] # t[6]=y[6]
+ pxor @y[7], @t[7] # t[7]=y[7]
+
+ movdqa @t[0],@XMM[0]
+ movdqa @t[1],@XMM[1]
+ movdqa @t[2],@XMM[2]
+ movdqa @t[3],@XMM[3]
+ movdqa @t[4],@XMM[4]
+ movdqa @t[5],@XMM[5]
+ movdqa @t[6],@XMM[6]
+ movdqa @t[7],@XMM[7]
+___
+}
+
+sub aesenc { # not used
+my @b=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+ movdqa 0x30($const),@t[0] # .LSR
+___
+ &ShiftRows (@b,@t[0]);
+ &Sbox (@b,@t);
+ &MixColumns (@b[0,1,4,6,3,7,2,5],@t);
+}
+
+sub aesenclast { # not used
+my @b=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+ movdqa 0x40($const),@t[0] # .LSRM0
+___
+ &ShiftRows (@b,@t[0]);
+ &Sbox (@b,@t);
+$code.=<<___
+ pxor 0x00($key),@b[0]
+ pxor 0x10($key),@b[1]
+ pxor 0x20($key),@b[4]
+ pxor 0x30($key),@b[6]
+ pxor 0x40($key),@b[3]
+ pxor 0x50($key),@b[7]
+ pxor 0x60($key),@b[2]
+ pxor 0x70($key),@b[5]
+___
+}
+
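+# swapmove is the classic delta swap used for bit-matrix transposition:
+#	sel = ((b >> n) ^ a) & mask;  a ^= sel;  b ^= sel << n;
+# i.e. the mask-selected bits of $a are exchanged with the bits of $b
+# sitting n positions higher.  Applied with n=1,2,4 and the .LBS0-.LBS2
+# masks (0x55.., 0x33.., 0x0f.., see _bsaes_const) it transposes an
+# 8x8 bit matrix, converting between byte- and bit-sliced form.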
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+ movdqa $b,$t
+ psrlq \$$n,$b
+ pxor $a,$b
+ pand $mask,$b
+ pxor $b,$a
+ psllq \$$n,$b
+ pxor $t,$b
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+ movdqa $b0,$t0
+ psrlq \$$n,$b0
+ movdqa $b1,$t1
+ psrlq \$$n,$b1
+ pxor $a0,$b0
+ pxor $a1,$b1
+ pand $mask,$b0
+ pand $mask,$b1
+ pxor $b0,$a0
+ psllq \$$n,$b0
+ pxor $b1,$a1
+ psllq \$$n,$b1
+ pxor $t0,$b0
+ pxor $t1,$b1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+ movdqa 0x00($const),$t0 # .LBS0
+ movdqa 0x10($const),$t1 # .LBS1
+___
+ &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+ &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+ movdqa 0x20($const),$t0 # .LBS2
+___
+ &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+ &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+ &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+.text
+
+.extern asm_AES_encrypt
+.extern asm_AES_decrypt
+
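+# _bsaes_encrypt8 expects the eight input blocks in %xmm0-%xmm7, the
+# bit-sliced key schedule pointer in %rax and the round count in %r10d
+# (judging by the callers below); the encrypted blocks come back in the
+# permuted register order [0,1,4,6,3,7,2,5] noted at the store sites.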
+.type _bsaes_encrypt8,\@abi-omnipotent
+.align 64
+_bsaes_encrypt8:
+ lea .LBS0(%rip), $const # constants table
+
+ movdqa ($key), @XMM[9] # round 0 key
+ lea 0x10($key), $key
+ movdqa 0x60($const), @XMM[8] # .LM0SR
+ pxor @XMM[9], @XMM[0] # xor with round0 key
+ pxor @XMM[9], @XMM[1]
+ pshufb @XMM[8], @XMM[0]
+ pxor @XMM[9], @XMM[2]
+ pshufb @XMM[8], @XMM[1]
+ pxor @XMM[9], @XMM[3]
+ pshufb @XMM[8], @XMM[2]
+ pxor @XMM[9], @XMM[4]
+ pshufb @XMM[8], @XMM[3]
+ pxor @XMM[9], @XMM[5]
+ pshufb @XMM[8], @XMM[4]
+ pxor @XMM[9], @XMM[6]
+ pshufb @XMM[8], @XMM[5]
+ pxor @XMM[9], @XMM[7]
+ pshufb @XMM[8], @XMM[6]
+ pshufb @XMM[8], @XMM[7]
+_bsaes_encrypt8_bitslice:
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ dec $rounds
+ jmp .Lenc_sbox
+.align 16
+.Lenc_loop:
+___
+ &ShiftRows (@XMM[0..7, 8]);
+$code.=".Lenc_sbox:\n";
+ &Sbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ dec $rounds
+ jl .Lenc_done
+___
+ &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+ movdqa 0x30($const), @XMM[8] # .LSR
+ jnz .Lenc_loop
+ movdqa 0x40($const), @XMM[8] # .LSRM0
+ jmp .Lenc_loop
+.align 16
+.Lenc_done:
+___
+ # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+ &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+ movdqa ($key), @XMM[8] # last round key
+ pxor @XMM[8], @XMM[4]
+ pxor @XMM[8], @XMM[6]
+ pxor @XMM[8], @XMM[3]
+ pxor @XMM[8], @XMM[7]
+ pxor @XMM[8], @XMM[2]
+ pxor @XMM[8], @XMM[5]
+ pxor @XMM[8], @XMM[0]
+ pxor @XMM[8], @XMM[1]
+ ret
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+
+.type _bsaes_decrypt8,\@abi-omnipotent
+.align 64
+_bsaes_decrypt8:
+ lea .LBS0(%rip), $const # constants table
+
+ movdqa ($key), @XMM[9] # round 0 key
+ lea 0x10($key), $key
+ movdqa -0x30($const), @XMM[8] # .LM0ISR
+ pxor @XMM[9], @XMM[0] # xor with round0 key
+ pxor @XMM[9], @XMM[1]
+ pshufb @XMM[8], @XMM[0]
+ pxor @XMM[9], @XMM[2]
+ pshufb @XMM[8], @XMM[1]
+ pxor @XMM[9], @XMM[3]
+ pshufb @XMM[8], @XMM[2]
+ pxor @XMM[9], @XMM[4]
+ pshufb @XMM[8], @XMM[3]
+ pxor @XMM[9], @XMM[5]
+ pshufb @XMM[8], @XMM[4]
+ pxor @XMM[9], @XMM[6]
+ pshufb @XMM[8], @XMM[5]
+ pxor @XMM[9], @XMM[7]
+ pshufb @XMM[8], @XMM[6]
+ pshufb @XMM[8], @XMM[7]
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ dec $rounds
+ jmp .Ldec_sbox
+.align 16
+.Ldec_loop:
+___
+ &ShiftRows (@XMM[0..7, 8]);
+$code.=".Ldec_sbox:\n";
+ &InvSbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ dec $rounds
+ jl .Ldec_done
+___
+ &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+ movdqa -0x10($const), @XMM[8] # .LISR
+ jnz .Ldec_loop
+ movdqa -0x20($const), @XMM[8] # .LISRM0
+ jmp .Ldec_loop
+.align 16
+.Ldec_done:
+___
+ &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+ movdqa ($key), @XMM[8] # last round key
+ pxor @XMM[8], @XMM[6]
+ pxor @XMM[8], @XMM[4]
+ pxor @XMM[8], @XMM[2]
+ pxor @XMM[8], @XMM[7]
+ pxor @XMM[8], @XMM[3]
+ pxor @XMM[8], @XMM[5]
+ pxor @XMM[8], @XMM[0]
+ pxor @XMM[8], @XMM[1]
+ ret
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("%rax","%rcx","%r10d","%r11");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+ &swapmove (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+ #&swapmove(@x[2,3],1,$t0,$t2,$t3);
+ movdqa @x[0], @x[2]
+ movdqa @x[1], @x[3]
+___
+ #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+ &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+ #&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+ movdqa @x[0], @x[4]
+ movdqa @x[2], @x[6]
+ movdqa @x[1], @x[5]
+ movdqa @x[3], @x[7]
+___
+ &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
+ &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
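+# _bsaes_key_convert turns a standard AES key schedule into bit-sliced
+# form, 128 bytes per round.  Four of the eight slices are complemented
+# (the "pnot" XORs with .LNOT below) to absorb inverters of the
+# bit-sliced S-box circuit, and .L63 -- the S-box affine constant 0x63
+# replicated across the register -- is returned in %xmm7 so that callers
+# can fold it into the outermost round key ("fix up last round key" /
+# "fix up round 0 key" at the call sites).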
+$code.=<<___;
+.type _bsaes_key_convert,\@abi-omnipotent
+.align 16
+_bsaes_key_convert:
+ lea .LBS1(%rip), $const
+ movdqu ($inp), %xmm7 # load round 0 key
+ movdqa -0x10($const), %xmm8 # .LBS0
+ movdqa 0x00($const), %xmm9 # .LBS1
+ movdqa 0x10($const), %xmm10 # .LBS2
+ movdqa 0x40($const), %xmm13 # .LM0
+ movdqa 0x60($const), %xmm14 # .LNOT
+
+ movdqu 0x10($inp), %xmm6 # load round 1 key
+ lea 0x10($inp), $inp
+ movdqa %xmm7, ($out) # save round 0 key
+ lea 0x10($out), $out
+ dec $rounds
+ jmp .Lkey_loop
+.align 16
+.Lkey_loop:
+ pshufb %xmm13, %xmm6 # .LM0
+ movdqa %xmm6, %xmm7
+___
+ &bitslice_key (map("%xmm$_",(0..7, 8..12)));
+$code.=<<___;
+ pxor %xmm14, %xmm5 # "pnot"
+ pxor %xmm14, %xmm6
+ pxor %xmm14, %xmm0
+ pxor %xmm14, %xmm1
+ lea 0x10($inp), $inp
+ movdqa %xmm0, 0x00($out) # write bit-sliced round key
+ movdqa %xmm1, 0x10($out)
+ movdqa %xmm2, 0x20($out)
+ movdqa %xmm3, 0x30($out)
+ movdqa %xmm4, 0x40($out)
+ movdqa %xmm5, 0x50($out)
+ movdqa %xmm6, 0x60($out)
+ movdqa %xmm7, 0x70($out)
+ lea 0x80($out),$out
+ movdqu ($inp), %xmm6 # load next round key
+ dec $rounds
+ jnz .Lkey_loop
+
+ movdqa 0x70($const), %xmm7 # .L63
+ #movdqa %xmm6, ($out) # don't save last round key
+ ret
+.size _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0 && !$win64) {	# the following four functions are an unsupported
+			# interface used only for benchmarking...
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type bsaes_enc_key_convert,\@function,2
+.align 16
+bsaes_enc_key_convert:
+ mov 240($inp),%r10d # pass rounds
+ mov $inp,%rcx # pass key
+ mov $out,%rax # pass key schedule
+ call _bsaes_key_convert
+ pxor %xmm6,%xmm7 # fix up last round key
+ movdqa %xmm7,(%rax) # save last round key
+ ret
+.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type bsaes_encrypt_128,\@function,4
+.align 16
+bsaes_encrypt_128:
+.Lenc128_loop:
+ movdqu 0x00($inp), @XMM[0] # load input
+ movdqu 0x10($inp), @XMM[1]
+ movdqu 0x20($inp), @XMM[2]
+ movdqu 0x30($inp), @XMM[3]
+ movdqu 0x40($inp), @XMM[4]
+ movdqu 0x50($inp), @XMM[5]
+ movdqu 0x60($inp), @XMM[6]
+ movdqu 0x70($inp), @XMM[7]
+ mov $key, %rax # pass the $key
+ lea 0x80($inp), $inp
+ mov \$10,%r10d
+
+ call _bsaes_encrypt8
+
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ movdqu @XMM[3], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[2], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+ sub \$0x80,$len
+ ja .Lenc128_loop
+ ret
+.size bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type bsaes_dec_key_convert,\@function,2
+.align 16
+bsaes_dec_key_convert:
+ mov 240($inp),%r10d # pass rounds
+ mov $inp,%rcx # pass key
+ mov $out,%rax # pass key schedule
+ call _bsaes_key_convert
+ pxor ($out),%xmm7 # fix up round 0 key
+ movdqa %xmm6,(%rax) # save last round key
+ movdqa %xmm7,($out)
+ ret
+.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type bsaes_decrypt_128,\@function,4
+.align 16
+bsaes_decrypt_128:
+.Ldec128_loop:
+ movdqu 0x00($inp), @XMM[0] # load input
+ movdqu 0x10($inp), @XMM[1]
+ movdqu 0x20($inp), @XMM[2]
+ movdqu 0x30($inp), @XMM[3]
+ movdqu 0x40($inp), @XMM[4]
+ movdqu 0x50($inp), @XMM[5]
+ movdqu 0x60($inp), @XMM[6]
+ movdqu 0x70($inp), @XMM[7]
+ mov $key, %rax # pass the $key
+ lea 0x80($inp), $inp
+ mov \$10,%r10d
+
+ call _bsaes_decrypt8
+
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[3], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+ sub \$0x80,$len
+ ja .Ldec128_loop
+ ret
+.size bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+######################################################################
+#
+# OpenSSL interface
+#
+my ($arg1,$arg2,$arg3,$arg4,$arg5,$arg6)=$win64 ? ("%rcx","%rdx","%r8","%r9","%r10","%r11d")
+ : ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+my ($inp,$out,$len,$key)=("%r12","%r13","%r14","%r15");
+
+if ($ecb) {
+$code.=<<___;
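+# void	bsaes_ecb_encrypt_blocks(const unsigned char *in, unsigned char *out,
+#				size_t len, const AES_KEY *key);
+# Prototype inferred from the register use below; len is counted in
+# 16-byte blocks, as the 8-block main loop and the one-block
+# .Lecb_enc_short tail imply.  The decrypt twin follows the same layout.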
+.globl bsaes_ecb_encrypt_blocks
+.type bsaes_ecb_encrypt_blocks,\@abi-omnipotent
+.align 16
+bsaes_ecb_encrypt_blocks:
+ mov %rsp, %rax
+.Lecb_enc_prologue:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea -0x48(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ lea -0xa0(%rsp), %rsp
+ movaps %xmm6, 0x40(%rsp)
+ movaps %xmm7, 0x50(%rsp)
+ movaps %xmm8, 0x60(%rsp)
+ movaps %xmm9, 0x70(%rsp)
+ movaps %xmm10, 0x80(%rsp)
+ movaps %xmm11, 0x90(%rsp)
+ movaps %xmm12, 0xa0(%rsp)
+ movaps %xmm13, 0xb0(%rsp)
+ movaps %xmm14, 0xc0(%rsp)
+ movaps %xmm15, 0xd0(%rsp)
+.Lecb_enc_body:
+___
+$code.=<<___;
+ mov %rsp,%rbp # backup %rsp
+ mov 240($arg4),%eax # rounds
+ mov $arg1,$inp # backup arguments
+ mov $arg2,$out
+ mov $arg3,$len
+ mov $arg4,$key
+ cmp \$8,$arg3
+ jb .Lecb_enc_short
+
+ mov %eax,%ebx # backup rounds
+ shl \$7,%rax # 128 bytes per inner round key
+ sub \$`128-32`,%rax # size of bit-sliced key schedule
+ sub %rax,%rsp
+ mov %rsp,%rax # pass key schedule
+ mov $key,%rcx # pass key
+ mov %ebx,%r10d # pass rounds
+ call _bsaes_key_convert
+ pxor %xmm6,%xmm7 # fix up last round key
+ movdqa %xmm7,(%rax) # save last round key
+
+ sub \$8,$len
+.Lecb_enc_loop:
+ movdqu 0x00($inp), @XMM[0] # load input
+ movdqu 0x10($inp), @XMM[1]
+ movdqu 0x20($inp), @XMM[2]
+ movdqu 0x30($inp), @XMM[3]
+ movdqu 0x40($inp), @XMM[4]
+ movdqu 0x50($inp), @XMM[5]
+ mov %rsp, %rax # pass key schedule
+ movdqu 0x60($inp), @XMM[6]
+ mov %ebx,%r10d # pass rounds
+ movdqu 0x70($inp), @XMM[7]
+ lea 0x80($inp), $inp
+
+ call _bsaes_encrypt8
+
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ movdqu @XMM[3], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[2], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+ sub \$8,$len
+ jnc .Lecb_enc_loop
+
+ add \$8,$len
+ jz .Lecb_enc_done
+
+ movdqu 0x00($inp), @XMM[0] # load input
+ mov %rsp, %rax # pass key schedule
+ mov %ebx,%r10d # pass rounds
+ cmp \$2,$len
+ jb .Lecb_enc_one
+ movdqu 0x10($inp), @XMM[1]
+ je .Lecb_enc_two
+ movdqu 0x20($inp), @XMM[2]
+ cmp \$4,$len
+ jb .Lecb_enc_three
+ movdqu 0x30($inp), @XMM[3]
+ je .Lecb_enc_four
+ movdqu 0x40($inp), @XMM[4]
+ cmp \$6,$len
+ jb .Lecb_enc_five
+ movdqu 0x50($inp), @XMM[5]
+ je .Lecb_enc_six
+ movdqu 0x60($inp), @XMM[6]
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ movdqu @XMM[3], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[2], 0x60($out)
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_six:
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ movdqu @XMM[3], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_five:
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ movdqu @XMM[3], 0x40($out)
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_four:
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_three:
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_two:
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_one:
+ call _bsaes_encrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ jmp .Lecb_enc_done
+.align 16
+.Lecb_enc_short:
+ lea ($inp), $arg1
+ lea ($out), $arg2
+ lea ($key), $arg3
+ call asm_AES_encrypt
+ lea 16($inp), $inp
+ lea 16($out), $out
+ dec $len
+ jnz .Lecb_enc_short
+
+.Lecb_enc_done:
+ lea (%rsp),%rax
+ pxor %xmm0, %xmm0
+.Lecb_enc_bzero: # wipe key schedule [if any]
+ movdqa %xmm0, 0x00(%rax)
+ movdqa %xmm0, 0x10(%rax)
+ lea 0x20(%rax), %rax
+ cmp %rax, %rbp
+ jb .Lecb_enc_bzero
+
+ lea (%rbp),%rsp # restore %rsp
+___
+$code.=<<___ if ($win64);
+ movaps 0x40(%rbp), %xmm6
+ movaps 0x50(%rbp), %xmm7
+ movaps 0x60(%rbp), %xmm8
+ movaps 0x70(%rbp), %xmm9
+ movaps 0x80(%rbp), %xmm10
+ movaps 0x90(%rbp), %xmm11
+ movaps 0xa0(%rbp), %xmm12
+ movaps 0xb0(%rbp), %xmm13
+ movaps 0xc0(%rbp), %xmm14
+ movaps 0xd0(%rbp), %xmm15
+ lea 0xa0(%rbp), %rsp
+___
+$code.=<<___;
+ mov 0x48(%rsp), %r15
+ mov 0x50(%rsp), %r14
+ mov 0x58(%rsp), %r13
+ mov 0x60(%rsp), %r12
+ mov 0x68(%rsp), %rbx
+ mov 0x70(%rsp), %rax
+ lea 0x78(%rsp), %rsp
+ mov %rax, %rbp
+.Lecb_enc_epilogue:
+ ret
+.size bsaes_ecb_encrypt_blocks,.-bsaes_ecb_encrypt_blocks
+
+.globl bsaes_ecb_decrypt_blocks
+.type bsaes_ecb_decrypt_blocks,\@abi-omnipotent
+.align 16
+bsaes_ecb_decrypt_blocks:
+ mov %rsp, %rax
+.Lecb_dec_prologue:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea -0x48(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ lea -0xa0(%rsp), %rsp
+ movaps %xmm6, 0x40(%rsp)
+ movaps %xmm7, 0x50(%rsp)
+ movaps %xmm8, 0x60(%rsp)
+ movaps %xmm9, 0x70(%rsp)
+ movaps %xmm10, 0x80(%rsp)
+ movaps %xmm11, 0x90(%rsp)
+ movaps %xmm12, 0xa0(%rsp)
+ movaps %xmm13, 0xb0(%rsp)
+ movaps %xmm14, 0xc0(%rsp)
+ movaps %xmm15, 0xd0(%rsp)
+.Lecb_dec_body:
+___
+$code.=<<___;
+ mov %rsp,%rbp # backup %rsp
+ mov 240($arg4),%eax # rounds
+ mov $arg1,$inp # backup arguments
+ mov $arg2,$out
+ mov $arg3,$len
+ mov $arg4,$key
+ cmp \$8,$arg3
+ jb .Lecb_dec_short
+
+ mov %eax,%ebx # backup rounds
+ shl \$7,%rax # 128 bytes per inner round key
+ sub \$`128-32`,%rax # size of bit-sliced key schedule
+ sub %rax,%rsp
+ mov %rsp,%rax # pass key schedule
+ mov $key,%rcx # pass key
+ mov %ebx,%r10d # pass rounds
+ call _bsaes_key_convert
+	pxor	(%rsp),%xmm7		# fix up round 0 key
+ movdqa %xmm6,(%rax) # save last round key
+ movdqa %xmm7,(%rsp)
+
+ sub \$8,$len
+.Lecb_dec_loop:
+ movdqu 0x00($inp), @XMM[0] # load input
+ movdqu 0x10($inp), @XMM[1]
+ movdqu 0x20($inp), @XMM[2]
+ movdqu 0x30($inp), @XMM[3]
+ movdqu 0x40($inp), @XMM[4]
+ movdqu 0x50($inp), @XMM[5]
+ mov %rsp, %rax # pass key schedule
+ movdqu 0x60($inp), @XMM[6]
+ mov %ebx,%r10d # pass rounds
+ movdqu 0x70($inp), @XMM[7]
+ lea 0x80($inp), $inp
+
+ call _bsaes_decrypt8
+
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[3], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+ sub \$8,$len
+ jnc .Lecb_dec_loop
+
+ add \$8,$len
+ jz .Lecb_dec_done
+
+ movdqu 0x00($inp), @XMM[0] # load input
+ mov %rsp, %rax # pass key schedule
+ mov %ebx,%r10d # pass rounds
+ cmp \$2,$len
+ jb .Lecb_dec_one
+ movdqu 0x10($inp), @XMM[1]
+ je .Lecb_dec_two
+ movdqu 0x20($inp), @XMM[2]
+ cmp \$4,$len
+ jb .Lecb_dec_three
+ movdqu 0x30($inp), @XMM[3]
+ je .Lecb_dec_four
+ movdqu 0x40($inp), @XMM[4]
+ cmp \$6,$len
+ jb .Lecb_dec_five
+ movdqu 0x50($inp), @XMM[5]
+ je .Lecb_dec_six
+ movdqu 0x60($inp), @XMM[6]
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[3], 0x60($out)
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_six:
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_five:
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_four:
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_three:
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_two:
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_one:
+ call _bsaes_decrypt8
+ movdqu @XMM[0], 0x00($out) # write output
+ jmp .Lecb_dec_done
+.align 16
+.Lecb_dec_short:
+ lea ($inp), $arg1
+ lea ($out), $arg2
+ lea ($key), $arg3
+ call asm_AES_decrypt
+ lea 16($inp), $inp
+ lea 16($out), $out
+ dec $len
+ jnz .Lecb_dec_short
+
+.Lecb_dec_done:
+ lea (%rsp),%rax
+ pxor %xmm0, %xmm0
+.Lecb_dec_bzero: # wipe key schedule [if any]
+ movdqa %xmm0, 0x00(%rax)
+ movdqa %xmm0, 0x10(%rax)
+ lea 0x20(%rax), %rax
+ cmp %rax, %rbp
+ jb .Lecb_dec_bzero
+
+ lea (%rbp),%rsp # restore %rsp
+___
+$code.=<<___ if ($win64);
+ movaps 0x40(%rbp), %xmm6
+ movaps 0x50(%rbp), %xmm7
+ movaps 0x60(%rbp), %xmm8
+ movaps 0x70(%rbp), %xmm9
+ movaps 0x80(%rbp), %xmm10
+ movaps 0x90(%rbp), %xmm11
+ movaps 0xa0(%rbp), %xmm12
+ movaps 0xb0(%rbp), %xmm13
+ movaps 0xc0(%rbp), %xmm14
+ movaps 0xd0(%rbp), %xmm15
+ lea 0xa0(%rbp), %rsp
+___
+$code.=<<___;
+ mov 0x48(%rsp), %r15
+ mov 0x50(%rsp), %r14
+ mov 0x58(%rsp), %r13
+ mov 0x60(%rsp), %r12
+ mov 0x68(%rsp), %rbx
+ mov 0x70(%rsp), %rax
+ lea 0x78(%rsp), %rsp
+ mov %rax, %rbp
+.Lecb_dec_epilogue:
+ ret
+.size bsaes_ecb_decrypt_blocks,.-bsaes_ecb_decrypt_blocks
+___
+}
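+# CBC encryption is inherently sequential, so bsaes_cbc_encrypt simply
+# forwards it to asm_AES_cbc_encrypt; only decryption, and only for
+# inputs of at least 128 bytes (8 blocks), takes the bit-sliced path.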
+$code.=<<___;
+.extern asm_AES_cbc_encrypt
+.globl bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,\@abi-omnipotent
+.align 16
+bsaes_cbc_encrypt:
+___
+$code.=<<___ if ($win64);
+ mov 48(%rsp),$arg6 # pull direction flag
+___
+$code.=<<___;
+ cmp \$0,$arg6
+ jne asm_AES_cbc_encrypt
+ cmp \$128,$arg3
+ jb asm_AES_cbc_encrypt
+
+ mov %rsp, %rax
+.Lcbc_dec_prologue:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea -0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+ mov 0xa0(%rsp),$arg5 # pull ivp
+ lea -0xa0(%rsp), %rsp
+ movaps %xmm6, 0x40(%rsp)
+ movaps %xmm7, 0x50(%rsp)
+ movaps %xmm8, 0x60(%rsp)
+ movaps %xmm9, 0x70(%rsp)
+ movaps %xmm10, 0x80(%rsp)
+ movaps %xmm11, 0x90(%rsp)
+ movaps %xmm12, 0xa0(%rsp)
+ movaps %xmm13, 0xb0(%rsp)
+ movaps %xmm14, 0xc0(%rsp)
+ movaps %xmm15, 0xd0(%rsp)
+.Lcbc_dec_body:
+___
+$code.=<<___;
+ mov %rsp, %rbp # backup %rsp
+ mov 240($arg4), %eax # rounds
+ mov $arg1, $inp # backup arguments
+ mov $arg2, $out
+ mov $arg3, $len
+ mov $arg4, $key
+ mov $arg5, %rbx
+ shr \$4, $len # bytes to blocks
+
+ mov %eax, %edx # rounds
+ shl \$7, %rax # 128 bytes per inner round key
+ sub \$`128-32`, %rax # size of bit-sliced key schedule
+ sub %rax, %rsp
+
+ mov %rsp, %rax # pass key schedule
+ mov $key, %rcx # pass key
+ mov %edx, %r10d # pass rounds
+ call _bsaes_key_convert
+	pxor	(%rsp),%xmm7		# fix up round 0 key
+ movdqa %xmm6,(%rax) # save last round key
+ movdqa %xmm7,(%rsp)
+
+ movdqu (%rbx), @XMM[15] # load IV
+ sub \$8,$len
+.Lcbc_dec_loop:
+ movdqu 0x00($inp), @XMM[0] # load input
+ movdqu 0x10($inp), @XMM[1]
+ movdqu 0x20($inp), @XMM[2]
+ movdqu 0x30($inp), @XMM[3]
+ movdqu 0x40($inp), @XMM[4]
+ movdqu 0x50($inp), @XMM[5]
+ mov %rsp, %rax # pass key schedule
+ movdqu 0x60($inp), @XMM[6]
+ mov %edx,%r10d # pass rounds
+ movdqu 0x70($inp), @XMM[7]
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+
+ call _bsaes_decrypt8
+
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[8], @XMM[1]
+ movdqu 0x20($inp), @XMM[10]
+ pxor @XMM[9], @XMM[6]
+ movdqu 0x30($inp), @XMM[11]
+ pxor @XMM[10], @XMM[4]
+ movdqu 0x40($inp), @XMM[12]
+ pxor @XMM[11], @XMM[2]
+ movdqu 0x50($inp), @XMM[13]
+ pxor @XMM[12], @XMM[7]
+ movdqu 0x60($inp), @XMM[14]
+ pxor @XMM[13], @XMM[3]
+ movdqu 0x70($inp), @XMM[15] # IV
+ pxor @XMM[14], @XMM[5]
+ movdqu @XMM[0], 0x00($out) # write output
+ lea 0x80($inp), $inp
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[3], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+ sub \$8,$len
+ jnc .Lcbc_dec_loop
+
+ add \$8,$len
+ jz .Lcbc_dec_done
+
+ movdqu 0x00($inp), @XMM[0] # load input
+ mov %rsp, %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+ cmp \$2,$len
+ jb .Lcbc_dec_one
+ movdqu 0x10($inp), @XMM[1]
+ je .Lcbc_dec_two
+ movdqu 0x20($inp), @XMM[2]
+ cmp \$4,$len
+ jb .Lcbc_dec_three
+ movdqu 0x30($inp), @XMM[3]
+ je .Lcbc_dec_four
+ movdqu 0x40($inp), @XMM[4]
+ cmp \$6,$len
+ jb .Lcbc_dec_five
+ movdqu 0x50($inp), @XMM[5]
+ je .Lcbc_dec_six
+ movdqu 0x60($inp), @XMM[6]
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+ call _bsaes_decrypt8
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[8], @XMM[1]
+ movdqu 0x20($inp), @XMM[10]
+ pxor @XMM[9], @XMM[6]
+ movdqu 0x30($inp), @XMM[11]
+ pxor @XMM[10], @XMM[4]
+ movdqu 0x40($inp), @XMM[12]
+ pxor @XMM[11], @XMM[2]
+ movdqu 0x50($inp), @XMM[13]
+ pxor @XMM[12], @XMM[7]
+ movdqu 0x60($inp), @XMM[15] # IV
+ pxor @XMM[13], @XMM[3]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[3], 0x60($out)
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_six:
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+ call _bsaes_decrypt8
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[8], @XMM[1]
+ movdqu 0x20($inp), @XMM[10]
+ pxor @XMM[9], @XMM[6]
+ movdqu 0x30($inp), @XMM[11]
+ pxor @XMM[10], @XMM[4]
+ movdqu 0x40($inp), @XMM[12]
+ pxor @XMM[11], @XMM[2]
+ movdqu 0x50($inp), @XMM[15] # IV
+ pxor @XMM[12], @XMM[7]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_five:
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+ call _bsaes_decrypt8
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[8], @XMM[1]
+ movdqu 0x20($inp), @XMM[10]
+ pxor @XMM[9], @XMM[6]
+ movdqu 0x30($inp), @XMM[11]
+ pxor @XMM[10], @XMM[4]
+ movdqu 0x40($inp), @XMM[15] # IV
+ pxor @XMM[11], @XMM[2]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_four:
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+ call _bsaes_decrypt8
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[8], @XMM[1]
+ movdqu 0x20($inp), @XMM[10]
+ pxor @XMM[9], @XMM[6]
+ movdqu 0x30($inp), @XMM[15] # IV
+ pxor @XMM[10], @XMM[4]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_three:
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+ call _bsaes_decrypt8
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[8], @XMM[1]
+ movdqu 0x20($inp), @XMM[15] # IV
+ pxor @XMM[9], @XMM[6]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_two:
+ movdqa @XMM[15], 0x20(%rbp) # put aside IV
+ call _bsaes_decrypt8
+ pxor 0x20(%rbp), @XMM[0] # ^= IV
+ movdqu 0x00($inp), @XMM[8] # re-load input
+ movdqu 0x10($inp), @XMM[15] # IV
+ pxor @XMM[8], @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_one:
+ lea ($inp), $arg1
+ lea 0x20(%rbp), $arg2 # buffer output
+ lea ($key), $arg3
+ call asm_AES_decrypt # doesn't touch %xmm
+ pxor 0x20(%rbp), @XMM[15] # ^= IV
+ movdqu @XMM[15], ($out) # write output
+ movdqa @XMM[0], @XMM[15] # IV
+
+.Lcbc_dec_done:
+ movdqu @XMM[15], (%rbx) # return IV
+ lea (%rsp), %rax
+ pxor %xmm0, %xmm0
+.Lcbc_dec_bzero: # wipe key schedule [if any]
+ movdqa %xmm0, 0x00(%rax)
+ movdqa %xmm0, 0x10(%rax)
+ lea 0x20(%rax), %rax
+ cmp %rax, %rbp
+ ja .Lcbc_dec_bzero
+
+ lea (%rbp),%rsp # restore %rsp
+___
+$code.=<<___ if ($win64);
+ movaps 0x40(%rbp), %xmm6
+ movaps 0x50(%rbp), %xmm7
+ movaps 0x60(%rbp), %xmm8
+ movaps 0x70(%rbp), %xmm9
+ movaps 0x80(%rbp), %xmm10
+ movaps 0x90(%rbp), %xmm11
+ movaps 0xa0(%rbp), %xmm12
+ movaps 0xb0(%rbp), %xmm13
+ movaps 0xc0(%rbp), %xmm14
+ movaps 0xd0(%rbp), %xmm15
+ lea 0xa0(%rbp), %rsp
+___
+$code.=<<___;
+ mov 0x48(%rsp), %r15
+ mov 0x50(%rsp), %r14
+ mov 0x58(%rsp), %r13
+ mov 0x60(%rsp), %r12
+ mov 0x68(%rsp), %rbx
+ mov 0x70(%rsp), %rax
+ lea 0x78(%rsp), %rsp
+ mov %rax, %rbp
+.Lcbc_dec_epilogue:
+ ret
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+
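+# void	bsaes_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
+#				size_t len, const AES_KEY *key,
+#				const unsigned char ivec[16]);
+# Prototype inferred from the code below: len is in 16-byte blocks and
+# only the low 32 bits of the counter are incremented, as the .LADD*
+# constants imply.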
+.globl bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
+.align 16
+bsaes_ctr32_encrypt_blocks:
+ mov %rsp, %rax
+.Lctr_enc_prologue:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea -0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+ mov 0xa0(%rsp),$arg5 # pull ivp
+ lea -0xa0(%rsp), %rsp
+ movaps %xmm6, 0x40(%rsp)
+ movaps %xmm7, 0x50(%rsp)
+ movaps %xmm8, 0x60(%rsp)
+ movaps %xmm9, 0x70(%rsp)
+ movaps %xmm10, 0x80(%rsp)
+ movaps %xmm11, 0x90(%rsp)
+ movaps %xmm12, 0xa0(%rsp)
+ movaps %xmm13, 0xb0(%rsp)
+ movaps %xmm14, 0xc0(%rsp)
+ movaps %xmm15, 0xd0(%rsp)
+.Lctr_enc_body:
+___
+$code.=<<___;
+ mov %rsp, %rbp # backup %rsp
+ movdqu ($arg5), %xmm0 # load counter
+ mov 240($arg4), %eax # rounds
+ mov $arg1, $inp # backup arguments
+ mov $arg2, $out
+ mov $arg3, $len
+ mov $arg4, $key
+ movdqa %xmm0, 0x20(%rbp) # copy counter
+ cmp \$8, $arg3
+ jb .Lctr_enc_short
+
+ mov %eax, %ebx # rounds
+ shl \$7, %rax # 128 bytes per inner round key
+ sub \$`128-32`, %rax # size of bit-sliced key schedule
+ sub %rax, %rsp
+
+ mov %rsp, %rax # pass key schedule
+ mov $key, %rcx # pass key
+ mov %ebx, %r10d # pass rounds
+ call _bsaes_key_convert
+ pxor %xmm6,%xmm7 # fix up last round key
+ movdqa %xmm7,(%rax) # save last round key
+
+ movdqa (%rsp), @XMM[9] # load round0 key
+ lea .LADD1(%rip), %r11
+ movdqa 0x20(%rbp), @XMM[0] # counter copy
+ movdqa -0x20(%r11), @XMM[8] # .LSWPUP
+ pshufb @XMM[8], @XMM[9] # byte swap upper part
+ pshufb @XMM[8], @XMM[0]
+ movdqa @XMM[9], (%rsp) # save adjusted round0 key
+ jmp .Lctr_enc_loop
+.align 16
+.Lctr_enc_loop:
+ movdqa @XMM[0], 0x20(%rbp) # save counter
+ movdqa @XMM[0], @XMM[1] # prepare 8 counter values
+ movdqa @XMM[0], @XMM[2]
+ paddd 0x00(%r11), @XMM[1] # .LADD1
+ movdqa @XMM[0], @XMM[3]
+ paddd 0x10(%r11), @XMM[2] # .LADD2
+ movdqa @XMM[0], @XMM[4]
+ paddd 0x20(%r11), @XMM[3] # .LADD3
+ movdqa @XMM[0], @XMM[5]
+ paddd 0x30(%r11), @XMM[4] # .LADD4
+ movdqa @XMM[0], @XMM[6]
+ paddd 0x40(%r11), @XMM[5] # .LADD5
+ movdqa @XMM[0], @XMM[7]
+ paddd 0x50(%r11), @XMM[6] # .LADD6
+ paddd 0x60(%r11), @XMM[7] # .LADD7
+
+	# Borrow the prologue from _bsaes_encrypt8 and use the opportunity
+	# to flip the byte order of the 32-bit counter
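+	# (.LSWPUPM0SR is .LSWPUP composed with .LM0SR, so a single pshufb
+	# both swaps the counter bytes back and applies the bit-slice load
+	# permutation)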
+ movdqa (%rsp), @XMM[9] # round 0 key
+ lea 0x10(%rsp), %rax # pass key schedule
+ movdqa -0x10(%r11), @XMM[8] # .LSWPUPM0SR
+ pxor @XMM[9], @XMM[0] # xor with round0 key
+ pxor @XMM[9], @XMM[1]
+ pshufb @XMM[8], @XMM[0]
+ pxor @XMM[9], @XMM[2]
+ pshufb @XMM[8], @XMM[1]
+ pxor @XMM[9], @XMM[3]
+ pshufb @XMM[8], @XMM[2]
+ pxor @XMM[9], @XMM[4]
+ pshufb @XMM[8], @XMM[3]
+ pxor @XMM[9], @XMM[5]
+ pshufb @XMM[8], @XMM[4]
+ pxor @XMM[9], @XMM[6]
+ pshufb @XMM[8], @XMM[5]
+ pxor @XMM[9], @XMM[7]
+ pshufb @XMM[8], @XMM[6]
+ lea .LBS0(%rip), %r11 # constants table
+ pshufb @XMM[8], @XMM[7]
+ mov %ebx,%r10d # pass rounds
+
+ call _bsaes_encrypt8_bitslice
+
+ sub \$8,$len
+ jc .Lctr_enc_loop_done
+
+ movdqu 0x00($inp), @XMM[8] # load input
+ movdqu 0x10($inp), @XMM[9]
+ movdqu 0x20($inp), @XMM[10]
+ movdqu 0x30($inp), @XMM[11]
+ movdqu 0x40($inp), @XMM[12]
+ movdqu 0x50($inp), @XMM[13]
+ movdqu 0x60($inp), @XMM[14]
+ movdqu 0x70($inp), @XMM[15]
+ lea 0x80($inp),$inp
+ pxor @XMM[0], @XMM[8]
+ movdqa 0x20(%rbp), @XMM[0] # load counter
+ pxor @XMM[9], @XMM[1]
+ movdqu @XMM[8], 0x00($out) # write output
+ pxor @XMM[10], @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ pxor @XMM[11], @XMM[6]
+ movdqu @XMM[4], 0x20($out)
+ pxor @XMM[12], @XMM[3]
+ movdqu @XMM[6], 0x30($out)
+ pxor @XMM[13], @XMM[7]
+ movdqu @XMM[3], 0x40($out)
+ pxor @XMM[14], @XMM[2]
+ movdqu @XMM[7], 0x50($out)
+ pxor @XMM[15], @XMM[5]
+ movdqu @XMM[2], 0x60($out)
+ lea .LADD1(%rip), %r11
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+ paddd 0x70(%r11), @XMM[0] # .LADD8
+ jnz .Lctr_enc_loop
+
+ jmp .Lctr_enc_done
+.align 16
+.Lctr_enc_loop_done:
+ add \$8, $len
+ movdqu 0x00($inp), @XMM[8] # load input
+ pxor @XMM[8], @XMM[0]
+ movdqu @XMM[0], 0x00($out) # write output
+ cmp \$2,$len
+ jb .Lctr_enc_done
+ movdqu 0x10($inp), @XMM[9]
+ pxor @XMM[9], @XMM[1]
+ movdqu @XMM[1], 0x10($out)
+ je .Lctr_enc_done
+ movdqu 0x20($inp), @XMM[10]
+ pxor @XMM[10], @XMM[4]
+ movdqu @XMM[4], 0x20($out)
+ cmp \$4,$len
+ jb .Lctr_enc_done
+ movdqu 0x30($inp), @XMM[11]
+ pxor @XMM[11], @XMM[6]
+ movdqu @XMM[6], 0x30($out)
+ je .Lctr_enc_done
+ movdqu 0x40($inp), @XMM[12]
+ pxor @XMM[12], @XMM[3]
+ movdqu @XMM[3], 0x40($out)
+ cmp \$6,$len
+ jb .Lctr_enc_done
+ movdqu 0x50($inp), @XMM[13]
+ pxor @XMM[13], @XMM[7]
+ movdqu @XMM[7], 0x50($out)
+ je .Lctr_enc_done
+ movdqu 0x60($inp), @XMM[14]
+ pxor @XMM[14], @XMM[2]
+ movdqu @XMM[2], 0x60($out)
+ jmp .Lctr_enc_done
+
+.align 16
+.Lctr_enc_short:
+ lea 0x20(%rbp), $arg1
+ lea 0x30(%rbp), $arg2
+ lea ($key), $arg3
+ call asm_AES_encrypt
+ movdqu ($inp), @XMM[1]
+ lea 16($inp), $inp
+ mov 0x2c(%rbp), %eax # load 32-bit counter
+ bswap %eax
+ pxor 0x30(%rbp), @XMM[1]
+ inc %eax # increment
+ movdqu @XMM[1], ($out)
+ bswap %eax
+ lea 16($out), $out
+	mov	%eax, 0x2c(%rsp)	# save 32-bit counter (%rsp==%rbp here: the short path allocates no key schedule)
+ dec $len
+ jnz .Lctr_enc_short
+
+.Lctr_enc_done:
+ lea (%rsp), %rax
+ pxor %xmm0, %xmm0
+.Lctr_enc_bzero: # wipe key schedule [if any]
+ movdqa %xmm0, 0x00(%rax)
+ movdqa %xmm0, 0x10(%rax)
+ lea 0x20(%rax), %rax
+ cmp %rax, %rbp
+ ja .Lctr_enc_bzero
+
+ lea (%rbp),%rsp # restore %rsp
+___
+$code.=<<___ if ($win64);
+ movaps 0x40(%rbp), %xmm6
+ movaps 0x50(%rbp), %xmm7
+ movaps 0x60(%rbp), %xmm8
+ movaps 0x70(%rbp), %xmm9
+ movaps 0x80(%rbp), %xmm10
+ movaps 0x90(%rbp), %xmm11
+ movaps 0xa0(%rbp), %xmm12
+ movaps 0xb0(%rbp), %xmm13
+ movaps 0xc0(%rbp), %xmm14
+ movaps 0xd0(%rbp), %xmm15
+ lea 0xa0(%rbp), %rsp
+___
+$code.=<<___;
+ mov 0x48(%rsp), %r15
+ mov 0x50(%rsp), %r14
+ mov 0x58(%rsp), %r13
+ mov 0x60(%rsp), %r12
+ mov 0x68(%rsp), %rbx
+ mov 0x70(%rsp), %rax
+ lea 0x78(%rsp), %rsp
+ mov %rax, %rbp
+.Lctr_enc_epilogue:
+ ret
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+# const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
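+# Unlike the ECB/CTR entry points above, len is in bytes here; a
+# trailing partial block is handled with ciphertext stealing (the
+# .Lxts_[en|de]c_steal loops below).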
+my ($twmask,$twres,$twtmp)=@XMM[13..15];
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,\@abi-omnipotent
+.align 16
+bsaes_xts_encrypt:
+ mov %rsp, %rax
+.Lxts_enc_prologue:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea -0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+ mov 0xa0(%rsp),$arg5 # pull key2
+ mov 0xa8(%rsp),$arg6 # pull ivp
+ lea -0xa0(%rsp), %rsp
+ movaps %xmm6, 0x40(%rsp)
+ movaps %xmm7, 0x50(%rsp)
+ movaps %xmm8, 0x60(%rsp)
+ movaps %xmm9, 0x70(%rsp)
+ movaps %xmm10, 0x80(%rsp)
+ movaps %xmm11, 0x90(%rsp)
+ movaps %xmm12, 0xa0(%rsp)
+ movaps %xmm13, 0xb0(%rsp)
+ movaps %xmm14, 0xc0(%rsp)
+ movaps %xmm15, 0xd0(%rsp)
+.Lxts_enc_body:
+___
+$code.=<<___;
+ mov %rsp, %rbp # backup %rsp
+ mov $arg1, $inp # backup arguments
+ mov $arg2, $out
+ mov $arg3, $len
+ mov $arg4, $key
+
+ lea ($arg6), $arg1
+ lea 0x20(%rbp), $arg2
+ lea ($arg5), $arg3
+ call asm_AES_encrypt # generate initial tweak
+
+ mov 240($key), %eax # rounds
+ mov $len, %rbx # backup $len
+
+ mov %eax, %edx # rounds
+ shl \$7, %rax # 128 bytes per inner round key
+ sub \$`128-32`, %rax # size of bit-sliced key schedule
+ sub %rax, %rsp
+
+ mov %rsp, %rax # pass key schedule
+ mov $key, %rcx # pass key
+ mov %edx, %r10d # pass rounds
+ call _bsaes_key_convert
+ pxor %xmm6, %xmm7 # fix up last round key
+ movdqa %xmm7, (%rax) # save last round key
+
+ and \$-16, $len
+ sub \$0x80, %rsp # place for tweak[8]
+ movdqa 0x20(%rbp), @XMM[7] # initial tweak
+
+ pxor $twtmp, $twtmp
+ movdqa .Lxts_magic(%rip), $twmask
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+
+ sub \$0x80, $len
+ jc .Lxts_enc_short
+ jmp .Lxts_enc_loop
+
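+	# Each tweak below is the previous one doubled in GF(2^128), i.e.
+	#	carry = tweak >> 127; tweak <<= 1; if (carry) tweak ^= 0x87;
+	# pcmpgtd against zero broadcasts the sign bit of each qword across
+	# its dwords, pshufd 0x13 routes the bit-127 carry to the low dword
+	# and the bit-63 carry to the third dword, and pand with .Lxts_magic
+	# (0x87,0,1,0) turns them into the reduction constant and the
+	# inter-qword carry that paddq (a per-qword left shift by one, via
+	# tweak+tweak) cannot propagate.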
+.align 16
+.Lxts_enc_loop:
+___
+ for ($i=0;$i<7;$i++) {
+ $code.=<<___;
+ pshufd \$0x13, $twtmp, $twres
+ pxor $twtmp, $twtmp
+ movdqa @XMM[7], @XMM[$i]
+ movdqa @XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+ pxor $twres, @XMM[7]
+___
+ $code.=<<___ if ($i>=1);
+ movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
+___
+ $code.=<<___ if ($i>=2);
+ pxor @XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+ }
+$code.=<<___;
+ movdqu 0x60($inp), @XMM[8+6]
+ pxor @XMM[8+5], @XMM[5]
+ movdqu 0x70($inp), @XMM[8+7]
+ lea 0x80($inp), $inp
+ movdqa @XMM[7], 0x70(%rsp)
+ pxor @XMM[8+6], @XMM[6]
+ lea 0x80(%rsp), %rax # pass key schedule
+ pxor @XMM[8+7], @XMM[7]
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[6]
+ movdqu @XMM[4], 0x20($out)
+ pxor 0x40(%rsp), @XMM[3]
+ movdqu @XMM[6], 0x30($out)
+ pxor 0x50(%rsp), @XMM[7]
+ movdqu @XMM[3], 0x40($out)
+ pxor 0x60(%rsp), @XMM[2]
+ movdqu @XMM[7], 0x50($out)
+ pxor 0x70(%rsp), @XMM[5]
+ movdqu @XMM[2], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+
+ movdqa 0x70(%rsp), @XMM[7] # prepare next iteration tweak
+ pxor $twtmp, $twtmp
+ movdqa .Lxts_magic(%rip), $twmask
+ pcmpgtd @XMM[7], $twtmp
+ pshufd \$0x13, $twtmp, $twres
+ pxor $twtmp, $twtmp
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+ pxor $twres, @XMM[7]
+
+ sub \$0x80,$len
+ jnc .Lxts_enc_loop
+
+.Lxts_enc_short:
+ add \$0x80, $len
+ jz .Lxts_enc_done
+___
+ for ($i=0;$i<7;$i++) {
+ $code.=<<___;
+ pshufd \$0x13, $twtmp, $twres
+ pxor $twtmp, $twtmp
+ movdqa @XMM[7], @XMM[$i]
+ movdqa @XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+ pxor $twres, @XMM[7]
+___
+ $code.=<<___ if ($i>=1);
+ movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
+ cmp \$`0x10*$i`,$len
+ je .Lxts_enc_$i
+___
+ $code.=<<___ if ($i>=2);
+ pxor @XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+ }
+$code.=<<___;
+ movdqu 0x60($inp), @XMM[8+6]
+ pxor @XMM[8+5], @XMM[5]
+ movdqa @XMM[7], 0x70(%rsp)
+ lea 0x70($inp), $inp
+ pxor @XMM[8+6], @XMM[6]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[6]
+ movdqu @XMM[4], 0x20($out)
+ pxor 0x40(%rsp), @XMM[3]
+ movdqu @XMM[6], 0x30($out)
+ pxor 0x50(%rsp), @XMM[7]
+ movdqu @XMM[3], 0x40($out)
+ pxor 0x60(%rsp), @XMM[2]
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[2], 0x60($out)
+ lea 0x70($out), $out
+
+ movdqa 0x70(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_enc_done
+.align 16
+.Lxts_enc_6:
+ pxor @XMM[8+4], @XMM[4]
+ lea 0x60($inp), $inp
+ pxor @XMM[8+5], @XMM[5]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[6]
+ movdqu @XMM[4], 0x20($out)
+ pxor 0x40(%rsp), @XMM[3]
+ movdqu @XMM[6], 0x30($out)
+ pxor 0x50(%rsp), @XMM[7]
+ movdqu @XMM[3], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ lea 0x60($out), $out
+
+ movdqa 0x60(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_enc_done
+.align 16
+.Lxts_enc_5:
+ pxor @XMM[8+3], @XMM[3]
+ lea 0x50($inp), $inp
+ pxor @XMM[8+4], @XMM[4]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[6]
+ movdqu @XMM[4], 0x20($out)
+ pxor 0x40(%rsp), @XMM[3]
+ movdqu @XMM[6], 0x30($out)
+ movdqu @XMM[3], 0x40($out)
+ lea 0x50($out), $out
+
+ movdqa 0x50(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_enc_done
+.align 16
+.Lxts_enc_4:
+ pxor @XMM[8+2], @XMM[2]
+ lea 0x40($inp), $inp
+ pxor @XMM[8+3], @XMM[3]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[6]
+ movdqu @XMM[4], 0x20($out)
+ movdqu @XMM[6], 0x30($out)
+ lea 0x40($out), $out
+
+ movdqa 0x40(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_enc_done
+.align 16
+.Lxts_enc_3:
+ pxor @XMM[8+1], @XMM[1]
+ lea 0x30($inp), $inp
+ pxor @XMM[8+2], @XMM[2]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[4]
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[4], 0x20($out)
+ lea 0x30($out), $out
+
+ movdqa 0x30(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_enc_done
+.align 16
+.Lxts_enc_2:
+ pxor @XMM[8+0], @XMM[0]
+ lea 0x20($inp), $inp
+ pxor @XMM[8+1], @XMM[1]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_encrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ lea 0x20($out), $out
+
+ movdqa 0x20(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_enc_done
+.align 16
+.Lxts_enc_1:
+ pxor @XMM[0], @XMM[8]
+ lea 0x10($inp), $inp
+ movdqa @XMM[8], 0x20(%rbp)
+ lea 0x20(%rbp), $arg1
+ lea 0x20(%rbp), $arg2
+ lea ($key), $arg3
+ call asm_AES_encrypt # doesn't touch %xmm
+ pxor 0x20(%rbp), @XMM[0] # ^= tweak[]
+ #pxor @XMM[8], @XMM[0]
+ #lea 0x80(%rsp), %rax # pass key schedule
+ #mov %edx, %r10d # pass rounds
+ #call _bsaes_encrypt8
+ #pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ movdqu @XMM[0], 0x00($out) # write output
+ lea 0x10($out), $out
+
+ movdqa 0x10(%rsp), @XMM[7] # next iteration tweak
+
+.Lxts_enc_done:
+ and \$15, %ebx
+ jz .Lxts_enc_ret
+ mov $out, %rdx
+
+.Lxts_enc_steal:
+ movzb ($inp), %eax
+ movzb -16(%rdx), %ecx
+ lea 1($inp), $inp
+ mov %al, -16(%rdx)
+ mov %cl, 0(%rdx)
+ lea 1(%rdx), %rdx
+ sub \$1,%ebx
+ jnz .Lxts_enc_steal
+
+ movdqu -16($out), @XMM[0]
+ lea 0x20(%rbp), $arg1
+ pxor @XMM[7], @XMM[0]
+ lea 0x20(%rbp), $arg2
+ movdqa @XMM[0], 0x20(%rbp)
+ lea ($key), $arg3
+ call asm_AES_encrypt # doesn't touch %xmm
+ pxor 0x20(%rbp), @XMM[7]
+ movdqu @XMM[7], -16($out)
+
+.Lxts_enc_ret:
+ lea (%rsp), %rax
+ pxor %xmm0, %xmm0
+.Lxts_enc_bzero: # wipe key schedule [if any]
+ movdqa %xmm0, 0x00(%rax)
+ movdqa %xmm0, 0x10(%rax)
+ lea 0x20(%rax), %rax
+ cmp %rax, %rbp
+ ja .Lxts_enc_bzero
+
+ lea (%rbp),%rsp # restore %rsp
+___
+$code.=<<___ if ($win64);
+ movaps 0x40(%rbp), %xmm6
+ movaps 0x50(%rbp), %xmm7
+ movaps 0x60(%rbp), %xmm8
+ movaps 0x70(%rbp), %xmm9
+ movaps 0x80(%rbp), %xmm10
+ movaps 0x90(%rbp), %xmm11
+ movaps 0xa0(%rbp), %xmm12
+ movaps 0xb0(%rbp), %xmm13
+ movaps 0xc0(%rbp), %xmm14
+ movaps 0xd0(%rbp), %xmm15
+ lea 0xa0(%rbp), %rsp
+___
+$code.=<<___;
+ mov 0x48(%rsp), %r15
+ mov 0x50(%rsp), %r14
+ mov 0x58(%rsp), %r13
+ mov 0x60(%rsp), %r12
+ mov 0x68(%rsp), %rbx
+ mov 0x70(%rsp), %rax
+ lea 0x78(%rsp), %rsp
+ mov %rax, %rbp
+.Lxts_enc_epilogue:
+ ret
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,\@abi-omnipotent
+.align 16
+bsaes_xts_decrypt:
+ mov %rsp, %rax
+.Lxts_dec_prologue:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ lea -0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+ mov 0xa0(%rsp),$arg5 # pull key2
+ mov 0xa8(%rsp),$arg6 # pull ivp
+ lea -0xa0(%rsp), %rsp
+ movaps %xmm6, 0x40(%rsp)
+ movaps %xmm7, 0x50(%rsp)
+ movaps %xmm8, 0x60(%rsp)
+ movaps %xmm9, 0x70(%rsp)
+ movaps %xmm10, 0x80(%rsp)
+ movaps %xmm11, 0x90(%rsp)
+ movaps %xmm12, 0xa0(%rsp)
+ movaps %xmm13, 0xb0(%rsp)
+ movaps %xmm14, 0xc0(%rsp)
+ movaps %xmm15, 0xd0(%rsp)
+.Lxts_dec_body:
+___
+$code.=<<___;
+ mov %rsp, %rbp # backup %rsp
+ mov $arg1, $inp # backup arguments
+ mov $arg2, $out
+ mov $arg3, $len
+ mov $arg4, $key
+
+ lea ($arg6), $arg1
+ lea 0x20(%rbp), $arg2
+ lea ($arg5), $arg3
+ call asm_AES_encrypt # generate initial tweak
+
+ mov 240($key), %eax # rounds
+ mov $len, %rbx # backup $len
+
+ mov %eax, %edx # rounds
+ shl \$7, %rax # 128 bytes per inner round key
+ sub \$`128-32`, %rax # size of bit-sliced key schedule
+ sub %rax, %rsp
+
+ mov %rsp, %rax # pass key schedule
+ mov $key, %rcx # pass key
+ mov %edx, %r10d # pass rounds
+ call _bsaes_key_convert
+ pxor (%rsp), %xmm7 # fix up round 0 key
+ movdqa %xmm6, (%rax) # save last round key
+ movdqa %xmm7, (%rsp)
+
+ xor %eax, %eax # if ($len%16) len-=16;
+ and \$-16, $len
+ test \$15, %ebx
+ setnz %al
+ shl \$4, %rax
+ sub %rax, $len
+
+ sub \$0x80, %rsp # place for tweak[8]
+ movdqa 0x20(%rbp), @XMM[7] # initial tweak
+
+ pxor $twtmp, $twtmp
+ movdqa .Lxts_magic(%rip), $twmask
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+
+ sub \$0x80, $len
+ jc .Lxts_dec_short
+ jmp .Lxts_dec_loop
+
+.align 16
+.Lxts_dec_loop:
+___
+ for ($i=0;$i<7;$i++) {
+ $code.=<<___;
+ pshufd \$0x13, $twtmp, $twres
+ pxor $twtmp, $twtmp
+ movdqa @XMM[7], @XMM[$i]
+ movdqa @XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+ pxor $twres, @XMM[7]
+___
+ $code.=<<___ if ($i>=1);
+ movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
+___
+ $code.=<<___ if ($i>=2);
+ pxor @XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+ }
+$code.=<<___;
+ movdqu 0x60($inp), @XMM[8+6]
+ pxor @XMM[8+5], @XMM[5]
+ movdqu 0x70($inp), @XMM[8+7]
+ lea 0x80($inp), $inp
+ movdqa @XMM[7], 0x70(%rsp)
+ pxor @XMM[8+6], @XMM[6]
+ lea 0x80(%rsp), %rax # pass key schedule
+ pxor @XMM[8+7], @XMM[7]
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[6]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[4]
+ movdqu @XMM[6], 0x20($out)
+ pxor 0x40(%rsp), @XMM[2]
+ movdqu @XMM[4], 0x30($out)
+ pxor 0x50(%rsp), @XMM[7]
+ movdqu @XMM[2], 0x40($out)
+ pxor 0x60(%rsp), @XMM[3]
+ movdqu @XMM[7], 0x50($out)
+ pxor 0x70(%rsp), @XMM[5]
+ movdqu @XMM[3], 0x60($out)
+ movdqu @XMM[5], 0x70($out)
+ lea 0x80($out), $out
+
+ movdqa 0x70(%rsp), @XMM[7] # prepare next iteration tweak
+ pxor $twtmp, $twtmp
+ movdqa .Lxts_magic(%rip), $twmask
+ pcmpgtd @XMM[7], $twtmp
+ pshufd \$0x13, $twtmp, $twres
+ pxor $twtmp, $twtmp
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+ pxor $twres, @XMM[7]
+
+ sub \$0x80,$len
+ jnc .Lxts_dec_loop
+
+.Lxts_dec_short:
+ add \$0x80, $len
+ jz .Lxts_dec_done
+___
+ for ($i=0;$i<7;$i++) {
+ $code.=<<___;
+ pshufd \$0x13, $twtmp, $twres
+ pxor $twtmp, $twtmp
+ movdqa @XMM[7], @XMM[$i]
+ movdqa @XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ pcmpgtd @XMM[7], $twtmp # broadcast upper bits
+ pxor $twres, @XMM[7]
+___
+ $code.=<<___ if ($i>=1);
+ movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
+ cmp \$`0x10*$i`,$len
+ je .Lxts_dec_$i
+___
+ $code.=<<___ if ($i>=2);
+ pxor @XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+ }
+$code.=<<___;
+ movdqu 0x60($inp), @XMM[8+6]
+ pxor @XMM[8+5], @XMM[5]
+ movdqa @XMM[7], 0x70(%rsp)
+ lea 0x70($inp), $inp
+ pxor @XMM[8+6], @XMM[6]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[6]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[4]
+ movdqu @XMM[6], 0x20($out)
+ pxor 0x40(%rsp), @XMM[2]
+ movdqu @XMM[4], 0x30($out)
+ pxor 0x50(%rsp), @XMM[7]
+ movdqu @XMM[2], 0x40($out)
+ pxor 0x60(%rsp), @XMM[3]
+ movdqu @XMM[7], 0x50($out)
+ movdqu @XMM[3], 0x60($out)
+ lea 0x70($out), $out
+
+ movdqa 0x70(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_dec_done
+.align 16
+.Lxts_dec_6:
+ pxor @XMM[8+4], @XMM[4]
+ lea 0x60($inp), $inp
+ pxor @XMM[8+5], @XMM[5]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[6]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[4]
+ movdqu @XMM[6], 0x20($out)
+ pxor 0x40(%rsp), @XMM[2]
+ movdqu @XMM[4], 0x30($out)
+ pxor 0x50(%rsp), @XMM[7]
+ movdqu @XMM[2], 0x40($out)
+ movdqu @XMM[7], 0x50($out)
+ lea 0x60($out), $out
+
+ movdqa 0x60(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_dec_done
+.align 16
+.Lxts_dec_5:
+ pxor @XMM[8+3], @XMM[3]
+ lea 0x50($inp), $inp
+ pxor @XMM[8+4], @XMM[4]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[6]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[4]
+ movdqu @XMM[6], 0x20($out)
+ pxor 0x40(%rsp), @XMM[2]
+ movdqu @XMM[4], 0x30($out)
+ movdqu @XMM[2], 0x40($out)
+ lea 0x50($out), $out
+
+ movdqa 0x50(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_dec_done
+.align 16
+.Lxts_dec_4:
+ pxor @XMM[8+2], @XMM[2]
+ lea 0x40($inp), $inp
+ pxor @XMM[8+3], @XMM[3]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[6]
+ movdqu @XMM[1], 0x10($out)
+ pxor 0x30(%rsp), @XMM[4]
+ movdqu @XMM[6], 0x20($out)
+ movdqu @XMM[4], 0x30($out)
+ lea 0x40($out), $out
+
+ movdqa 0x40(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_dec_done
+.align 16
+.Lxts_dec_3:
+ pxor @XMM[8+1], @XMM[1]
+ lea 0x30($inp), $inp
+ pxor @XMM[8+2], @XMM[2]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ pxor 0x20(%rsp), @XMM[6]
+ movdqu @XMM[1], 0x10($out)
+ movdqu @XMM[6], 0x20($out)
+ lea 0x30($out), $out
+
+ movdqa 0x30(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_dec_done
+.align 16
+.Lxts_dec_2:
+ pxor @XMM[8+0], @XMM[0]
+ lea 0x20($inp), $inp
+ pxor @XMM[8+1], @XMM[1]
+ lea 0x80(%rsp), %rax # pass key schedule
+ mov %edx, %r10d # pass rounds
+
+ call _bsaes_decrypt8
+
+ pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ pxor 0x10(%rsp), @XMM[1]
+ movdqu @XMM[0], 0x00($out) # write output
+ movdqu @XMM[1], 0x10($out)
+ lea 0x20($out), $out
+
+ movdqa 0x20(%rsp), @XMM[7] # next iteration tweak
+ jmp .Lxts_dec_done
+.align 16
+.Lxts_dec_1:
+ pxor @XMM[0], @XMM[8]
+ lea 0x10($inp), $inp
+ movdqa @XMM[8], 0x20(%rbp)
+ lea 0x20(%rbp), $arg1
+ lea 0x20(%rbp), $arg2
+ lea ($key), $arg3
+ call asm_AES_decrypt # doesn't touch %xmm
+ pxor 0x20(%rbp), @XMM[0] # ^= tweak[]
+ #pxor @XMM[8], @XMM[0]
+ #lea 0x80(%rsp), %rax # pass key schedule
+ #mov %edx, %r10d # pass rounds
+ #call _bsaes_decrypt8
+ #pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
+ movdqu @XMM[0], 0x00($out) # write output
+ lea 0x10($out), $out
+
+ movdqa 0x10(%rsp), @XMM[7] # next iteration tweak
+
+.Lxts_dec_done:
+ and \$15, %ebx
+ jz .Lxts_dec_ret
+
+ pxor $twtmp, $twtmp
+ movdqa .Lxts_magic(%rip), $twmask
+ pcmpgtd @XMM[7], $twtmp
+ pshufd \$0x13, $twtmp, $twres
+ movdqa @XMM[7], @XMM[6]
+ paddq @XMM[7], @XMM[7] # psllq 1,$tweak
+ pand $twmask, $twres # isolate carry and residue
+ movdqu ($inp), @XMM[0]
+ pxor $twres, @XMM[7]
+
+ lea 0x20(%rbp), $arg1
+ pxor @XMM[7], @XMM[0]
+ lea 0x20(%rbp), $arg2
+ movdqa @XMM[0], 0x20(%rbp)
+ lea ($key), $arg3
+ call asm_AES_decrypt # doesn't touch %xmm
+ pxor 0x20(%rbp), @XMM[7]
+ mov $out, %rdx
+ movdqu @XMM[7], ($out)
+
+.Lxts_dec_steal:
+ movzb 16($inp), %eax
+ movzb (%rdx), %ecx
+ lea 1($inp), $inp
+ mov %al, (%rdx)
+ mov %cl, 16(%rdx)
+ lea 1(%rdx), %rdx
+ sub \$1,%ebx
+ jnz .Lxts_dec_steal
+
+ movdqu ($out), @XMM[0]
+ lea 0x20(%rbp), $arg1
+ pxor @XMM[6], @XMM[0]
+ lea 0x20(%rbp), $arg2
+ movdqa @XMM[0], 0x20(%rbp)
+ lea ($key), $arg3
+ call asm_AES_decrypt # doesn't touch %xmm
+ pxor 0x20(%rbp), @XMM[6]
+ movdqu @XMM[6], ($out)
+
+.Lxts_dec_ret:
+ lea (%rsp), %rax
+ pxor %xmm0, %xmm0
+.Lxts_dec_bzero: # wipe key schedule [if any]
+ movdqa %xmm0, 0x00(%rax)
+ movdqa %xmm0, 0x10(%rax)
+ lea 0x20(%rax), %rax
+ cmp %rax, %rbp
+ ja .Lxts_dec_bzero
+
+ lea (%rbp),%rsp # restore %rsp
+___
+$code.=<<___ if ($win64);
+ movaps 0x40(%rbp), %xmm6
+ movaps 0x50(%rbp), %xmm7
+ movaps 0x60(%rbp), %xmm8
+ movaps 0x70(%rbp), %xmm9
+ movaps 0x80(%rbp), %xmm10
+ movaps 0x90(%rbp), %xmm11
+ movaps 0xa0(%rbp), %xmm12
+ movaps 0xb0(%rbp), %xmm13
+ movaps 0xc0(%rbp), %xmm14
+ movaps 0xd0(%rbp), %xmm15
+ lea 0xa0(%rbp), %rsp
+___
+$code.=<<___;
+ mov 0x48(%rsp), %r15
+ mov 0x50(%rsp), %r14
+ mov 0x58(%rsp), %r13
+ mov 0x60(%rsp), %r12
+ mov 0x68(%rsp), %rbx
+ mov 0x70(%rsp), %rax
+ lea 0x78(%rsp), %rsp
+ mov %rax, %rbp
+.Lxts_dec_epilogue:
+ ret
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+.type _bsaes_const,\@object
+.align 64
+_bsaes_const:
+.LM0ISR: # InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LBS0: # bit-slice constants
+ .quad 0x5555555555555555, 0x5555555555555555
+.LBS1:
+ .quad 0x3333333333333333, 0x3333333333333333
+.LBS2:
+ .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
+.LSR: # shiftrows constants
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LM0SR:
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LNOT: # magic constants
+ .quad 0xffffffffffffffff, 0xffffffffffffffff
+.L63:
+ .quad 0x6363636363636363, 0x6363636363636363
+.LSWPUP: # byte-swap upper dword
+ .quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
+.LSWPUPM0SR:
+ .quad 0x0a0d02060c03070b, 0x0004080f05090e01
+.LADD1: # counter increment constants
+ .quad 0x0000000000000000, 0x0000000100000000
+.LADD2:
+ .quad 0x0000000000000000, 0x0000000200000000
+.LADD3:
+ .quad 0x0000000000000000, 0x0000000300000000
+.LADD4:
+ .quad 0x0000000000000000, 0x0000000400000000
+.LADD5:
+ .quad 0x0000000000000000, 0x0000000500000000
+.LADD6:
+ .quad 0x0000000000000000, 0x0000000600000000
+.LADD7:
+ .quad 0x0000000000000000, 0x0000000700000000
+.LADD8:
+ .quad 0x0000000000000000, 0x0000000800000000
+.Lxts_magic:
+ .long 0x87,0,1,0
+.asciz "Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov"
+.align 64
+.size _bsaes_const,.-_bsaes_const
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
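+# The handler below classifies context->Rip: before the prologue has
+# completed or once the epilogue has started there is nothing to
+# restore; inside the body it recovers the frame from context->Rbp,
+# copies the ten saved %xmm registers back into the CONTEXT, reloads
+# the six non-volatile GPRs and then lets RtlVirtualUnwind continue
+# the unwind.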
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ mov 160($context),%rax # pull context->Rbp
+
+ lea 0x40(%rax),%rsi # %xmm save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0xa0(%rax),%rax # adjust stack pointer
+
+ mov 0x70(%rax),%rbp
+ mov 0x68(%rax),%rbx
+ mov 0x60(%rax),%r12
+ mov 0x58(%rax),%r13
+ mov 0x50(%rax),%r14
+ mov 0x48(%rax),%r15
+ lea 0x78(%rax),%rax # adjust stack pointer
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_prologue:
+ mov %rax,152($context) # restore context->Rsp
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$`1232/8`,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+___
+$code.=<<___ if ($ecb);
+ .rva .Lecb_enc_prologue
+ .rva .Lecb_enc_epilogue
+ .rva .Lecb_enc_info
+
+ .rva .Lecb_dec_prologue
+ .rva .Lecb_dec_epilogue
+ .rva .Lecb_dec_info
+___
+$code.=<<___;
+ .rva .Lcbc_dec_prologue
+ .rva .Lcbc_dec_epilogue
+ .rva .Lcbc_dec_info
+
+ .rva .Lctr_enc_prologue
+ .rva .Lctr_enc_epilogue
+ .rva .Lctr_enc_info
+
+ .rva .Lxts_enc_prologue
+ .rva .Lxts_enc_epilogue
+ .rva .Lxts_enc_info
+
+ .rva .Lxts_dec_prologue
+ .rva .Lxts_dec_epilogue
+ .rva .Lxts_dec_info
+
+.section .xdata
+.align 8
+___
+$code.=<<___ if ($ecb);
+.Lecb_enc_info:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lecb_enc_body,.Lecb_enc_epilogue # HandlerData[]
+.Lecb_dec_info:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lecb_dec_body,.Lecb_dec_epilogue # HandlerData[]
+___
+$code.=<<___;
+.Lcbc_dec_info:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lcbc_dec_body,.Lcbc_dec_epilogue # HandlerData[]
+.Lctr_enc_info:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lctr_enc_body,.Lctr_enc_epilogue # HandlerData[]
+.Lxts_enc_info:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lxts_enc_body,.Lxts_enc_epilogue # HandlerData[]
+.Lxts_dec_info:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lxts_dec_body,.Lxts_dec_epilogue # HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/openssl/crypto/aes/asm/vpaes-x86.pl b/openssl/crypto/aes/asm/vpaes-x86.pl
new file mode 100644
index 000000000..84a6f6d33
--- /dev/null
+++ b/openssl/crypto/aes/asm/vpaes-x86.pl
@@ -0,0 +1,901 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Port vpaes-x86_64.pl as 32-bit "almost" drop-in replacement for
+# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. aes-586.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with 128-bit key, and vpaes-x86.pl column - [also
+# large-block CBC] encrypt/decrypt.
+#
+# aes-586.pl vpaes-x86.pl
+#
+# Core 2(**) 29.1/42.3/18.3 22.0/25.6(***)
+# Nehalem 27.9/40.4/18.1 10.3/12.0
+# Atom 102./119./60.1 64.5/85.3(***)
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than specifically to Intel HTT. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off"
+#	results are presented mostly for reference purposes.
+#
+# (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	The less impressive improvement on Core 2 and Atom is due to
+#	slow pshufb; still, it is a respectable +32%/65% on Core 2 and
+#	+58%/40% on Atom (as implied, over the "hyper-threading-safe"
+#	code path).
+#
+# <appro@openssl.org>
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
+
+$PREFIX="vpaes";
+
+my ($round, $base, $magic, $key, $const, $inp, $out)=
+ ("eax", "ebx", "ecx", "edx","ebp", "esi","edi");
+
+&static_label("_vpaes_consts");
+&static_label("_vpaes_schedule_low_round");
+
+&set_label("_vpaes_consts",64);
+$k_inv=-0x30; # inv, inva
+ &data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
+ &data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
+
+$k_s0F=-0x10; # s0F
+ &data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
+
+$k_ipt=0x00; # input transform (lo, hi)
+ &data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
+ &data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
+
+$k_sb1=0x20; # sb1u, sb1t
+ &data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
+ &data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
+$k_sb2=0x40; # sb2u, sb2t
+ &data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
+ &data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
+$k_sbo=0x60; # sbou, sbot
+ &data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
+ &data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
+
+$k_mc_forward=0x80; # mc_forward
+ &data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
+ &data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
+ &data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
+ &data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
+
+$k_mc_backward=0xc0; # mc_backward
+ &data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
+ &data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
+ &data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
+ &data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
+
+$k_sr=0x100; # sr
+ &data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
+ &data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
+ &data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
+ &data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
+
+$k_rcon=0x140; # rcon
+ &data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
+
+$k_s63=0x150; # s63: all equal to 0x63 transformed
+ &data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
+
+$k_opt=0x160; # output transform
+ &data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
+ &data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
+
+$k_deskew=0x180; # deskew tables: inverts the sbox's "skew"
+ &data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
+ &data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
+##
+## Decryption stuff
+## Key schedule constants
+##
+$k_dksd=0x1a0; # decryption key schedule: invskew x*D
+ &data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
+ &data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
+$k_dksb=0x1c0; # decryption key schedule: invskew x*B
+ &data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
+ &data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
+$k_dkse=0x1e0; # decryption key schedule: invskew x*E + 0x63
+ &data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
+ &data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
+$k_dks9=0x200; # decryption key schedule: invskew x*9
+ &data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
+ &data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
+
+##
+## Decryption stuff
+## Round function constants
+##
+$k_dipt=0x220; # decryption input transform
+ &data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
+ &data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
+
+$k_dsb9=0x240; # decryption sbox output *9*u, *9*t
+ &data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
+ &data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
+$k_dsbd=0x260; # decryption sbox output *D*u, *D*t
+ &data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
+ &data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
+$k_dsbb=0x280; # decryption sbox output *B*u, *B*t
+ &data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
+ &data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
+$k_dsbe=0x2a0; # decryption sbox output *E*u, *E*t
+ &data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
+ &data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
+$k_dsbo=0x2c0; # decryption sbox final output
+ &data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
+ &data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
+&asciz ("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
+&align (64);
+
+&function_begin_B("_vpaes_preheat");
+ &add ($const,&DWP(0,"esp"));
+ &movdqa ("xmm7",&QWP($k_inv,$const));
+ &movdqa ("xmm6",&QWP($k_s0F,$const));
+ &ret ();
+&function_end_B("_vpaes_preheat");
+
+##
+## _aes_encrypt_core
+##
+## AES-encrypt %xmm0.
+##
+## Inputs:
+## %xmm0 = input
+## %xmm6-%xmm7 as in _vpaes_preheat
+## (%edx) = scheduled keys
+##
+## Output in %xmm0
+## Clobbers %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
+##
+##
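+## Note: with only eight XMM registers available, this port keeps just
+## the inversion tables and the 0x0F mask resident (%xmm7/%xmm6, loaded
+## by _vpaes_preheat) and refetches the sbox constants from memory on
+## every round, where the 64-bit version keeps them in %xmm9-%xmm15.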
+&function_begin_B("_vpaes_encrypt_core");
+ &mov ($magic,16);
+ &mov ($round,&DWP(240,$key));
+	&movdqa	("xmm1","xmm6");
+ &movdqa ("xmm2",&QWP($k_ipt,$const));
+ &pandn ("xmm1","xmm0");
+ &movdqu ("xmm5",&QWP(0,$key));
+ &psrld ("xmm1",4);
+ &pand ("xmm0","xmm6");
+ &pshufb ("xmm2","xmm0");
+ &movdqa ("xmm0",&QWP($k_ipt+16,$const));
+ &pshufb ("xmm0","xmm1");
+ &pxor ("xmm2","xmm5");
+ &pxor ("xmm0","xmm2");
+ &add ($key,16);
+ &lea ($base,&DWP($k_mc_backward,$const));
+ &jmp (&label("enc_entry"));
+
+
+&set_label("enc_loop",16);
+ # middle of middle round
+ &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sb1u
+ &pshufb ("xmm4","xmm2"); # 4 = sb1u
+ &pxor ("xmm4","xmm5"); # 4 = sb1u + k
+ &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
+ &pshufb ("xmm0","xmm3"); # 0 = sb1t
+ &pxor ("xmm0","xmm4"); # 0 = A
+ &movdqa ("xmm5",&QWP($k_sb2,$const)); # 4 : sb2u
+ &pshufb ("xmm5","xmm2"); # 4 = sb2u
+ &movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
+ &movdqa ("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
+ &pshufb ("xmm2","xmm3"); # 2 = sb2t
+ &pxor ("xmm2","xmm5"); # 2 = 2A
+ &movdqa ("xmm4",&QWP(0,$base,$magic)); # .Lk_mc_backward[]
+ &movdqa ("xmm3","xmm0"); # 3 = A
+ &pshufb ("xmm0","xmm1"); # 0 = B
+ &add ($key,16); # next key
+ &pxor ("xmm0","xmm2"); # 0 = 2A+B
+ &pshufb ("xmm3","xmm4"); # 3 = D
+ &add ($magic,16); # next mc
+ &pxor ("xmm3","xmm0"); # 3 = 2A+B+D
+ &pshufb ("xmm0","xmm1"); # 0 = 2B+C
+ &and ($magic,0x30); # ... mod 4
+ &pxor ("xmm0","xmm3"); # 0 = 2A+3B+C+D
+ &sub ($round,1); # nr--
+
+&set_label("enc_entry");
+ # top of round
+ &movdqa ("xmm1","xmm6"); # 1 : i
+ &pandn ("xmm1","xmm0"); # 1 = i<<4
+ &psrld ("xmm1",4); # 1 = i
+ &pand ("xmm0","xmm6"); # 0 = k
+ &movdqa ("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
+ &pshufb ("xmm5","xmm0"); # 2 = a/k
+ &pxor ("xmm0","xmm1"); # 0 = j
+ &movdqa ("xmm3","xmm7"); # 3 : 1/i
+ &pshufb ("xmm3","xmm1"); # 3 = 1/i
+ &pxor ("xmm3","xmm5"); # 3 = iak = 1/i + a/k
+ &movdqa ("xmm4","xmm7"); # 4 : 1/j
+ &pshufb ("xmm4","xmm0"); # 4 = 1/j
+ &pxor ("xmm4","xmm5"); # 4 = jak = 1/j + a/k
+ &movdqa ("xmm2","xmm7"); # 2 : 1/iak
+ &pshufb ("xmm2","xmm3"); # 2 = 1/iak
+ &pxor ("xmm2","xmm0"); # 2 = io
+ &movdqa ("xmm3","xmm7"); # 3 : 1/jak
+ &movdqu ("xmm5",&QWP(0,$key));
+ &pshufb ("xmm3","xmm4"); # 3 = 1/jak
+ &pxor ("xmm3","xmm1"); # 3 = jo
+ &jnz (&label("enc_loop"));
+
+ # middle of last round
+ &movdqa ("xmm4",&QWP($k_sbo,$const)); # 3 : sbou .Lk_sbo
+ &movdqa ("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot .Lk_sbo+16
+ &pshufb ("xmm4","xmm2"); # 4 = sbou
+ &pxor ("xmm4","xmm5"); # 4 = sb1u + k
+ &pshufb ("xmm0","xmm3"); # 0 = sb1t
+ &movdqa ("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
+ &pxor ("xmm0","xmm4"); # 0 = A
+ &pshufb ("xmm0","xmm1");
+ &ret ();
+&function_end_B("_vpaes_encrypt_core");
+
+##
+## Decryption core
+##
+## Same API as encryption core.
+##
+&function_begin_B("_vpaes_decrypt_core");
+ &mov ($round,&DWP(240,$key));
+ &lea ($base,&DWP($k_dsbd,$const));
+ &movdqa ("xmm1","xmm6");
+ &movdqa ("xmm2",&QWP($k_dipt-$k_dsbd,$base));
+ &pandn ("xmm1","xmm0");
+ &mov ($magic,$round);
+	&psrld	("xmm1",4);
+ &movdqu ("xmm5",&QWP(0,$key));
+ &shl ($magic,4);
+ &pand ("xmm0","xmm6");
+ &pshufb ("xmm2","xmm0");
+ &movdqa ("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
+ &xor ($magic,0x30);
+ &pshufb ("xmm0","xmm1");
+ &and ($magic,0x30);
+ &pxor ("xmm2","xmm5");
+ &movdqa ("xmm5",&QWP($k_mc_forward+48,$const));
+ &pxor ("xmm0","xmm2");
+ &add ($key,16);
+ &lea ($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
+ &jmp (&label("dec_entry"));
+
+&set_label("dec_loop",16);
+##
+## Inverse mix columns
+##
+ &movdqa ("xmm4",&QWP(-0x20,$base)); # 4 : sb9u
+ &pshufb ("xmm4","xmm2"); # 4 = sb9u
+ &pxor ("xmm4","xmm0");
+ &movdqa ("xmm0",&QWP(-0x10,$base)); # 0 : sb9t
+ &pshufb ("xmm0","xmm3"); # 0 = sb9t
+ &pxor ("xmm0","xmm4"); # 0 = ch
+ &add ($key,16); # next round key
+
+ &pshufb ("xmm0","xmm5"); # MC ch
+ &movdqa ("xmm4",&QWP(0,$base)); # 4 : sbdu
+ &pshufb ("xmm4","xmm2"); # 4 = sbdu
+ &pxor ("xmm4","xmm0"); # 4 = ch
+ &movdqa ("xmm0",&QWP(0x10,$base)); # 0 : sbdt
+ &pshufb ("xmm0","xmm3"); # 0 = sbdt
+ &pxor ("xmm0","xmm4"); # 0 = ch
+ &sub ($round,1); # nr--
+
+ &pshufb ("xmm0","xmm5"); # MC ch
+ &movdqa ("xmm4",&QWP(0x20,$base)); # 4 : sbbu
+ &pshufb ("xmm4","xmm2"); # 4 = sbbu
+ &pxor ("xmm4","xmm0"); # 4 = ch
+ &movdqa ("xmm0",&QWP(0x30,$base)); # 0 : sbbt
+ &pshufb ("xmm0","xmm3"); # 0 = sbbt
+ &pxor ("xmm0","xmm4"); # 0 = ch
+
+ &pshufb ("xmm0","xmm5"); # MC ch
+ &movdqa ("xmm4",&QWP(0x40,$base)); # 4 : sbeu
+ &pshufb ("xmm4","xmm2"); # 4 = sbeu
+ &pxor ("xmm4","xmm0"); # 4 = ch
+ &movdqa ("xmm0",&QWP(0x50,$base)); # 0 : sbet
+ &pshufb ("xmm0","xmm3"); # 0 = sbet
+ &pxor ("xmm0","xmm4"); # 0 = ch
+
+ &palignr("xmm5","xmm5",12);
+
+&set_label("dec_entry");
+ # top of round
+ &movdqa ("xmm1","xmm6"); # 1 : i
+ &pandn ("xmm1","xmm0"); # 1 = i<<4
+ &psrld ("xmm1",4); # 1 = i
+ &pand ("xmm0","xmm6"); # 0 = k
+ &movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+ &pshufb ("xmm2","xmm0"); # 2 = a/k
+ &pxor ("xmm0","xmm1"); # 0 = j
+ &movdqa ("xmm3","xmm7"); # 3 : 1/i
+ &pshufb ("xmm3","xmm1"); # 3 = 1/i
+ &pxor ("xmm3","xmm2"); # 3 = iak = 1/i + a/k
+ &movdqa ("xmm4","xmm7"); # 4 : 1/j
+ &pshufb ("xmm4","xmm0"); # 4 = 1/j
+ &pxor ("xmm4","xmm2"); # 4 = jak = 1/j + a/k
+ &movdqa ("xmm2","xmm7"); # 2 : 1/iak
+ &pshufb ("xmm2","xmm3"); # 2 = 1/iak
+ &pxor ("xmm2","xmm0"); # 2 = io
+ &movdqa ("xmm3","xmm7"); # 3 : 1/jak
+ &pshufb ("xmm3","xmm4"); # 3 = 1/jak
+ &pxor ("xmm3","xmm1"); # 3 = jo
+ &movdqu ("xmm0",&QWP(0,$key));
+ &jnz (&label("dec_loop"));
+
+ # middle of last round
+ &movdqa ("xmm4",&QWP(0x60,$base)); # 3 : sbou
+ &pshufb ("xmm4","xmm2"); # 4 = sbou
+ &pxor ("xmm4","xmm0"); # 4 = sb1u + k
+ &movdqa ("xmm0",&QWP(0x70,$base)); # 0 : sbot
+ &movdqa ("xmm2",&QWP(0,$magic));
+ &pshufb ("xmm0","xmm3"); # 0 = sb1t
+ &pxor ("xmm0","xmm4"); # 0 = A
+ &pshufb ("xmm0","xmm2");
+ &ret ();
+&function_end_B("_vpaes_decrypt_core");
+
+########################################################
+## ##
+## AES key schedule ##
+## ##
+########################################################
+&function_begin_B("_vpaes_schedule_core");
+ &add ($const,&DWP(0,"esp"));
+ &movdqu ("xmm0",&QWP(0,$inp)); # load key (unaligned)
+ &movdqa ("xmm2",&QWP($k_rcon,$const)); # load rcon
+
+ # input transform
+ &movdqa ("xmm3","xmm0");
+ &lea ($base,&DWP($k_ipt,$const));
+ &movdqa (&QWP(4,"esp"),"xmm2"); # xmm8
+ &call ("_vpaes_schedule_transform");
+ &movdqa ("xmm7","xmm0");
+
+ &test ($out,$out);
+ &jnz (&label("schedule_am_decrypting"));
+
+ # encrypting, output zeroth round key after transform
+ &movdqu (&QWP(0,$key),"xmm0");
+ &jmp (&label("schedule_go"));
+
+&set_label("schedule_am_decrypting");
+ # decrypting, output zeroth round key after shiftrows
+ &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
+ &pshufb ("xmm3","xmm1");
+ &movdqu (&QWP(0,$key),"xmm3");
+ &xor ($magic,0x30);
+
+&set_label("schedule_go");
+ &cmp ($round,192);
+ &ja (&label("schedule_256"));
+ &je (&label("schedule_192"));
+	# 128: fall through
+
+##
+## .schedule_128
+##
+## 128-bit specific part of key schedule.
+##
+## This schedule is really simple, because all its parts
+## are accomplished by the subroutines.
+##
+&set_label("schedule_128");
+ &mov ($round,10);
+
+&set_label("loop_schedule_128");
+ &call ("_vpaes_schedule_round");
+ &dec ($round);
+ &jz (&label("schedule_mangle_last"));
+ &call ("_vpaes_schedule_mangle"); # write output
+ &jmp (&label("loop_schedule_128"));
+
+##
+## .aes_schedule_192
+##
+## 192-bit specific part of key schedule.
+##
+## The main body of this schedule is the same as the 128-bit
+## schedule, but with more smearing. The long, high side is
+## stored in %xmm7 as before, and the short, low side is in
+## the high bits of %xmm6.
+##
+## This schedule is somewhat nastier, however, because each
+## round produces 192 bits of key material, or 1.5 round keys.
+## Therefore, on each cycle we do 2 rounds and produce 3 round
+## keys.
+##
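+## A quick sanity check on the loop below: AES-192 needs 13 round keys
+## (12 rounds plus the initial whitening key). One is written before
+## schedule_go; three of the four loop trips emit 3 keys apiece, and
+## the last trip emits 2 plus the final key in schedule_mangle_last,
+## for 1 + 9 + 2 + 1 = 13.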
+&set_label("schedule_192",16);
+ &movdqu ("xmm0",&QWP(8,$inp)); # load key part 2 (very unaligned)
+ &call ("_vpaes_schedule_transform"); # input transform
+ &movdqa ("xmm6","xmm0"); # save short part
+ &pxor ("xmm4","xmm4"); # clear 4
+ &movhlps("xmm6","xmm4"); # clobber low side with zeros
+ &mov ($round,4);
+
+&set_label("loop_schedule_192");
+ &call ("_vpaes_schedule_round");
+ &palignr("xmm0","xmm6",8);
+ &call ("_vpaes_schedule_mangle"); # save key n
+ &call ("_vpaes_schedule_192_smear");
+ &call ("_vpaes_schedule_mangle"); # save key n+1
+ &call ("_vpaes_schedule_round");
+ &dec ($round);
+ &jz (&label("schedule_mangle_last"));
+ &call ("_vpaes_schedule_mangle"); # save key n+2
+ &call ("_vpaes_schedule_192_smear");
+ &jmp (&label("loop_schedule_192"));
+
+##
+## .aes_schedule_256
+##
+## 256-bit specific part of key schedule.
+##
+## The structure here is very similar to the 128-bit
+## schedule, but with an additional "low side" in
+## %xmm6. The low side's rounds are the same as the
+## high side's, except no rcon and no rotation.
+##
+&set_label("schedule_256",16);
+ &movdqu ("xmm0",&QWP(16,$inp)); # load key part 2 (unaligned)
+ &call ("_vpaes_schedule_transform"); # input transform
+ &mov ($round,7);
+
+&set_label("loop_schedule_256");
+ &call ("_vpaes_schedule_mangle"); # output low result
+ &movdqa ("xmm6","xmm0"); # save cur_lo in xmm6
+
+ # high round
+ &call ("_vpaes_schedule_round");
+ &dec ($round);
+ &jz (&label("schedule_mangle_last"));
+ &call ("_vpaes_schedule_mangle");
+
+ # low round. swap xmm7 and xmm6
+ &pshufd ("xmm0","xmm0",0xFF);
+ &movdqa (&QWP(20,"esp"),"xmm7");
+ &movdqa ("xmm7","xmm6");
+ &call ("_vpaes_schedule_low_round");
+ &movdqa ("xmm7",&QWP(20,"esp"));
+
+ &jmp (&label("loop_schedule_256"));
+
+##
+## .aes_schedule_mangle_last
+##
+## Mangler for last round of key schedule
+## Mangles %xmm0
+## when encrypting, outputs out(%xmm0) ^ 63
+## when decrypting, outputs unskew(%xmm0)
+##
+## Always called right before return... jumps to cleanup and exits
+##
+&set_label("schedule_mangle_last",16);
+ # schedule last round key from xmm0
+ &lea ($base,&DWP($k_deskew,$const));
+ &test ($out,$out);
+ &jnz (&label("schedule_mangle_last_dec"));
+
+ # encrypting
+ &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
+ &pshufb ("xmm0","xmm1"); # output permute
+ &lea ($base,&DWP($k_opt,$const)); # prepare to output transform
+ &add ($key,32);
+
+&set_label("schedule_mangle_last_dec");
+ &add ($key,-16);
+ &pxor ("xmm0",&QWP($k_s63,$const));
+ &call ("_vpaes_schedule_transform"); # output transform
+ &movdqu (&QWP(0,$key),"xmm0"); # save last key
+
+ # cleanup
+ &pxor ("xmm0","xmm0");
+ &pxor ("xmm1","xmm1");
+ &pxor ("xmm2","xmm2");
+ &pxor ("xmm3","xmm3");
+ &pxor ("xmm4","xmm4");
+ &pxor ("xmm5","xmm5");
+ &pxor ("xmm6","xmm6");
+ &pxor ("xmm7","xmm7");
+ &ret ();
+&function_end_B("_vpaes_schedule_core");
+
+##
+## .aes_schedule_192_smear
+##
+## Smear the short, low side in the 192-bit key schedule.
+##
+## Inputs:
+## %xmm7: high side, b a x y
+## %xmm6: low side, d c 0 0
+## %xmm1: used as scratch zero (there is no %xmm13 in 32-bit mode)
+##
+## Outputs:
+## %xmm6: b+c+d b+c 0 0
+## %xmm0: b+c+d b+c b a
+##
+&function_begin_B("_vpaes_schedule_192_smear");
+ &pshufd ("xmm0","xmm6",0x80); # d c 0 0 -> c 0 0 0
+ &pxor ("xmm6","xmm0"); # -> c+d c 0 0
+ &pshufd ("xmm0","xmm7",0xFE); # b a _ _ -> b b b a
+ &pxor ("xmm6","xmm0"); # -> b+c+d b+c b a
+ &movdqa ("xmm0","xmm6");
+ &pxor ("xmm1","xmm1");
+ &movhlps("xmm6","xmm1"); # clobber low side with zeros
+ &ret ();
+&function_end_B("_vpaes_schedule_192_smear");
+
+##
+## .aes_schedule_round
+##
+## Runs one main round of the key schedule on %xmm0, %xmm7
+##
+## Specifically, runs subbytes on the high dword of %xmm0
+## then rotates it by one byte and xors into the low dword of
+## %xmm7.
+##
+## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+## next rcon.
+##
+## Smears the dwords of %xmm7 by xoring the low into the
+## second low, result into third, result into highest.
+##
+## Returns results in %xmm7 = %xmm0.
+## Clobbers %xmm1-%xmm5.
+##
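+## Worked example of the smear, writing %xmm7 as dwords (w3 w2 w1 w0)
+## with + meaning xor:
+##   after pslldq 4 / pxor: (w3+w2, w2+w1, w1+w0, w0)
+##   after pslldq 8 / pxor: (w3+w2+w1+w0, w2+w1+w0, w1+w0, w0)
+## i.e. each dword accumulates everything below it, as described above.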
+&function_begin_B("_vpaes_schedule_round");
+ # extract rcon from xmm8
+ &movdqa ("xmm2",&QWP(8,"esp")); # xmm8
+ &pxor ("xmm1","xmm1");
+ &palignr("xmm1","xmm2",15);
+ &palignr("xmm2","xmm2",15);
+ &pxor ("xmm7","xmm1");
+
+ # rotate
+ &pshufd ("xmm0","xmm0",0xFF);
+ &palignr("xmm0","xmm0",1);
+
+ # fall through...
+ &movdqa (&QWP(8,"esp"),"xmm2"); # xmm8
+
+ # low round: same as high round, but no rotation and no rcon.
+&set_label("_vpaes_schedule_low_round");
+ # smear xmm7
+ &movdqa ("xmm1","xmm7");
+ &pslldq ("xmm7",4);
+ &pxor ("xmm7","xmm1");
+ &movdqa ("xmm1","xmm7");
+ &pslldq ("xmm7",8);
+ &pxor ("xmm7","xmm1");
+ &pxor ("xmm7",&QWP($k_s63,$const));
+
+ # subbyte
+ &movdqa ("xmm4",&QWP($k_s0F,$const));
+ &movdqa ("xmm5",&QWP($k_inv,$const)); # 4 : 1/j
+ &movdqa ("xmm1","xmm4");
+ &pandn ("xmm1","xmm0");
+ &psrld ("xmm1",4); # 1 = i
+ &pand ("xmm0","xmm4"); # 0 = k
+ &movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+ &pshufb ("xmm2","xmm0"); # 2 = a/k
+ &pxor ("xmm0","xmm1"); # 0 = j
+ &movdqa ("xmm3","xmm5"); # 3 : 1/i
+ &pshufb ("xmm3","xmm1"); # 3 = 1/i
+ &pxor ("xmm3","xmm2"); # 3 = iak = 1/i + a/k
+ &movdqa ("xmm4","xmm5"); # 4 : 1/j
+ &pshufb ("xmm4","xmm0"); # 4 = 1/j
+ &pxor ("xmm4","xmm2"); # 4 = jak = 1/j + a/k
+ &movdqa ("xmm2","xmm5"); # 2 : 1/iak
+ &pshufb ("xmm2","xmm3"); # 2 = 1/iak
+ &pxor ("xmm2","xmm0"); # 2 = io
+ &movdqa ("xmm3","xmm5"); # 3 : 1/jak
+ &pshufb ("xmm3","xmm4"); # 3 = 1/jak
+ &pxor ("xmm3","xmm1"); # 3 = jo
+ &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sbou
+ &pshufb ("xmm4","xmm2"); # 4 = sbou
+ &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
+ &pshufb ("xmm0","xmm3"); # 0 = sb1t
+ &pxor ("xmm0","xmm4"); # 0 = sbox output
+
+ # add in smeared stuff
+ &pxor ("xmm0","xmm7");
+ &movdqa ("xmm7","xmm0");
+ &ret ();
+&function_end_B("_vpaes_schedule_round");
+
+##
+## .aes_schedule_transform
+##
+## Linear-transform %xmm0 according to tables at (%ebx)
+##
+## Output in %xmm0
+## Clobbers %xmm1, %xmm2
+##
+&function_begin_B("_vpaes_schedule_transform");
+ &movdqa ("xmm2",&QWP($k_s0F,$const));
+ &movdqa ("xmm1","xmm2");
+ &pandn ("xmm1","xmm0");
+ &psrld ("xmm1",4);
+ &pand ("xmm0","xmm2");
+ &movdqa ("xmm2",&QWP(0,$base));
+ &pshufb ("xmm2","xmm0");
+ &movdqa ("xmm0",&QWP(16,$base));
+ &pshufb ("xmm0","xmm1");
+ &pxor ("xmm0","xmm2");
+ &ret ();
+&function_end_B("_vpaes_schedule_transform");
+
+##
+## .aes_schedule_mangle
+##
+## Mangle xmm0 from (basis-transformed) standard version
+## to our version.
+##
+## On encrypt,
+## xor with 0x63
+## multiply by circulant 0,1,1,1
+## apply shiftrows transform
+##
+## On decrypt,
+## xor with 0x63
+## multiply by "inverse mixcolumns" circulant E,B,D,9
+## deskew
+## apply shiftrows transform
+##
+##
+## Writes out to (%edx), and increments or decrements it
+## Keeps track of round number mod 4 in %ecx
+## Preserves xmm0
+## Clobbers xmm1-xmm5
+##
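+## The circulant multiply needs no multiplier: with r() the one-dword
+## rotation given by k_mc_forward, the three pshufb/pxor steps in the
+## encrypting branch below compute r(x) + r^2(x) + r^3(x), which is
+## multiplication by the circulant (0,1,1,1) over GF(2).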
+&function_begin_B("_vpaes_schedule_mangle");
+ &movdqa ("xmm4","xmm0"); # save xmm0 for later
+ &movdqa ("xmm5",&QWP($k_mc_forward,$const));
+ &test ($out,$out);
+ &jnz (&label("schedule_mangle_dec"));
+
+ # encrypting
+ &add ($key,16);
+ &pxor ("xmm4",&QWP($k_s63,$const));
+ &pshufb ("xmm4","xmm5");
+ &movdqa ("xmm3","xmm4");
+ &pshufb ("xmm4","xmm5");
+ &pxor ("xmm3","xmm4");
+ &pshufb ("xmm4","xmm5");
+ &pxor ("xmm3","xmm4");
+
+ &jmp (&label("schedule_mangle_both"));
+
+&set_label("schedule_mangle_dec",16);
+ # inverse mix columns
+ &movdqa ("xmm2",&QWP($k_s0F,$const));
+ &lea ($inp,&DWP($k_dksd,$const));
+ &movdqa ("xmm1","xmm2");
+ &pandn ("xmm1","xmm4");
+ &psrld ("xmm1",4); # 1 = hi
+ &pand ("xmm4","xmm2"); # 4 = lo
+
+ &movdqa ("xmm2",&QWP(0,$inp));
+ &pshufb ("xmm2","xmm4");
+ &movdqa ("xmm3",&QWP(0x10,$inp));
+ &pshufb ("xmm3","xmm1");
+ &pxor ("xmm3","xmm2");
+ &pshufb ("xmm3","xmm5");
+
+ &movdqa ("xmm2",&QWP(0x20,$inp));
+ &pshufb ("xmm2","xmm4");
+ &pxor ("xmm2","xmm3");
+ &movdqa ("xmm3",&QWP(0x30,$inp));
+ &pshufb ("xmm3","xmm1");
+ &pxor ("xmm3","xmm2");
+ &pshufb ("xmm3","xmm5");
+
+ &movdqa ("xmm2",&QWP(0x40,$inp));
+ &pshufb ("xmm2","xmm4");
+ &pxor ("xmm2","xmm3");
+ &movdqa ("xmm3",&QWP(0x50,$inp));
+ &pshufb ("xmm3","xmm1");
+ &pxor ("xmm3","xmm2");
+ &pshufb ("xmm3","xmm5");
+
+ &movdqa ("xmm2",&QWP(0x60,$inp));
+ &pshufb ("xmm2","xmm4");
+ &pxor ("xmm2","xmm3");
+ &movdqa ("xmm3",&QWP(0x70,$inp));
+ &pshufb ("xmm3","xmm1");
+ &pxor ("xmm3","xmm2");
+
+ &add ($key,-16);
+
+&set_label("schedule_mangle_both");
+ &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
+ &pshufb ("xmm3","xmm1");
+ &add ($magic,-16);
+ &and ($magic,0x30);
+ &movdqu (&QWP(0,$key),"xmm3");
+ &ret ();
+&function_end_B("_vpaes_schedule_mangle");
+
+#
+# Interface to OpenSSL
+#
+&function_begin("${PREFIX}_set_encrypt_key");
+ &mov ($inp,&wparam(0)); # inp
+ &lea ($base,&DWP(-56,"esp"));
+ &mov ($round,&wparam(1)); # bits
+ &and ($base,-16);
+ &mov ($key,&wparam(2)); # key
+ &xchg ($base,"esp"); # alloca
+ &mov (&DWP(48,"esp"),$base);
+
+ &mov ($base,$round);
+ &shr ($base,5);
+ &add ($base,5);
+ &mov (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
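+	# that is 9/11/13 for 128/192/256-bit keys, one less than the
+	# usual 10/12/14: as the cores above are written, this counts
+	# only the rounds preceding the final one.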
+ &mov ($magic,0x30);
+ &mov ($out,0);
+
+ &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+ &call ("_vpaes_schedule_core");
+&set_label("pic_point");
+
+ &mov ("esp",&DWP(48,"esp"));
+ &xor ("eax","eax");
+&function_end("${PREFIX}_set_encrypt_key");
+
+&function_begin("${PREFIX}_set_decrypt_key");
+ &mov ($inp,&wparam(0)); # inp
+ &lea ($base,&DWP(-56,"esp"));
+ &mov ($round,&wparam(1)); # bits
+ &and ($base,-16);
+ &mov ($key,&wparam(2)); # key
+ &xchg ($base,"esp"); # alloca
+ &mov (&DWP(48,"esp"),$base);
+
+ &mov ($base,$round);
+ &shr ($base,5);
+ &add ($base,5);
+ &mov (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
+ &shl ($base,4);
+ &lea ($key,&DWP(16,$key,$base));
+
+ &mov ($out,1);
+ &mov ($magic,$round);
+ &shr ($magic,1);
+ &and ($magic,32);
+	&xor	($magic,32);		# nbits==192?0:32;
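+	# worked out: 128 -> (64&32)^32 = 32, 192 -> (96&32)^32 = 0,
+	# 256 -> (128&32)^32 = 32; the initial shiftrows-table offset
+	# differs only for 192-bit keys.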
+
+ &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+ &call ("_vpaes_schedule_core");
+&set_label("pic_point");
+
+ &mov ("esp",&DWP(48,"esp"));
+ &xor ("eax","eax");
+&function_end("${PREFIX}_set_decrypt_key");
+
+&function_begin("${PREFIX}_encrypt");
+ &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+ &call ("_vpaes_preheat");
+&set_label("pic_point");
+ &mov ($inp,&wparam(0)); # inp
+ &lea ($base,&DWP(-56,"esp"));
+ &mov ($out,&wparam(1)); # out
+ &and ($base,-16);
+ &mov ($key,&wparam(2)); # key
+ &xchg ($base,"esp"); # alloca
+ &mov (&DWP(48,"esp"),$base);
+
+ &movdqu ("xmm0",&QWP(0,$inp));
+ &call ("_vpaes_encrypt_core");
+ &movdqu (&QWP(0,$out),"xmm0");
+
+ &mov ("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_encrypt");
+
+&function_begin("${PREFIX}_decrypt");
+ &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+ &call ("_vpaes_preheat");
+&set_label("pic_point");
+ &mov ($inp,&wparam(0)); # inp
+ &lea ($base,&DWP(-56,"esp"));
+ &mov ($out,&wparam(1)); # out
+ &and ($base,-16);
+ &mov ($key,&wparam(2)); # key
+ &xchg ($base,"esp"); # alloca
+ &mov (&DWP(48,"esp"),$base);
+
+ &movdqu ("xmm0",&QWP(0,$inp));
+ &call ("_vpaes_decrypt_core");
+ &movdqu (&QWP(0,$out),"xmm0");
+
+ &mov ("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_decrypt");
+
+&function_begin("${PREFIX}_cbc_encrypt");
+ &mov ($inp,&wparam(0)); # inp
+ &mov ($out,&wparam(1)); # out
+ &mov ($round,&wparam(2)); # len
+ &mov ($key,&wparam(3)); # key
+ &lea ($base,&DWP(-56,"esp"));
+ &mov ($const,&wparam(4)); # ivp
+ &and ($base,-16);
+ &mov ($magic,&wparam(5)); # enc
+ &xchg ($base,"esp"); # alloca
+ &movdqu ("xmm1",&QWP(0,$const)); # load IV
+ &sub ($out,$inp);
+ &mov (&DWP(48,"esp"),$base);
+
+ &mov (&DWP(0,"esp"),$out); # save out
+ &sub ($round,16);
+	&mov	(&DWP(4,"esp"),$key);	# save key
+ &mov (&DWP(8,"esp"),$const); # save ivp
+ &mov ($out,$round); # $out works as $len
+
+ &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+ &call ("_vpaes_preheat");
+&set_label("pic_point");
+ &cmp ($magic,0);
+ &je (&label("cbc_dec_loop"));
+ &jmp (&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+ &movdqu ("xmm0",&QWP(0,$inp)); # load input
+ &pxor ("xmm0","xmm1"); # inp^=iv
+ &call ("_vpaes_encrypt_core");
+ &mov ($base,&DWP(0,"esp")); # restore out
+ &mov ($key,&DWP(4,"esp")); # restore key
+ &movdqa ("xmm1","xmm0");
+ &movdqu (&QWP(0,$base,$inp),"xmm0"); # write output
+ &lea ($inp,&DWP(16,$inp));
+ &sub ($out,16);
+ &jnc (&label("cbc_enc_loop"));
+ &jmp (&label("cbc_done"));
+
+&set_label("cbc_dec_loop",16);
+ &movdqu ("xmm0",&QWP(0,$inp)); # load input
+ &movdqa (&QWP(16,"esp"),"xmm1"); # save IV
+ &movdqa (&QWP(32,"esp"),"xmm0"); # save future IV
+ &call ("_vpaes_decrypt_core");
+ &mov ($base,&DWP(0,"esp")); # restore out
+ &mov ($key,&DWP(4,"esp")); # restore key
+ &pxor ("xmm0",&QWP(16,"esp")); # out^=iv
+ &movdqa ("xmm1",&QWP(32,"esp")); # load next IV
+ &movdqu (&QWP(0,$base,$inp),"xmm0"); # write output
+ &lea ($inp,&DWP(16,$inp));
+ &sub ($out,16);
+ &jnc (&label("cbc_dec_loop"));
+
+&set_label("cbc_done");
+ &mov ($base,&DWP(8,"esp")); # restore ivp
+ &mov ("esp",&DWP(48,"esp"));
+ &movdqu (&QWP(0,$base),"xmm1"); # write IV
+&function_end("${PREFIX}_cbc_encrypt");
+
+&asm_finish();
diff --git a/openssl/crypto/aes/asm/vpaes-x86_64.pl b/openssl/crypto/aes/asm/vpaes-x86_64.pl
new file mode 100644
index 000000000..025470223
--- /dev/null
+++ b/openssl/crypto/aes/asm/vpaes-x86_64.pl
@@ -0,0 +1,1204 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Interface to OpenSSL as "almost" drop-in replacement for
+# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. aes-x86_64.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with 128-bit key, and vpaes-x86_64.pl column -
+# [also large-block CBC] encrypt/decrypt.
+#
+# aes-x86_64.pl vpaes-x86_64.pl
+#
+# Core 2(**) 30.5/43.7/14.3 21.8/25.7(***)
+# Nehalem 30.5/42.2/14.6 9.8/11.8
+# Atom 63.9/79.0/32.1 64.0/84.8(***)
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than specifically to Intel HTT. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off"
+#	results are presented mostly for reference purposes.
+#
+# (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	The less impressive improvement on Core 2 and Atom is due to
+#	slow pshufb; still, it is a respectable +40%/78% on Core 2
+#	(as implied, over the "hyper-threading-safe" code path).
+#
+# <appro@openssl.org>
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$PREFIX="vpaes";
+
+$code.=<<___;
+.text
+
+##
+## _aes_encrypt_core
+##
+## AES-encrypt %xmm0.
+##
+## Inputs:
+## %xmm0 = input
+## %xmm9-%xmm15 as in _vpaes_preheat
+## (%rdx) = scheduled keys
+##
+## Output in %xmm0
+## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
+## Preserves %xmm6 - %xmm8 so you get some local vectors
+##
+##
+.type _vpaes_encrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_encrypt_core:
+ mov %rdx, %r9
+ mov \$16, %r11
+ mov 240(%rdx),%eax
+ movdqa %xmm9, %xmm1
+ movdqa .Lk_ipt(%rip), %xmm2 # iptlo
+ pandn %xmm0, %xmm1
+ movdqu (%r9), %xmm5 # round0 key
+ psrld \$4, %xmm1
+ pand %xmm9, %xmm0
+ pshufb %xmm0, %xmm2
+ movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
+ pshufb %xmm1, %xmm0
+ pxor %xmm5, %xmm2
+ pxor %xmm2, %xmm0
+ add \$16, %r9
+ lea .Lk_mc_backward(%rip),%r10
+ jmp .Lenc_entry
+
+.align 16
+.Lenc_loop:
+ # middle of middle round
+ movdqa %xmm13, %xmm4 # 4 : sb1u
+ pshufb %xmm2, %xmm4 # 4 = sb1u
+ pxor %xmm5, %xmm4 # 4 = sb1u + k
+ movdqa %xmm12, %xmm0 # 0 : sb1t
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ pxor %xmm4, %xmm0 # 0 = A
+ movdqa %xmm15, %xmm5 # 4 : sb2u
+ pshufb %xmm2, %xmm5 # 4 = sb2u
+ movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
+ movdqa %xmm14, %xmm2 # 2 : sb2t
+ pshufb %xmm3, %xmm2 # 2 = sb2t
+ pxor %xmm5, %xmm2 # 2 = 2A
+ movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
+ movdqa %xmm0, %xmm3 # 3 = A
+ pshufb %xmm1, %xmm0 # 0 = B
+ add \$16, %r9 # next key
+ pxor %xmm2, %xmm0 # 0 = 2A+B
+ pshufb %xmm4, %xmm3 # 3 = D
+ add \$16, %r11 # next mc
+ pxor %xmm0, %xmm3 # 3 = 2A+B+D
+ pshufb %xmm1, %xmm0 # 0 = 2B+C
+ and \$0x30, %r11 # ... mod 4
+ pxor %xmm3, %xmm0 # 0 = 2A+3B+C+D
+ sub \$1,%rax # nr--
+
+.Lenc_entry:
+ # top of round
+ movdqa %xmm9, %xmm1 # 1 : i
+ pandn %xmm0, %xmm1 # 1 = i<<4
+ psrld \$4, %xmm1 # 1 = i
+ pand %xmm9, %xmm0 # 0 = k
+ movdqa %xmm11, %xmm5 # 2 : a/k
+ pshufb %xmm0, %xmm5 # 2 = a/k
+ pxor %xmm1, %xmm0 # 0 = j
+ movdqa %xmm10, %xmm3 # 3 : 1/i
+ pshufb %xmm1, %xmm3 # 3 = 1/i
+ pxor %xmm5, %xmm3 # 3 = iak = 1/i + a/k
+ movdqa %xmm10, %xmm4 # 4 : 1/j
+ pshufb %xmm0, %xmm4 # 4 = 1/j
+ pxor %xmm5, %xmm4 # 4 = jak = 1/j + a/k
+ movdqa %xmm10, %xmm2 # 2 : 1/iak
+ pshufb %xmm3, %xmm2 # 2 = 1/iak
+ pxor %xmm0, %xmm2 # 2 = io
+ movdqa %xmm10, %xmm3 # 3 : 1/jak
+ movdqu (%r9), %xmm5
+ pshufb %xmm4, %xmm3 # 3 = 1/jak
+ pxor %xmm1, %xmm3 # 3 = jo
+ jnz .Lenc_loop
+
+ # middle of last round
+ movdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
+ movdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
+ pshufb %xmm2, %xmm4 # 4 = sbou
+ pxor %xmm5, %xmm4 # 4 = sb1u + k
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
+ pxor %xmm4, %xmm0 # 0 = A
+ pshufb %xmm1, %xmm0
+ ret
+.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
+
+##
+## Decryption core
+##
+## Same API as encryption core.
+##
+.type _vpaes_decrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_decrypt_core:
+ mov %rdx, %r9 # load key
+ mov 240(%rdx),%eax
+ movdqa %xmm9, %xmm1
+ movdqa .Lk_dipt(%rip), %xmm2 # iptlo
+ pandn %xmm0, %xmm1
+ mov %rax, %r11
+ psrld \$4, %xmm1
+ movdqu (%r9), %xmm5 # round0 key
+ shl \$4, %r11
+ pand %xmm9, %xmm0
+ pshufb %xmm0, %xmm2
+ movdqa .Lk_dipt+16(%rip), %xmm0 # ipthi
+ xor \$0x30, %r11
+ lea .Lk_dsbd(%rip),%r10
+ pshufb %xmm1, %xmm0
+ and \$0x30, %r11
+ pxor %xmm5, %xmm2
+ movdqa .Lk_mc_forward+48(%rip), %xmm5
+ pxor %xmm2, %xmm0
+ add \$16, %r9
+ add %r10, %r11
+ jmp .Ldec_entry
+
+.align 16
+.Ldec_loop:
+##
+## Inverse mix columns
+##
+ movdqa -0x20(%r10),%xmm4 # 4 : sb9u
+ pshufb %xmm2, %xmm4 # 4 = sb9u
+ pxor %xmm0, %xmm4
+ movdqa -0x10(%r10),%xmm0 # 0 : sb9t
+ pshufb %xmm3, %xmm0 # 0 = sb9t
+ pxor %xmm4, %xmm0 # 0 = ch
+ add \$16, %r9 # next round key
+
+ pshufb %xmm5, %xmm0 # MC ch
+ movdqa 0x00(%r10),%xmm4 # 4 : sbdu
+ pshufb %xmm2, %xmm4 # 4 = sbdu
+ pxor %xmm0, %xmm4 # 4 = ch
+ movdqa 0x10(%r10),%xmm0 # 0 : sbdt
+ pshufb %xmm3, %xmm0 # 0 = sbdt
+ pxor %xmm4, %xmm0 # 0 = ch
+ sub \$1,%rax # nr--
+
+ pshufb %xmm5, %xmm0 # MC ch
+ movdqa 0x20(%r10),%xmm4 # 4 : sbbu
+ pshufb %xmm2, %xmm4 # 4 = sbbu
+ pxor %xmm0, %xmm4 # 4 = ch
+ movdqa 0x30(%r10),%xmm0 # 0 : sbbt
+ pshufb %xmm3, %xmm0 # 0 = sbbt
+ pxor %xmm4, %xmm0 # 0 = ch
+
+ pshufb %xmm5, %xmm0 # MC ch
+ movdqa 0x40(%r10),%xmm4 # 4 : sbeu
+ pshufb %xmm2, %xmm4 # 4 = sbeu
+ pxor %xmm0, %xmm4 # 4 = ch
+ movdqa 0x50(%r10),%xmm0 # 0 : sbet
+ pshufb %xmm3, %xmm0 # 0 = sbet
+ pxor %xmm4, %xmm0 # 0 = ch
+
+ palignr \$12, %xmm5, %xmm5
+
+.Ldec_entry:
+ # top of round
+ movdqa %xmm9, %xmm1 # 1 : i
+ pandn %xmm0, %xmm1 # 1 = i<<4
+ psrld \$4, %xmm1 # 1 = i
+ pand %xmm9, %xmm0 # 0 = k
+ movdqa %xmm11, %xmm2 # 2 : a/k
+ pshufb %xmm0, %xmm2 # 2 = a/k
+ pxor %xmm1, %xmm0 # 0 = j
+ movdqa %xmm10, %xmm3 # 3 : 1/i
+ pshufb %xmm1, %xmm3 # 3 = 1/i
+ pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
+ movdqa %xmm10, %xmm4 # 4 : 1/j
+ pshufb %xmm0, %xmm4 # 4 = 1/j
+ pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
+ movdqa %xmm10, %xmm2 # 2 : 1/iak
+ pshufb %xmm3, %xmm2 # 2 = 1/iak
+ pxor %xmm0, %xmm2 # 2 = io
+ movdqa %xmm10, %xmm3 # 3 : 1/jak
+ pshufb %xmm4, %xmm3 # 3 = 1/jak
+ pxor %xmm1, %xmm3 # 3 = jo
+ movdqu (%r9), %xmm0
+ jnz .Ldec_loop
+
+ # middle of last round
+ movdqa 0x60(%r10), %xmm4 # 3 : sbou
+ pshufb %xmm2, %xmm4 # 4 = sbou
+ pxor %xmm0, %xmm4 # 4 = sb1u + k
+ movdqa 0x70(%r10), %xmm0 # 0 : sbot
+ movdqa .Lk_sr-.Lk_dsbd(%r11), %xmm2
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ pxor %xmm4, %xmm0 # 0 = A
+ pshufb %xmm2, %xmm0
+ ret
+.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+########################################################
+## ##
+## AES key schedule ##
+## ##
+########################################################
+.type _vpaes_schedule_core,\@abi-omnipotent
+.align 16
+_vpaes_schedule_core:
+ # rdi = key
+ # rsi = size in bits
+ # rdx = buffer
+ # rcx = direction. 0=encrypt, 1=decrypt
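+	# Note: when decrypting (rcx != 0) the caller points rdx at the
+	# last slot of the key buffer and _vpaes_schedule_mangle steps it
+	# backwards by 16, so the same walk serves both directions.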
+
+ call _vpaes_preheat # load the tables
+ movdqa .Lk_rcon(%rip), %xmm8 # load rcon
+ movdqu (%rdi), %xmm0 # load key (unaligned)
+
+ # input transform
+ movdqa %xmm0, %xmm3
+ lea .Lk_ipt(%rip), %r11
+ call _vpaes_schedule_transform
+ movdqa %xmm0, %xmm7
+
+ lea .Lk_sr(%rip),%r10
+ test %rcx, %rcx
+ jnz .Lschedule_am_decrypting
+
+ # encrypting, output zeroth round key after transform
+ movdqu %xmm0, (%rdx)
+ jmp .Lschedule_go
+
+.Lschedule_am_decrypting:
+ # decrypting, output zeroth round key after shiftrows
+ movdqa (%r8,%r10),%xmm1
+ pshufb %xmm1, %xmm3
+ movdqu %xmm3, (%rdx)
+ xor \$0x30, %r8
+
+.Lschedule_go:
+ cmp \$192, %esi
+ ja .Lschedule_256
+ je .Lschedule_192
+	# 128: fall through
+
+##
+## .schedule_128
+##
+## 128-bit specific part of key schedule.
+##
+## This schedule is really simple, because all its parts
+## are accomplished by the subroutines.
+##
+.Lschedule_128:
+ mov \$10, %esi
+
+.Loop_schedule_128:
+ call _vpaes_schedule_round
+ dec %rsi
+ jz .Lschedule_mangle_last
+ call _vpaes_schedule_mangle # write output
+ jmp .Loop_schedule_128
+
+##
+## .aes_schedule_192
+##
+## 192-bit specific part of key schedule.
+##
+## The main body of this schedule is the same as the 128-bit
+## schedule, but with more smearing. The long, high side is
+## stored in %xmm7 as before, and the short, low side is in
+## the high bits of %xmm6.
+##
+## This schedule is somewhat nastier, however, because each
+## round produces 192 bits of key material, or 1.5 round keys.
+## Therefore, on each cycle we do 2 rounds and produce 3 round
+## keys.
+##
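+## Sanity check: AES-192 needs 13 round keys (12 rounds plus initial
+## whitening). One is written before .Lschedule_go; three of the four
+## trips through .Loop_schedule_192 emit 3 keys apiece and the last
+## emits 2 plus the final key in .Lschedule_mangle_last: 1+9+2+1 = 13.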
+.align 16
+.Lschedule_192:
+ movdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
+ call _vpaes_schedule_transform # input transform
+ movdqa %xmm0, %xmm6 # save short part
+ pxor %xmm4, %xmm4 # clear 4
+ movhlps %xmm4, %xmm6 # clobber low side with zeros
+ mov \$4, %esi
+
+.Loop_schedule_192:
+ call _vpaes_schedule_round
+ palignr \$8,%xmm6,%xmm0
+ call _vpaes_schedule_mangle # save key n
+ call _vpaes_schedule_192_smear
+ call _vpaes_schedule_mangle # save key n+1
+ call _vpaes_schedule_round
+ dec %rsi
+ jz .Lschedule_mangle_last
+ call _vpaes_schedule_mangle # save key n+2
+ call _vpaes_schedule_192_smear
+ jmp .Loop_schedule_192
+
+##
+## .aes_schedule_256
+##
+## 256-bit specific part of key schedule.
+##
+## The structure here is very similar to the 128-bit
+## schedule, but with an additional "low side" in
+## %xmm6. The low side's rounds are the same as the
+## high side's, except no rcon and no rotation.
+##
+.align 16
+.Lschedule_256:
+ movdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
+ call _vpaes_schedule_transform # input transform
+ mov \$7, %esi
+
+.Loop_schedule_256:
+ call _vpaes_schedule_mangle # output low result
+ movdqa %xmm0, %xmm6 # save cur_lo in xmm6
+
+ # high round
+ call _vpaes_schedule_round
+ dec %rsi
+ jz .Lschedule_mangle_last
+ call _vpaes_schedule_mangle
+
+ # low round. swap xmm7 and xmm6
+ pshufd \$0xFF, %xmm0, %xmm0
+ movdqa %xmm7, %xmm5
+ movdqa %xmm6, %xmm7
+ call _vpaes_schedule_low_round
+ movdqa %xmm5, %xmm7
+
+ jmp .Loop_schedule_256
+
+
+##
+## .aes_schedule_mangle_last
+##
+## Mangler for last round of key schedule
+## Mangles %xmm0
+## when encrypting, outputs out(%xmm0) ^ 63
+## when decrypting, outputs unskew(%xmm0)
+##
+## Always called right before return... jumps to cleanup and exits
+##
+.align 16
+.Lschedule_mangle_last:
+ # schedule last round key from xmm0
+ lea .Lk_deskew(%rip),%r11 # prepare to deskew
+ test %rcx, %rcx
+ jnz .Lschedule_mangle_last_dec
+
+ # encrypting
+ movdqa (%r8,%r10),%xmm1
+ pshufb %xmm1, %xmm0 # output permute
+ lea .Lk_opt(%rip), %r11 # prepare to output transform
+ add \$32, %rdx
+
+.Lschedule_mangle_last_dec:
+ add \$-16, %rdx
+ pxor .Lk_s63(%rip), %xmm0
+ call _vpaes_schedule_transform # output transform
+ movdqu %xmm0, (%rdx) # save last key
+
+ # cleanup
+ pxor %xmm0, %xmm0
+ pxor %xmm1, %xmm1
+ pxor %xmm2, %xmm2
+ pxor %xmm3, %xmm3
+ pxor %xmm4, %xmm4
+ pxor %xmm5, %xmm5
+ pxor %xmm6, %xmm6
+ pxor %xmm7, %xmm7
+ ret
+.size _vpaes_schedule_core,.-_vpaes_schedule_core
+
+##
+## .aes_schedule_192_smear
+##
+## Smear the short, low side in the 192-bit key schedule.
+##
+## Inputs:
+## %xmm7: high side, b a x y
+## %xmm6: low side, d c 0 0
+## %xmm1: used as scratch zero (the code clears it itself)
+##
+## Outputs:
+## %xmm6: b+c+d b+c 0 0
+## %xmm0: b+c+d b+c b a
+##
+.type _vpaes_schedule_192_smear,\@abi-omnipotent
+.align 16
+_vpaes_schedule_192_smear:
+ pshufd \$0x80, %xmm6, %xmm0 # d c 0 0 -> c 0 0 0
+ pxor %xmm0, %xmm6 # -> c+d c 0 0
+ pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
+ pxor %xmm0, %xmm6 # -> b+c+d b+c b a
+ movdqa %xmm6, %xmm0
+ pxor %xmm1, %xmm1
+ movhlps %xmm1, %xmm6 # clobber low side with zeros
+ ret
+.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+##
+## .aes_schedule_round
+##
+## Runs one main round of the key schedule on %xmm0, %xmm7
+##
+## Specifically, runs subbytes on the high dword of %xmm0
+## then rotates it by one byte and xors into the low dword of
+## %xmm7.
+##
+## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+## next rcon.
+##
+## Smears the dwords of %xmm7 by xoring the low into the
+## second low, result into third, result into highest.
+##
+## Returns results in %xmm7 = %xmm0.
+## Clobbers %xmm1-%xmm4, %r11.
+##
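+## For reference, with %xmm7 viewed as dwords (w3 w2 w1 w0) and + being
+## xor, the two pslldq/pxor pairs below yield
+##   (w3+w2, w2+w1, w1+w0, w0)   and then
+##   (w3+w2+w1+w0, w2+w1+w0, w1+w0, w0),
+## exactly the smear described above.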
+.type _vpaes_schedule_round,\@abi-omnipotent
+.align 16
+_vpaes_schedule_round:
+ # extract rcon from xmm8
+ pxor %xmm1, %xmm1
+ palignr \$15, %xmm8, %xmm1
+ palignr \$15, %xmm8, %xmm8
+ pxor %xmm1, %xmm7
+
+ # rotate
+ pshufd \$0xFF, %xmm0, %xmm0
+ palignr \$1, %xmm0, %xmm0
+
+ # fall through...
+
+ # low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+ # smear xmm7
+ movdqa %xmm7, %xmm1
+ pslldq \$4, %xmm7
+ pxor %xmm1, %xmm7
+ movdqa %xmm7, %xmm1
+ pslldq \$8, %xmm7
+ pxor %xmm1, %xmm7
+ pxor .Lk_s63(%rip), %xmm7
+
+ # subbytes
+ movdqa %xmm9, %xmm1
+ pandn %xmm0, %xmm1
+ psrld \$4, %xmm1 # 1 = i
+ pand %xmm9, %xmm0 # 0 = k
+ movdqa %xmm11, %xmm2 # 2 : a/k
+ pshufb %xmm0, %xmm2 # 2 = a/k
+ pxor %xmm1, %xmm0 # 0 = j
+ movdqa %xmm10, %xmm3 # 3 : 1/i
+ pshufb %xmm1, %xmm3 # 3 = 1/i
+ pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
+ movdqa %xmm10, %xmm4 # 4 : 1/j
+ pshufb %xmm0, %xmm4 # 4 = 1/j
+ pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
+ movdqa %xmm10, %xmm2 # 2 : 1/iak
+ pshufb %xmm3, %xmm2 # 2 = 1/iak
+ pxor %xmm0, %xmm2 # 2 = io
+ movdqa %xmm10, %xmm3 # 3 : 1/jak
+ pshufb %xmm4, %xmm3 # 3 = 1/jak
+ pxor %xmm1, %xmm3 # 3 = jo
+ movdqa %xmm13, %xmm4 # 4 : sbou
+ pshufb %xmm2, %xmm4 # 4 = sbou
+ movdqa %xmm12, %xmm0 # 0 : sbot
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ pxor %xmm4, %xmm0 # 0 = sbox output
+
+ # add in smeared stuff
+ pxor %xmm7, %xmm0
+ movdqa %xmm0, %xmm7
+ ret
+.size _vpaes_schedule_round,.-_vpaes_schedule_round
+
+##
+## .aes_schedule_transform
+##
+## Linear-transform %xmm0 according to tables at (%r11)
+##
+## Requires that %xmm9 = 0x0F0F... as in preheat
+## Output in %xmm0
+## Clobbers %xmm1, %xmm2
+##
+.type _vpaes_schedule_transform,\@abi-omnipotent
+.align 16
+_vpaes_schedule_transform:
+ movdqa %xmm9, %xmm1
+ pandn %xmm0, %xmm1
+ psrld \$4, %xmm1
+ pand %xmm9, %xmm0
+ movdqa (%r11), %xmm2 # lo
+ pshufb %xmm0, %xmm2
+ movdqa 16(%r11), %xmm0 # hi
+ pshufb %xmm1, %xmm0
+ pxor %xmm2, %xmm0
+ ret
+.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
+
+##
+## .aes_schedule_mangle
+##
+## Mangle xmm0 from (basis-transformed) standard version
+## to our version.
+##
+## On encrypt,
+## xor with 0x63
+## multiply by circulant 0,1,1,1
+## apply shiftrows transform
+##
+## On decrypt,
+## xor with 0x63
+## multiply by "inverse mixcolumns" circulant E,B,D,9
+## deskew
+## apply shiftrows transform
+##
+##
+## Writes out to (%rdx), and increments or decrements it
+## Keeps track of round number mod 4 in %r8
+## Preserves xmm0
+## Clobbers xmm1-xmm5
+##
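+## As in the 32-bit port, the (0,1,1,1) circulant is computed without
+## multiplies: with r() the one-dword rotation from .Lk_mc_forward, the
+## three pshufb/pxor pairs in the encrypting branch give
+## r(x) + r^2(x) + r^3(x).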
+.type _vpaes_schedule_mangle,\@abi-omnipotent
+.align 16
+_vpaes_schedule_mangle:
+ movdqa %xmm0, %xmm4 # save xmm0 for later
+ movdqa .Lk_mc_forward(%rip),%xmm5
+ test %rcx, %rcx
+ jnz .Lschedule_mangle_dec
+
+ # encrypting
+ add \$16, %rdx
+ pxor .Lk_s63(%rip),%xmm4
+ pshufb %xmm5, %xmm4
+ movdqa %xmm4, %xmm3
+ pshufb %xmm5, %xmm4
+ pxor %xmm4, %xmm3
+ pshufb %xmm5, %xmm4
+ pxor %xmm4, %xmm3
+
+ jmp .Lschedule_mangle_both
+.align 16
+.Lschedule_mangle_dec:
+ # inverse mix columns
+ lea .Lk_dksd(%rip),%r11
+ movdqa %xmm9, %xmm1
+ pandn %xmm4, %xmm1
+ psrld \$4, %xmm1 # 1 = hi
+ pand %xmm9, %xmm4 # 4 = lo
+
+ movdqa 0x00(%r11), %xmm2
+ pshufb %xmm4, %xmm2
+ movdqa 0x10(%r11), %xmm3
+ pshufb %xmm1, %xmm3
+ pxor %xmm2, %xmm3
+ pshufb %xmm5, %xmm3
+
+ movdqa 0x20(%r11), %xmm2
+ pshufb %xmm4, %xmm2
+ pxor %xmm3, %xmm2
+ movdqa 0x30(%r11), %xmm3
+ pshufb %xmm1, %xmm3
+ pxor %xmm2, %xmm3
+ pshufb %xmm5, %xmm3
+
+ movdqa 0x40(%r11), %xmm2
+ pshufb %xmm4, %xmm2
+ pxor %xmm3, %xmm2
+ movdqa 0x50(%r11), %xmm3
+ pshufb %xmm1, %xmm3
+ pxor %xmm2, %xmm3
+ pshufb %xmm5, %xmm3
+
+ movdqa 0x60(%r11), %xmm2
+ pshufb %xmm4, %xmm2
+ pxor %xmm3, %xmm2
+ movdqa 0x70(%r11), %xmm3
+ pshufb %xmm1, %xmm3
+ pxor %xmm2, %xmm3
+
+ add \$-16, %rdx
+
+.Lschedule_mangle_both:
+ movdqa (%r8,%r10),%xmm1
+ pshufb %xmm1,%xmm3
+ add \$-16, %r8
+ and \$0x30, %r8
+ movdqu %xmm3, (%rdx)
+ ret
+.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+#
+# Interface to OpenSSL
+#
+.globl ${PREFIX}_set_encrypt_key
+.type ${PREFIX}_set_encrypt_key,\@function,3
+.align 16
+${PREFIX}_set_encrypt_key:
+___
+$code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+.Lenc_key_body:
+___
+$code.=<<___;
+ mov %esi,%eax
+ shr \$5,%eax
+ add \$5,%eax
+ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
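+	# i.e. 240(key) holds 9/11/13 rather than the usual 10/12/14; the
+	# cores above run that many rounds and then the final round.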
+
+ mov \$0,%ecx
+ mov \$0x30,%r8d
+ call _vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+.Lenc_key_epilogue:
+___
+$code.=<<___;
+ xor %eax,%eax
+ ret
+.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+
+.globl ${PREFIX}_set_decrypt_key
+.type ${PREFIX}_set_decrypt_key,\@function,3
+.align 16
+${PREFIX}_set_decrypt_key:
+___
+$code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+.Ldec_key_body:
+___
+$code.=<<___;
+ mov %esi,%eax
+ shr \$5,%eax
+ add \$5,%eax
+ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
+ shl \$4,%eax
+ lea 16(%rdx,%rax),%rdx
+
+ mov \$1,%ecx
+ mov %esi,%r8d
+ shr \$1,%r8d
+ and \$32,%r8d
+ xor \$32,%r8d # nbits==192?0:32
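+	# worked out: %r8d ends up 32 for 128- and 256-bit keys and 0 for
+	# 192-bit ones, selecting the starting .Lk_sr row for the
+	# decryption schedule.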
+ call _vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+.Ldec_key_epilogue:
+___
+$code.=<<___;
+ xor %eax,%eax
+ ret
+.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+
+.globl ${PREFIX}_encrypt
+.type ${PREFIX}_encrypt,\@function,3
+.align 16
+${PREFIX}_encrypt:
+___
+$code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+.Lenc_body:
+___
+$code.=<<___;
+ movdqu (%rdi),%xmm0
+ call _vpaes_preheat
+ call _vpaes_encrypt_core
+ movdqu %xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+.Lenc_epilogue:
+___
+$code.=<<___;
+ ret
+.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl ${PREFIX}_decrypt
+.type ${PREFIX}_decrypt,\@function,3
+.align 16
+${PREFIX}_decrypt:
+___
+$code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+.Ldec_body:
+___
+$code.=<<___;
+ movdqu (%rdi),%xmm0
+ call _vpaes_preheat
+ call _vpaes_decrypt_core
+ movdqu %xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+.Ldec_epilogue:
+___
+$code.=<<___;
+ ret
+.size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
+___
+{
+my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
+# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+# size_t length, const AES_KEY *key,
+# unsigned char *ivp,const int enc);
+$code.=<<___;
+.globl ${PREFIX}_cbc_encrypt
+.type ${PREFIX}_cbc_encrypt,\@function,6
+.align 16
+${PREFIX}_cbc_encrypt:
+ xchg $key,$len
+___
+($len,$key)=($key,$len);
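+# The prototype above has length in the third argument (%rdx) and the
+# key in the fourth (%rcx), but the cores expect the key pointer in
+# %rdx, hence the xchg; swapping the Perl aliases keeps the register
+# names below truthful.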
+$code.=<<___;
+___
+$code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+.Lcbc_body:
+___
+$code.=<<___;
+ movdqu ($ivp),%xmm6 # load IV
+ sub $inp,$out
+ sub \$16,$len
+ call _vpaes_preheat
+ cmp \$0,${enc}d
+ je .Lcbc_dec_loop
+ jmp .Lcbc_enc_loop
+.align 16
+.Lcbc_enc_loop:
+ movdqu ($inp),%xmm0
+ pxor %xmm6,%xmm0
+ call _vpaes_encrypt_core
+ movdqa %xmm0,%xmm6
+ movdqu %xmm0,($out,$inp)
+ lea 16($inp),$inp
+ sub \$16,$len
+ jnc .Lcbc_enc_loop
+ jmp .Lcbc_done
+.align 16
+.Lcbc_dec_loop:
+ movdqu ($inp),%xmm0
+ movdqa %xmm0,%xmm7
+ call _vpaes_decrypt_core
+ pxor %xmm6,%xmm0
+ movdqa %xmm7,%xmm6
+ movdqu %xmm0,($out,$inp)
+ lea 16($inp),$inp
+ sub \$16,$len
+ jnc .Lcbc_dec_loop
+.Lcbc_done:
+ movdqu %xmm6,($ivp) # save IV
+___
+$code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+.Lcbc_epilogue:
+___
+$code.=<<___;
+ ret
+.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+}
+$code.=<<___;
+##
+## _aes_preheat
+##
+## Fills register %r10 with a pointer into _vpaes_consts (so you can -fPIC)
+## and %xmm9-%xmm15 as specified below.
+##
+.type _vpaes_preheat,\@abi-omnipotent
+.align 16
+_vpaes_preheat:
+ lea .Lk_s0F(%rip), %r10
+ movdqa -0x20(%r10), %xmm10 # .Lk_inv
+ movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
+ movdqa 0x00(%r10), %xmm9 # .Lk_s0F
+ movdqa 0x30(%r10), %xmm13 # .Lk_sb1
+ movdqa 0x40(%r10), %xmm12 # .Lk_sb1+16
+ movdqa 0x50(%r10), %xmm15 # .Lk_sb2
+ movdqa 0x60(%r10), %xmm14 # .Lk_sb2+16
+ ret
+.size _vpaes_preheat,.-_vpaes_preheat
+########################################################
+## ##
+## Constants ##
+## ##
+########################################################
+.type _vpaes_consts,\@object
+.align 64
+_vpaes_consts:
+.Lk_inv: # inv, inva
+ .quad 0x0E05060F0D080180, 0x040703090A0B0C02
+ .quad 0x01040A060F0B0780, 0x030D0E0C02050809
+
+.Lk_s0F: # s0F
+ .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
+
+.Lk_ipt: # input transform (lo, hi)
+ .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
+ .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+
+.Lk_sb1: # sb1u, sb1t
+ .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+ .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+.Lk_sb2: # sb2u, sb2t
+ .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
+ .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
+.Lk_sbo: # sbou, sbot
+ .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
+ .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+
+.Lk_mc_forward: # mc_forward
+ .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
+ .quad 0x080B0A0904070605, 0x000302010C0F0E0D
+ .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
+ .quad 0x000302010C0F0E0D, 0x080B0A0904070605
+
+.Lk_mc_backward:# mc_backward
+ .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
+ .quad 0x020100030E0D0C0F, 0x0A09080B06050407
+ .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
+ .quad 0x0A09080B06050407, 0x020100030E0D0C0F
+
+.Lk_sr: # sr
+ .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
+ .quad 0x030E09040F0A0500, 0x0B06010C07020D08
+ .quad 0x0F060D040B020900, 0x070E050C030A0108
+ .quad 0x0B0E0104070A0D00, 0x0306090C0F020508
+
+.Lk_rcon: # rcon
+ .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_s63: # s63: all equal to 0x63 transformed
+ .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
+
+.Lk_opt: # output transform
+ .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
+ .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+
+.Lk_deskew: # deskew tables: inverts the sbox's "skew"
+ .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+ .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+##
+## Decryption stuff
+## Key schedule constants
+##
+.Lk_dksd: # decryption key schedule: invskew x*D
+ .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+ .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb: # decryption key schedule: invskew x*B
+ .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
+ .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse: # decryption key schedule: invskew x*E + 0x63
+ .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
+ .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9: # decryption key schedule: invskew x*9
+ .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
+ .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+##
+## Decryption stuff
+## Round function constants
+##
+.Lk_dipt: # decryption input transform
+ .quad 0x0F505B040B545F00, 0x154A411E114E451A
+ .quad 0x86E383E660056500, 0x12771772F491F194
+
+.Lk_dsb9: # decryption sbox output *9*u, *9*t
+ .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
+ .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd: # decryption sbox output *D*u, *D*t
+ .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+ .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb: # decryption sbox output *B*u, *B*t
+ .quad 0xD022649296B44200, 0x602646F6B0F2D404
+ .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe: # decryption sbox output *E*u, *E*t
+ .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
+ .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+.Lk_dsbo: # decryption sbox final output
+ .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+ .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.asciz "Vector Permutaion AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
+.align 64
+.size _vpaes_consts,.-_vpaes_consts
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ lea 16(%rax),%rsi # %xmm save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0xb8(%rax),%rax # adjust stack pointer
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$`1232/8`,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_${PREFIX}_set_encrypt_key
+ .rva .LSEH_end_${PREFIX}_set_encrypt_key
+ .rva .LSEH_info_${PREFIX}_set_encrypt_key
+
+ .rva .LSEH_begin_${PREFIX}_set_decrypt_key
+ .rva .LSEH_end_${PREFIX}_set_decrypt_key
+ .rva .LSEH_info_${PREFIX}_set_decrypt_key
+
+ .rva .LSEH_begin_${PREFIX}_encrypt
+ .rva .LSEH_end_${PREFIX}_encrypt
+ .rva .LSEH_info_${PREFIX}_encrypt
+
+ .rva .LSEH_begin_${PREFIX}_decrypt
+ .rva .LSEH_end_${PREFIX}_decrypt
+ .rva .LSEH_info_${PREFIX}_decrypt
+
+ .rva .LSEH_begin_${PREFIX}_cbc_encrypt
+ .rva .LSEH_end_${PREFIX}_cbc_encrypt
+ .rva .LSEH_info_${PREFIX}_cbc_encrypt
+
+.section .xdata
+.align 8
+.LSEH_info_${PREFIX}_set_encrypt_key:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lenc_key_body,.Lenc_key_epilogue # HandlerData[]
+.LSEH_info_${PREFIX}_set_decrypt_key:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Ldec_key_body,.Ldec_key_epilogue # HandlerData[]
+.LSEH_info_${PREFIX}_encrypt:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lenc_body,.Lenc_epilogue # HandlerData[]
+.LSEH_info_${PREFIX}_decrypt:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Ldec_body,.Ldec_epilogue # HandlerData[]
+.LSEH_info_${PREFIX}_cbc_encrypt:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lcbc_body,.Lcbc_epilogue # HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;