path: root/openssl/crypto/aes/asm
author    marha <marha@users.sourceforge.net>  2010-03-30 12:36:28 +0000
committer marha <marha@users.sourceforge.net>  2010-03-30 12:36:28 +0000
commit    ff48c0d9098080b51ea12710029135916d117806 (patch)
tree      96e6af9caf170ba21a1027b24e306a07e27d7b75 /openssl/crypto/aes/asm
parent    bb731f5ac92655c4860a41fa818a7a63005f8369 (diff)
svn merge -r514:HEAD ^/branches/released .
Diffstat (limited to 'openssl/crypto/aes/asm')
-rw-r--r--  openssl/crypto/aes/asm/aes-586.pl    2401
-rw-r--r--  openssl/crypto/aes/asm/aes-armv4.pl     1
-rw-r--r--  openssl/crypto/aes/asm/aes-s390x.pl     6
-rw-r--r--  openssl/crypto/aes/asm/aes-x86_64.pl 2012
4 files changed, 3552 insertions, 868 deletions
diff --git a/openssl/crypto/aes/asm/aes-586.pl b/openssl/crypto/aes/asm/aes-586.pl
index 3bc46a968..aab40e6f1 100644
--- a/openssl/crypto/aes/asm/aes-586.pl
+++ b/openssl/crypto/aes/asm/aes-586.pl
@@ -2,11 +2,12 @@
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
-# Version 3.6.
+# Version 4.3.
#
# You might fail to appreciate this module performance from the first
# try. If compared to "vanilla" linux-ia32-icc target, i.e. considered
@@ -81,11 +82,117 @@
# AMD K8 20 19
# PIII 25 23
# Pentium 81 78
-
-push(@INC,"perlasm","../../perlasm");
+#
+# Version 3.7 reimplements outer rounds as "compact," meaning that the
+# first and last rounds reference the compact 256-byte S-box. This
+# means that the first round consumes a lot more CPU cycles and that
+# encrypt and decrypt performance becomes asymmetric: encrypt drops by
+# 10-12%, while decrypt drops by 20-25%:-( The 256-byte S-box is
+# aggressively pre-fetched.
+#
+# Version 4.0 effectively rolls back to 3.6 and instead implements an
+# additional set of functions, _[x86|sse]_AES_[en|de]crypt_compact,
+# which use exclusively the 256-byte S-box. These functions are to be
+# called in modes not concealing plain text, such as ECB, or when
+# we're asked to process a smaller amount of data [or unconditionally
+# on a hyper-threading CPU]. Currently they are called unconditionally
+# from AES_[en|de]crypt, which affects all modes but CBC. The CBC
+# routine still needs to be modified to switch between the slower and
+# faster mode when appropriate... But in either case the benchmark
+# landscape changes dramatically and the numbers below are CPU cycles
+# per processed byte for a 128-bit key.
+#
+# ECB encrypt ECB decrypt CBC large chunk
+# P4 56[60] 84[100] 23
+# AMD K8 48[44] 70[79] 18
+# PIII 41[50] 61[91] 24
+# Core 2 32[38] 45[70] 18.5
+# Pentium 120 160 77
+#
+# Version 4.1 switches to compact S-box even in key schedule setup.
+#
+# Version 4.2 prefetches the compact S-box in every SSE round, or in
+# other words every cache-line is *guaranteed* to be accessed within a
+# ~50-cycle window. Why just SSE? Because it's needed on hyper-threading
+# CPUs! Which is also why it's prefetched with a 64-byte stride. The
+# best part is that it has no negative effect on performance:-)
+#
+# Version 4.3 implements a switch between the compact and non-compact
+# block functions in AES_cbc_encrypt depending on how much data is to
+# be processed in one stroke.
+#
+######################################################################
+# Timing attacks fall into two classes: synchronous, when the attacker
+# consciously initiates a cryptographic operation and collects timing
+# data of various character afterwards, and asynchronous, when malicious
+# code is executed on the same CPU simultaneously with AES, instruments
+# itself and performs statistical analysis of this data.
+#
+# As far as synchronous attacks go, the root of the AES timing
+# vulnerability is twofold. Firstly, of the 256 S-box elements at most
+# 160 are referred to in a single 128-bit block operation. Well, in a
+# C implementation with 4 distinct tables it's actually as little as
+# 40 references per 256-element table, but anyway... Secondly, even
+# though S-box elements are clustered into a smaller number of cache-
+# lines, smaller than 160 and even 40, it turned out that for certain
+# plain-text pattern[s], or simply put chosen plain-text and a given
+# key, a few cache-lines remain unaccessed during a block operation.
+# Now, if the attacker can figure out this access pattern, he can
+# deduce the key [or at least part of it]. The natural way to mitigate
+# this kind of attack is to minimize the number of cache-lines in the
+# S-box and/or prefetch them to ensure that every one is accessed, for
+# more uniform timing. But note that *if* the plain-text is concealed
+# in such a way that input to the block function is distributed
+# *uniformly*, then the attack doesn't apply. Now note that some
+# encryption modes, most notably CBC, do mask the plain-text in this
+# exact way [secure cipher output is distributed uniformly]. Yes, one
+# still might find input that would reveal information about a given
+# key, but if the number of candidate inputs to be tried is larger
+# than the number of possible key combinations then the attack becomes
+# infeasible. This is why the revised AES_cbc_encrypt "dares" to
+# switch to the larger S-box when a larger chunk of data is to be
+# processed in one stroke. The current 512-byte size limit is chosen
+# to provide the same [diminishingly low] probability for a cache-line
+# to remain untouched in a large-chunk operation with the large S-box
+# as for a single block operation with the compact S-box, and surely
+# needs more careful consideration...
+#
+# As for asynchronous attacks, there are two flavours: attacker code
+# being interleaved with AES on a hyper-threading CPU at *instruction*
+# level, and two processes time-sharing a single core. As for the
+# latter, there are two vectors. 1. Given that the attacker process has
+# higher priority, yield execution to the process performing AES just
+# before the timer fires off the scheduler, immediately regain control
+# of the CPU and analyze the cache state. For this attack to be
+# efficient the attacker would have to slow down the operation by
+# several *orders* of magnitude, by the ratio of the time slice to the
+# duration of a handful of AES rounds, which is unlikely to remain
+# unnoticed. Not to mention that he would also spend correspondingly
+# more time collecting enough statistical data to mount the attack.
+# It's probably fair to say that if an adversary reckons this attack is
+# beneficial and worth the risk of being noticed, you have larger
+# problems than merely giving him the opportunity. In other words the
+# suggested code design expects you to preclude/mitigate this attack by
+# overall system security design. 2. The attacker manages to make his
+# code interrupt-driven. For this kind of attack to be feasible, the
+# interrupt rate has to be high enough, again comparable to the
+# duration of a handful of AES rounds. But is there an interrupt source
+# of such rate? Hardly; not even a 1Gbps NIC generates interrupts at
+# such a raging rate...
+#
+# And now back to the former: the hyper-threading CPU, or more
+# specifically Intel P4. Recall that an asynchronous attack implies
+# that malicious code instruments itself, and naturally the
+# instrumentation granularity has to be noticeably finer than the
+# duration of the codepath accessing the S-box [given, that is, that
+# all cache-lines are accessed within that time]. The current
+# implementation accesses *all* cache-lines within a ~50-cycle window,
+# which is actually *less* than RDTSC latency on Intel P4!
+
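[Aside, not part of the patch: the cache-line argument above can be sanity-checked with a few lines of Perl. Assuming 64-byte lines and uniformly distributed table indices (both assumptions are mine), the probability that a given line stays untouched is (1 - 1/lines)^refs:

    #!/usr/bin/env perl
    # untouched-line probability under a uniform-index toy model
    my @cases = (
        [ "compact S-box, one block",  256/64, 160         ],
        [ "1KB Te table, one block",  1024/64, 40          ],
        [ "1KB Te table, 512B chunk", 1024/64, 40*(512/16) ],
    );
    for my $c (@cases) {
        my ($desc, $lines, $refs) = @$c;
        printf "%-28s %2d lines, Pr[untouched] ~ %.1e\n",
               $desc, $lines, (1 - 1/$lines)**$refs;
    }

Under this toy model the compact case comes out around 1e-20 per line and the 512-byte-chunk case around 1e-36; both are negligible, consistent with the "diminishingly low" remark above.]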
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
-&asm_init($ARGV[0],"aes-586.pl",$ARGV[$#ARGV] eq "386");
+&asm_init($ARGV[0],"aes-586.pl",$x86only = $ARGV[$#ARGV] eq "386");
+&static_label("AES_Te");
+&static_label("AES_Td");
$s0="eax";
$s1="ebx";
@@ -93,21 +200,36 @@ $s2="ecx";
$s3="edx";
$key="edi";
$acc="esi";
+$tbl="ebp";
+
+# stack frame layout in _[x86|sse]_AES_* routines, frame is allocated
+# by caller
+$__ra=&DWP(0,"esp"); # return address
+$__s0=&DWP(4,"esp"); # s0 backing store
+$__s1=&DWP(8,"esp"); # s1 backing store
+$__s2=&DWP(12,"esp"); # s2 backing store
+$__s3=&DWP(16,"esp"); # s3 backing store
+$__key=&DWP(20,"esp"); # pointer to key schedule
+$__end=&DWP(24,"esp"); # pointer to end of key schedule
+$__tbl=&DWP(28,"esp"); # %ebp backing store
+
+# stack frame layout in AES_[en|de]crypt routines, which differs from
+# the above by 4 and overlaps with it at the %ebp backing store
+$_tbl=&DWP(24,"esp");
+$_esp=&DWP(28,"esp");
-$compromise=0; # $compromise=128 abstains from copying key
- # schedule to stack when encrypting inputs
- # shorter than 128 bytes at the cost of
- # risksing aliasing with S-boxes. In return
- # you get way better, up to +70%, small block
- # performance.
+sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
+
+$speed_limit=512; # chunks smaller than $speed_limit are
+ # processed with compact routine in CBC mode
$small_footprint=1; # $small_footprint=1 code is ~5% slower [on
# recent µ-archs], but ~5 times smaller!
# I favor compact code to minimize cache
# contention and in hope to "collect" 5% back
# in real-life applications...
+
$vertical_spin=0; # shift "vertically" defaults to 0, because of
# its proof-of-concept status...
-
# Note that there is no decvert(), and that the last encryption round
# is performed with "horizontal" shifts. This is because this "vertical"
# implementation [one which groups shifts on a given $s[i] to form a
@@ -170,17 +292,484 @@ sub encvert()
&movz ($v0,&HB($v1));
&and ($v1,0xFF);
&xor ($s[3],&DWP(2,$te,$v1,8)); # s1>>16
- &mov ($key,&DWP(12,"esp")); # reincarnate v1 as key
+ &mov ($key,$__key); # reincarnate v1 as key
&xor ($s[2],&DWP(1,$te,$v0,8)); # s1>>24
}
+# Another experimental routine, featuring a "horizontal spin," which
+# eliminates one stack reference. Strangely enough it runs slower...
+sub enchoriz()
+{ my $v0 = $key, $v1 = $acc;
+
+ &movz ($v0,&LB($s0)); # 3, 2, 1, 0*
+ &rotr ($s2,8); # 8,11,10, 9
+ &mov ($v1,&DWP(0,$te,$v0,8)); # 0
+ &movz ($v0,&HB($s1)); # 7, 6, 5*, 4
+ &rotr ($s3,16); # 13,12,15,14
+ &xor ($v1,&DWP(3,$te,$v0,8)); # 5
+ &movz ($v0,&HB($s2)); # 8,11,10*, 9
+ &rotr ($s0,16); # 1, 0, 3, 2
+ &xor ($v1,&DWP(2,$te,$v0,8)); # 10
+ &movz ($v0,&HB($s3)); # 13,12,15*,14
+ &xor ($v1,&DWP(1,$te,$v0,8)); # 15, t[0] collected
+ &mov ($__s0,$v1); # t[0] saved
+
+ &movz ($v0,&LB($s1)); # 7, 6, 5, 4*
+ &shr ($s1,16); # -, -, 7, 6
+ &mov ($v1,&DWP(0,$te,$v0,8)); # 4
+ &movz ($v0,&LB($s3)); # 13,12,15,14*
+ &xor ($v1,&DWP(2,$te,$v0,8)); # 14
+ &movz ($v0,&HB($s0)); # 1, 0, 3*, 2
+ &and ($s3,0xffff0000); # 13,12, -, -
+ &xor ($v1,&DWP(1,$te,$v0,8)); # 3
+ &movz ($v0,&LB($s2)); # 8,11,10, 9*
+ &or ($s3,$s1); # 13,12, 7, 6
+ &xor ($v1,&DWP(3,$te,$v0,8)); # 9, t[1] collected
+ &mov ($s1,$v1); # s[1]=t[1]
+
+ &movz ($v0,&LB($s0)); # 1, 0, 3, 2*
+ &shr ($s2,16); # -, -, 8,11
+ &mov ($v1,&DWP(2,$te,$v0,8)); # 2
+ &movz ($v0,&HB($s3)); # 13,12, 7*, 6
+ &xor ($v1,&DWP(1,$te,$v0,8)); # 7
+ &movz ($v0,&HB($s2)); # -, -, 8*,11
+ &xor ($v1,&DWP(0,$te,$v0,8)); # 8
+ &mov ($v0,$s3);
+ &shr ($v0,24); # 13
+ &xor ($v1,&DWP(3,$te,$v0,8)); # 13, t[2] collected
+
+ &movz ($v0,&LB($s2)); # -, -, 8,11*
+ &shr ($s0,24); # 1*
+ &mov ($s2,&DWP(1,$te,$v0,8)); # 11
+ &xor ($s2,&DWP(3,$te,$s0,8)); # 1
+ &mov ($s0,$__s0); # s[0]=t[0]
+ &movz ($v0,&LB($s3)); # 13,12, 7, 6*
+ &shr ($s3,16); # , ,13,12
+ &xor ($s2,&DWP(2,$te,$v0,8)); # 6
+ &mov ($key,$__key); # reincarnate v0 as key
+ &and ($s3,0xff); # , ,13,12*
+ &mov ($s3,&DWP(0,$te,$s3,8)); # 12
+ &xor ($s3,$s2); # s[2]=t[3] collected
+ &mov ($s2,$v1); # s[2]=t[2]
+}
+
+# More experimental code... an SSE variant... Even though this one
+# eliminates *all* references to the stack, it's not faster...
+sub sse_encbody()
+{
+ &movz ($acc,&LB("eax")); # 0
+ &mov ("ecx",&DWP(0,$tbl,$acc,8)); # 0
+ &pshufw ("mm2","mm0",0x0d); # 7, 6, 3, 2
+ &movz ("edx",&HB("eax")); # 1
+ &mov ("edx",&DWP(3,$tbl,"edx",8)); # 1
+ &shr ("eax",16); # 5, 4
+
+ &movz ($acc,&LB("ebx")); # 10
+ &xor ("ecx",&DWP(2,$tbl,$acc,8)); # 10
+ &pshufw ("mm6","mm4",0x08); # 13,12, 9, 8
+ &movz ($acc,&HB("ebx")); # 11
+ &xor ("edx",&DWP(1,$tbl,$acc,8)); # 11
+ &shr ("ebx",16); # 15,14
+
+ &movz ($acc,&HB("eax")); # 5
+ &xor ("ecx",&DWP(3,$tbl,$acc,8)); # 5
+ &movq ("mm3",&QWP(16,$key));
+ &movz ($acc,&HB("ebx")); # 15
+ &xor ("ecx",&DWP(1,$tbl,$acc,8)); # 15
+ &movd ("mm0","ecx"); # t[0] collected
+
+ &movz ($acc,&LB("eax")); # 4
+ &mov ("ecx",&DWP(0,$tbl,$acc,8)); # 4
+ &movd ("eax","mm2"); # 7, 6, 3, 2
+ &movz ($acc,&LB("ebx")); # 14
+ &xor ("ecx",&DWP(2,$tbl,$acc,8)); # 14
+ &movd ("ebx","mm6"); # 13,12, 9, 8
+
+ &movz ($acc,&HB("eax")); # 3
+ &xor ("ecx",&DWP(1,$tbl,$acc,8)); # 3
+ &movz ($acc,&HB("ebx")); # 9
+ &xor ("ecx",&DWP(3,$tbl,$acc,8)); # 9
+ &movd ("mm1","ecx"); # t[1] collected
+
+ &movz ($acc,&LB("eax")); # 2
+ &mov ("ecx",&DWP(2,$tbl,$acc,8)); # 2
+ &shr ("eax",16); # 7, 6
+ &punpckldq ("mm0","mm1"); # t[0,1] collected
+ &movz ($acc,&LB("ebx")); # 8
+ &xor ("ecx",&DWP(0,$tbl,$acc,8)); # 8
+ &shr ("ebx",16); # 13,12
+
+ &movz ($acc,&HB("eax")); # 7
+ &xor ("ecx",&DWP(1,$tbl,$acc,8)); # 7
+ &pxor ("mm0","mm3");
+ &movz ("eax",&LB("eax")); # 6
+ &xor ("edx",&DWP(2,$tbl,"eax",8)); # 6
+ &pshufw ("mm1","mm0",0x08); # 5, 4, 1, 0
+ &movz ($acc,&HB("ebx")); # 13
+ &xor ("ecx",&DWP(3,$tbl,$acc,8)); # 13
+ &xor ("ecx",&DWP(24,$key)); # t[2]
+ &movd ("mm4","ecx"); # t[2] collected
+ &movz ("ebx",&LB("ebx")); # 12
+ &xor ("edx",&DWP(0,$tbl,"ebx",8)); # 12
+ &shr ("ecx",16);
+ &movd ("eax","mm1"); # 5, 4, 1, 0
+ &mov ("ebx",&DWP(28,$key)); # t[3]
+ &xor ("ebx","edx");
+ &movd ("mm5","ebx"); # t[3] collected
+ &and ("ebx",0xffff0000);
+ &or ("ebx","ecx");
+
+ &punpckldq ("mm4","mm5"); # t[2,3] collected
+}
+
+######################################################################
+# "Compact" block function
+######################################################################
+
+sub enccompact()
+{ my $Fn = mov;
+ while ($#_>5) { pop(@_); $Fn=sub{}; }
+ my ($i,$te,@s)=@_;
+ my $tmp = $key;
+ my $out = $i==3?$s[0]:$acc;
+
+ # $Fn is used in the first compact round and its purpose is to
+ # avoid restoring some values from the stack, so that after
+ # 4xenccompact with the extra argument, the $key value is left there...
+ if ($i==3) { &$Fn ($key,$__key); }##%edx
+ else { &mov ($out,$s[0]); }
+ &and ($out,0xFF);
+ if ($i==1) { &shr ($s[0],16); }#%ebx[1]
+ if ($i==2) { &shr ($s[0],24); }#%ecx[2]
+ &movz ($out,&BP(-128,$te,$out,1));
+
+ if ($i==3) { $tmp=$s[1]; }##%eax
+ &movz ($tmp,&HB($s[1]));
+ &movz ($tmp,&BP(-128,$te,$tmp,1));
+ &shl ($tmp,8);
+ &xor ($out,$tmp);
+
+ if ($i==3) { $tmp=$s[2]; &mov ($s[1],$__s0); }##%ebx
+ else { &mov ($tmp,$s[2]);
+ &shr ($tmp,16); }
+ if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
+ &and ($tmp,0xFF);
+ &movz ($tmp,&BP(-128,$te,$tmp,1));
+ &shl ($tmp,16);
+ &xor ($out,$tmp);
+
+ if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
+ elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
+ else { &mov ($tmp,$s[3]);
+ &shr ($tmp,24); }
+ &movz ($tmp,&BP(-128,$te,$tmp,1));
+ &shl ($tmp,24);
+ &xor ($out,$tmp);
+ if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
+ if ($i==3) { &mov ($s[3],$acc); }
+ &comment();
+}
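[For orientation, an editorial sketch, not part of the patch: each enccompact($i,...) column gathers four S-box bytes along the ShiftRows diagonal. In plain Perl, with $S a reference to the 256-entry Te4 table and invented names, the value assembled for the first column is:

    sub enc_compact_word {
        my ($S, $s0, $s1, $s2, $s3) = @_;
        return  $S->[ $s0        & 0xff]            # input byte 0
             | ($S->[($s1 >>  8) & 0xff] <<  8)     # input byte 5
             | ($S->[($s2 >> 16) & 0xff] << 16)     # input byte 10
             | ($S->[($s3 >> 24) & 0xff] << 24);    # input byte 15
    }

The BP(-128,$te,...) addressing in the real code is the same lookup with the table pointer biased by +128, apparently so that the displacement stays within signed 8-bit range for all 256 entries.]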
+
+sub enctransform()
+{ my @s = ($s0,$s1,$s2,$s3);
+ my $i = shift;
+ my $tmp = $tbl;
+ my $r2 = $key ;
+
+ &mov ($acc,$s[$i]);
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($r2,&DWP(0,$s[$i],$s[$i]));
+ &sub ($acc,$tmp);
+ &and ($r2,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &mov ($tmp,$s[$i]);
+ &xor ($acc,$r2); # r2
+
+ &xor ($s[$i],$acc); # r0 ^ r2
+ &rotl ($s[$i],24);
+ &xor ($s[$i],$acc); # ROTATE(r2^r0,24) ^ r2
+ &rotr ($tmp,16);
+ &xor ($s[$i],$tmp);
+ &rotr ($tmp,8);
+ &xor ($s[$i],$tmp);
+}
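[The 0x80808080/0xfefefefe/0x1b1b1b1b arithmetic above is the standard trick for doubling four GF(2^8) elements packed in one 32-bit word. A standalone Perl rendition, editorial sketch with invented names:

    sub xtime4 {                    # xtime() on four packed bytes at once
        my $x    = shift;
        my $hi   = $x & 0x80808080;                    # msb of each byte
        my $mask = ($hi - ($hi >> 7)) & 0x1b1b1b1b;    # 0x1b where msb set
        my $r2   = ($x << 1) & 0xfefefefe;             # per-byte x<<1
        return $r2 ^ $mask;           # reduce modulo x^8+x^4+x^3+x+1
    }
    printf "%08x\n", xtime4(0x80024001);    # prints 1b048002

enctransform() then folds the doubled word into MixColumns with the two rotates; the SSE epilogue further down performs the same per-byte doubling with pcmpgtb/pand/paddb/pxor against the 0x1b1b1b1b constant parked at 8(%esp).]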
+
+&function_begin_B("_x86_AES_encrypt_compact");
+ # note that caller is expected to allocate stack frame for me!
+ &mov ($__key,$key); # save key
+
+ &xor ($s0,&DWP(0,$key)); # xor with key
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &mov ($acc,&DWP(240,$key)); # load key->rounds
+ &lea ($acc,&DWP(-2,$acc,$acc));
+ &lea ($acc,&DWP(0,$key,$acc,8));
+ &mov ($__end,$acc); # end of key schedule
+
+ # prefetch Te4
+ &mov ($key,&DWP(0-128,$tbl));
+ &mov ($acc,&DWP(32-128,$tbl));
+ &mov ($key,&DWP(64-128,$tbl));
+ &mov ($acc,&DWP(96-128,$tbl));
+ &mov ($key,&DWP(128-128,$tbl));
+ &mov ($acc,&DWP(160-128,$tbl));
+ &mov ($key,&DWP(192-128,$tbl));
+ &mov ($acc,&DWP(224-128,$tbl));
+
+ &set_label("loop",16);
+
+ &enccompact(0,$tbl,$s0,$s1,$s2,$s3,1);
+ &enccompact(1,$tbl,$s1,$s2,$s3,$s0,1);
+ &enccompact(2,$tbl,$s2,$s3,$s0,$s1,1);
+ &enccompact(3,$tbl,$s3,$s0,$s1,$s2,1);
+ &enctransform(2);
+ &enctransform(3);
+ &enctransform(0);
+ &enctransform(1);
+ &mov ($key,$__key);
+ &mov ($tbl,$__tbl);
+ &add ($key,16); # advance rd_key
+ &xor ($s0,&DWP(0,$key));
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &cmp ($key,$__end);
+ &mov ($__key,$key);
+ &jb (&label("loop"));
+
+ &enccompact(0,$tbl,$s0,$s1,$s2,$s3);
+ &enccompact(1,$tbl,$s1,$s2,$s3,$s0);
+ &enccompact(2,$tbl,$s2,$s3,$s0,$s1);
+ &enccompact(3,$tbl,$s3,$s0,$s1,$s2);
+
+ &xor ($s0,&DWP(16,$key));
+ &xor ($s1,&DWP(20,$key));
+ &xor ($s2,&DWP(24,$key));
+ &xor ($s3,&DWP(28,$key));
+
+ &ret ();
+&function_end_B("_x86_AES_encrypt_compact");
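[Editorial note: the two lea instructions that set $__end above avoid a multiply. With $acc holding key->rounds:

    # lea $acc,[$acc+$acc-2]    ->  $acc = 2*rounds - 2
    # lea $acc,[$key+$acc*8]    ->  $acc = $key + 16*(rounds - 1)
    my ($key, $rounds) = (0x1000, 10);       # illustrative values
    my $end = $key + 8*(2*$rounds - 2);      # = $key + 144 for AES-128

so $__end = $key + 16*(rounds-1); the main loop stops there, having run rounds-1 times, and the final pass picks up the last round key at 16($key).]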
+
+######################################################################
+# "Compact" SSE block function.
+######################################################################
+#
+# Performance is not actually extraordinary in comparison to pure x86
+# code. In particular encrypt performance is virtually the same.
+# Decrypt performance, on the other hand, is 15-20% better on newer
+# µ-archs [but we're thankful for *any* improvement here], and ~50%
+# better on PIII:-) An additional point on the pros side is that this
+# code eliminates redundant references to the stack and thus relieves/
+# minimizes the pressure on the memory bus.
+#
+# MMX register layout lsb
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | mm4 | mm0 |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | s3 | s2 | s1 | s0 |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#
+# Indexes translate as s[N/4]>>(8*(N%4)), e.g. 5 means s1>>8.
+# In these terms the encryption and decryption "compact" permutation
+# matrices can be depicted as follows:
+#
+# encryption lsb # decryption lsb
+# +----++----+----+----+----+ # +----++----+----+----+----+
+# | t0 || 15 | 10 | 5 | 0 | # | t0 || 7 | 10 | 13 | 0 |
+# +----++----+----+----+----+ # +----++----+----+----+----+
+# | t1 || 3 | 14 | 9 | 4 | # | t1 || 11 | 14 | 1 | 4 |
+# +----++----+----+----+----+ # +----++----+----+----+----+
+# | t2 || 7 | 2 | 13 | 8 | # | t2 || 15 | 2 | 5 | 8 |
+# +----++----+----+----+----+ # +----++----+----+----+----+
+# | t3 || 11 | 6 | 1 | 12 | # | t3 || 3 | 6 | 9 | 12 |
+# +----++----+----+----+----+ # +----++----+----+----+----+
+#
+######################################################################
+# Why not xmm registers? Short answer: it was actually tested and was
+# not any faster but, on the *contrary*, slower, most notably on Intel
+# CPUs. Longer answer: the main advantage of using mm registers is that
+# movd latency is lower, especially on Intel P4. While there are twice
+# as many arithmetic instructions, they can be scheduled every cycle,
+# and not every second one as when they operate on xmm registers, so
+# that "arithmetic throughput" remains virtually the same. And finally
+# the code can be executed even on older SSE-only CPUs:-)
+
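[Editorial aside: both matrices are just ShiftRows and InvShiftRows in this numbering: byte k of t[j] comes from input byte (4j+5k) mod 16 for encryption and (4j+13k) mod 16 for decryption. A short Perl check reproduces every row, read lsb to msb:

    for my $j (0..3) {
        printf "enc t%d: %-12s dec t%d: %s\n",
            $j, join(",", map { (4*$j +  5*$_) % 16 } 0..3),
            $j, join(",", map { (4*$j + 13*$_) % 16 } 0..3);
    }
]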
+sub sse_enccompact()
+{
+ &pshufw ("mm1","mm0",0x08); # 5, 4, 1, 0
+ &pshufw ("mm5","mm4",0x0d); # 15,14,11,10
+ &movd ("eax","mm1"); # 5, 4, 1, 0
+ &movd ("ebx","mm5"); # 15,14,11,10
+
+ &movz ($acc,&LB("eax")); # 0
+ &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 0
+ &pshufw ("mm2","mm0",0x0d); # 7, 6, 3, 2
+ &movz ("edx",&HB("eax")); # 1
+ &movz ("edx",&BP(-128,$tbl,"edx",1)); # 1
+ &shl ("edx",8); # 1
+ &shr ("eax",16); # 5, 4
+
+ &movz ($acc,&LB("ebx")); # 10
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 10
+ &shl ($acc,16); # 10
+ &or ("ecx",$acc); # 10
+ &pshufw ("mm6","mm4",0x08); # 13,12, 9, 8
+ &movz ($acc,&HB("ebx")); # 11
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 11
+ &shl ($acc,24); # 11
+ &or ("edx",$acc); # 11
+ &shr ("ebx",16); # 15,14
+
+ &movz ($acc,&HB("eax")); # 5
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 5
+ &shl ($acc,8); # 5
+ &or ("ecx",$acc); # 5
+ &movz ($acc,&HB("ebx")); # 15
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 15
+ &shl ($acc,24); # 15
+ &or ("ecx",$acc); # 15
+ &movd ("mm0","ecx"); # t[0] collected
+
+ &movz ($acc,&LB("eax")); # 4
+ &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 4
+ &movd ("eax","mm2"); # 7, 6, 3, 2
+ &movz ($acc,&LB("ebx")); # 14
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 14
+ &shl ($acc,16); # 14
+ &or ("ecx",$acc); # 14
+
+ &movd ("ebx","mm6"); # 13,12, 9, 8
+ &movz ($acc,&HB("eax")); # 3
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 3
+ &shl ($acc,24); # 3
+ &or ("ecx",$acc); # 3
+ &movz ($acc,&HB("ebx")); # 9
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 9
+ &shl ($acc,8); # 9
+ &or ("ecx",$acc); # 9
+ &movd ("mm1","ecx"); # t[1] collected
+
+ &movz ($acc,&LB("ebx")); # 8
+ &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 8
+ &shr ("ebx",16); # 13,12
+ &movz ($acc,&LB("eax")); # 2
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 2
+ &shl ($acc,16); # 2
+ &or ("ecx",$acc); # 2
+ &shr ("eax",16); # 7, 6
+
+ &punpckldq ("mm0","mm1"); # t[0,1] collected
+
+ &movz ($acc,&HB("eax")); # 7
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 7
+ &shl ($acc,24); # 7
+ &or ("ecx",$acc); # 7
+ &and ("eax",0xff); # 6
+ &movz ("eax",&BP(-128,$tbl,"eax",1)); # 6
+ &shl ("eax",16); # 6
+ &or ("edx","eax"); # 6
+ &movz ($acc,&HB("ebx")); # 13
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 13
+ &shl ($acc,8); # 13
+ &or ("ecx",$acc); # 13
+ &movd ("mm4","ecx"); # t[2] collected
+ &and ("ebx",0xff); # 12
+ &movz ("ebx",&BP(-128,$tbl,"ebx",1)); # 12
+ &or ("edx","ebx"); # 12
+ &movd ("mm5","edx"); # t[3] collected
+
+ &punpckldq ("mm4","mm5"); # t[2,3] collected
+}
+
+ if (!$x86only) {
+&function_begin_B("_sse_AES_encrypt_compact");
+ &pxor ("mm0",&QWP(0,$key)); # 7, 6, 5, 4, 3, 2, 1, 0
+ &pxor ("mm4",&QWP(8,$key)); # 15,14,13,12,11,10, 9, 8
+
+ # note that caller is expected to allocate stack frame for me!
+ &mov ($acc,&DWP(240,$key)); # load key->rounds
+ &lea ($acc,&DWP(-2,$acc,$acc));
+ &lea ($acc,&DWP(0,$key,$acc,8));
+ &mov ($__end,$acc); # end of key schedule
+
+ &mov ($s0,0x1b1b1b1b); # magic constant
+ &mov (&DWP(8,"esp"),$s0);
+ &mov (&DWP(12,"esp"),$s0);
+
+ # prefetch Te4
+ &mov ($s0,&DWP(0-128,$tbl));
+ &mov ($s1,&DWP(32-128,$tbl));
+ &mov ($s2,&DWP(64-128,$tbl));
+ &mov ($s3,&DWP(96-128,$tbl));
+ &mov ($s0,&DWP(128-128,$tbl));
+ &mov ($s1,&DWP(160-128,$tbl));
+ &mov ($s2,&DWP(192-128,$tbl));
+ &mov ($s3,&DWP(224-128,$tbl));
+
+ &set_label("loop",16);
+ &sse_enccompact();
+ &add ($key,16);
+ &cmp ($key,$__end);
+ &ja (&label("out"));
+
+ &movq ("mm2",&QWP(8,"esp"));
+ &pxor ("mm3","mm3"); &pxor ("mm7","mm7");
+ &movq ("mm1","mm0"); &movq ("mm5","mm4"); # r0
+ &pcmpgtb("mm3","mm0"); &pcmpgtb("mm7","mm4");
+ &pand ("mm3","mm2"); &pand ("mm7","mm2");
+ &pshufw ("mm2","mm0",0xb1); &pshufw ("mm6","mm4",0xb1);# ROTATE(r0,16)
+ &paddb ("mm0","mm0"); &paddb ("mm4","mm4");
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # = r2
+ &pshufw ("mm3","mm2",0xb1); &pshufw ("mm7","mm6",0xb1);# r0
+ &pxor ("mm1","mm0"); &pxor ("mm5","mm4"); # r0^r2
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= ROTATE(r0,16)
+
+ &movq ("mm2","mm3"); &movq ("mm6","mm7");
+ &pslld ("mm3",8); &pslld ("mm7",8);
+ &psrld ("mm2",24); &psrld ("mm6",24);
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= r0<<8
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= r0>>24
+
+ &movq ("mm3","mm1"); &movq ("mm7","mm5");
+ &movq ("mm2",&QWP(0,$key)); &movq ("mm6",&QWP(8,$key));
+ &psrld ("mm1",8); &psrld ("mm5",8);
+ &mov ($s0,&DWP(0-128,$tbl));
+ &pslld ("mm3",24); &pslld ("mm7",24);
+ &mov ($s1,&DWP(64-128,$tbl));
+ &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= (r2^r0)<<8
+ &mov ($s2,&DWP(128-128,$tbl));
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= (r2^r0)>>24
+ &mov ($s3,&DWP(192-128,$tbl));
+
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6");
+ &jmp (&label("loop"));
+
+ &set_label("out",16);
+ &pxor ("mm0",&QWP(0,$key));
+ &pxor ("mm4",&QWP(8,$key));
+
+ &ret ();
+&function_end_B("_sse_AES_encrypt_compact");
+ }
+
+######################################################################
+# Vanilla block function.
+######################################################################
+
sub encstep()
{ my ($i,$te,@s) = @_;
my $tmp = $key;
my $out = $i==3?$s[0]:$acc;
# lines marked with #%e?x[i] denote "reordered" instructions...
- if ($i==3) { &mov ($key,&DWP(12,"esp")); }##%edx
+ if ($i==3) { &mov ($key,$__key); }##%edx
else { &mov ($out,$s[0]);
&and ($out,0xFF); }
if ($i==1) { &shr ($s[0],16); }#%ebx[1]
@@ -191,14 +780,14 @@ sub encstep()
&movz ($tmp,&HB($s[1]));
&xor ($out,&DWP(3,$te,$tmp,8));
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],&DWP(4,"esp")); }##%ebx
+ if ($i==3) { $tmp=$s[2]; &mov ($s[1],$__s0); }##%ebx
else { &mov ($tmp,$s[2]);
&shr ($tmp,16); }
if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
&and ($tmp,0xFF);
&xor ($out,&DWP(2,$te,$tmp,8));
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }##%ecx
+ if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
else { &mov ($tmp,$s[3]);
&shr ($tmp,24) }
@@ -213,7 +802,7 @@ sub enclast()
my $tmp = $key;
my $out = $i==3?$s[0]:$acc;
- if ($i==3) { &mov ($key,&DWP(12,"esp")); }##%edx
+ if ($i==3) { &mov ($key,$__key); }##%edx
else { &mov ($out,$s[0]); }
&and ($out,0xFF);
if ($i==1) { &shr ($s[0],16); }#%ebx[1]
@@ -227,8 +816,8 @@ sub enclast()
&and ($tmp,0x0000ff00);
&xor ($out,$tmp);
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],&DWP(4,"esp")); }##%ebx
- else { mov ($tmp,$s[2]);
+ if ($i==3) { $tmp=$s[2]; &mov ($s[1],$__s0); }##%ebx
+ else { &mov ($tmp,$s[2]);
&shr ($tmp,16); }
if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
&and ($tmp,0xFF);
@@ -236,7 +825,7 @@ sub enclast()
&and ($tmp,0x00ff0000);
&xor ($out,$tmp);
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }##%ecx
+ if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
else { &mov ($tmp,$s[3]);
&shr ($tmp,24); }
@@ -247,9 +836,6 @@ sub enclast()
if ($i==3) { &mov ($s[3],$acc); }
}
-sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
-
-&public_label("AES_Te");
&function_begin_B("_x86_AES_encrypt");
if ($vertical_spin) {
# I need high parts of volatile registers to be accessible...
@@ -258,7 +844,7 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
}
# note that caller is expected to allocate stack frame for me!
- &mov (&DWP(12,"esp"),$key); # save key
+ &mov ($__key,$key); # save key
&xor ($s0,&DWP(0,$key)); # xor with key
&xor ($s1,&DWP(4,$key));
@@ -270,24 +856,24 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
if ($small_footprint) {
&lea ($acc,&DWP(-2,$acc,$acc));
&lea ($acc,&DWP(0,$key,$acc,8));
- &mov (&DWP(16,"esp"),$acc); # end of key schedule
- &align (4);
- &set_label("loop");
+ &mov ($__end,$acc); # end of key schedule
+
+ &set_label("loop",16);
if ($vertical_spin) {
- &encvert("ebp",$s0,$s1,$s2,$s3);
+ &encvert($tbl,$s0,$s1,$s2,$s3);
} else {
- &encstep(0,"ebp",$s0,$s1,$s2,$s3);
- &encstep(1,"ebp",$s1,$s2,$s3,$s0);
- &encstep(2,"ebp",$s2,$s3,$s0,$s1);
- &encstep(3,"ebp",$s3,$s0,$s1,$s2);
+ &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+ &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+ &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+ &encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&add ($key,16); # advance rd_key
&xor ($s0,&DWP(0,$key));
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
- &cmp ($key,&DWP(16,"esp"));
- &mov (&DWP(12,"esp"),$key);
+ &cmp ($key,$__end);
+ &mov ($__key,$key);
&jb (&label("loop"));
}
else {
@@ -296,15 +882,15 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
&cmp ($acc,12);
&jle (&label("12rounds"));
- &set_label("14rounds");
+ &set_label("14rounds",4);
for ($i=1;$i<3;$i++) {
if ($vertical_spin) {
- &encvert("ebp",$s0,$s1,$s2,$s3);
+ &encvert($tbl,$s0,$s1,$s2,$s3);
} else {
- &encstep(0,"ebp",$s0,$s1,$s2,$s3);
- &encstep(1,"ebp",$s1,$s2,$s3,$s0);
- &encstep(2,"ebp",$s2,$s3,$s0,$s1);
- &encstep(3,"ebp",$s3,$s0,$s1,$s2);
+ &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+ &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+ &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+ &encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
@@ -312,16 +898,16 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
&xor ($s3,&DWP(16*$i+12,$key));
}
&add ($key,32);
- &mov (&DWP(12,"esp"),$key); # advance rd_key
- &set_label("12rounds");
+ &mov ($__key,$key); # advance rd_key
+ &set_label("12rounds",4);
for ($i=1;$i<3;$i++) {
if ($vertical_spin) {
- &encvert("ebp",$s0,$s1,$s2,$s3);
+ &encvert($tbl,$s0,$s1,$s2,$s3);
} else {
- &encstep(0,"ebp",$s0,$s1,$s2,$s3);
- &encstep(1,"ebp",$s1,$s2,$s3,$s0);
- &encstep(2,"ebp",$s2,$s3,$s0,$s1);
- &encstep(3,"ebp",$s3,$s0,$s1,$s2);
+ &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+ &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+ &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+ &encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
@@ -329,16 +915,16 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
&xor ($s3,&DWP(16*$i+12,$key));
}
&add ($key,32);
- &mov (&DWP(12,"esp"),$key); # advance rd_key
- &set_label("10rounds");
+ &mov ($__key,$key); # advance rd_key
+ &set_label("10rounds",4);
for ($i=1;$i<10;$i++) {
if ($vertical_spin) {
- &encvert("ebp",$s0,$s1,$s2,$s3);
+ &encvert($tbl,$s0,$s1,$s2,$s3);
} else {
- &encstep(0,"ebp",$s0,$s1,$s2,$s3);
- &encstep(1,"ebp",$s1,$s2,$s3,$s0);
- &encstep(2,"ebp",$s2,$s3,$s0,$s1);
- &encstep(3,"ebp",$s3,$s0,$s1,$s2);
+ &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+ &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+ &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+ &encstep(3,$tbl,$s3,$s0,$s1,$s2);
}
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
@@ -352,10 +938,10 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
&mov ($s1="ebx",$key="edi");
&mov ($s2="ecx",$acc="esi");
}
- &enclast(0,"ebp",$s0,$s1,$s2,$s3);
- &enclast(1,"ebp",$s1,$s2,$s3,$s0);
- &enclast(2,"ebp",$s2,$s3,$s0,$s1);
- &enclast(3,"ebp",$s3,$s0,$s1,$s2);
+ &enclast(0,$tbl,$s0,$s1,$s2,$s3);
+ &enclast(1,$tbl,$s1,$s2,$s3,$s0);
+ &enclast(2,$tbl,$s2,$s3,$s0,$s1);
+ &enclast(3,$tbl,$s3,$s0,$s1,$s2);
&add ($key,$small_footprint?16:160);
&xor ($s0,&DWP(0,$key));
@@ -430,38 +1016,198 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
+
+#Te4 # four copies of Te4 to choose from to avoid L1 aliasing
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
#rcon:
&data_word(0x00000001, 0x00000002, 0x00000004, 0x00000008);
&data_word(0x00000010, 0x00000020, 0x00000040, 0x00000080);
- &data_word(0x0000001b, 0x00000036, 0, 0, 0, 0, 0, 0);
+ &data_word(0x0000001b, 0x00000036, 0x00000000, 0x00000000);
+ &data_word(0x00000000, 0x00000000, 0x00000000, 0x00000000);
&function_end_B("_x86_AES_encrypt");
# void AES_encrypt (const void *inp,void *out,const AES_KEY *key);
-&public_label("AES_Te");
&function_begin("AES_encrypt");
&mov ($acc,&wparam(0)); # load inp
&mov ($key,&wparam(2)); # load key
&mov ($s0,"esp");
- &sub ("esp",24);
- &and ("esp",-64);
- &add ("esp",4);
- &mov (&DWP(16,"esp"),$s0);
+ &sub ("esp",36);
+ &and ("esp",-64); # align to cache-line
+
+ # place stack frame just "above" the key schedule
+ &lea ($s1,&DWP(-64-63,$key));
+ &sub ($s1,"esp");
+ &neg ($s1);
+ &and ($s1,0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ("esp",$s1);
+ &add ("esp",4); # 4 is reserved for caller's return address
+ &mov ($_esp,$s0); # save stack pointer
&call (&label("pic_point")); # make it PIC!
&set_label("pic_point");
- &blindpop("ebp");
- &lea ("ebp",&DWP(&label("AES_Te")."-".&label("pic_point"),"ebp"));
-
+ &blindpop($tbl);
+ &picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if (!$x86only);
+ &lea ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+
+ # pick a Te4 copy which can't "overlap" with the stack frame or key schedule
+ &lea ($s1,&DWP(768-4,"esp"));
+ &sub ($s1,$tbl);
+ &and ($s1,0x300);
+ &lea ($tbl,&DWP(2048+128,$tbl,$s1));
+
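[In plain Perl the selection above reads as follows; editorial transcription, the helper name and arguments are invented:

    sub pick_te4 {
        my ($esp, $tbl) = @_;                        # stack ptr, AES_Te base
        my $off = (($esp + 768 - 4) - $tbl) & 0x300; # keep bits 8-9 only
        return $tbl + 2048 + 128 + $off;             # one of 4 copies, +128 bias
    }

i.e. bits 8-9 of the chosen copy's address are steered by the stack pointer so that, modulo 1KB, the 256-byte copy sits clear of the frame and copied key schedule; 768-4 looks like an upper bound on their combined size.]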
+ if (!$x86only) {
+ &bt (&DWP(0,$s0),25); # check for SSE bit
+ &jnc (&label("x86"));
+
+ &movq ("mm0",&QWP(0,$acc));
+ &movq ("mm4",&QWP(8,$acc));
+ &call ("_sse_AES_encrypt_compact");
+ &mov ("esp",$_esp); # restore stack pointer
+ &mov ($acc,&wparam(1)); # load out
+ &movq (&QWP(0,$acc),"mm0"); # write output data
+ &movq (&QWP(8,$acc),"mm4");
+ &emms ();
+ &function_end_A();
+ }
+ &set_label("x86",16);
+ &mov ($_tbl,$tbl);
&mov ($s0,&DWP(0,$acc)); # load input data
&mov ($s1,&DWP(4,$acc));
&mov ($s2,&DWP(8,$acc));
&mov ($s3,&DWP(12,$acc));
-
- &call ("_x86_AES_encrypt");
-
- &mov ("esp",&DWP(16,"esp"));
-
+ &call ("_x86_AES_encrypt_compact");
+ &mov ("esp",$_esp); # restore stack pointer
&mov ($acc,&wparam(1)); # load out
&mov (&DWP(0,$acc),$s0); # write output data
&mov (&DWP(4,$acc),$s1);
@@ -469,7 +1215,370 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
&mov (&DWP(12,$acc),$s3);
&function_end("AES_encrypt");
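[Editorial note: the new prologue replaces the old fixed 24-byte frame with a cache-line-aligned one parked near the key schedule. Transcribed into plain Perl, a sketch of the address arithmetic only, names invented:

    sub place_frame {
        my ($esp, $key) = @_;
        $esp = ($esp - 36) & ~63;                      # and esp,-64: align
        my $gap = ($esp - ($key - 64 - 63)) & 0x3C0;   # mod 1KB, 64B-aligned
        return $esp - $gap + 4;                        # 4 for caller's ret addr
    }
]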
-#------------------------------------------------------------------#
+#--------------------------------------------------------------------#
+
+######################################################################
+# "Compact" block function
+######################################################################
+
+sub deccompact()
+{ my $Fn = mov;
+ while ($#_>5) { pop(@_); $Fn=sub{}; }
+ my ($i,$td,@s)=@_;
+ my $tmp = $key;
+ my $out = $i==3?$s[0]:$acc;
+
+ # $Fn is used in the first compact round and its purpose is to
+ # avoid restoring some values from the stack, so that after
+ # 4xdeccompact with the extra argument, the $key, $s0 and $s1
+ # values are left there...
+ if($i==3) { &$Fn ($key,$__key); }
+ else { &mov ($out,$s[0]); }
+ &and ($out,0xFF);
+ &movz ($out,&BP(-128,$td,$out,1));
+
+ if ($i==3) { $tmp=$s[1]; }
+ &movz ($tmp,&HB($s[1]));
+ &movz ($tmp,&BP(-128,$td,$tmp,1));
+ &shl ($tmp,8);
+ &xor ($out,$tmp);
+
+ if ($i==3) { $tmp=$s[2]; &mov ($s[1],$acc); }
+ else { &mov ($tmp,$s[2]); }
+ &shr ($tmp,16);
+ &and ($tmp,0xFF);
+ &movz ($tmp,&BP(-128,$td,$tmp,1));
+ &shl ($tmp,16);
+ &xor ($out,$tmp);
+
+ if ($i==3) { $tmp=$s[3]; &$Fn ($s[2],$__s1); }
+ else { &mov ($tmp,$s[3]); }
+ &shr ($tmp,24);
+ &movz ($tmp,&BP(-128,$td,$tmp,1));
+ &shl ($tmp,24);
+ &xor ($out,$tmp);
+ if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
+ if ($i==3) { &$Fn ($s[3],$__s0); }
+}
+
+# must be called with 2,3,0,1 as argument sequence!!!
+sub dectransform()
+{ my @s = ($s0,$s1,$s2,$s3);
+ my $i = shift;
+ my $tmp = $key;
+ my $tp2 = @s[($i+2)%4]; $tp2 = @s[2] if ($i==1);
+ my $tp4 = @s[($i+3)%4]; $tp4 = @s[3] if ($i==1);
+ my $tp8 = $tbl;
+
+ &mov ($acc,$s[$i]);
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($tp2,&DWP(0,$s[$i],$s[$i]));
+ &sub ($acc,$tmp);
+ &and ($tp2,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &xor ($acc,$tp2);
+ &mov ($tp2,$acc);
+
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($tp4,&DWP(0,$tp2,$tp2));
+ &sub ($acc,$tmp);
+ &and ($tp4,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &xor ($tp2,$s[$i]); # tp2^tp1
+ &xor ($acc,$tp4);
+ &mov ($tp4,$acc);
+
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($tp8,&DWP(0,$tp4,$tp4));
+ &sub ($acc,$tmp);
+ &and ($tp8,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &xor ($tp4,$s[$i]); # tp4^tp1
+ &rotl ($s[$i],8); # = ROTATE(tp1,8)
+ &xor ($tp8,$acc);
+
+ &xor ($s[$i],$tp2);
+ &xor ($tp2,$tp8);
+ &rotl ($tp2,24);
+ &xor ($s[$i],$tp4);
+ &xor ($tp4,$tp8);
+ &rotl ($tp4,16);
+ &xor ($s[$i],$tp8); # ^= tp8^(tp4^tp1)^(tp2^tp1)
+ &rotl ($tp8,8);
+ &xor ($s[$i],$tp2); # ^= ROTATE(tp8^tp2^tp1,24)
+ &xor ($s[$i],$tp4); # ^= ROTATE(tp8^tp4^tp1,16)
+ &mov ($s[0],$__s0) if($i==2); #prefetch $s0
+ &mov ($s[1],$__s1) if($i==3); #prefetch $s1
+ &mov ($s[2],$__s2) if($i==1);
+ &xor ($s[$i],$tp8); # ^= ROTATE(tp8,8)
+
+ &mov ($s[3],$__s3) if($i==1);
+ &mov (&DWP(4+4*$i,"esp"),$s[$i]) if($i>=2);
+}
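[Collecting the comments above into one expression: dectransform() computes InvMixColumns, coefficients {0e,0b,0d,09}, on a packed word from three doublings. A self-contained Perl rendition of that formula; my reconstruction, not part of the patch:

    sub xtime4 { my $x=shift; my $h=$x & 0x80808080;
                 ((($x<<1) & 0xfefefefe) ^ (($h-($h>>7)) & 0x1b1b1b1b)) }
    sub rotl   { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff }
    sub inv_mix_word {
        my $tp1 = shift;
        my $tp2 = xtime4($tp1);                  # 2*x
        my $tp4 = xtime4($tp2);                  # 4*x
        my $tp8 = xtime4($tp4);                  # 8*x
        return ($tp2 ^ $tp4 ^ $tp8)
             ^ rotl($tp1 ^ $tp8,         8)
             ^ rotl($tp1 ^ $tp4 ^ $tp8, 16)
             ^ rotl($tp1 ^ $tp2 ^ $tp8, 24);
    }
]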
+
+&function_begin_B("_x86_AES_decrypt_compact");
+ # note that caller is expected to allocate stack frame for me!
+ &mov ($__key,$key); # save key
+
+ &xor ($s0,&DWP(0,$key)); # xor with key
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &mov ($acc,&DWP(240,$key)); # load key->rounds
+
+ &lea ($acc,&DWP(-2,$acc,$acc));
+ &lea ($acc,&DWP(0,$key,$acc,8));
+ &mov ($__end,$acc); # end of key schedule
+
+ # prefetch Td4
+ &mov ($key,&DWP(0-128,$tbl));
+ &mov ($acc,&DWP(32-128,$tbl));
+ &mov ($key,&DWP(64-128,$tbl));
+ &mov ($acc,&DWP(96-128,$tbl));
+ &mov ($key,&DWP(128-128,$tbl));
+ &mov ($acc,&DWP(160-128,$tbl));
+ &mov ($key,&DWP(192-128,$tbl));
+ &mov ($acc,&DWP(224-128,$tbl));
+
+ &set_label("loop",16);
+
+ &deccompact(0,$tbl,$s0,$s3,$s2,$s1,1);
+ &deccompact(1,$tbl,$s1,$s0,$s3,$s2,1);
+ &deccompact(2,$tbl,$s2,$s1,$s0,$s3,1);
+ &deccompact(3,$tbl,$s3,$s2,$s1,$s0,1);
+ &dectransform(2);
+ &dectransform(3);
+ &dectransform(0);
+ &dectransform(1);
+ &mov ($key,$__key);
+ &mov ($tbl,$__tbl);
+ &add ($key,16); # advance rd_key
+ &xor ($s0,&DWP(0,$key));
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &cmp ($key,$__end);
+ &mov ($__key,$key);
+ &jb (&label("loop"));
+
+ &deccompact(0,$tbl,$s0,$s3,$s2,$s1);
+ &deccompact(1,$tbl,$s1,$s0,$s3,$s2);
+ &deccompact(2,$tbl,$s2,$s1,$s0,$s3);
+ &deccompact(3,$tbl,$s3,$s2,$s1,$s0);
+
+ &xor ($s0,&DWP(16,$key));
+ &xor ($s1,&DWP(20,$key));
+ &xor ($s2,&DWP(24,$key));
+ &xor ($s3,&DWP(28,$key));
+
+ &ret ();
+&function_end_B("_x86_AES_decrypt_compact");
+
+######################################################################
+# "Compact" SSE block function.
+######################################################################
+
+sub sse_deccompact()
+{
+ &pshufw ("mm1","mm0",0x0c); # 7, 6, 1, 0
+ &movd ("eax","mm1"); # 7, 6, 1, 0
+
+ &pshufw ("mm5","mm4",0x09); # 13,12,11,10
+ &movz ($acc,&LB("eax")); # 0
+ &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 0
+ &movd ("ebx","mm5"); # 13,12,11,10
+ &movz ("edx",&HB("eax")); # 1
+ &movz ("edx",&BP(-128,$tbl,"edx",1)); # 1
+ &shl ("edx",8); # 1
+
+ &pshufw ("mm2","mm0",0x06); # 3, 2, 5, 4
+ &movz ($acc,&LB("ebx")); # 10
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 10
+ &shl ($acc,16); # 10
+ &or ("ecx",$acc); # 10
+ &shr ("eax",16); # 7, 6
+ &movz ($acc,&HB("ebx")); # 11
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 11
+ &shl ($acc,24); # 11
+ &or ("edx",$acc); # 11
+ &shr ("ebx",16); # 13,12
+
+ &pshufw ("mm6","mm4",0x03); # 9, 8,15,14
+ &movz ($acc,&HB("eax")); # 7
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 7
+ &shl ($acc,24); # 7
+ &or ("ecx",$acc); # 7
+ &movz ($acc,&HB("ebx")); # 13
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 13
+ &shl ($acc,8); # 13
+ &or ("ecx",$acc); # 13
+ &movd ("mm0","ecx"); # t[0] collected
+
+ &movz ($acc,&LB("eax")); # 6
+ &movd ("eax","mm2"); # 3, 2, 5, 4
+ &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 6
+ &shl ("ecx",16); # 6
+ &movz ($acc,&LB("ebx")); # 12
+ &movd ("ebx","mm6"); # 9, 8,15,14
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 12
+ &or ("ecx",$acc); # 12
+
+ &movz ($acc,&LB("eax")); # 4
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 4
+ &or ("edx",$acc); # 4
+ &movz ($acc,&LB("ebx")); # 14
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 14
+ &shl ($acc,16); # 14
+ &or ("edx",$acc); # 14
+ &movd ("mm1","edx"); # t[1] collected
+
+ &movz ($acc,&HB("eax")); # 5
+ &movz ("edx",&BP(-128,$tbl,$acc,1)); # 5
+ &shl ("edx",8); # 5
+ &movz ($acc,&HB("ebx")); # 15
+ &shr ("eax",16); # 3, 2
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 15
+ &shl ($acc,24); # 15
+ &or ("edx",$acc); # 15
+ &shr ("ebx",16); # 9, 8
+
+ &punpckldq ("mm0","mm1"); # t[0,1] collected
+
+ &movz ($acc,&HB("ebx")); # 9
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 9
+ &shl ($acc,8); # 9
+ &or ("ecx",$acc); # 9
+ &and ("ebx",0xff); # 8
+ &movz ("ebx",&BP(-128,$tbl,"ebx",1)); # 8
+ &or ("edx","ebx"); # 8
+ &movz ($acc,&LB("eax")); # 2
+ &movz ($acc,&BP(-128,$tbl,$acc,1)); # 2
+ &shl ($acc,16); # 2
+ &or ("edx",$acc); # 2
+ &movd ("mm4","edx"); # t[2] collected
+ &movz ("eax",&HB("eax")); # 3
+ &movz ("eax",&BP(-128,$tbl,"eax",1)); # 3
+ &shl ("eax",24); # 3
+ &or ("ecx","eax"); # 3
+ &movd ("mm5","ecx"); # t[3] collected
+
+ &punpckldq ("mm4","mm5"); # t[2,3] collected
+}
+
+ if (!$x86only) {
+&function_begin_B("_sse_AES_decrypt_compact");
+ &pxor ("mm0",&QWP(0,$key)); # 7, 6, 5, 4, 3, 2, 1, 0
+ &pxor ("mm4",&QWP(8,$key)); # 15,14,13,12,11,10, 9, 8
+
+ # note that caller is expected to allocate stack frame for me!
+ &mov ($acc,&DWP(240,$key)); # load key->rounds
+ &lea ($acc,&DWP(-2,$acc,$acc));
+ &lea ($acc,&DWP(0,$key,$acc,8));
+ &mov ($__end,$acc); # end of key schedule
+
+ &mov ($s0,0x1b1b1b1b); # magic constant
+ &mov (&DWP(8,"esp"),$s0);
+ &mov (&DWP(12,"esp"),$s0);
+
+ # prefetch Td4
+ &mov ($s0,&DWP(0-128,$tbl));
+ &mov ($s1,&DWP(32-128,$tbl));
+ &mov ($s2,&DWP(64-128,$tbl));
+ &mov ($s3,&DWP(96-128,$tbl));
+ &mov ($s0,&DWP(128-128,$tbl));
+ &mov ($s1,&DWP(160-128,$tbl));
+ &mov ($s2,&DWP(192-128,$tbl));
+ &mov ($s3,&DWP(224-128,$tbl));
+
+ &set_label("loop",16);
+ &sse_deccompact();
+ &add ($key,16);
+ &cmp ($key,$__end);
+ &ja (&label("out"));
+
+ # ROTATE(x^y,N) == ROTATE(x,N)^ROTATE(y,N)
+ &movq ("mm3","mm0"); &movq ("mm7","mm4");
+ &movq ("mm2","mm0",1); &movq ("mm6","mm4",1);
+ &movq ("mm1","mm0"); &movq ("mm5","mm4");
+ &pshufw ("mm0","mm0",0xb1); &pshufw ("mm4","mm4",0xb1);# = ROTATE(tp0,16)
+ &pslld ("mm2",8); &pslld ("mm6",8);
+ &psrld ("mm3",8); &psrld ("mm7",8);
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= tp0<<8
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp0>>8
+ &pslld ("mm2",16); &pslld ("mm6",16);
+ &psrld ("mm3",16); &psrld ("mm7",16);
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= tp0<<24
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp0>>24
+
+ &movq ("mm3",&QWP(8,"esp"));
+ &pxor ("mm2","mm2"); &pxor ("mm6","mm6");
+ &pcmpgtb("mm2","mm1"); &pcmpgtb("mm6","mm5");
+ &pand ("mm2","mm3"); &pand ("mm6","mm3");
+ &paddb ("mm1","mm1"); &paddb ("mm5","mm5");
+ &pxor ("mm1","mm2"); &pxor ("mm5","mm6"); # tp2
+ &movq ("mm3","mm1"); &movq ("mm7","mm5");
+ &movq ("mm2","mm1"); &movq ("mm6","mm5");
+ &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp2
+ &pslld ("mm3",24); &pslld ("mm7",24);
+ &psrld ("mm2",8); &psrld ("mm6",8);
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp2<<24
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= tp2>>8
+
+ &movq ("mm2",&QWP(8,"esp"));
+ &pxor ("mm3","mm3"); &pxor ("mm7","mm7");
+ &pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5");
+ &pand ("mm3","mm2"); &pand ("mm7","mm2");
+ &paddb ("mm1","mm1"); &paddb ("mm5","mm5");
+ &pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp4
+ &pshufw ("mm3","mm1",0xb1); &pshufw ("mm7","mm5",0xb1);
+ &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp4
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16)
+
+ &pxor ("mm3","mm3"); &pxor ("mm7","mm7");
+ &pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5");
+ &pand ("mm3","mm2"); &pand ("mm7","mm2");
+ &paddb ("mm1","mm1"); &paddb ("mm5","mm5");
+ &pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp8
+ &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp8
+ &movq ("mm3","mm1"); &movq ("mm7","mm5");
+ &pshufw ("mm2","mm1",0xb1); &pshufw ("mm6","mm5",0xb1);
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= ROTATE(tp8,16)
+ &pslld ("mm1",8); &pslld ("mm5",8);
+ &psrld ("mm3",8); &psrld ("mm7",8);
+ &movq ("mm2",&QWP(0,$key)); &movq ("mm6",&QWP(8,$key));
+ &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp8<<8
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp8>>8
+ &mov ($s0,&DWP(0-128,$tbl));
+ &pslld ("mm1",16); &pslld ("mm5",16);
+ &mov ($s1,&DWP(64-128,$tbl));
+ &psrld ("mm3",16); &psrld ("mm7",16);
+ &mov ($s2,&DWP(128-128,$tbl));
+ &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp8<<24
+ &mov ($s3,&DWP(192-128,$tbl));
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp8>>24
+
+ &pxor ("mm0","mm2"); &pxor ("mm4","mm6");
+ &jmp (&label("loop"));
+
+ &set_label("out",16);
+ &pxor ("mm0",&QWP(0,$key));
+ &pxor ("mm4",&QWP(8,$key));
+
+ &ret ();
+&function_end_B("_sse_AES_decrypt_compact");
+ }
+
+######################################################################
+# Vanilla block function.
+######################################################################
sub decstep()
{ my ($i,$td,@s) = @_;
@@ -480,7 +1589,7 @@ sub decstep()
# optimal... or rather that all attempts to reorder didn't
# result in better performance [which by the way is not a
# bit lower than encryption].
- if($i==3) { &mov ($key,&DWP(12,"esp")); }
+ if($i==3) { &mov ($key,$__key); }
else { &mov ($out,$s[0]); }
&and ($out,0xFF);
&mov ($out,&DWP(0,$td,$out,8));
@@ -495,12 +1604,12 @@ sub decstep()
&and ($tmp,0xFF);
&xor ($out,&DWP(2,$td,$tmp,8));
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }
+ if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }
else { &mov ($tmp,$s[3]); }
&shr ($tmp,24);
&xor ($out,&DWP(1,$td,$tmp,8));
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],&DWP(4,"esp")); }
+ if ($i==3) { &mov ($s[3],$__s0); }
&comment();
}
@@ -509,14 +1618,24 @@ sub declast()
my $tmp = $key;
my $out = $i==3?$s[0]:$acc;
- if($i==3) { &mov ($key,&DWP(12,"esp")); }
+ if($i==0) { &lea ($td,&DWP(2048+128,$td));
+ &mov ($tmp,&DWP(0-128,$td));
+ &mov ($acc,&DWP(32-128,$td));
+ &mov ($tmp,&DWP(64-128,$td));
+ &mov ($acc,&DWP(96-128,$td));
+ &mov ($tmp,&DWP(128-128,$td));
+ &mov ($acc,&DWP(160-128,$td));
+ &mov ($tmp,&DWP(192-128,$td));
+ &mov ($acc,&DWP(224-128,$td));
+ &lea ($td,&DWP(-128,$td)); }
+ if($i==3) { &mov ($key,$__key); }
else { &mov ($out,$s[0]); }
&and ($out,0xFF);
- &movz ($out,&BP(2048,$td,$out,1));
+ &movz ($out,&BP(0,$td,$out,1));
if ($i==3) { $tmp=$s[1]; }
&movz ($tmp,&HB($s[1]));
- &movz ($tmp,&BP(2048,$td,$tmp,1));
+ &movz ($tmp,&BP(0,$td,$tmp,1));
&shl ($tmp,8);
&xor ($out,$tmp);
@@ -524,24 +1643,24 @@ sub declast()
else { mov ($tmp,$s[2]); }
&shr ($tmp,16);
&and ($tmp,0xFF);
- &movz ($tmp,&BP(2048,$td,$tmp,1));
+ &movz ($tmp,&BP(0,$td,$tmp,1));
&shl ($tmp,16);
&xor ($out,$tmp);
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],&DWP(8,"esp")); }
+ if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }
else { &mov ($tmp,$s[3]); }
&shr ($tmp,24);
- &movz ($tmp,&BP(2048,$td,$tmp,1));
+ &movz ($tmp,&BP(0,$td,$tmp,1));
&shl ($tmp,24);
&xor ($out,$tmp);
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],&DWP(4,"esp")); }
+ if ($i==3) { &mov ($s[3],$__s0);
+ &lea ($td,&DWP(-2048,$td)); }
}
-&public_label("AES_Td");
&function_begin_B("_x86_AES_decrypt");
# note that caller is expected to allocate stack frame for me!
- &mov (&DWP(12,"esp"),$key); # save key
+ &mov ($__key,$key); # save key
&xor ($s0,&DWP(0,$key)); # xor with key
&xor ($s1,&DWP(4,$key));
@@ -553,20 +1672,19 @@ sub declast()
if ($small_footprint) {
&lea ($acc,&DWP(-2,$acc,$acc));
&lea ($acc,&DWP(0,$key,$acc,8));
- &mov (&DWP(16,"esp"),$acc); # end of key schedule
- &align (4);
- &set_label("loop");
- &decstep(0,"ebp",$s0,$s3,$s2,$s1);
- &decstep(1,"ebp",$s1,$s0,$s3,$s2);
- &decstep(2,"ebp",$s2,$s1,$s0,$s3);
- &decstep(3,"ebp",$s3,$s2,$s1,$s0);
+ &mov ($__end,$acc); # end of key schedule
+ &set_label("loop",16);
+ &decstep(0,$tbl,$s0,$s3,$s2,$s1);
+ &decstep(1,$tbl,$s1,$s0,$s3,$s2);
+ &decstep(2,$tbl,$s2,$s1,$s0,$s3);
+ &decstep(3,$tbl,$s3,$s2,$s1,$s0);
&add ($key,16); # advance rd_key
&xor ($s0,&DWP(0,$key));
&xor ($s1,&DWP(4,$key));
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
- &cmp ($key,&DWP(16,"esp"));
- &mov (&DWP(12,"esp"),$key);
+ &cmp ($key,$__end);
+ &mov ($__key,$key);
&jb (&label("loop"));
}
else {
@@ -575,38 +1693,38 @@ sub declast()
&cmp ($acc,12);
&jle (&label("12rounds"));
- &set_label("14rounds");
+ &set_label("14rounds",4);
for ($i=1;$i<3;$i++) {
- &decstep(0,"ebp",$s0,$s3,$s2,$s1);
- &decstep(1,"ebp",$s1,$s0,$s3,$s2);
- &decstep(2,"ebp",$s2,$s1,$s0,$s3);
- &decstep(3,"ebp",$s3,$s2,$s1,$s0);
+ &decstep(0,$tbl,$s0,$s3,$s2,$s1);
+ &decstep(1,$tbl,$s1,$s0,$s3,$s2);
+ &decstep(2,$tbl,$s2,$s1,$s0,$s3);
+ &decstep(3,$tbl,$s3,$s2,$s1,$s0);
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
&xor ($s2,&DWP(16*$i+8,$key));
&xor ($s3,&DWP(16*$i+12,$key));
}
&add ($key,32);
- &mov (&DWP(12,"esp"),$key); # advance rd_key
- &set_label("12rounds");
+ &mov ($__key,$key); # advance rd_key
+ &set_label("12rounds",4);
for ($i=1;$i<3;$i++) {
- &decstep(0,"ebp",$s0,$s3,$s2,$s1);
- &decstep(1,"ebp",$s1,$s0,$s3,$s2);
- &decstep(2,"ebp",$s2,$s1,$s0,$s3);
- &decstep(3,"ebp",$s3,$s2,$s1,$s0);
+ &decstep(0,$tbl,$s0,$s3,$s2,$s1);
+ &decstep(1,$tbl,$s1,$s0,$s3,$s2);
+ &decstep(2,$tbl,$s2,$s1,$s0,$s3);
+ &decstep(3,$tbl,$s3,$s2,$s1,$s0);
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
&xor ($s2,&DWP(16*$i+8,$key));
&xor ($s3,&DWP(16*$i+12,$key));
}
&add ($key,32);
- &mov (&DWP(12,"esp"),$key); # advance rd_key
- &set_label("10rounds");
+ &mov ($__key,$key); # advance rd_key
+ &set_label("10rounds",4);
for ($i=1;$i<10;$i++) {
- &decstep(0,"ebp",$s0,$s3,$s2,$s1);
- &decstep(1,"ebp",$s1,$s0,$s3,$s2);
- &decstep(2,"ebp",$s2,$s1,$s0,$s3);
- &decstep(3,"ebp",$s3,$s2,$s1,$s0);
+ &decstep(0,$tbl,$s0,$s3,$s2,$s1);
+ &decstep(1,$tbl,$s1,$s0,$s3,$s2);
+ &decstep(2,$tbl,$s2,$s1,$s0,$s3);
+ &decstep(3,$tbl,$s3,$s2,$s1,$s0);
&xor ($s0,&DWP(16*$i+0,$key));
&xor ($s1,&DWP(16*$i+4,$key));
&xor ($s2,&DWP(16*$i+8,$key));
@@ -614,10 +1732,10 @@ sub declast()
}
}
- &declast(0,"ebp",$s0,$s3,$s2,$s1);
- &declast(1,"ebp",$s1,$s0,$s3,$s2);
- &declast(2,"ebp",$s2,$s1,$s0,$s3);
- &declast(3,"ebp",$s3,$s2,$s1,$s0);
+ &declast(0,$tbl,$s0,$s3,$s2,$s1);
+ &declast(1,$tbl,$s1,$s0,$s3,$s2);
+ &declast(2,$tbl,$s2,$s1,$s0,$s3);
+ &declast(3,$tbl,$s3,$s2,$s1,$s0);
&add ($key,$small_footprint?16:160);
&xor ($s0,&DWP(0,$key));
@@ -692,7 +1810,107 @@ sub declast()
&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
-#Td4:
+
+#Td4: # four copies of Td4 to choose from to avoid L1 aliasing
+ &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+ &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+ &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+ &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+ &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+ &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+ &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+ &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+ &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+ &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+ &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+ &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+ &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+ &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+ &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+ &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+ &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+ &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+ &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+ &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+ &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+ &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+ &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+ &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+ &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+ &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+ &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+ &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+ &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+ &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+ &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+ &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+ &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+ &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+ &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+ &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+ &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+ &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+ &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+ &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+ &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+ &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+ &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+ &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+ &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+ &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+ &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+ &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+ &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+ &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+ &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+ &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+ &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+ &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+ &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+ &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+ &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+ &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+ &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+ &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+ &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+ &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+ &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+ &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+ &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+ &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+ &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+ &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+ &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+ &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+ &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+ &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+ &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+ &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+ &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+ &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+ &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+ &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+ &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+ &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+ &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+ &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+ &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+ &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+ &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+ &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+ &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+ &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+ &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+ &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+ &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+ &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+ &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+ &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+ &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+ &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
@@ -728,43 +1946,57 @@ sub declast()
&function_end_B("_x86_AES_decrypt");
# void AES_decrypt (const void *inp,void *out,const AES_KEY *key);
-&public_label("AES_Td");
&function_begin("AES_decrypt");
&mov ($acc,&wparam(0)); # load inp
&mov ($key,&wparam(2)); # load key
&mov ($s0,"esp");
- &sub ("esp",24);
- &and ("esp",-64);
- &add ("esp",4);
- &mov (&DWP(16,"esp"),$s0);
+ &sub ("esp",36);
+ &and ("esp",-64); # align to cache-line
+
+ # place stack frame just "above" the key schedule
+ &lea ($s1,&DWP(-64-63,$key));
+ &sub ($s1,"esp");
+ &neg ($s1);
+ &and ($s1,0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ("esp",$s1);
+ &add ("esp",4); # 4 is reserved for caller's return address
+ &mov ($_esp,$s0); # save stack pointer
&call (&label("pic_point")); # make it PIC!
&set_label("pic_point");
- &blindpop("ebp");
- &lea ("ebp",&DWP(&label("AES_Td")."-".&label("pic_point"),"ebp"));
-
- # prefetch Td4
- &lea ("ebp",&DWP(2048+128,"ebp"));
- &mov ($s0,&DWP(0-128,"ebp"));
- &mov ($s1,&DWP(32-128,"ebp"));
- &mov ($s2,&DWP(64-128,"ebp"));
- &mov ($s3,&DWP(96-128,"ebp"));
- &mov ($s0,&DWP(128-128,"ebp"));
- &mov ($s1,&DWP(160-128,"ebp"));
- &mov ($s2,&DWP(192-128,"ebp"));
- &mov ($s3,&DWP(224-128,"ebp"));
- &lea ("ebp",&DWP(-2048-128,"ebp"));
-
+ &blindpop($tbl);
+ &picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
+ &lea ($tbl,&DWP(&label("AES_Td")."-".&label("pic_point"),$tbl));
+
+ # pick Td4 copy which can't "overlap" with stack frame or key schedule
+ &lea ($s1,&DWP(768-4,"esp"));
+ &sub ($s1,$tbl);
+ &and ($s1,0x300);
+ &lea ($tbl,&DWP(2048+128,$tbl,$s1));
+
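
The Td table is now followed by four identical 256-byte copies of Td4 (emitted under the #Td4 comment in the data section), and the instructions above select whichever copy cannot collide with the stack frame in the L1 index bits. A hedged C restatement of that address arithmetic:

    #include <stdint.h>

    /* sketch of the copy selection: 2048 bytes of Td are followed by
       four 256-byte Td4 copies; bits 8-9 of (esp+768-4 - tbl) pick one
       of them, and the +128 bias lets the later byte loads use short
       signed-byte displacements */
    static uintptr_t pick_td4(uintptr_t tbl, uintptr_t esp)
    {
        uintptr_t which = ((esp + 768 - 4) - tbl) & 0x300; /* 0..768 */
        return tbl + 2048 + 128 + which;
    }
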
+ if (!$x86only) {
+ &bt (&DWP(0,$s0),25); # check for SSE bit
+ &jnc (&label("x86"));
+
+ &movq ("mm0",&QWP(0,$acc));
+ &movq ("mm4",&QWP(8,$acc));
+ &call ("_sse_AES_decrypt_compact");
+ &mov ("esp",$_esp); # restore stack pointer
+ &mov ($acc,&wparam(1)); # load out
+ &movq (&QWP(0,$acc),"mm0"); # write output data
+ &movq (&QWP(8,$acc),"mm4");
+ &emms ();
+ &function_end_A();
+ }
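
Dispatch between the SSE and integer paths keys off bit 25 (SSE) of OPENSSL_ia32cap_P, and the CBC code further down additionally tests bit 28 (hyper-threading) before taking the fast route. A tiny C equivalent, assuming the capability word is a 32-bit image of CPUID EDX (the real symbol's declared type has varied across OpenSSL versions):

    extern unsigned int OPENSSL_ia32cap_P;  /* assumption: CPUID EDX image */

    static int have_sse(void) { return (OPENSSL_ia32cap_P >> 25) & 1; }
    static int have_htt(void) { return (OPENSSL_ia32cap_P >> 28) & 1; }
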
+ &set_label("x86",16);
+ &mov ($_tbl,$tbl);
&mov ($s0,&DWP(0,$acc)); # load input data
&mov ($s1,&DWP(4,$acc));
&mov ($s2,&DWP(8,$acc));
&mov ($s3,&DWP(12,$acc));
-
- &call ("_x86_AES_decrypt");
-
- &mov ("esp",&DWP(16,"esp"));
-
+ &call ("_x86_AES_decrypt_compact");
+ &mov ("esp",$_esp); # restore stack pointer
&mov ($acc,&wparam(1)); # load out
&mov (&DWP(0,$acc),$s0); # write output data
&mov (&DWP(4,$acc),$s1);
@@ -777,126 +2009,136 @@ sub declast()
# unsigned char *ivp,const int enc);
{
# stack frame layout
-# -4(%esp) 0(%esp) return address
-# 0(%esp) 4(%esp) tmp1
-# 4(%esp) 8(%esp) tmp2
-# 8(%esp) 12(%esp) key
-# 12(%esp) 16(%esp) end of key schedule
-my $_esp=&DWP(16,"esp"); #saved %esp
-my $_inp=&DWP(20,"esp"); #copy of wparam(0)
-my $_out=&DWP(24,"esp"); #copy of wparam(1)
-my $_len=&DWP(28,"esp"); #copy of wparam(2)
-my $_key=&DWP(32,"esp"); #copy of wparam(3)
-my $_ivp=&DWP(36,"esp"); #copy of wparam(4)
-my $_tmp=&DWP(40,"esp"); #volatile variable
-my $ivec=&DWP(44,"esp"); #ivec[16]
-my $aes_key=&DWP(60,"esp"); #copy of aes_key
-my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
-
-&public_label("AES_Te");
-&public_label("AES_Td");
+# -4(%esp) # return address 0(%esp)
+# 0(%esp) # s0 backing store 4(%esp)
+# 4(%esp) # s1 backing store 8(%esp)
+# 8(%esp) # s2 backing store 12(%esp)
+# 12(%esp) # s3 backing store 16(%esp)
+# 16(%esp) # key backup 20(%esp)
+# 20(%esp) # end of key schedule 24(%esp)
+# 24(%esp) # %ebp backup 28(%esp)
+# 28(%esp) # %esp backup
+my $_inp=&DWP(32,"esp"); # copy of wparam(0)
+my $_out=&DWP(36,"esp"); # copy of wparam(1)
+my $_len=&DWP(40,"esp"); # copy of wparam(2)
+my $_key=&DWP(44,"esp"); # copy of wparam(3)
+my $_ivp=&DWP(48,"esp"); # copy of wparam(4)
+my $_tmp=&DWP(52,"esp"); # volatile variable
+#
+my $ivec=&DWP(60,"esp"); # ivec[16]
+my $aes_key=&DWP(76,"esp"); # copy of aes_key
+my $mark=&DWP(76+240,"esp"); # copy of aes_key->rounds
+
&function_begin("AES_cbc_encrypt");
&mov ($s2 eq "ecx"? $s2 : "",&wparam(2)); # load len
&cmp ($s2,0);
- &je (&label("enc_out"));
+ &je (&label("drop_out"));
&call (&label("pic_point")); # make it PIC!
&set_label("pic_point");
- &blindpop("ebp");
-
- &pushf ();
- &cld ();
+ &blindpop($tbl);
+ &picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
&cmp (&wparam(5),0);
- &je (&label("DECRYPT"));
-
- &lea ("ebp",&DWP(&label("AES_Te")."-".&label("pic_point"),"ebp"));
+ &lea ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+ &jne (&label("picked_te"));
+ &lea ($tbl,&DWP(&label("AES_Td")."-".&label("AES_Te"),$tbl));
+ &set_label("picked_te");
- # allocate aligned stack frame...
- &lea ($key,&DWP(-64-244,"esp"));
- &and ($key,-64);
+	# one can argue whether this is required
+ &pushf ();
+ &cld ();
- # ... and make sure it doesn't alias with AES_Te modulo 4096
- &mov ($s0,"ebp");
- &lea ($s1,&DWP(2048,"ebp"));
- &mov ($s3,$key);
+ &cmp ($s2,$speed_limit);
+ &jb (&label("slow_way"));
+ &test ($s2,15);
+ &jnz (&label("slow_way"));
+ if (!$x86only) {
+ &bt (&DWP(0,$s0),28); # check for hyper-threading bit
+ &jc (&label("slow_way"));
+ }
+ # pre-allocate aligned stack frame...
+ &lea ($acc,&DWP(-80-244,"esp"));
+ &and ($acc,-64);
+
+ # ... and make sure it doesn't alias with $tbl modulo 4096
+ &mov ($s0,$tbl);
+ &lea ($s1,&DWP(2048+256,$tbl));
+ &mov ($s3,$acc);
&and ($s0,0xfff); # s = %ebp&0xfff
- &and ($s1,0xfff); # e = (%ebp+2048)&0xfff
+ &and ($s1,0xfff); # e = (%ebp+2048+256)&0xfff
&and ($s3,0xfff); # p = %esp&0xfff
 	&cmp	($s3,$s1);	# if (p>=e) %esp -= (p-e);
- &jb (&label("te_break_out"));
+ &jb (&label("tbl_break_out"));
&sub ($s3,$s1);
- &sub ($key,$s3);
- &jmp (&label("te_ok"));
- &set_label("te_break_out"); # else %esp -= (p-s)&0xfff + framesz;
+ &sub ($acc,$s3);
+ &jmp (&label("tbl_ok"));
+ &set_label("tbl_break_out",4); # else %esp -= (p-s)&0xfff + framesz;
&sub ($s3,$s0);
&and ($s3,0xfff);
- &add ($s3,64+256);
- &sub ($key,$s3);
- &align (4);
- &set_label("te_ok");
-
- &mov ($s0,&wparam(0)); # load inp
- &mov ($s1,&wparam(1)); # load out
- &mov ($s3,&wparam(3)); # load key
- &mov ($acc,&wparam(4)); # load ivp
+ &add ($s3,384);
+ &sub ($acc,$s3);
+ &set_label("tbl_ok",4);
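
tbl_break_out/tbl_ok implement the rule spelled out in the inline comments: keep the frame's low 12 address bits clear of the table's [s,e) window, so stack accesses never alias the 2048+256-byte table modulo 4096. A C sketch with constants mirroring the code:

    #include <stdint.h>

    /* if (p >= e) frame -= p - e;
       else        frame -= ((p - s) & 0xfff) + slack; */
    static uintptr_t place_frame(uintptr_t frame, uintptr_t tbl)
    {
        uintptr_t s = tbl & 0xfff;                /* table start mod 4K */
        uintptr_t e = (tbl + 2048 + 256) & 0xfff; /* table end mod 4K   */
        uintptr_t p = frame & 0xfff;              /* frame mod 4K       */
        return (p >= e) ? frame - (p - e)
                        : frame - (((p - s) & 0xfff) + 384);
    }
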
- &exch ("esp",$key);
+ &lea ($s3,&wparam(0)); # obtain pointer to parameter block
+ &exch ("esp",$acc); # allocate stack frame
&add ("esp",4); # reserve for return address!
- &mov ($_esp,$key); # save %esp
+ &mov ($_tbl,$tbl); # save %ebp
+ &mov ($_esp,$acc); # save %esp
+
+ &mov ($s0,&DWP(0,$s3)); # load inp
+ &mov ($s1,&DWP(4,$s3)); # load out
+ #&mov ($s2,&DWP(8,$s3)); # load len
+ &mov ($key,&DWP(12,$s3)); # load key
+ &mov ($acc,&DWP(16,$s3)); # load ivp
+ &mov ($s3,&DWP(20,$s3)); # load enc flag
&mov ($_inp,$s0); # save copy of inp
&mov ($_out,$s1); # save copy of out
&mov ($_len,$s2); # save copy of len
- &mov ($_key,$s3); # save copy of key
+ &mov ($_key,$key); # save copy of key
&mov ($_ivp,$acc); # save copy of ivp
&mov ($mark,0); # copy of aes_key->rounds = 0;
- if ($compromise) {
- &cmp ($s2,$compromise);
- &jb (&label("skip_ecopy"));
- }
# do we copy key schedule to stack?
- &mov ($s1 eq "ebx" ? $s1 : "",$s3);
+ &mov ($s1 eq "ebx" ? $s1 : "",$key);
&mov ($s2 eq "ecx" ? $s2 : "",244/4);
- &sub ($s1,"ebp");
- &mov ("esi",$s3);
+ &sub ($s1,$tbl);
+ &mov ("esi",$key);
&and ($s1,0xfff);
&lea ("edi",$aes_key);
- &cmp ($s1,2048);
- &jb (&label("do_ecopy"));
+ &cmp ($s1,2048+256);
+ &jb (&label("do_copy"));
&cmp ($s1,4096-244);
- &jb (&label("skip_ecopy"));
- &align (4);
- &set_label("do_ecopy");
+ &jb (&label("skip_copy"));
+ &set_label("do_copy",4);
&mov ($_key,"edi");
&data_word(0xA5F3F689); # rep movsd
- &set_label("skip_ecopy");
+ &set_label("skip_copy");
- &mov ($acc,$s0);
&mov ($key,16);
- &align (4);
- &set_label("prefetch_te");
- &mov ($s0,&DWP(0,"ebp"));
- &mov ($s1,&DWP(32,"ebp"));
- &mov ($s2,&DWP(64,"ebp"));
- &mov ($s3,&DWP(96,"ebp"));
- &lea ("ebp",&DWP(128,"ebp"));
- &dec ($key);
- &jnz (&label("prefetch_te"));
- &sub ("ebp",2048);
-
- &mov ($s2,$_len);
+ &set_label("prefetch_tbl",4);
+ &mov ($s0,&DWP(0,$tbl));
+ &mov ($s1,&DWP(32,$tbl));
+ &mov ($s2,&DWP(64,$tbl));
+ &mov ($acc,&DWP(96,$tbl));
+ &lea ($tbl,&DWP(128,$tbl));
+ &sub ($key,1);
+ &jnz (&label("prefetch_tbl"));
+ &sub ($tbl,2048);
+
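
The prefetch_tbl loop is not a hardware prefetch, just one dummy load per 32 bytes, which touches every line of the 2KB table even on CPUs with 32-byte cache lines. The same idea in C:

    #include <stdint.h>

    /* touch one dword in every 32-byte chunk of a 2KB table */
    static void touch_table(const volatile uint32_t *tbl)
    {
        uint32_t sink = 0;
        for (unsigned i = 0; i < 2048/4; i += 32/4)
            sink ^= tbl[i];
        (void)sink;  /* the volatile source keeps the loads alive */
    }
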
+ &mov ($acc,$_inp);
&mov ($key,$_ivp);
- &test ($s2,0xFFFFFFF0);
- &jz (&label("enc_tail")); # short input...
+ &cmp ($s3,0);
+ &je (&label("fast_decrypt"));
+
+#----------------------------- ENCRYPT -----------------------------#
&mov ($s0,&DWP(0,$key)); # load iv
&mov ($s1,&DWP(4,$key));
- &align (4);
- &set_label("enc_loop");
+ &set_label("fast_enc_loop",16);
&mov ($s2,&DWP(8,$key));
&mov ($s3,&DWP(12,$key));
@@ -916,22 +2158,16 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
&mov (&DWP(8,$key),$s2);
&mov (&DWP(12,$key),$s3);
+ &lea ($acc,&DWP(16,$acc)); # advance inp
&mov ($s2,$_len); # load len
-
- &lea ($acc,&DWP(16,$acc));
&mov ($_inp,$acc); # save inp
-
- &lea ($s3,&DWP(16,$key));
+ &lea ($s3,&DWP(16,$key)); # advance out
&mov ($_out,$s3); # save out
-
- &sub ($s2,16);
- &test ($s2,0xFFFFFFF0);
+ &sub ($s2,16); # decrease len
&mov ($_len,$s2); # save len
- &jnz (&label("enc_loop"));
- &test ($s2,15);
- &jnz (&label("enc_tail"));
+ &jnz (&label("fast_enc_loop"));
&mov ($acc,$_ivp); # load ivp
- &mov ($s2,&DWP(8,$key)); # restore last dwords
+ &mov ($s2,&DWP(8,$key)); # restore last 2 dwords
&mov ($s3,&DWP(12,$key));
&mov (&DWP(0,$acc),$s0); # save ivec
&mov (&DWP(4,$acc),$s1);
@@ -949,125 +2185,20 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
 	&set_label("skip_ezero");
&mov ("esp",$_esp);
&popf ();
- &set_label("enc_out");
+ &set_label("drop_out");
&function_end_A();
&pushf (); # kludge, never executed
- &align (4);
- &set_label("enc_tail");
- &mov ($s0,$key eq "edi" ? $key : "");
- &mov ($key,$_out); # load out
- &push ($s0); # push ivp
- &mov ($s1,16);
- &sub ($s1,$s2);
- &cmp ($key,$acc); # compare with inp
- &je (&label("enc_in_place"));
- &align (4);
- &data_word(0xA4F3F689); # rep movsb # copy input
- &jmp (&label("enc_skip_in_place"));
- &set_label("enc_in_place");
- &lea ($key,&DWP(0,$key,$s2));
- &set_label("enc_skip_in_place");
- &mov ($s2,$s1);
- &xor ($s0,$s0);
- &align (4);
- &data_word(0xAAF3F689); # rep stosb # zero tail
- &pop ($key); # pop ivp
-
- &mov ($acc,$_out); # output as input
- &mov ($s0,&DWP(0,$key));
- &mov ($s1,&DWP(4,$key));
- &mov ($_len,16); # len=16
- &jmp (&label("enc_loop")); # one more spin...
-
#----------------------------- DECRYPT -----------------------------#
-&align (4);
-&set_label("DECRYPT");
- &lea ("ebp",&DWP(&label("AES_Td")."-".&label("pic_point"),"ebp"));
-
- # allocate aligned stack frame...
- &lea ($key,&DWP(-64-244,"esp"));
- &and ($key,-64);
-
- # ... and make sure it doesn't alias with AES_Td modulo 4096
- &mov ($s0,"ebp");
- &lea ($s1,&DWP(2048+256,"ebp"));
- &mov ($s3,$key);
- &and ($s0,0xfff); # s = %ebp&0xfff
- &and ($s1,0xfff); # e = (%ebp+2048+256)&0xfff
- &and ($s3,0xfff); # p = %esp&0xfff
-
- &cmp ($s3,$s1); # if (p>=e) %esp =- (p-e);
- &jb (&label("td_break_out"));
- &sub ($s3,$s1);
- &sub ($key,$s3);
- &jmp (&label("td_ok"));
- &set_label("td_break_out"); # else %esp -= (p-s)&0xfff + framesz;
- &sub ($s3,$s0);
- &and ($s3,0xfff);
- &add ($s3,64+256);
- &sub ($key,$s3);
- &align (4);
- &set_label("td_ok");
-
- &mov ($s0,&wparam(0)); # load inp
- &mov ($s1,&wparam(1)); # load out
- &mov ($s3,&wparam(3)); # load key
- &mov ($acc,&wparam(4)); # load ivp
-
- &exch ("esp",$key);
- &add ("esp",4); # reserve for return address!
- &mov ($_esp,$key); # save %esp
-
- &mov ($_inp,$s0); # save copy of inp
- &mov ($_out,$s1); # save copy of out
- &mov ($_len,$s2); # save copy of len
- &mov ($_key,$s3); # save copy of key
- &mov ($_ivp,$acc); # save copy of ivp
-
- &mov ($mark,0); # copy of aes_key->rounds = 0;
- if ($compromise) {
- &cmp ($s2,$compromise);
- &jb (&label("skip_dcopy"));
- }
- # do we copy key schedule to stack?
- &mov ($s1 eq "ebx" ? $s1 : "",$s3);
- &mov ($s2 eq "ecx" ? $s2 : "",244/4);
- &sub ($s1,"ebp");
- &mov ("esi",$s3);
- &and ($s1,0xfff);
- &lea ("edi",$aes_key);
- &cmp ($s1,2048+256);
- &jb (&label("do_dcopy"));
- &cmp ($s1,4096-244);
- &jb (&label("skip_dcopy"));
- &align (4);
- &set_label("do_dcopy");
- &mov ($_key,"edi");
- &data_word(0xA5F3F689); # rep movsd
- &set_label("skip_dcopy");
-
- &mov ($acc,$s0);
- &mov ($key,18);
- &align (4);
- &set_label("prefetch_td");
- &mov ($s0,&DWP(0,"ebp"));
- &mov ($s1,&DWP(32,"ebp"));
- &mov ($s2,&DWP(64,"ebp"));
- &mov ($s3,&DWP(96,"ebp"));
- &lea ("ebp",&DWP(128,"ebp"));
- &dec ($key);
- &jnz (&label("prefetch_td"));
- &sub ("ebp",2048+256);
+&set_label("fast_decrypt",16);
&cmp ($acc,$_out);
- &je (&label("dec_in_place")); # in-place processing...
+ &je (&label("fast_dec_in_place")); # in-place processing...
- &mov ($key,$_ivp); # load ivp
&mov ($_tmp,$key);
&align (4);
- &set_label("dec_loop");
+ &set_label("fast_dec_loop",16);
&mov ($s0,&DWP(0,$acc)); # read input
&mov ($s1,&DWP(4,$acc));
&mov ($s2,&DWP(8,$acc));
@@ -1083,27 +2214,24 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
&xor ($s2,&DWP(8,$key));
&xor ($s3,&DWP(12,$key));
- &sub ($acc,16);
- &jc (&label("dec_partial"));
- &mov ($_len,$acc); # save len
- &mov ($acc,$_inp); # load inp
&mov ($key,$_out); # load out
+ &mov ($acc,$_inp); # load inp
&mov (&DWP(0,$key),$s0); # write output
&mov (&DWP(4,$key),$s1);
&mov (&DWP(8,$key),$s2);
&mov (&DWP(12,$key),$s3);
+ &mov ($s2,$_len); # load len
&mov ($_tmp,$acc); # save ivp
- &lea ($acc,&DWP(16,$acc));
+ &lea ($acc,&DWP(16,$acc)); # advance inp
&mov ($_inp,$acc); # save inp
-
- &lea ($key,&DWP(16,$key));
+ &lea ($key,&DWP(16,$key)); # advance out
&mov ($_out,$key); # save out
-
- &jnz (&label("dec_loop"));
+ &sub ($s2,16); # decrease len
+ &mov ($_len,$s2); # save len
+ &jnz (&label("fast_dec_loop"));
&mov ($key,$_tmp); # load temp ivp
- &set_label("dec_end");
&mov ($acc,$_ivp); # load user ivp
&mov ($s0,&DWP(0,$key)); # load iv
&mov ($s1,&DWP(4,$key));
@@ -1113,31 +2241,16 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
&mov (&DWP(4,$acc),$s1);
&mov (&DWP(8,$acc),$s2);
&mov (&DWP(12,$acc),$s3);
- &jmp (&label("dec_out"));
+ &jmp (&label("fast_dec_out"));
- &align (4);
- &set_label("dec_partial");
- &lea ($key,$ivec);
- &mov (&DWP(0,$key),$s0); # dump output to stack
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
- &lea ($s2 eq "ecx" ? $s2 : "",&DWP(16,$acc));
- &mov ($acc eq "esi" ? $acc : "",$key);
- &mov ($key eq "edi" ? $key : "",$_out); # load out
- &data_word(0xA4F3F689); # rep movsb # copy output
- &mov ($key,$_inp); # use inp as temp ivp
- &jmp (&label("dec_end"));
-
- &align (4);
- &set_label("dec_in_place");
- &set_label("dec_in_place_loop");
- &lea ($key,$ivec);
+ &set_label("fast_dec_in_place",16);
+ &set_label("fast_dec_in_place_loop");
&mov ($s0,&DWP(0,$acc)); # read input
&mov ($s1,&DWP(4,$acc));
&mov ($s2,&DWP(8,$acc));
&mov ($s3,&DWP(12,$acc));
+ &lea ($key,$ivec);
&mov (&DWP(0,$key),$s0); # copy to temp
&mov (&DWP(4,$key),$s1);
&mov (&DWP(8,$key),$s2);
@@ -1158,7 +2271,7 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
&mov (&DWP(8,$acc),$s2);
&mov (&DWP(12,$acc),$s3);
- &lea ($acc,&DWP(16,$acc));
+ &lea ($acc,&DWP(16,$acc)); # advance out
&mov ($_out,$acc); # save out
&lea ($acc,$ivec);
@@ -1173,40 +2286,340 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
&mov (&DWP(12,$key),$s3);
&mov ($acc,$_inp); # load inp
+ &mov ($s2,$_len); # load len
+ &lea ($acc,&DWP(16,$acc)); # advance inp
+ &mov ($_inp,$acc); # save inp
+ &sub ($s2,16); # decrease len
+ &mov ($_len,$s2); # save len
+ &jnz (&label("fast_dec_in_place_loop"));
+
+ &set_label("fast_dec_out",4);
+ &cmp ($mark,0); # was the key schedule copied?
+ &mov ("edi",$_key);
+ &je (&label("skip_dzero"));
+ # zero copy of key schedule
+ &mov ("ecx",240/4);
+ &xor ("eax","eax");
+ &align (4);
+ &data_word(0xABF3F689); # rep stosd
+	&set_label("skip_dzero");
+ &mov ("esp",$_esp);
+ &popf ();
+ &function_end_A();
+ &pushf (); # kludge, never executed
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+&set_label("slow_way",16);
+
+ &mov ($s0,&DWP(0,$s0)) if (!$x86only);# load OPENSSL_ia32cap
+ &mov ($key,&wparam(3)); # load key
+
+ # pre-allocate aligned stack frame...
+ &lea ($acc,&DWP(-80,"esp"));
+ &and ($acc,-64);
+
+ # ... and make sure it doesn't alias with $key modulo 1024
+ &lea ($s1,&DWP(-80-63,$key));
+ &sub ($s1,$acc);
+ &neg ($s1);
+ &and ($s1,0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ($acc,$s1);
+
+ # pick S-box copy which can't overlap with stack frame or $key
+ &lea ($s1,&DWP(768,$acc));
+ &sub ($s1,$tbl);
+ &and ($s1,0x300);
+ &lea ($tbl,&DWP(2048+128,$tbl,$s1));
+
+ &lea ($s3,&wparam(0)); # pointer to parameter block
+
+ &exch ("esp",$acc);
+ &add ("esp",4); # reserve for return address!
+ &mov ($_tbl,$tbl); # save %ebp
+ &mov ($_esp,$acc); # save %esp
+ &mov ($_tmp,$s0); # save OPENSSL_ia32cap
+
+ &mov ($s0,&DWP(0,$s3)); # load inp
+ &mov ($s1,&DWP(4,$s3)); # load out
+ #&mov ($s2,&DWP(8,$s3)); # load len
+ #&mov ($key,&DWP(12,$s3)); # load key
+ &mov ($acc,&DWP(16,$s3)); # load ivp
+ &mov ($s3,&DWP(20,$s3)); # load enc flag
+
+ &mov ($_inp,$s0); # save copy of inp
+ &mov ($_out,$s1); # save copy of out
+ &mov ($_len,$s2); # save copy of len
+ &mov ($_key,$key); # save copy of key
+ &mov ($_ivp,$acc); # save copy of ivp
+
+ &mov ($key,$acc);
+ &mov ($acc,$s0);
+
+ &cmp ($s3,0);
+ &je (&label("slow_decrypt"));
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+ &cmp ($s2,16);
+ &mov ($s3,$s1);
+ &jb (&label("slow_enc_tail"));
+
+ if (!$x86only) {
+ &bt ($_tmp,25); # check for SSE bit
+ &jnc (&label("slow_enc_x86"));
- &lea ($acc,&DWP(16,$acc));
+ &movq ("mm0",&QWP(0,$key)); # load iv
+ &movq ("mm4",&QWP(8,$key));
+
+ &set_label("slow_enc_loop_sse",16);
+ &pxor ("mm0",&QWP(0,$acc)); # xor input data
+ &pxor ("mm4",&QWP(8,$acc));
+
+ &mov ($key,$_key);
+ &call ("_sse_AES_encrypt_compact");
+
+ &mov ($acc,$_inp); # load inp
+ &mov ($key,$_out); # load out
+ &mov ($s2,$_len); # load len
+
+ &movq (&QWP(0,$key),"mm0"); # save output data
+ &movq (&QWP(8,$key),"mm4");
+
+ &lea ($acc,&DWP(16,$acc)); # advance inp
&mov ($_inp,$acc); # save inp
+ &lea ($s3,&DWP(16,$key)); # advance out
+ &mov ($_out,$s3); # save out
+ &sub ($s2,16); # decrease len
+ &cmp ($s2,16);
+ &mov ($_len,$s2); # save len
+ &jae (&label("slow_enc_loop_sse"));
+ &test ($s2,15);
+ &jnz (&label("slow_enc_tail"));
+ &mov ($acc,$_ivp); # load ivp
+ &movq (&QWP(0,$acc),"mm0"); # save ivec
+ &movq (&QWP(8,$acc),"mm4");
+ &emms ();
+ &mov ("esp",$_esp);
+ &popf ();
+ &function_end_A();
+ &pushf (); # kludge, never executed
+ }
+ &set_label("slow_enc_x86",16);
+ &mov ($s0,&DWP(0,$key)); # load iv
+ &mov ($s1,&DWP(4,$key));
+
+ &set_label("slow_enc_loop_x86",4);
+ &mov ($s2,&DWP(8,$key));
+ &mov ($s3,&DWP(12,$key));
+
+ &xor ($s0,&DWP(0,$acc)); # xor input data
+ &xor ($s1,&DWP(4,$acc));
+ &xor ($s2,&DWP(8,$acc));
+ &xor ($s3,&DWP(12,$acc));
+
+ &mov ($key,$_key); # load key
+ &call ("_x86_AES_encrypt_compact");
+
+ &mov ($acc,$_inp); # load inp
+ &mov ($key,$_out); # load out
+
+ &mov (&DWP(0,$key),$s0); # save output data
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
&mov ($s2,$_len); # load len
- &sub ($s2,16);
- &jc (&label("dec_in_place_partial"));
+ &lea ($acc,&DWP(16,$acc)); # advance inp
+ &mov ($_inp,$acc); # save inp
+ &lea ($s3,&DWP(16,$key)); # advance out
+ &mov ($_out,$s3); # save out
+ &sub ($s2,16); # decrease len
+ &cmp ($s2,16);
&mov ($_len,$s2); # save len
- &jnz (&label("dec_in_place_loop"));
- &jmp (&label("dec_out"));
-
- &align (4);
- &set_label("dec_in_place_partial");
- # one can argue if this is actually required...
- &mov ($key eq "edi" ? $key : "",$_out);
- &lea ($acc eq "esi" ? $acc : "",$ivec);
+ &jae (&label("slow_enc_loop_x86"));
+ &test ($s2,15);
+ &jnz (&label("slow_enc_tail"));
+ &mov ($acc,$_ivp); # load ivp
+ &mov ($s2,&DWP(8,$key)); # restore last dwords
+ &mov ($s3,&DWP(12,$key));
+ &mov (&DWP(0,$acc),$s0); # save ivec
+ &mov (&DWP(4,$acc),$s1);
+ &mov (&DWP(8,$acc),$s2);
+ &mov (&DWP(12,$acc),$s3);
+
+ &mov ("esp",$_esp);
+ &popf ();
+ &function_end_A();
+ &pushf (); # kludge, never executed
+
+ &set_label("slow_enc_tail",16);
+ &emms () if (!$x86only);
+ &mov ($key eq "edi"? $key:"",$s3); # load out to edi
+ &mov ($s1,16);
+ &sub ($s1,$s2);
+ &cmp ($key,$acc eq "esi"? $acc:""); # compare with inp
+ &je (&label("enc_in_place"));
+ &align (4);
+ &data_word(0xA4F3F689); # rep movsb # copy input
+ &jmp (&label("enc_skip_in_place"));
+ &set_label("enc_in_place");
&lea ($key,&DWP(0,$key,$s2));
- &lea ($acc,&DWP(16,$acc,$s2));
- &neg ($s2 eq "ecx" ? $s2 : "");
- &data_word(0xA4F3F689); # rep movsb # restore tail
-
- &align (4);
- &set_label("dec_out");
- &cmp ($mark,0); # was the key schedule copied?
- &mov ("edi",$_key);
- &je (&label("skip_dzero"));
- # zero copy of key schedule
- &mov ("ecx",240/4);
- &xor ("eax","eax");
- &align (4);
- &data_word(0xABF3F689); # rep stosd
- &set_label("skip_dzero")
- &mov ("esp",$_esp);
- &popf ();
+ &set_label("enc_skip_in_place");
+ &mov ($s2,$s1);
+ &xor ($s0,$s0);
+ &align (4);
+ &data_word(0xAAF3F689); # rep stosb # zero tail
+
+ &mov ($key,$_ivp); # restore ivp
+ &mov ($acc,$s3); # output as input
+ &mov ($s0,&DWP(0,$key));
+ &mov ($s1,&DWP(4,$key));
+ &mov ($_len,16); # len=16
+ &jmp (&label("slow_enc_loop_x86")); # one more spin...
+
+#--------------------------- SLOW DECRYPT ---------------------------#
+&set_label("slow_decrypt",16);
+ if (!$x86only) {
+ &bt ($_tmp,25); # check for SSE bit
+ &jnc (&label("slow_dec_loop_x86"));
+
+ &set_label("slow_dec_loop_sse",4);
+ &movq ("mm0",&QWP(0,$acc)); # read input
+ &movq ("mm4",&QWP(8,$acc));
+
+ &mov ($key,$_key);
+ &call ("_sse_AES_decrypt_compact");
+
+ &mov ($acc,$_inp); # load inp
+ &lea ($s0,$ivec);
+ &mov ($s1,$_out); # load out
+ &mov ($s2,$_len); # load len
+ &mov ($key,$_ivp); # load ivp
+
+ &movq ("mm1",&QWP(0,$acc)); # re-read input
+ &movq ("mm5",&QWP(8,$acc));
+
+ &pxor ("mm0",&QWP(0,$key)); # xor iv
+ &pxor ("mm4",&QWP(8,$key));
+
+ &movq (&QWP(0,$key),"mm1"); # copy input to iv
+ &movq (&QWP(8,$key),"mm5");
+
+ &sub ($s2,16); # decrease len
+ &jc (&label("slow_dec_partial_sse"));
+
+ &movq (&QWP(0,$s1),"mm0"); # write output
+ &movq (&QWP(8,$s1),"mm4");
+
+ &lea ($s1,&DWP(16,$s1)); # advance out
+ &mov ($_out,$s1); # save out
+ &lea ($acc,&DWP(16,$acc)); # advance inp
+ &mov ($_inp,$acc); # save inp
+ &mov ($_len,$s2); # save len
+ &jnz (&label("slow_dec_loop_sse"));
+ &emms ();
+ &mov ("esp",$_esp);
+ &popf ();
+ &function_end_A();
+ &pushf (); # kludge, never executed
+
+ &set_label("slow_dec_partial_sse",16);
+ &movq (&QWP(0,$s0),"mm0"); # save output to temp
+ &movq (&QWP(8,$s0),"mm4");
+ &emms ();
+
+ &add ($s2 eq "ecx" ? "ecx":"",16);
+ &mov ("edi",$s1); # out
+ &mov ("esi",$s0); # temp
+ &align (4);
+ &data_word(0xA4F3F689); # rep movsb # copy partial output
+
+ &mov ("esp",$_esp);
+ &popf ();
+ &function_end_A();
+ &pushf (); # kludge, never executed
+ }
+ &set_label("slow_dec_loop_x86",16);
+ &mov ($s0,&DWP(0,$acc)); # read input
+ &mov ($s1,&DWP(4,$acc));
+ &mov ($s2,&DWP(8,$acc));
+ &mov ($s3,&DWP(12,$acc));
+
+ &lea ($key,$ivec);
+ &mov (&DWP(0,$key),$s0); # copy to temp
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+
+ &mov ($key,$_key); # load key
+ &call ("_x86_AES_decrypt_compact");
+
+ &mov ($key,$_ivp); # load ivp
+ &mov ($acc,$_len); # load len
+ &xor ($s0,&DWP(0,$key)); # xor iv
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &sub ($acc,16);
+ &jc (&label("slow_dec_partial_x86"));
+
+ &mov ($_len,$acc); # save len
+ &mov ($acc,$_out); # load out
+
+ &mov (&DWP(0,$acc),$s0); # write output
+ &mov (&DWP(4,$acc),$s1);
+ &mov (&DWP(8,$acc),$s2);
+ &mov (&DWP(12,$acc),$s3);
+
+ &lea ($acc,&DWP(16,$acc)); # advance out
+ &mov ($_out,$acc); # save out
+
+ &lea ($acc,$ivec);
+ &mov ($s0,&DWP(0,$acc)); # read temp
+ &mov ($s1,&DWP(4,$acc));
+ &mov ($s2,&DWP(8,$acc));
+ &mov ($s3,&DWP(12,$acc));
+
+ &mov (&DWP(0,$key),$s0); # copy it to iv
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+
+ &mov ($acc,$_inp); # load inp
+ &lea ($acc,&DWP(16,$acc)); # advance inp
+ &mov ($_inp,$acc); # save inp
+ &jnz (&label("slow_dec_loop_x86"));
+ &mov ("esp",$_esp);
+ &popf ();
+ &function_end_A();
+ &pushf (); # kludge, never executed
+
+ &set_label("slow_dec_partial_x86",16);
+ &lea ($acc,$ivec);
+ &mov (&DWP(0,$acc),$s0); # save output to temp
+ &mov (&DWP(4,$acc),$s1);
+ &mov (&DWP(8,$acc),$s2);
+ &mov (&DWP(12,$acc),$s3);
+
+ &mov ($acc,$_inp);
+ &mov ($s0,&DWP(0,$acc)); # re-read input
+ &mov ($s1,&DWP(4,$acc));
+ &mov ($s2,&DWP(8,$acc));
+ &mov ($s3,&DWP(12,$acc));
+
+ &mov (&DWP(0,$key),$s0); # copy it to iv
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+
+ &mov ("ecx",$_len);
+ &mov ("edi",$_out);
+ &lea ("esi",$ivec);
+ &align (4);
+ &data_word(0xA4F3F689); # rep movsb # copy partial output
+
+ &mov ("esp",$_esp);
+ &popf ();
&function_end("AES_cbc_encrypt");
}
@@ -1215,35 +2628,31 @@ my $mark=&DWP(60+240,"esp"); #copy of aes_key->rounds
sub enckey()
{
&movz ("esi",&LB("edx")); # rk[i]>>0
- &mov ("ebx",&DWP(2,"ebp","esi",8));
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
&movz ("esi",&HB("edx")); # rk[i]>>8
- &and ("ebx",0xFF000000);
+ &shl ("ebx",24);
&xor ("eax","ebx");
- &mov ("ebx",&DWP(2,"ebp","esi",8));
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
&shr ("edx",16);
- &and ("ebx",0x000000FF);
&movz ("esi",&LB("edx")); # rk[i]>>16
&xor ("eax","ebx");
- &mov ("ebx",&DWP(0,"ebp","esi",8));
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
&movz ("esi",&HB("edx")); # rk[i]>>24
- &and ("ebx",0x0000FF00);
+ &shl ("ebx",8);
&xor ("eax","ebx");
- &mov ("ebx",&DWP(0,"ebp","esi",8));
- &and ("ebx",0x00FF0000);
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
+ &shl ("ebx",16);
&xor ("eax","ebx");
- &xor ("eax",&DWP(2048,"ebp","ecx",4)); # rcon
+ &xor ("eax",&DWP(1024-128,$tbl,"ecx",4)); # rcon
}
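
The rewritten enckey() takes SubWord(RotWord(...)) straight from the compact 256-byte Te4 (addressed with a -128 bias) and folds in the round constant parked at offset 1024-128. The word it accumulates into eax, sketched in C (Te4 and rcon stand for the tables the code indexes, shown as plain arrays):

    #include <stdint.h>

    /* SubWord(RotWord(rk)) ^ rcon with a 256-byte S-box; the caller
       XORs this into the previous round-key word */
    static uint32_t enckey_word(uint32_t rk, const uint8_t Te4[256],
                                uint32_t rcon)
    {
        return  ((uint32_t)Te4[(rk >>  8) & 0xff]      )
              ^ ((uint32_t)Te4[(rk >> 16) & 0xff] <<  8)
              ^ ((uint32_t)Te4[ rk >> 24        ] << 16)
              ^ ((uint32_t)Te4[ rk        & 0xff] << 24)
              ^ rcon;
    }
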
-# int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
-# AES_KEY *key)
-&public_label("AES_Te");
-&function_begin("AES_set_encrypt_key");
- &mov ("esi",&wparam(0)); # user supplied key
- &mov ("edi",&wparam(2)); # private key schedule
+&function_begin("_x86_AES_set_encrypt_key");
+ &mov ("esi",&wparam(1)); # user supplied key
+ &mov ("edi",&wparam(3)); # private key schedule
&test ("esi",-1);
&jz (&label("badpointer"));
@@ -1252,10 +2661,21 @@ sub enckey()
&call (&label("pic_point"));
&set_label("pic_point");
- &blindpop("ebp");
- &lea ("ebp",&DWP(&label("AES_Te")."-".&label("pic_point"),"ebp"));
-
- &mov ("ecx",&wparam(1)); # number of bits in key
+ &blindpop($tbl);
+ &lea ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+ &lea ($tbl,&DWP(2048+128,$tbl));
+
+ # prefetch Te4
+ &mov ("eax",&DWP(0-128,$tbl));
+ &mov ("ebx",&DWP(32-128,$tbl));
+ &mov ("ecx",&DWP(64-128,$tbl));
+ &mov ("edx",&DWP(96-128,$tbl));
+ &mov ("eax",&DWP(128-128,$tbl));
+ &mov ("ebx",&DWP(160-128,$tbl));
+ &mov ("ecx",&DWP(192-128,$tbl));
+ &mov ("edx",&DWP(224-128,$tbl));
+
+ &mov ("ecx",&wparam(2)); # number of bits in key
&cmp ("ecx",128);
&je (&label("10rounds"));
&cmp ("ecx",192);
@@ -1394,24 +2814,23 @@ sub enckey()
&mov ("edx","eax");
&mov ("eax",&DWP(16,"edi")); # rk[4]
&movz ("esi",&LB("edx")); # rk[11]>>0
- &mov ("ebx",&DWP(2,"ebp","esi",8));
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
&movz ("esi",&HB("edx")); # rk[11]>>8
- &and ("ebx",0x000000FF);
&xor ("eax","ebx");
- &mov ("ebx",&DWP(0,"ebp","esi",8));
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
&shr ("edx",16);
- &and ("ebx",0x0000FF00);
+ &shl ("ebx",8);
&movz ("esi",&LB("edx")); # rk[11]>>16
&xor ("eax","ebx");
- &mov ("ebx",&DWP(0,"ebp","esi",8));
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
&movz ("esi",&HB("edx")); # rk[11]>>24
- &and ("ebx",0x00FF0000);
+ &shl ("ebx",16);
&xor ("eax","ebx");
- &mov ("ebx",&DWP(2,"ebp","esi",8));
- &and ("ebx",0xFF000000);
+ &movz ("ebx",&BP(-128,$tbl,"esi",1));
+ &shl ("ebx",24);
&xor ("eax","ebx");
&mov (&DWP(48,"edi"),"eax"); # rk[12]
@@ -1433,43 +2852,74 @@ sub enckey()
&set_label("badpointer");
&mov ("eax",-1);
&set_label("exit");
-&function_end("AES_set_encrypt_key");
+&function_end("_x86_AES_set_encrypt_key");
-sub deckey()
-{ my ($i,$ptr,$te,$td) = @_;
+# int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+# AES_KEY *key)
+&function_begin_B("AES_set_encrypt_key");
+ &call ("_x86_AES_set_encrypt_key");
+ &ret ();
+&function_end_B("AES_set_encrypt_key");
- &mov ("eax",&DWP($i,$ptr));
- &mov ("edx","eax");
- &movz ("ebx",&HB("eax"));
- &shr ("edx",16);
- &and ("eax",0xFF);
- &movz ("eax",&BP(2,$te,"eax",8));
- &movz ("ebx",&BP(2,$te,"ebx",8));
- &mov ("eax",&DWP(0,$td,"eax",8));
- &xor ("eax",&DWP(3,$td,"ebx",8));
- &movz ("ebx",&HB("edx"));
- &and ("edx",0xFF);
- &movz ("edx",&BP(2,$te,"edx",8));
- &movz ("ebx",&BP(2,$te,"ebx",8));
- &xor ("eax",&DWP(2,$td,"edx",8));
- &xor ("eax",&DWP(1,$td,"ebx",8));
- &mov (&DWP($i,$ptr),"eax");
+sub deckey()
+{ my ($i,$key,$tp1,$tp2,$tp4,$tp8) = @_;
+ my $tmp = $tbl;
+
+ &mov ($acc,$tp1);
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($tp2,&DWP(0,$tp1,$tp1));
+ &sub ($acc,$tmp);
+ &and ($tp2,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &xor ($acc,$tp2);
+ &mov ($tp2,$acc);
+
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($tp4,&DWP(0,$tp2,$tp2));
+ &sub ($acc,$tmp);
+ &and ($tp4,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &xor ($tp2,$tp1); # tp2^tp1
+ &xor ($acc,$tp4);
+ &mov ($tp4,$acc);
+
+ &and ($acc,0x80808080);
+ &mov ($tmp,$acc);
+ &shr ($tmp,7);
+ &lea ($tp8,&DWP(0,$tp4,$tp4));
+ &xor ($tp4,$tp1); # tp4^tp1
+ &sub ($acc,$tmp);
+ &and ($tp8,0xfefefefe);
+ &and ($acc,0x1b1b1b1b);
+ &rotl ($tp1,8); # = ROTATE(tp1,8)
+ &xor ($tp8,$acc);
+
+ &mov ($tmp,&DWP(4*($i+1),$key)); # modulo-scheduled load
+
+ &xor ($tp1,$tp2);
+ &xor ($tp2,$tp8);
+ &xor ($tp1,$tp4);
+ &rotl ($tp2,24);
+ &xor ($tp4,$tp8);
+ &xor ($tp1,$tp8); # ^= tp8^(tp4^tp1)^(tp2^tp1)
+ &rotl ($tp4,16);
+ &xor ($tp1,$tp2); # ^= ROTATE(tp8^tp2^tp1,24)
+ &rotl ($tp8,8);
+ &xor ($tp1,$tp4); # ^= ROTATE(tp8^tp4^tp1,16)
+ &mov ($tp2,$tmp);
+ &xor ($tp1,$tp8); # ^= ROTATE(tp8,8)
+
+ &mov (&DWP(4*$i,$key),$tp1);
}
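
The new deckey() is InvMixColumns applied to each round-key word in place: three branch-free byte-parallel GF(2^8) doublings (the 0x80808080/0xfefefefe/0x1b1b1b1b masks) followed by the rotate-and-XOR combination annotated in the code. A C sketch of both halves:

    #include <stdint.h>

    #define ROL(x,n) (((x) << (n)) | ((x) >> (32 - (n))))

    /* four byte-wise GF(2^8) doublings in one 32-bit word */
    static uint32_t xtime4(uint32_t x)
    {
        uint32_t hi = x & 0x80808080u;            /* top bit per byte   */
        return ((x << 1) & 0xfefefefeu)           /* shift, drop carry  */
             ^ ((hi - (hi >> 7)) & 0x1b1b1b1bu);  /* reduce by 0x1b     */
    }

    /* InvMixColumns of one word, combined as deckey() does:
       coefficients 0e 09 0d 0b distributed via rotates */
    static uint32_t inv_mix_column(uint32_t x)
    {
        uint32_t x2 = xtime4(x), x4 = xtime4(x2), x8 = xtime4(x4);
        return (x8 ^ x4 ^ x2)                     /* 0x0e . x */
             ^ ROL(x8 ^ x,       8)               /* 0x09 . x */
             ^ ROL(x8 ^ x4 ^ x, 16)               /* 0x0d . x */
             ^ ROL(x8 ^ x2 ^ x, 24);              /* 0x0b . x */
    }
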
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
-&public_label("AES_Td");
-&public_label("AES_Te");
&function_begin_B("AES_set_decrypt_key");
- &mov ("eax",&wparam(0));
- &mov ("ecx",&wparam(1));
- &mov ("edx",&wparam(2));
- &sub ("esp",12);
- &mov (&DWP(0,"esp"),"eax");
- &mov (&DWP(4,"esp"),"ecx");
- &mov (&DWP(8,"esp"),"edx");
- &call ("AES_set_encrypt_key");
- &add ("esp",12);
+ &call ("_x86_AES_set_encrypt_key");
&cmp ("eax",0);
&je (&label("proceed"));
&ret ();
@@ -1485,8 +2935,7 @@ sub deckey()
&lea ("ecx",&DWP(0,"","ecx",4));
&lea ("edi",&DWP(0,"esi","ecx",4)); # pointer to last chunk
- &align (4);
- &set_label("invert"); # invert order of chunks
+ &set_label("invert",4); # invert order of chunks
&mov ("eax",&DWP(0,"esi"));
&mov ("ebx",&DWP(4,"esi"));
&mov ("ecx",&DWP(0,"edi"));
@@ -1508,26 +2957,24 @@ sub deckey()
&cmp ("esi","edi");
&jne (&label("invert"));
- &call (&label("pic_point"));
- &set_label("pic_point");
- blindpop("ebp");
- &lea ("edi",&DWP(&label("AES_Td")."-".&label("pic_point"),"ebp"));
- &lea ("ebp",&DWP(&label("AES_Te")."-".&label("pic_point"),"ebp"));
-
- &mov ("esi",&wparam(2));
- &mov ("ecx",&DWP(240,"esi")); # pull number of rounds
- &dec ("ecx");
- &align (4);
- &set_label("permute"); # permute the key schedule
- &add ("esi",16);
- &deckey (0,"esi","ebp","edi");
- &deckey (4,"esi","ebp","edi");
- &deckey (8,"esi","ebp","edi");
- &deckey (12,"esi","ebp","edi");
- &dec ("ecx");
- &jnz (&label("permute"));
+ &mov ($key,&wparam(2));
+ &mov ($acc,&DWP(240,$key)); # pull number of rounds
+ &lea ($acc,&DWP(-2,$acc,$acc));
+ &lea ($acc,&DWP(0,$key,$acc,8));
+ &mov (&wparam(2),$acc);
+
+ &mov ($s0,&DWP(16,$key)); # modulo-scheduled load
+ &set_label("permute",4); # permute the key schedule
+ &add ($key,16);
+ &deckey (0,$key,$s0,$s1,$s2,$s3);
+ &deckey (1,$key,$s1,$s2,$s3,$s0);
+ &deckey (2,$key,$s2,$s3,$s0,$s1);
+ &deckey (3,$key,$s3,$s0,$s1,$s2);
+ &cmp ($key,&wparam(2));
+ &jb (&label("permute"));
&xor ("eax","eax"); # return success
&function_end("AES_set_decrypt_key");
+&asciz("AES for x86, CRYPTOGAMS by <appro\@openssl.org>");
&asm_finish();
diff --git a/openssl/crypto/aes/asm/aes-armv4.pl b/openssl/crypto/aes/asm/aes-armv4.pl
index 15742c1ec..690244111 100644
--- a/openssl/crypto/aes/asm/aes-armv4.pl
+++ b/openssl/crypto/aes/asm/aes-armv4.pl
@@ -1024,6 +1024,7 @@ _armv4_AES_decrypt:
mov pc,lr @ return
.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
.asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
___
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
diff --git a/openssl/crypto/aes/asm/aes-s390x.pl b/openssl/crypto/aes/asm/aes-s390x.pl
index 4b27afd92..7e0188929 100644
--- a/openssl/crypto/aes/asm/aes-s390x.pl
+++ b/openssl/crypto/aes/asm/aes-s390x.pl
@@ -765,6 +765,11 @@ $code.=<<___ if (!$softonly);
srl %r5,6
ar %r5,%r0
+ larl %r1,OPENSSL_s390xcap_P
+ lg %r0,0(%r1)
+ tmhl %r0,0x4000 # check for message-security assist
+ jz .Lekey_internal
+
lghi %r0,0 # query capability vector
la %r1,16($sp)
.long 0xb92f0042 # kmc %r4,%r2
@@ -1323,6 +1328,7 @@ $code.=<<___;
4: ex $len,0($s1)
j .Lcbc_dec_exit
.size AES_cbc_encrypt,.-AES_cbc_encrypt
+.comm OPENSSL_s390xcap_P,8,8
___
}
$code.=<<___;
diff --git a/openssl/crypto/aes/asm/aes-x86_64.pl b/openssl/crypto/aes/asm/aes-x86_64.pl
index f616f1751..a545e892a 100644
--- a/openssl/crypto/aes/asm/aes-x86_64.pl
+++ b/openssl/crypto/aes/asm/aes-x86_64.pl
@@ -2,11 +2,12 @@
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
-# Version 1.2.
+# Version 2.1.
#
# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
# Opteron 240 CPU] plus all the bells-n-whistles from 32-bit version
@@ -17,17 +18,29 @@
#
# Performance in number of cycles per processed byte for 128-bit key:
#
-# ECB CBC encrypt
-# AMD64 13.7 13.0(*)
-# EM64T 20.2 18.6(*)
+# ECB encrypt ECB decrypt CBC large chunk
+# AMD64 33 41 13.0
+# EM64T 38 59 18.6(*)
+# Core 2 30 43 14.5(*)
#
-# (*) CBC benchmarks are better than ECB thanks to custom ABI used
-# by the private block encryption function.
+# (*) with hyper-threading off
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
$verticalspin=1; # unlike 32-bit version $verticalspin performs
# ~15% better on both AMD and Intel cores
-$output=shift;
-open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
+$speed_limit=512; # see aes-586.pl for details
$code=".text\n";
@@ -35,9 +48,9 @@ $s0="%eax";
$s1="%ebx";
$s2="%ecx";
$s3="%edx";
-$acc0="%esi";
-$acc1="%edi";
-$acc2="%ebp";
+$acc0="%esi"; $mask80="%rsi";
+$acc1="%edi"; $maskfe="%rdi";
+$acc2="%ebp"; $mask1b="%rbp";
$inp="%r8";
$out="%r9";
$t0="%r10d";
@@ -51,6 +64,8 @@ sub hi() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1h/; $r; }
sub lo() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/;
$r =~ s/%[er]([sd]i)/%\1l/;
$r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; }
+sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e\1/;
+ $r =~ s/%r([0-9]+)/%r\1d/; $r; }
sub _data_word()
{ my $i;
while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
@@ -138,22 +153,17 @@ $code.=<<___;
movzb `&lo("$s0")`,$acc0
movzb `&lo("$s1")`,$acc1
movzb `&lo("$s2")`,$acc2
- mov 2($sbox,$acc0,8),$t0
- mov 2($sbox,$acc1,8),$t1
- mov 2($sbox,$acc2,8),$t2
-
- and \$0x000000ff,$t0
- and \$0x000000ff,$t1
- and \$0x000000ff,$t2
+ movzb 2($sbox,$acc0,8),$t0
+ movzb 2($sbox,$acc1,8),$t1
+ movzb 2($sbox,$acc2,8),$t2
movzb `&lo("$s3")`,$acc0
movzb `&hi("$s1")`,$acc1
movzb `&hi("$s2")`,$acc2
- mov 2($sbox,$acc0,8),$t3
+ movzb 2($sbox,$acc0,8),$t3
mov 0($sbox,$acc1,8),$acc1 #$t0
mov 0($sbox,$acc2,8),$acc2 #$t1
- and \$0x000000ff,$t3
and \$0x0000ff00,$acc1
and \$0x0000ff00,$acc2
@@ -345,6 +355,234 @@ $code.=<<___;
.size _x86_64_AES_encrypt,.-_x86_64_AES_encrypt
___
+# it's possible to implement this by shifting tN by 8, filling least
+# significant byte with byte load and finally bswap-ing at the end,
+# but such partial register load kills Core 2...
+sub enccompactvert()
+{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+ movzb `&lo("$s0")`,$t0
+ movzb `&lo("$s1")`,$t1
+ movzb `&lo("$s2")`,$t2
+ movzb ($sbox,$t0,1),$t0
+ movzb ($sbox,$t1,1),$t1
+ movzb ($sbox,$t2,1),$t2
+
+ movzb `&lo("$s3")`,$t3
+ movzb `&hi("$s1")`,$acc0
+ movzb `&hi("$s2")`,$acc1
+ movzb ($sbox,$t3,1),$t3
+ movzb ($sbox,$acc0,1),$t4 #$t0
+ movzb ($sbox,$acc1,1),$t5 #$t1
+
+ movzb `&hi("$s3")`,$acc2
+ movzb `&hi("$s0")`,$acc0
+ shr \$16,$s2
+ movzb ($sbox,$acc2,1),$acc2 #$t2
+ movzb ($sbox,$acc0,1),$acc0 #$t3
+ shr \$16,$s3
+
+ movzb `&lo("$s2")`,$acc1
+ shl \$8,$t4
+ shl \$8,$t5
+ movzb ($sbox,$acc1,1),$acc1 #$t0
+ xor $t4,$t0
+ xor $t5,$t1
+
+ movzb `&lo("$s3")`,$t4
+ shr \$16,$s0
+ shr \$16,$s1
+ movzb `&lo("$s0")`,$t5
+ shl \$8,$acc2
+ shl \$8,$acc0
+ movzb ($sbox,$t4,1),$t4 #$t1
+ movzb ($sbox,$t5,1),$t5 #$t2
+ xor $acc2,$t2
+ xor $acc0,$t3
+
+ movzb `&lo("$s1")`,$acc2
+ movzb `&hi("$s3")`,$acc0
+ shl \$16,$acc1
+ movzb ($sbox,$acc2,1),$acc2 #$t3
+ movzb ($sbox,$acc0,1),$acc0 #$t0
+ xor $acc1,$t0
+
+ movzb `&hi("$s0")`,$acc1
+ shr \$8,$s2
+ shr \$8,$s1
+ movzb ($sbox,$acc1,1),$acc1 #$t1
+ movzb ($sbox,$s2,1),$s3 #$t3
+ movzb ($sbox,$s1,1),$s2 #$t2
+ shl \$16,$t4
+ shl \$16,$t5
+ shl \$16,$acc2
+ xor $t4,$t1
+ xor $t5,$t2
+ xor $acc2,$t3
+
+ shl \$24,$acc0
+ shl \$24,$acc1
+ shl \$24,$s3
+ xor $acc0,$t0
+ shl \$24,$s2
+ xor $acc1,$t1
+ mov $t0,$s0
+ mov $t1,$s1
+ xor $t2,$s2
+ xor $t3,$s3
+___
+}
+
+sub enctransform_ref()
+{ my $sn = shift;
+ my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+ mov $sn,$acc
+ and \$0x80808080,$acc
+ mov $acc,$tmp
+ shr \$7,$tmp
+ lea ($sn,$sn),$r2
+ sub $tmp,$acc
+ and \$0xfefefefe,$r2
+ and \$0x1b1b1b1b,$acc
+ mov $sn,$tmp
+ xor $acc,$r2
+
+ xor $r2,$sn
+ rol \$24,$sn
+ xor $r2,$sn
+ ror \$16,$tmp
+ xor $tmp,$sn
+ ror \$8,$tmp
+ xor $tmp,$sn
+___
+}
+
+# unlike decrypt case it does not pay off to parallelize enctransform
+sub enctransform()
+{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
+
+$code.=<<___;
+ mov $s0,$acc0
+ mov $s1,$acc1
+ and \$0x80808080,$acc0
+ and \$0x80808080,$acc1
+ mov $acc0,$t0
+ mov $acc1,$t1
+ shr \$7,$t0
+ lea ($s0,$s0),$r20
+ shr \$7,$t1
+ lea ($s1,$s1),$r21
+ sub $t0,$acc0
+ sub $t1,$acc1
+ and \$0xfefefefe,$r20
+ and \$0xfefefefe,$r21
+ and \$0x1b1b1b1b,$acc0
+ and \$0x1b1b1b1b,$acc1
+ mov $s0,$t0
+ mov $s1,$t1
+ xor $acc0,$r20
+ xor $acc1,$r21
+
+ xor $r20,$s0
+ xor $r21,$s1
+ mov $s2,$acc0
+ mov $s3,$acc1
+ rol \$24,$s0
+ rol \$24,$s1
+ and \$0x80808080,$acc0
+ and \$0x80808080,$acc1
+ xor $r20,$s0
+ xor $r21,$s1
+ mov $acc0,$t2
+ mov $acc1,$t3
+ ror \$16,$t0
+ ror \$16,$t1
+ shr \$7,$t2
+ lea ($s2,$s2),$r20
+ xor $t0,$s0
+ xor $t1,$s1
+ shr \$7,$t3
+ lea ($s3,$s3),$r21
+ ror \$8,$t0
+ ror \$8,$t1
+ sub $t2,$acc0
+ sub $t3,$acc1
+ xor $t0,$s0
+ xor $t1,$s1
+
+ and \$0xfefefefe,$r20
+ and \$0xfefefefe,$r21
+ and \$0x1b1b1b1b,$acc0
+ and \$0x1b1b1b1b,$acc1
+ mov $s2,$t2
+ mov $s3,$t3
+ xor $acc0,$r20
+ xor $acc1,$r21
+
+ xor $r20,$s2
+ xor $r21,$s3
+ rol \$24,$s2
+ rol \$24,$s3
+ xor $r20,$s2
+ xor $r21,$s3
+ mov 0($sbox),$acc0 # prefetch Te4
+ ror \$16,$t2
+ ror \$16,$t3
+ mov 64($sbox),$acc1
+ xor $t2,$s2
+ xor $t3,$s3
+ mov 128($sbox),$r20
+ ror \$8,$t2
+ ror \$8,$t3
+ mov 192($sbox),$r21
+ xor $t2,$s2
+ xor $t3,$s3
+___
+}
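
enctransform() is the forward MixColumns between compact rounds, computed on whole state words (two interleaved at a time purely for instruction scheduling). Per word it costs one doubling plus three rotates; a self-contained C sketch of the identity it evaluates:

    #include <stdint.h>

    #define ROL(x,n) (((x) << (n)) | ((x) >> (32 - (n))))

    static uint32_t xtime4(uint32_t x)   /* as in the 32-bit deckey sketch */
    {
        uint32_t hi = x & 0x80808080u;
        return ((x << 1) & 0xfefefefeu) ^ ((hi - (hi >> 7)) & 0x1b1b1b1bu);
    }

    /* forward MixColumns of one packed column */
    static uint32_t mix_column(uint32_t s)
    {
        uint32_t x2 = xtime4(s);
        return x2 ^ ROL(s ^ x2, 24) ^ ROL(s, 16) ^ ROL(s, 8);
    }
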
+
+$code.=<<___;
+.type _x86_64_AES_encrypt_compact,\@abi-omnipotent
+.align 16
+_x86_64_AES_encrypt_compact:
+ lea 128($sbox),$inp # size optimization
+ mov 0-128($inp),$acc1 # prefetch Te4
+ mov 32-128($inp),$acc2
+ mov 64-128($inp),$t0
+ mov 96-128($inp),$t1
+ mov 128-128($inp),$acc1
+ mov 160-128($inp),$acc2
+ mov 192-128($inp),$t0
+ mov 224-128($inp),$t1
+ jmp .Lenc_loop_compact
+.align 16
+.Lenc_loop_compact:
+ xor 0($key),$s0 # xor with key
+ xor 4($key),$s1
+ xor 8($key),$s2
+ xor 12($key),$s3
+ lea 16($key),$key
+___
+ &enccompactvert();
+$code.=<<___;
+ cmp 16(%rsp),$key
+ je .Lenc_compact_done
+___
+ &enctransform();
+$code.=<<___;
+ jmp .Lenc_loop_compact
+.align 16
+.Lenc_compact_done:
+ xor 0($key),$s0
+ xor 4($key),$s1
+ xor 8($key),$s2
+ xor 12($key),$s3
+ .byte 0xf3,0xc3 # rep ret
+.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
+___
+
# void AES_encrypt (const void *inp,void *out,const AES_KEY *key);
$code.=<<___;
.globl AES_encrypt
@@ -358,31 +596,57 @@ AES_encrypt:
push %r14
push %r15
- mov %rdx,$key
- mov %rdi,$inp
- mov %rsi,$out
-
- .picmeup $sbox
- lea AES_Te-.($sbox),$sbox
-
- mov 0($inp),$s0
- mov 4($inp),$s1
- mov 8($inp),$s2
- mov 12($inp),$s3
+ # allocate frame "above" key schedule
+ mov %rsp,%r10
+ lea -63(%rdx),%rcx # %rdx is key argument
+ and \$-64,%rsp
+ sub %rsp,%rcx
+ neg %rcx
+ and \$0x3c0,%rcx
+ sub %rcx,%rsp
+ sub \$32,%rsp
- call _x86_64_AES_encrypt
+ mov %rsi,16(%rsp) # save out
+ mov %r10,24(%rsp) # save real stack pointer
+.Lenc_prologue:
- mov $s0,0($out)
+ mov %rdx,$key
+ mov 240($key),$rnds # load rounds
+
+ mov 0(%rdi),$s0 # load input vector
+ mov 4(%rdi),$s1
+ mov 8(%rdi),$s2
+ mov 12(%rdi),$s3
+
+ shl \$4,$rnds
+ lea ($key,$rnds),%rbp
+ mov $key,(%rsp) # key schedule
+ mov %rbp,8(%rsp) # end of key schedule
+
+ # pick Te4 copy which can't "overlap" with stack frame or key schedule
+ lea .LAES_Te+2048(%rip),$sbox
+ lea 768(%rsp),%rbp
+ sub $sbox,%rbp
+ and \$0x300,%rbp
+ lea ($sbox,%rbp),$sbox
+
+ call _x86_64_AES_encrypt_compact
+
+ mov 16(%rsp),$out # restore out
+ mov 24(%rsp),%rsi # restore saved stack pointer
+ mov $s0,0($out) # write output vector
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lenc_epilogue:
ret
.size AES_encrypt,.-AES_encrypt
___
@@ -453,19 +717,20 @@ sub declastvert()
{ my $t3="%r8d"; # zaps $inp!
$code.=<<___;
+ lea 2048($sbox),$sbox # size optimization
movzb `&lo("$s0")`,$acc0
movzb `&lo("$s1")`,$acc1
movzb `&lo("$s2")`,$acc2
- movzb 2048($sbox,$acc0,1),$t0
- movzb 2048($sbox,$acc1,1),$t1
- movzb 2048($sbox,$acc2,1),$t2
+ movzb ($sbox,$acc0,1),$t0
+ movzb ($sbox,$acc1,1),$t1
+ movzb ($sbox,$acc2,1),$t2
movzb `&lo("$s3")`,$acc0
movzb `&hi("$s3")`,$acc1
movzb `&hi("$s0")`,$acc2
- movzb 2048($sbox,$acc0,1),$t3
- movzb 2048($sbox,$acc1,1),$acc1 #$t0
- movzb 2048($sbox,$acc2,1),$acc2 #$t1
+ movzb ($sbox,$acc0,1),$t3
+ movzb ($sbox,$acc1,1),$acc1 #$t0
+ movzb ($sbox,$acc2,1),$acc2 #$t1
shl \$8,$acc1
shl \$8,$acc2
@@ -477,8 +742,8 @@ $code.=<<___;
movzb `&hi("$s1")`,$acc0
movzb `&hi("$s2")`,$acc1
shr \$16,$s0
- movzb 2048($sbox,$acc0,1),$acc0 #$t2
- movzb 2048($sbox,$acc1,1),$acc1 #$t3
+ movzb ($sbox,$acc0,1),$acc0 #$t2
+ movzb ($sbox,$acc1,1),$acc1 #$t3
shl \$8,$acc0
shl \$8,$acc1
@@ -490,9 +755,9 @@ $code.=<<___;
movzb `&lo("$s2")`,$acc0
movzb `&lo("$s3")`,$acc1
movzb `&lo("$s0")`,$acc2
- movzb 2048($sbox,$acc0,1),$acc0 #$t0
- movzb 2048($sbox,$acc1,1),$acc1 #$t1
- movzb 2048($sbox,$acc2,1),$acc2 #$t2
+ movzb ($sbox,$acc0,1),$acc0 #$t0
+ movzb ($sbox,$acc1,1),$acc1 #$t1
+ movzb ($sbox,$acc2,1),$acc2 #$t2
shl \$16,$acc0
shl \$16,$acc1
@@ -505,9 +770,9 @@ $code.=<<___;
movzb `&lo("$s1")`,$acc0
movzb `&hi("$s1")`,$acc1
movzb `&hi("$s2")`,$acc2
- movzb 2048($sbox,$acc0,1),$acc0 #$t3
- movzb 2048($sbox,$acc1,1),$acc1 #$t0
- movzb 2048($sbox,$acc2,1),$acc2 #$t1
+ movzb ($sbox,$acc0,1),$acc0 #$t3
+ movzb ($sbox,$acc1,1),$acc1 #$t0
+ movzb ($sbox,$acc2,1),$acc2 #$t1
shl \$16,$acc0
shl \$24,$acc1
@@ -520,8 +785,8 @@ $code.=<<___;
movzb `&hi("$s3")`,$acc0
movzb `&hi("$s0")`,$acc1
mov 16+12($key),$s3
- movzb 2048($sbox,$acc0,1),$acc0 #$t2
- movzb 2048($sbox,$acc1,1),$acc1 #$t3
+ movzb ($sbox,$acc0,1),$acc0 #$t2
+ movzb ($sbox,$acc1,1),$acc1 #$t3
mov 16+0($key),$s0
shl \$24,$acc0
@@ -532,6 +797,7 @@ $code.=<<___;
mov 16+4($key),$s1
mov 16+8($key),$s2
+ lea -2048($sbox),$sbox
xor $t0,$s0
xor $t1,$s1
xor $t2,$s2
@@ -659,6 +925,260 @@ $code.=<<___;
.size _x86_64_AES_decrypt,.-_x86_64_AES_decrypt
___
+sub deccompactvert()
+{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+ movzb `&lo("$s0")`,$t0
+ movzb `&lo("$s1")`,$t1
+ movzb `&lo("$s2")`,$t2
+ movzb ($sbox,$t0,1),$t0
+ movzb ($sbox,$t1,1),$t1
+ movzb ($sbox,$t2,1),$t2
+
+ movzb `&lo("$s3")`,$t3
+ movzb `&hi("$s3")`,$acc0
+ movzb `&hi("$s0")`,$acc1
+ movzb ($sbox,$t3,1),$t3
+ movzb ($sbox,$acc0,1),$t4 #$t0
+ movzb ($sbox,$acc1,1),$t5 #$t1
+
+ movzb `&hi("$s1")`,$acc2
+ movzb `&hi("$s2")`,$acc0
+ shr \$16,$s2
+ movzb ($sbox,$acc2,1),$acc2 #$t2
+ movzb ($sbox,$acc0,1),$acc0 #$t3
+ shr \$16,$s3
+
+ movzb `&lo("$s2")`,$acc1
+ shl \$8,$t4
+ shl \$8,$t5
+ movzb ($sbox,$acc1,1),$acc1 #$t0
+ xor $t4,$t0
+ xor $t5,$t1
+
+ movzb `&lo("$s3")`,$t4
+ shr \$16,$s0
+ shr \$16,$s1
+ movzb `&lo("$s0")`,$t5
+ shl \$8,$acc2
+ shl \$8,$acc0
+ movzb ($sbox,$t4,1),$t4 #$t1
+ movzb ($sbox,$t5,1),$t5 #$t2
+ xor $acc2,$t2
+ xor $acc0,$t3
+
+ movzb `&lo("$s1")`,$acc2
+ movzb `&hi("$s1")`,$acc0
+ shl \$16,$acc1
+ movzb ($sbox,$acc2,1),$acc2 #$t3
+ movzb ($sbox,$acc0,1),$acc0 #$t0
+ xor $acc1,$t0
+
+ movzb `&hi("$s2")`,$acc1
+ shl \$16,$t4
+ shl \$16,$t5
+ movzb ($sbox,$acc1,1),$s1 #$t1
+ xor $t4,$t1
+ xor $t5,$t2
+
+ movzb `&hi("$s3")`,$acc1
+ shr \$8,$s0
+ shl \$16,$acc2
+ movzb ($sbox,$acc1,1),$s2 #$t2
+ movzb ($sbox,$s0,1),$s3 #$t3
+ xor $acc2,$t3
+
+ shl \$24,$acc0
+ shl \$24,$s1
+ shl \$24,$s2
+ xor $acc0,$t0
+ shl \$24,$s3
+ xor $t1,$s1
+ mov $t0,$s0
+ xor $t2,$s2
+ xor $t3,$s3
+___
+}
+
+# parallelized version! input is pair of 64-bit values: %rax=s1.s0
+# and %rcx=s3.s2, output is four 32-bit values in %eax=s0, %ebx=s1,
+# %ecx=s2 and %edx=s3.
+sub dectransform()
+{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
+ my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
+ my $prefetch = shift;
+
+$code.=<<___;
+ mov $tp10,$acc0
+ mov $tp18,$acc8
+ and $mask80,$acc0
+ and $mask80,$acc8
+ mov $acc0,$tp40
+ mov $acc8,$tp48
+ shr \$7,$tp40
+ lea ($tp10,$tp10),$tp20
+ shr \$7,$tp48
+ lea ($tp18,$tp18),$tp28
+ sub $tp40,$acc0
+ sub $tp48,$acc8
+ and $maskfe,$tp20
+ and $maskfe,$tp28
+ and $mask1b,$acc0
+ and $mask1b,$acc8
+ xor $tp20,$acc0
+ xor $tp28,$acc8
+ mov $acc0,$tp20
+ mov $acc8,$tp28
+
+ and $mask80,$acc0
+ and $mask80,$acc8
+ mov $acc0,$tp80
+ mov $acc8,$tp88
+ shr \$7,$tp80
+ lea ($tp20,$tp20),$tp40
+ shr \$7,$tp88
+ lea ($tp28,$tp28),$tp48
+ sub $tp80,$acc0
+ sub $tp88,$acc8
+ and $maskfe,$tp40
+ and $maskfe,$tp48
+ and $mask1b,$acc0
+ and $mask1b,$acc8
+ xor $tp40,$acc0
+ xor $tp48,$acc8
+ mov $acc0,$tp40
+ mov $acc8,$tp48
+
+ and $mask80,$acc0
+ and $mask80,$acc8
+ mov $acc0,$tp80
+ mov $acc8,$tp88
+ shr \$7,$tp80
+ xor $tp10,$tp20 # tp2^=tp1
+ shr \$7,$tp88
+ xor $tp18,$tp28 # tp2^=tp1
+ sub $tp80,$acc0
+ sub $tp88,$acc8
+ lea ($tp40,$tp40),$tp80
+ lea ($tp48,$tp48),$tp88
+ xor $tp10,$tp40 # tp4^=tp1
+ xor $tp18,$tp48 # tp4^=tp1
+ and $maskfe,$tp80
+ and $maskfe,$tp88
+ and $mask1b,$acc0
+ and $mask1b,$acc8
+ xor $acc0,$tp80
+ xor $acc8,$tp88
+
+ xor $tp80,$tp10 # tp1^=tp8
+ xor $tp88,$tp18 # tp1^=tp8
+ xor $tp80,$tp20 # tp2^tp1^=tp8
+ xor $tp88,$tp28 # tp2^tp1^=tp8
+ mov $tp10,$acc0
+ mov $tp18,$acc8
+ xor $tp80,$tp40 # tp4^tp1^=tp8
+ xor $tp88,$tp48 # tp4^tp1^=tp8
+ shr \$32,$acc0
+ shr \$32,$acc8
+ xor $tp20,$tp80 # tp8^=tp8^tp2^tp1=tp2^tp1
+ xor $tp28,$tp88 # tp8^=tp8^tp2^tp1=tp2^tp1
+ rol \$8,`&LO("$tp10")` # ROTATE(tp1^tp8,8)
+ rol \$8,`&LO("$tp18")` # ROTATE(tp1^tp8,8)
+ xor $tp40,$tp80 # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+ xor $tp48,$tp88 # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+
+ rol \$8,`&LO("$acc0")` # ROTATE(tp1^tp8,8)
+ rol \$8,`&LO("$acc8")` # ROTATE(tp1^tp8,8)
+ xor `&LO("$tp80")`,`&LO("$tp10")`
+ xor `&LO("$tp88")`,`&LO("$tp18")`
+ shr \$32,$tp80
+ shr \$32,$tp88
+ xor `&LO("$tp80")`,`&LO("$acc0")`
+ xor `&LO("$tp88")`,`&LO("$acc8")`
+
+ mov $tp20,$tp80
+ mov $tp28,$tp88
+ shr \$32,$tp80
+ shr \$32,$tp88
+ rol \$24,`&LO("$tp20")` # ROTATE(tp2^tp1^tp8,24)
+ rol \$24,`&LO("$tp28")` # ROTATE(tp2^tp1^tp8,24)
+ rol \$24,`&LO("$tp80")` # ROTATE(tp2^tp1^tp8,24)
+ rol \$24,`&LO("$tp88")` # ROTATE(tp2^tp1^tp8,24)
+ xor `&LO("$tp20")`,`&LO("$tp10")`
+ xor `&LO("$tp28")`,`&LO("$tp18")`
+ mov $tp40,$tp20
+ mov $tp48,$tp28
+ xor `&LO("$tp80")`,`&LO("$acc0")`
+ xor `&LO("$tp88")`,`&LO("$acc8")`
+
+ `"mov 0($sbox),$mask80" if ($prefetch)`
+ shr \$32,$tp20
+ shr \$32,$tp28
+ `"mov 64($sbox),$maskfe" if ($prefetch)`
+ rol \$16,`&LO("$tp40")` # ROTATE(tp4^tp1^tp8,16)
+ rol \$16,`&LO("$tp48")` # ROTATE(tp4^tp1^tp8,16)
+ `"mov 128($sbox),$mask1b" if ($prefetch)`
+ rol \$16,`&LO("$tp20")` # ROTATE(tp4^tp1^tp8,16)
+ rol \$16,`&LO("$tp28")` # ROTATE(tp4^tp1^tp8,16)
+ `"mov 192($sbox),$tp80" if ($prefetch)`
+ xor `&LO("$tp40")`,`&LO("$tp10")`
+ xor `&LO("$tp48")`,`&LO("$tp18")`
+ `"mov 256($sbox),$tp88" if ($prefetch)`
+ xor `&LO("$tp20")`,`&LO("$acc0")`
+ xor `&LO("$tp28")`,`&LO("$acc8")`
+___
+}
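
The three masks loaded as $mask80/$maskfe/$mask1b drive a SIMD-within-a-register GF(2^8) doubling ("xtime") of eight packed bytes per 64-bit register: the 0x80.. mask extracts each byte's top bit, (acc - (acc>>7)) & 0x1b.. turns that bit into the reduction constant, and 0xfe.. keeps the left shift from bleeding into the neighbouring byte. A standalone Perl sketch of that one step (function name hypothetical, not part of the module):

    use strict; use warnings;

    # SWAR "xtime": double eight GF(2^8) elements packed into one 64-bit word.
    sub xtime8 {
        my ($tp) = @_;
        my $acc = $tp & 0x8080_8080_8080_8080;                  # top bit of every byte
        my $hi  = ($acc - ($acc >> 7)) & 0x1b1b_1b1b_1b1b_1b1b; # 0x1b where it was set
        my $lo  = ($tp << 1) & 0xfefe_fefe_fefe_fefe;           # shift, bytes kept apart
        return $lo ^ $hi;
    }
    printf "%02x\n",  xtime8(0x57);                    # ae = 2*0x57, no reduction
    printf "%016x\n", xtime8(0x8080_8080_8080_8080);   # 1b1b1b1b1b1b1b1b

dectransform applies this step three times per register pair to obtain 2x, 4x and 8x of every byte, then folds them into the 9/11/13/14 multiples that InvMixColumns needs.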
+
+$code.=<<___;
+.type _x86_64_AES_decrypt_compact,\@abi-omnipotent
+.align 16
+_x86_64_AES_decrypt_compact:
+ lea 128($sbox),$inp # size optimization
+ mov 0-128($inp),$acc1 # prefetch Td4
+ mov 32-128($inp),$acc2
+ mov 64-128($inp),$t0
+ mov 96-128($inp),$t1
+ mov 128-128($inp),$acc1
+ mov 160-128($inp),$acc2
+ mov 192-128($inp),$t0
+ mov 224-128($inp),$t1
+ jmp .Ldec_loop_compact
+
+.align 16
+.Ldec_loop_compact:
+ xor 0($key),$s0 # xor with key
+ xor 4($key),$s1
+ xor 8($key),$s2
+ xor 12($key),$s3
+ lea 16($key),$key
+___
+ &deccompactvert();
+$code.=<<___;
+ cmp 16(%rsp),$key
+ je .Ldec_compact_done
+
+ mov 256+0($sbox),$mask80
+ shl \$32,%rbx
+ shl \$32,%rdx
+ mov 256+8($sbox),$maskfe
+ or %rbx,%rax
+ or %rdx,%rcx
+ mov 256+16($sbox),$mask1b
+___
+ &dectransform(1);
+$code.=<<___;
+ jmp .Ldec_loop_compact
+.align 16
+.Ldec_compact_done:
+ xor 0($key),$s0
+ xor 4($key),$s1
+ xor 8($key),$s2
+ xor 12($key),$s3
+ .byte 0xf3,0xc3 # rep ret
+.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
+___
+
# void AES_decrypt (const void *inp,void *out,const AES_KEY *key);
$code.=<<___;
.globl AES_decrypt
@@ -672,43 +1192,59 @@ AES_decrypt:
push %r14
push %r15
- mov %rdx,$key
- mov %rdi,$inp
- mov %rsi,$out
+ # allocate frame "above" key schedule
+ mov %rsp,%r10
+ lea -63(%rdx),%rcx # %rdx is key argument
+ and \$-64,%rsp
+ sub %rsp,%rcx
+ neg %rcx
+ and \$0x3c0,%rcx
+ sub %rcx,%rsp
+ sub \$32,%rsp
+
+ mov %rsi,16(%rsp) # save out
+ mov %r10,24(%rsp) # save real stack pointer
+.Ldec_prologue:
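
The address arithmetic above (lea -63; and \$-64; sub/neg/and \$0x3c0) parks the 32-byte frame just below the key schedule modulo 1KB, so frame and schedule occupy adjacent L1 cache sets instead of fighting over the same ones. A back-of-the-envelope Perl sketch of the same computation, with hypothetical addresses:

    use strict; use warnings;

    my $key = 0x7fff_0000_1234;    # hypothetical key-schedule address (the %rdx argument)
    my $rsp = 0x7fff_0000_8f58;    # hypothetical incoming stack pointer

    $rsp &= ~63;                               # and $-64,%rsp: 64-byte alignment
    my $delta = ($rsp - ($key - 63)) & 0x3c0;  # the lea/sub/neg/and sequence
    $rsp -= $delta;                            # slide down toward the schedule's phase
    $rsp -= 32;                                # room for the 32-byte frame itself
    printf "frame at %#x, key schedule at %#x\n", $rsp, $key;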
- .picmeup $sbox
- lea AES_Td-.($sbox),$sbox
-
- # prefetch Td4
- lea 2048+128($sbox),$sbox;
- mov 0-128($sbox),$s0
- mov 32-128($sbox),$s1
- mov 64-128($sbox),$s2
- mov 96-128($sbox),$s3
- mov 128-128($sbox),$s0
- mov 160-128($sbox),$s1
- mov 192-128($sbox),$s2
- mov 224-128($sbox),$s3
- lea -2048-128($sbox),$sbox;
-
- mov 0($inp),$s0
- mov 4($inp),$s1
- mov 8($inp),$s2
- mov 12($inp),$s3
-
- call _x86_64_AES_decrypt
-
- mov $s0,0($out)
+ mov %rdx,$key
+ mov 240($key),$rnds # load rounds
+
+ mov 0(%rdi),$s0 # load input vector
+ mov 4(%rdi),$s1
+ mov 8(%rdi),$s2
+ mov 12(%rdi),$s3
+
+ shl \$4,$rnds
+ lea ($key,$rnds),%rbp
+ mov $key,(%rsp) # key schedule
+ mov %rbp,8(%rsp) # end of key schedule
+
+ # pick Td4 copy which can't "overlap" with stack frame or key schedule
+ lea .LAES_Td+2048(%rip),$sbox
+ lea 768(%rsp),%rbp
+ sub $sbox,%rbp
+ and \$0x300,%rbp
+ lea ($sbox,%rbp),$sbox
+ shr \$3,%rbp # recall "magic" constants!
+ add %rbp,$sbox
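
The "magic" constants select one of the four interleaved Td4 copies further down in the file: each copy is 256 bytes of table plus 32 bytes of .long mask constants, i.e. a 288-byte stride, and masking the distance with 0x300 then adding one eighth of it yields exactly k*288. A quick Perl check of the arithmetic:

    use strict; use warnings;

    for my $k (0 .. 3) {
        my $off = $k * 0x100;           # the values "and $0x300,%rbp" can produce
        my $sel = $off + ($off >> 3);   # "shr $3" + "add" bridges the 32-byte gaps
        printf "copy %d starts at Td4+%d\n", $k, $sel;   # 0, 288, 576, 864
    }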
+
+ call _x86_64_AES_decrypt_compact
+
+ mov 16(%rsp),$out # restore out
+ mov 24(%rsp),%rsi # restore saved stack pointer
+ mov $s0,0($out) # write output vector
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Ldec_epilogue:
ret
.size AES_decrypt,.-AES_decrypt
___
@@ -718,27 +1254,26 @@ sub enckey()
{
$code.=<<___;
movz %dl,%esi # rk[i]>>0
- mov 2(%rbp,%rsi,8),%ebx
+ movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[i]>>8
- and \$0xFF000000,%ebx
+ shl \$24,%ebx
xor %ebx,%eax
- mov 2(%rbp,%rsi,8),%ebx
+ movzb -128(%rbp,%rsi),%ebx
shr \$16,%edx
- and \$0x000000FF,%ebx
movz %dl,%esi # rk[i]>>16
xor %ebx,%eax
- mov 0(%rbp,%rsi,8),%ebx
+ movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[i]>>24
- and \$0x0000FF00,%ebx
+ shl \$8,%ebx
xor %ebx,%eax
- mov 0(%rbp,%rsi,8),%ebx
- and \$0x00FF0000,%ebx
+ movzb -128(%rbp,%rsi),%ebx
+ shl \$16,%ebx
xor %ebx,%eax
- xor 2048(%rbp,%rcx,4),%eax # rcon
+ xor 1024-128(%rbp,%rcx,4),%eax # rcon
___
}
@@ -751,7 +1286,29 @@ $code.=<<___;
AES_set_encrypt_key:
push %rbx
push %rbp
+ push %r12 # redundant, but allows us to share
+ push %r13 # the exception handler...
+ push %r14
+ push %r15
+ sub \$8,%rsp
+.Lenc_key_prologue:
+
+ call _x86_64_AES_set_encrypt_key
+
+ mov 8(%rsp),%r15
+ mov 16(%rsp),%r14
+ mov 24(%rsp),%r13
+ mov 32(%rsp),%r12
+ mov 40(%rsp),%rbp
+ mov 48(%rsp),%rbx
+ add \$56,%rsp
+.Lenc_key_epilogue:
+ ret
+.size AES_set_encrypt_key,.-AES_set_encrypt_key
+.type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
+.align 16
+_x86_64_AES_set_encrypt_key:
mov %esi,%ecx # %ecx=bits
mov %rdi,%rsi # %rsi=userKey
mov %rdx,%rdi # %rdi=key
@@ -761,8 +1318,18 @@ AES_set_encrypt_key:
test \$-1,%rdi
jz .Lbadpointer
- .picmeup %rbp
- lea AES_Te-.(%rbp),%rbp
+ lea .LAES_Te(%rip),%rbp
+ lea 2048+128(%rbp),%rbp
+
+ # prefetch Te4
+ mov 0-128(%rbp),%eax
+ mov 32-128(%rbp),%ebx
+ mov 64-128(%rbp),%r8d
+ mov 96-128(%rbp),%edx
+ mov 128-128(%rbp),%eax
+ mov 160-128(%rbp),%ebx
+ mov 192-128(%rbp),%r8d
+ mov 224-128(%rbp),%edx
cmp \$128,%ecx
je .L10rounds
@@ -774,15 +1341,12 @@ AES_set_encrypt_key:
jmp .Lexit
.L10rounds:
- mov 0(%rsi),%eax # copy first 4 dwords
- mov 4(%rsi),%ebx
- mov 8(%rsi),%ecx
- mov 12(%rsi),%edx
- mov %eax,0(%rdi)
- mov %ebx,4(%rdi)
- mov %ecx,8(%rdi)
- mov %edx,12(%rdi)
+ mov 0(%rsi),%rax # copy first 4 dwords
+ mov 8(%rsi),%rdx
+ mov %rax,0(%rdi)
+ mov %rdx,8(%rdi)
+ shr \$32,%rdx
xor %ecx,%ecx
jmp .L10shortcut
.align 4
@@ -810,19 +1374,14 @@ $code.=<<___;
jmp .Lexit
.L12rounds:
- mov 0(%rsi),%eax # copy first 6 dwords
- mov 4(%rsi),%ebx
- mov 8(%rsi),%ecx
- mov 12(%rsi),%edx
- mov %eax,0(%rdi)
- mov %ebx,4(%rdi)
- mov %ecx,8(%rdi)
- mov %edx,12(%rdi)
- mov 16(%rsi),%ecx
- mov 20(%rsi),%edx
- mov %ecx,16(%rdi)
- mov %edx,20(%rdi)
-
+ mov 0(%rsi),%rax # copy first 6 dwords
+ mov 8(%rsi),%rbx
+ mov 16(%rsi),%rdx
+ mov %rax,0(%rdi)
+ mov %rbx,8(%rdi)
+ mov %rdx,16(%rdi)
+
+ shr \$32,%rdx
xor %ecx,%ecx
jmp .L12shortcut
.align 4
@@ -858,30 +1417,23 @@ $code.=<<___;
jmp .Lexit
.L14rounds:
- mov 0(%rsi),%eax # copy first 8 dwords
- mov 4(%rsi),%ebx
- mov 8(%rsi),%ecx
- mov 12(%rsi),%edx
- mov %eax,0(%rdi)
- mov %ebx,4(%rdi)
- mov %ecx,8(%rdi)
- mov %edx,12(%rdi)
- mov 16(%rsi),%eax
- mov 20(%rsi),%ebx
- mov 24(%rsi),%ecx
- mov 28(%rsi),%edx
- mov %eax,16(%rdi)
- mov %ebx,20(%rdi)
- mov %ecx,24(%rdi)
- mov %edx,28(%rdi)
-
+ mov 0(%rsi),%rax # copy first 8 dwords
+ mov 8(%rsi),%rbx
+ mov 16(%rsi),%rcx
+ mov 24(%rsi),%rdx
+ mov %rax,0(%rdi)
+ mov %rbx,8(%rdi)
+ mov %rcx,16(%rdi)
+ mov %rdx,24(%rdi)
+
+ shr \$32,%rdx
xor %ecx,%ecx
jmp .L14shortcut
.align 4
.L14loop:
+ mov 0(%rdi),%eax # rk[0]
mov 28(%rdi),%edx # rk[4]
.L14shortcut:
- mov 0(%rdi),%eax # rk[0]
___
&enckey ();
$code.=<<___;
@@ -900,24 +1452,23 @@ $code.=<<___;
mov %eax,%edx
mov 16(%rdi),%eax # rk[4]
movz %dl,%esi # rk[11]>>0
- mov 2(%rbp,%rsi,8),%ebx
+ movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[11]>>8
- and \$0x000000FF,%ebx
xor %ebx,%eax
- mov 0(%rbp,%rsi,8),%ebx
+ movzb -128(%rbp,%rsi),%ebx
shr \$16,%edx
- and \$0x0000FF00,%ebx
+ shl \$8,%ebx
movz %dl,%esi # rk[11]>>16
xor %ebx,%eax
- mov 0(%rbp,%rsi,8),%ebx
+ movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[11]>>24
- and \$0x00FF0000,%ebx
+ shl \$16,%ebx
xor %ebx,%eax
- mov 2(%rbp,%rsi,8),%ebx
- and \$0xFF000000,%ebx
+ movzb -128(%rbp,%rsi),%ebx
+ shl \$24,%ebx
xor %ebx,%eax
mov %eax,48(%rdi) # rk[12]
@@ -938,31 +1489,61 @@ $code.=<<___;
.Lbadpointer:
mov \$-1,%rax
.Lexit:
- pop %rbp
- pop %rbx
- ret
-.size AES_set_encrypt_key,.-AES_set_encrypt_key
+ .byte 0xf3,0xc3 # rep ret
+.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
___
-sub deckey()
+sub deckey_ref()
{ my ($i,$ptr,$te,$td) = @_;
+ my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
$code.=<<___;
- mov $i($ptr),%eax
- mov %eax,%edx
- movz %ah,%ebx
- shr \$16,%edx
- and \$0xFF,%eax
- movzb 2($te,%rax,8),%rax
- movzb 2($te,%rbx,8),%rbx
- mov 0($td,%rax,8),%eax
- xor 3($td,%rbx,8),%eax
- movzb %dh,%ebx
- and \$0xFF,%edx
- movzb 2($te,%rdx,8),%rdx
- movzb 2($te,%rbx,8),%rbx
- xor 2($td,%rdx,8),%eax
- xor 1($td,%rbx,8),%eax
- mov %eax,$i($ptr)
+ mov $i($ptr),$tp1
+ mov $tp1,$acc
+ and \$0x80808080,$acc
+ mov $acc,$tp4
+ shr \$7,$tp4
+ lea 0($tp1,$tp1),$tp2
+ sub $tp4,$acc
+ and \$0xfefefefe,$tp2
+ and \$0x1b1b1b1b,$acc
+ xor $tp2,$acc
+ mov $acc,$tp2
+
+ and \$0x80808080,$acc
+ mov $acc,$tp8
+ shr \$7,$tp8
+ lea 0($tp2,$tp2),$tp4
+ sub $tp8,$acc
+ and \$0xfefefefe,$tp4
+ and \$0x1b1b1b1b,$acc
+ xor $tp1,$tp2 # tp2^tp1
+ xor $tp4,$acc
+ mov $acc,$tp4
+
+ and \$0x80808080,$acc
+ mov $acc,$tp8
+ shr \$7,$tp8
+ sub $tp8,$acc
+ lea 0($tp4,$tp4),$tp8
+ xor $tp1,$tp4 # tp4^tp1
+ and \$0xfefefefe,$tp8
+ and \$0x1b1b1b1b,$acc
+ xor $acc,$tp8
+
+ xor $tp8,$tp1 # tp1^tp8
+ rol \$8,$tp1 # ROTATE(tp1^tp8,8)
+ xor $tp8,$tp2 # tp2^tp1^tp8
+ xor $tp8,$tp4 # tp4^tp1^tp8
+ xor $tp2,$tp8
+ xor $tp4,$tp8 # tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
+
+ xor $tp8,$tp1
+ rol \$24,$tp2 # ROTATE(tp2^tp1^tp8,24)
+ xor $tp2,$tp1
+ rol \$16,$tp4 # ROTATE(tp4^tp1^tp8,16)
+ xor $tp4,$tp1
+
+ mov $tp1,$i($ptr)
___
}
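
Tracking the tp values through the xors shows what deckey_ref computes per 32-bit word: with bytewise GF(2^8) scaling, w' = 0x0e*w ^ ROTL8(0x09*w) ^ ROTL16(0x0d*w) ^ ROTL24(0x0b*w), i.e. InvMixColumns applied to one packed column. A Perl sketch that evaluates the same formula directly (helper names hypothetical):

    use strict; use warnings;

    sub xtime { my ($b) = @_; (($b << 1) ^ (($b & 0x80) ? 0x1b : 0)) & 0xff }
    sub gmul  {                      # one byte times a small GF(2^8) constant
        my ($b, $c) = @_; my $r = 0;
        for (my $m = $b; $c; $c >>= 1, $m = xtime($m)) { $r ^= $m if $c & 1 }
        return $r;
    }
    sub scale {                      # bytewise gmul across a packed 32-bit word
        my ($w, $c) = @_;
        my $r = 0; $r |= gmul(($w >> (8 * $_)) & 0xff, $c) << (8 * $_) for 0 .. 3;
        return $r;
    }
    sub rotl32 { my ($w, $n) = @_; (($w << $n) | ($w >> (32 - $n))) & 0xffffffff }

    sub inv_mix_column {             # what the tp1/tp2/tp4/tp8 dance amounts to
        my ($w) = @_;
        return scale($w, 0x0e)             ^ rotl32(scale($w, 0x09),  8)
             ^ rotl32(scale($w, 0x0d), 16) ^ rotl32(scale($w, 0x0b), 24);
    }
    printf "%08x\n", inv_mix_column(0x00000001);   # 0b0d090e = InvMixColumns of (1,0,0,0)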
@@ -973,19 +1554,23 @@ $code.=<<___;
.type AES_set_decrypt_key,\@function,3
.align 16
AES_set_decrypt_key:
- push %rdx
- call AES_set_encrypt_key
- cmp \$0,%eax
- je .Lproceed
- lea 24(%rsp),%rsp
- ret
-.Lproceed:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ push %rdx # save key schedule
+.Ldec_key_prologue:
+
+ call _x86_64_AES_set_encrypt_key
mov (%rsp),%r8 # restore key schedule
- mov %rbx,(%rsp)
+ cmp \$0,%eax
+ jne .Labort
- mov 240(%r8),%ecx # pull number of rounds
+ mov 240(%r8),%r14d # pull number of rounds
xor %rdi,%rdi
- lea (%rdi,%rcx,4),%rcx
+ lea (%rdi,%r14d,4),%rcx
mov %r8,%rsi
lea (%r8,%rcx,4),%rdi # pointer to last chunk
.align 4
@@ -1003,27 +1588,39 @@ AES_set_decrypt_key:
cmp %rsi,%rdi
jne .Linvert
- .picmeup %r9
- lea AES_Td-.(%r9),%rdi
- lea AES_Te-AES_Td(%rdi),%r9
+ lea .LAES_Te+2048+1024(%rip),%rax # rcon
- mov %r8,%rsi
- mov 240(%r8),%ecx # pull number of rounds
- sub \$1,%ecx
+ mov 40(%rax),$mask80
+ mov 48(%rax),$maskfe
+ mov 56(%rax),$mask1b
+
+ mov %r8,$key
+ sub \$1,%r14d
.align 4
.Lpermute:
- lea 16(%rsi),%rsi
+ lea 16($key),$key
+ mov 0($key),%rax
+ mov 8($key),%rcx
___
- &deckey (0,"%rsi","%r9","%rdi");
- &deckey (4,"%rsi","%r9","%rdi");
- &deckey (8,"%rsi","%r9","%rdi");
- &deckey (12,"%rsi","%r9","%rdi");
+ &dectransform ();
$code.=<<___;
- sub \$1,%ecx
+ mov %eax,0($key)
+ mov %ebx,4($key)
+ mov %ecx,8($key)
+ mov %edx,12($key)
+ sub \$1,%r14d
jnz .Lpermute
xor %rax,%rax
- pop %rbx
+.Labort:
+ mov 8(%rsp),%r15
+ mov 16(%rsp),%r14
+ mov 24(%rsp),%r13
+ mov 32(%rsp),%r12
+ mov 40(%rsp),%rbp
+ mov 48(%rsp),%rbx
+ add \$56,%rsp
+.Ldec_key_epilogue:
ret
.size AES_set_decrypt_key,.-AES_set_decrypt_key
___
@@ -1034,47 +1631,59 @@ ___
{
# stack frame layout
# -8(%rsp) return address
-my $_rsp="0(%rsp)"; # saved %rsp
-my $_len="8(%rsp)"; # copy of 3rd parameter, length
-my $_key="16(%rsp)"; # copy of 4th parameter, key
-my $_ivp="24(%rsp)"; # copy of 5th parameter, ivp
-my $keyp="32(%rsp)"; # one to pass as $key
-my $ivec="40(%rsp)"; # ivec[16]
-my $aes_key="56(%rsp)"; # copy of aes_key
-my $mark="56+240(%rsp)"; # copy of aes_key->rounds
+my $keyp="0(%rsp)"; # one to pass as $key
+my $keyend="8(%rsp)"; # &(keyp->rd_key[4*keyp->rounds])
+my $_rsp="16(%rsp)"; # saved %rsp
+my $_inp="24(%rsp)"; # copy of 1st parameter, inp
+my $_out="32(%rsp)"; # copy of 2nd parameter, out
+my $_len="40(%rsp)"; # copy of 3rd parameter, length
+my $_key="48(%rsp)"; # copy of 4th parameter, key
+my $_ivp="56(%rsp)"; # copy of 5th parameter, ivp
+my $ivec="64(%rsp)"; # ivec[16]
+my $aes_key="80(%rsp)"; # copy of aes_key
+my $mark="80+240(%rsp)"; # copy of aes_key->rounds
$code.=<<___;
.globl AES_cbc_encrypt
.type AES_cbc_encrypt,\@function,6
.align 16
+.extern OPENSSL_ia32cap_P
AES_cbc_encrypt:
cmp \$0,%rdx # check length
- je .Lcbc_just_ret
+ je .Lcbc_epilogue
+ pushfq
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
- pushfq
+.Lcbc_prologue:
+
cld
mov %r9d,%r9d # clear upper half of enc
- .picmeup $sbox
-.Lcbc_pic_point:
-
+ lea .LAES_Te(%rip),$sbox
cmp \$0,%r9
- je .LDECRYPT
-
- lea AES_Te-.Lcbc_pic_point($sbox),$sbox
+ jne .Lcbc_picked_te
+ lea .LAES_Td(%rip),$sbox
+.Lcbc_picked_te:
+
+ mov OPENSSL_ia32cap_P(%rip),%r10d
+ cmp \$$speed_limit,%rdx
+ jb .Lcbc_slow_prologue
+ test \$15,%rdx
+ jnz .Lcbc_slow_prologue
+ bt \$28,%r10d
+ jc .Lcbc_slow_prologue
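
So the fast path is reserved for inputs that are at least $speed_limit bytes long, a whole number of blocks, and a CPU whose cached OPENSSL_ia32cap_P value has bit 28 (the CPUID HTT flag) clear; everything else falls through to the compact-table slow path. The decision, restated as a small Perl predicate (512 is only a stand-in for whatever $speed_limit is set to earlier in the script):

    use strict; use warnings;

    sub use_fast_cbc {
        my ($len, $ia32cap) = @_;
        my $speed_limit = 512;              # stand-in for the script's $speed_limit
        return 0 if $len < $speed_limit;    # too short to amortize the table copy
        return 0 if $len & 15;              # not a whole number of blocks
        return 0 if $ia32cap & (1 << 28);   # CPUID HTT bit: stay cache-friendly
        return 1;
    }
    print use_fast_cbc(4096, 0),       "\n";   # 1: long, block-aligned, no HTT
    print use_fast_cbc(4096, 1 << 28), "\n";   # 0: hyper-threading present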
# allocate aligned stack frame...
- lea -64-248(%rsp),$key
+ lea -88-248(%rsp),$key
and \$-64,$key
- # ... and make it doesn't alias with AES_Te modulo 4096
+ # ... and make sure it doesn't alias with AES_T[ed] modulo 4096
mov $sbox,%r10
- lea 2048($sbox),%r11
+ lea 2304($sbox),%r11
mov $key,%r12
and \$0xFFF,%r10 # s = $sbox&0xfff
and \$0xFFF,%r11 # e = ($sbox+2048)&0xfff
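
The comparison that follows this hunk (unchanged from the old code, and still visible in the deleted decrypt-side copy further down) slides the frame so its page offset p stays clear of the table's [s, e) window; note that e now corresponds to $sbox+2304, since the T[ed]4 copies were appended to the main tables. A Perl restatement of the adjustment (sizes 2304 and 320 as in the module, example addresses hypothetical):

    use strict; use warnings;

    sub dealias {                      # returns the adjusted stack pointer
        my ($tbl, $sp, $tblsz, $framesz) = @_;
        my ($s, $e, $p) = map { $_ & 0xfff } ($tbl, $tbl + $tblsz, $sp);
        if ($p >= $e) { $sp -= $p - $e }                          # land right after the table
        else          { $sp -= (($p - $s) & 0xfff) + $framesz }   # or clear below it
        return $sp;
    }
    printf "%#x\n", dealias(0x403100, 0x7ffe5040, 2304, 320);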
@@ -1094,22 +1703,27 @@ AES_cbc_encrypt:
.Lcbc_te_ok:
xchg %rsp,$key
- add \$8,%rsp # reserve for return address!
+ #add \$8,%rsp # reserve for return address!
mov $key,$_rsp # save %rsp
+.Lcbc_fast_body:
+ mov %rdi,$_inp # save copy of inp
+ mov %rsi,$_out # save copy of out
mov %rdx,$_len # save copy of len
mov %rcx,$_key # save copy of key
mov %r8,$_ivp # save copy of ivp
movl \$0,$mark # copy of aes_key->rounds = 0;
mov %r8,%rbp # rearrange input arguments
+ mov %r9,%rbx
mov %rsi,$out
mov %rdi,$inp
mov %rcx,$key
+ mov 240($key),%eax # key->rounds
# do we copy key schedule to stack?
mov $key,%r10
sub $sbox,%r10
and \$0xfff,%r10
- cmp \$2048,%r10
+ cmp \$2304,%r10
jb .Lcbc_do_ecopy
cmp \$4096-248,%r10
jb .Lcbc_skip_ecopy
@@ -1120,12 +1734,11 @@ AES_cbc_encrypt:
lea $aes_key,$key
mov \$240/8,%ecx
.long 0x90A548F3 # rep movsq
- mov (%rsi),%eax # copy aes_key->rounds
- mov %eax,(%rdi)
+ mov %eax,(%rdi) # copy aes_key->rounds
.Lcbc_skip_ecopy:
mov $key,$keyp # save key pointer
- mov \$16,%ecx
+ mov \$18,%ecx
.align 4
.Lcbc_prefetch_te:
mov 0($sbox),%r10
@@ -1135,184 +1748,77 @@ AES_cbc_encrypt:
lea 128($sbox),$sbox
sub \$1,%ecx
jnz .Lcbc_prefetch_te
- sub \$2048,$sbox
+ lea -2304($sbox),$sbox
- test \$-16,%rdx # check upon length
- mov %rdx,%r10
+ cmp \$0,%rbx
+ je .LFAST_DECRYPT
+
+#----------------------------- ENCRYPT -----------------------------#
mov 0(%rbp),$s0 # load iv
mov 4(%rbp),$s1
mov 8(%rbp),$s2
mov 12(%rbp),$s3
- jz .Lcbc_enc_tail # short input...
.align 4
-.Lcbc_enc_loop:
+.Lcbc_fast_enc_loop:
xor 0($inp),$s0
xor 4($inp),$s1
xor 8($inp),$s2
xor 12($inp),$s3
- mov $inp,$ivec # if ($verticalspin) save inp
-
mov $keyp,$key # restore key
+ mov $inp,$_inp # if ($verticalspin) save inp
+
call _x86_64_AES_encrypt
- mov $ivec,$inp # if ($verticalspin) restore inp
+ mov $_inp,$inp # if ($verticalspin) restore inp
+ mov $_len,%r10
mov $s0,0($out)
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
- mov $_len,%r10
lea 16($inp),$inp
lea 16($out),$out
sub \$16,%r10
test \$-16,%r10
mov %r10,$_len
- jnz .Lcbc_enc_loop
- test \$15,%r10
- jnz .Lcbc_enc_tail
+ jnz .Lcbc_fast_enc_loop
mov $_ivp,%rbp # restore ivp
mov $s0,0(%rbp) # save ivec
mov $s1,4(%rbp)
mov $s2,8(%rbp)
mov $s3,12(%rbp)
-.align 4
-.Lcbc_cleanup:
- cmpl \$0,$mark # was the key schedule copied?
- lea $aes_key,%rdi
- mov $_rsp,%rsp
- je .Lcbc_exit
- mov \$240/8,%ecx
- xor %rax,%rax
- .long 0x90AB48F3 # rep stosq
-.Lcbc_exit:
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
-.Lcbc_just_ret:
- ret
-.align 4
-.Lcbc_enc_tail:
- mov %rax,%r11
- mov %rcx,%r12
- mov %r10,%rcx
- mov $inp,%rsi
- mov $out,%rdi
- .long 0xF689A4F3 # rep movsb
- mov \$16,%rcx # zero tail
- sub %r10,%rcx
- xor %rax,%rax
- .long 0xF689AAF3 # rep stosb
- mov $out,$inp # this is not a mistake!
- movq \$16,$_len # len=16
- mov %r11,%rax
- mov %r12,%rcx
- jmp .Lcbc_enc_loop # one more spin...
+ jmp .Lcbc_fast_cleanup
+
#----------------------------- DECRYPT -----------------------------#
.align 16
-.LDECRYPT:
- lea AES_Td-.Lcbc_pic_point($sbox),$sbox
-
- # allocate aligned stack frame...
- lea -64-248(%rsp),$key
- and \$-64,$key
-
- # ... and make it doesn't alias with AES_Td modulo 4096
- mov $sbox,%r10
- lea 2304($sbox),%r11
- mov $key,%r12
- and \$0xFFF,%r10 # s = $sbox&0xfff
- and \$0xFFF,%r11 # e = ($sbox+2048+256)&0xfff
- and \$0xFFF,%r12 # p = %rsp&0xfff
-
- cmp %r11,%r12 # if (p=>e) %rsp =- (p-e);
- jb .Lcbc_td_break_out
- sub %r11,%r12
- sub %r12,$key
- jmp .Lcbc_td_ok
-.Lcbc_td_break_out: # else %rsp -= (p-s)&0xfff + framesz
- sub %r10,%r12
- and \$0xFFF,%r12
- add \$320,%r12
- sub %r12,$key
-.align 4
-.Lcbc_td_ok:
-
- xchg %rsp,$key
- add \$8,%rsp # reserve for return address!
- mov $key,$_rsp # save %rsp
- mov %rdx,$_len # save copy of len
- mov %rcx,$_key # save copy of key
- mov %r8,$_ivp # save copy of ivp
- movl \$0,$mark # copy of aes_key->rounds = 0;
- mov %r8,%rbp # rearrange input arguments
- mov %rsi,$out
- mov %rdi,$inp
- mov %rcx,$key
-
- # do we copy key schedule to stack?
- mov $key,%r10
- sub $sbox,%r10
- and \$0xfff,%r10
- cmp \$2304,%r10
- jb .Lcbc_do_dcopy
- cmp \$4096-248,%r10
- jb .Lcbc_skip_dcopy
-.align 4
-.Lcbc_do_dcopy:
- mov $key,%rsi
- lea $aes_key,%rdi
- lea $aes_key,$key
- mov \$240/8,%ecx
- .long 0x90A548F3 # rep movsq
- mov (%rsi),%eax # copy aes_key->rounds
- mov %eax,(%rdi)
-.Lcbc_skip_dcopy:
- mov $key,$keyp # save key pointer
-
- mov \$18,%ecx
-.align 4
-.Lcbc_prefetch_td:
- mov 0($sbox),%r10
- mov 32($sbox),%r11
- mov 64($sbox),%r12
- mov 96($sbox),%r13
- lea 128($sbox),$sbox
- sub \$1,%ecx
- jnz .Lcbc_prefetch_td
- sub \$2304,$sbox
-
+.LFAST_DECRYPT:
cmp $inp,$out
- je .Lcbc_dec_in_place
+ je .Lcbc_fast_dec_in_place
mov %rbp,$ivec
.align 4
-.Lcbc_dec_loop:
- mov 0($inp),$s0 # read input
+.Lcbc_fast_dec_loop:
+ mov 0($inp),$s0 # read input
mov 4($inp),$s1
mov 8($inp),$s2
mov 12($inp),$s3
- mov $inp,8+$ivec # if ($verticalspin) save inp
-
mov $keyp,$key # restore key
+ mov $inp,$_inp # if ($verticalspin) save inp
+
call _x86_64_AES_decrypt
mov $ivec,%rbp # load ivp
- mov 8+$ivec,$inp # if ($verticalspin) restore inp
+ mov $_inp,$inp # if ($verticalspin) restore inp
+ mov $_len,%r10 # load len
xor 0(%rbp),$s0 # xor iv
xor 4(%rbp),$s1
xor 8(%rbp),$s2
xor 12(%rbp),$s3
mov $inp,%rbp # current input, next iv
- mov $_len,%r10 # load len
sub \$16,%r10
- jc .Lcbc_dec_partial
mov %r10,$_len # update len
mov %rbp,$ivec # update ivp
@@ -1323,81 +1829,281 @@ AES_cbc_encrypt:
lea 16($inp),$inp
lea 16($out),$out
- jnz .Lcbc_dec_loop
-.Lcbc_dec_end:
+ jnz .Lcbc_fast_dec_loop
mov $_ivp,%r12 # load user ivp
mov 0(%rbp),%r10 # load iv
mov 8(%rbp),%r11
mov %r10,0(%r12) # copy back to user
mov %r11,8(%r12)
- jmp .Lcbc_cleanup
-
-.align 4
-.Lcbc_dec_partial:
- mov $s0,0+$ivec # dump output to stack
- mov $s1,4+$ivec
- mov $s2,8+$ivec
- mov $s3,12+$ivec
- mov $out,%rdi
- lea $ivec,%rsi
- mov \$16,%rcx
- add %r10,%rcx # number of bytes to copy
- .long 0xF689A4F3 # rep movsb
- jmp .Lcbc_dec_end
+ jmp .Lcbc_fast_cleanup
.align 16
-.Lcbc_dec_in_place:
+.Lcbc_fast_dec_in_place:
+ mov 0(%rbp),%r10 # copy iv to stack
+ mov 8(%rbp),%r11
+ mov %r10,0+$ivec
+ mov %r11,8+$ivec
+.align 4
+.Lcbc_fast_dec_in_place_loop:
mov 0($inp),$s0 # load input
mov 4($inp),$s1
mov 8($inp),$s2
mov 12($inp),$s3
+ mov $keyp,$key # restore key
+ mov $inp,$_inp # if ($verticalspin) save inp
- mov $inp,$ivec # if ($verticalspin) save inp
- mov $keyp,$key
call _x86_64_AES_decrypt
- mov $ivec,$inp # if ($verticalspin) restore inp
- mov $_ivp,%rbp
- xor 0(%rbp),$s0
- xor 4(%rbp),$s1
- xor 8(%rbp),$s2
- xor 12(%rbp),$s3
+ mov $_inp,$inp # if ($verticalspin) restore inp
+ mov $_len,%r10
+ xor 0+$ivec,$s0
+ xor 4+$ivec,$s1
+ xor 8+$ivec,$s2
+ xor 12+$ivec,$s3
+
+ mov 0($inp),%r11 # load input
+ mov 8($inp),%r12
+ sub \$16,%r10
+ jz .Lcbc_fast_dec_in_place_done
- mov 0($inp),%r10 # copy input to iv
- mov 8($inp),%r11
- mov %r10,0(%rbp)
- mov %r11,8(%rbp)
+ mov %r11,0+$ivec # copy input to iv
+ mov %r12,8+$ivec
mov $s0,0($out) # save output [zaps input]
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
- mov $_len,%rcx
lea 16($inp),$inp
lea 16($out),$out
- sub \$16,%rcx
- jc .Lcbc_dec_in_place_partial
- mov %rcx,$_len
- jnz .Lcbc_dec_in_place
- jmp .Lcbc_cleanup
+ mov %r10,$_len
+ jmp .Lcbc_fast_dec_in_place_loop
+.Lcbc_fast_dec_in_place_done:
+ mov $_ivp,%rdi
+ mov %r11,0(%rdi) # copy iv back to user
+ mov %r12,8(%rdi)
+
+ mov $s0,0($out) # save output [zaps input]
+ mov $s1,4($out)
+ mov $s2,8($out)
+ mov $s3,12($out)
.align 4
-.Lcbc_dec_in_place_partial:
- # one can argue if this is actually required
- lea ($out,%rcx),%rdi
- lea (%rbp,%rcx),%rsi
- neg %rcx
- .long 0xF689A4F3 # rep movsb # restore tail
- jmp .Lcbc_cleanup
+.Lcbc_fast_cleanup:
+ cmpl \$0,$mark # was the key schedule copied?
+ lea $aes_key,%rdi
+ je .Lcbc_exit
+ mov \$240/8,%ecx
+ xor %rax,%rax
+ .long 0x90AB48F3 # rep stosq
+
+ jmp .Lcbc_exit
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+.align 16
+.Lcbc_slow_prologue:
+ # allocate aligned stack frame...
+ lea -88(%rsp),%rbp
+ and \$-64,%rbp
+ # ... just "above" key schedule
+ lea -88-63(%rcx),%r10
+ sub %rbp,%r10
+ neg %r10
+ and \$0x3c0,%r10
+ sub %r10,%rbp
+
+ xchg %rsp,%rbp
+ #add \$8,%rsp # reserve for return address!
+ mov %rbp,$_rsp # save %rsp
+.Lcbc_slow_body:
+ #mov %rdi,$_inp # save copy of inp
+ #mov %rsi,$_out # save copy of out
+ #mov %rdx,$_len # save copy of len
+ #mov %rcx,$_key # save copy of key
+ mov %r8,$_ivp # save copy of ivp
+ mov %r8,%rbp # rearrange input arguments
+ mov %r9,%rbx
+ mov %rsi,$out
+ mov %rdi,$inp
+ mov %rcx,$key
+ mov %rdx,%r10
+
+ mov 240($key),%eax
+ mov $key,$keyp # save key pointer
+ shl \$4,%eax
+ lea ($key,%rax),%rax
+ mov %rax,$keyend
+
+ # pick Te4 copy which can't "overlap" with stack frame or key schedule
+ lea 2048($sbox),$sbox
+ lea 768-8(%rsp),%rax
+ sub $sbox,%rax
+ and \$0x300,%rax
+ lea ($sbox,%rax),$sbox
+
+ cmp \$0,%rbx
+ je .LSLOW_DECRYPT
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+ test \$-16,%r10 # check length
+ mov 0(%rbp),$s0 # load iv
+ mov 4(%rbp),$s1
+ mov 8(%rbp),$s2
+ mov 12(%rbp),$s3
+ jz .Lcbc_slow_enc_tail # short input...
+
+.align 4
+.Lcbc_slow_enc_loop:
+ xor 0($inp),$s0
+ xor 4($inp),$s1
+ xor 8($inp),$s2
+ xor 12($inp),$s3
+ mov $keyp,$key # restore key
+ mov $inp,$_inp # save inp
+ mov $out,$_out # save out
+ mov %r10,$_len # save len
+
+ call _x86_64_AES_encrypt_compact
+
+ mov $_inp,$inp # restore inp
+ mov $_out,$out # restore out
+ mov $_len,%r10 # restore len
+ mov $s0,0($out)
+ mov $s1,4($out)
+ mov $s2,8($out)
+ mov $s3,12($out)
+
+ lea 16($inp),$inp
+ lea 16($out),$out
+ sub \$16,%r10
+ test \$-16,%r10
+ jnz .Lcbc_slow_enc_loop
+ test \$15,%r10
+ jnz .Lcbc_slow_enc_tail
+ mov $_ivp,%rbp # restore ivp
+ mov $s0,0(%rbp) # save ivec
+ mov $s1,4(%rbp)
+ mov $s2,8(%rbp)
+ mov $s3,12(%rbp)
+
+ jmp .Lcbc_exit
+
+.align 4
+.Lcbc_slow_enc_tail:
+ mov %rax,%r11
+ mov %rcx,%r12
+ mov %r10,%rcx
+ mov $inp,%rsi
+ mov $out,%rdi
+ .long 0x9066A4F3 # rep movsb
+ mov \$16,%rcx # zero tail
+ sub %r10,%rcx
+ xor %rax,%rax
+ .long 0x9066AAF3 # rep stosb
+ mov $out,$inp # this is not a mistake!
+ mov \$16,%r10 # len=16
+ mov %r11,%rax
+ mov %r12,%rcx
+ jmp .Lcbc_slow_enc_loop # one more spin...
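
The tail handler therefore implements zero padding: the leftover bytes are copied to the output buffer, the rest of the block is zero-filled with rep stosb, and the loop is re-entered exactly once with inp pointed at out and len forced to 16. In Perl terms:

    use strict; use warnings;

    my $tail  = "abc";                                   # 3 leftover plaintext bytes
    my $block = $tail . ("\0" x (16 - length $tail));    # the rep stosb zero fill
    printf "%d bytes: %s\n", length($block), unpack("H*", $block);
    # the loop is then re-entered once with inp = out and len = 16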
+#--------------------------- SLOW DECRYPT ---------------------------#
+.align 16
+.LSLOW_DECRYPT:
+ shr \$3,%rax
+ add %rax,$sbox # recall "magic" constants!
+
+ mov 0(%rbp),%r11 # copy iv to stack
+ mov 8(%rbp),%r12
+ mov %r11,0+$ivec
+ mov %r12,8+$ivec
+
+.align 4
+.Lcbc_slow_dec_loop:
+ mov 0($inp),$s0 # load input
+ mov 4($inp),$s1
+ mov 8($inp),$s2
+ mov 12($inp),$s3
+ mov $keyp,$key # restore key
+ mov $inp,$_inp # save inp
+ mov $out,$_out # save out
+ mov %r10,$_len # save len
+
+ call _x86_64_AES_decrypt_compact
+
+ mov $_inp,$inp # restore inp
+ mov $_out,$out # restore out
+ mov $_len,%r10
+ xor 0+$ivec,$s0
+ xor 4+$ivec,$s1
+ xor 8+$ivec,$s2
+ xor 12+$ivec,$s3
+
+ mov 0($inp),%r11 # load input
+ mov 8($inp),%r12
+ sub \$16,%r10
+ jc .Lcbc_slow_dec_partial
+ jz .Lcbc_slow_dec_done
+
+ mov %r11,0+$ivec # copy input to iv
+ mov %r12,8+$ivec
+
+ mov $s0,0($out) # save output [can zap input]
+ mov $s1,4($out)
+ mov $s2,8($out)
+ mov $s3,12($out)
+
+ lea 16($inp),$inp
+ lea 16($out),$out
+ jmp .Lcbc_slow_dec_loop
+.Lcbc_slow_dec_done:
+ mov $_ivp,%rdi
+ mov %r11,0(%rdi) # copy iv back to user
+ mov %r12,8(%rdi)
+
+ mov $s0,0($out) # save output [can zap input]
+ mov $s1,4($out)
+ mov $s2,8($out)
+ mov $s3,12($out)
+
+ jmp .Lcbc_exit
+
+.align 4
+.Lcbc_slow_dec_partial:
+ mov $_ivp,%rdi
+ mov %r11,0(%rdi) # copy iv back to user
+ mov %r12,8(%rdi)
+
+ mov $s0,0+$ivec # save output to stack
+ mov $s1,4+$ivec
+ mov $s2,8+$ivec
+ mov $s3,12+$ivec
+
+ mov $out,%rdi
+ lea $ivec,%rsi
+ lea 16(%r10),%rcx
+ .long 0x9066A4F3 # rep movsb
+ jmp .Lcbc_exit
+
+.align 16
+.Lcbc_exit:
+ mov $_rsp,%rsi
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lcbc_popfq:
+ popfq
+.Lcbc_epilogue:
+ ret
.size AES_cbc_encrypt,.-AES_cbc_encrypt
___
}
$code.=<<___;
-.globl AES_Te
.align 64
-AES_Te:
+.LAES_Te:
___
&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
@@ -1463,16 +2169,149 @@ ___
&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
+
+#Te4 # four copies of Te4 to choose from to avoid L1 aliasing
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+ &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+ &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+ &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+ &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+ &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+ &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+ &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+ &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+ &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+ &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+ &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+ &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+ &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+ &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+ &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+ &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+ &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+ &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+ &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+ &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+ &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+ &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+ &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+ &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+ &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+ &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+ &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+ &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+ &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+ &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+ &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+ &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
#rcon:
$code.=<<___;
.long 0x00000001, 0x00000002, 0x00000004, 0x00000008
.long 0x00000010, 0x00000020, 0x00000040, 0x00000080
- .long 0x0000001b, 0x00000036, 0, 0, 0, 0, 0, 0
+ .long 0x0000001b, 0x00000036, 0x80808080, 0x80808080
+ .long 0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
___
$code.=<<___;
-.globl AES_Td
.align 64
-AES_Td:
+.LAES_Td:
___
&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
@@ -1538,7 +2377,116 @@ ___
&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
-#Td4:
+
+#Td4: # four copies of Td4 to choose from to avoid L1 aliasing
+ &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+ &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+ &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+ &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+ &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+ &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+ &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+ &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+ &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+ &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+ &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+ &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+ &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+ &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+ &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+ &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+ &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+ &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+ &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+ &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+ &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+ &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+ &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+ &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+ &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+ &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+ &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+ &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+ &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+ &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+ &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+ &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+ .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+ .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+ &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+ &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+ &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+ &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+ &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+ &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+ &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+ &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+ &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+ &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+ &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+ &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+ &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+ &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+ &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+ &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+ &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+ &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+ &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+ &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+ &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+ &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+ &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+ &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+ &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+ &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+ &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+ &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+ &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+ &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+ &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+ &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+ .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+ .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+ &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+ &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+ &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+ &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+ &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+ &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+ &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+ &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+ &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+ &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+ &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+ &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+ &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+ &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+ &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+ &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+ &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+ &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+ &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+ &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+ &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+ &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+ &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+ &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+ &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+ &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+ &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+ &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+ &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+ &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+ &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+ &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+ .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+ .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
@@ -1571,6 +2519,288 @@ ___
&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+ .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+ .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+.asciz "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type block_se_handler,\@abi-omnipotent
+.align 16
+block_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_block_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_block_prologue
+
+ mov 24(%rax),%rax # pull saved real stack pointer
+ lea 48(%rax),%rax # adjust...
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_block_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ jmp .Lcommon_seh_exit
+.size block_se_handler,.-block_se_handler
+
+.type key_se_handler,\@abi-omnipotent
+.align 16
+key_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_key_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_key_prologue
+
+ lea 56(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_key_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ jmp .Lcommon_seh_exit
+.size key_se_handler,.-key_se_handler
+
+.type cbc_se_handler,\@abi-omnipotent
+.align 16
+cbc_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lcbc_prologue(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_prologue
+ jb .Lin_cbc_prologue
+
+ lea .Lcbc_fast_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_fast_body
+ jb .Lin_cbc_frame_setup
+
+ lea .Lcbc_slow_prologue(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_slow_prologue
+ jb .Lin_cbc_body
+
+ lea .Lcbc_slow_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_slow_body
+ jb .Lin_cbc_frame_setup
+
+.Lin_cbc_body:
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lcbc_epilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lcbc_epilogue
+ jae .Lin_cbc_prologue
+
+ lea 8(%rax),%rax
+
+ lea .Lcbc_popfq(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lcbc_popfq
+ jae .Lin_cbc_prologue
+
+ mov `16-8`(%rax),%rax # biased $_rsp
+ lea 56(%rax),%rax
+
+.Lin_cbc_frame_setup:
+ mov -16(%rax),%rbx
+ mov -24(%rax),%rbp
+ mov -32(%rax),%r12
+ mov -40(%rax),%r13
+ mov -48(%rax),%r14
+ mov -56(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_cbc_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+.Lcommon_seh_exit:
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$`1232/8`,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size cbc_se_handler,.-cbc_se_handler
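
The cmp/jb cascade at the top of cbc_se_handler classifies context->Rip into three states: before any frame exists (prologue), after the registers are pushed but before the custom frame is live (frame setup), or inside the body proper, with the fast and slow entry sequences each getting their own window; the body case is then refined further by the epilogue/popfq tests. A Perl restatement of the cascade (label addresses hypothetical):

    use strict; use warnings;

    sub classify_rip {
        my ($rip, $l) = @_;               # $l: label addresses, values hypothetical
        return "prologue"    if $rip < $l->{cbc_prologue};
        return "frame_setup" if $rip < $l->{cbc_fast_body};
        return "body"        if $rip < $l->{cbc_slow_prologue};
        return "frame_setup" if $rip < $l->{cbc_slow_body};
        return "body";
    }
    my %labels = (cbc_prologue => 0x100, cbc_fast_body => 0x140,
                  cbc_slow_prologue => 0x300, cbc_slow_body => 0x340);
    print classify_rip(0x120, \%labels), "\n";   # frame_setup: registers pushed,
                                                 # custom frame not yet live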
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_AES_encrypt
+ .rva .LSEH_end_AES_encrypt
+ .rva .LSEH_info_AES_encrypt
+
+ .rva .LSEH_begin_AES_decrypt
+ .rva .LSEH_end_AES_decrypt
+ .rva .LSEH_info_AES_decrypt
+
+ .rva .LSEH_begin_AES_set_encrypt_key
+ .rva .LSEH_end_AES_set_encrypt_key
+ .rva .LSEH_info_AES_set_encrypt_key
+
+ .rva .LSEH_begin_AES_set_decrypt_key
+ .rva .LSEH_end_AES_set_decrypt_key
+ .rva .LSEH_info_AES_set_decrypt_key
+
+ .rva .LSEH_begin_AES_cbc_encrypt
+ .rva .LSEH_end_AES_cbc_encrypt
+ .rva .LSEH_info_AES_cbc_encrypt
+
+.section .xdata
+.align 8
+.LSEH_info_AES_encrypt:
+ .byte 9,0,0,0
+ .rva block_se_handler
+ .rva .Lenc_prologue,.Lenc_epilogue # HandlerData[]
+.LSEH_info_AES_decrypt:
+ .byte 9,0,0,0
+ .rva block_se_handler
+ .rva .Ldec_prologue,.Ldec_epilogue # HandlerData[]
+.LSEH_info_AES_set_encrypt_key:
+ .byte 9,0,0,0
+ .rva key_se_handler
+ .rva .Lenc_key_prologue,.Lenc_key_epilogue # HandlerData[]
+.LSEH_info_AES_set_decrypt_key:
+ .byte 9,0,0,0
+ .rva key_se_handler
+ .rva .Ldec_key_prologue,.Ldec_key_epilogue # HandlerData[]
+.LSEH_info_AES_cbc_encrypt:
+ .byte 9,0,0,0
+ .rva cbc_se_handler
+___
+}
$code =~ s/\`([^\`]*)\`/eval($1)/gem;