| author | marha <marha@users.sourceforge.net> | 2011-01-23 19:50:13 +0000 |
| --- | --- | --- |
| committer | marha <marha@users.sourceforge.net> | 2011-01-23 19:50:13 +0000 |
| commit | b680cf39ed5bc37e0eb7eb86ad8599bf92df3f2b (patch) | |
| tree | 4722cd31e41fdda28e5c2b37bdf8500d27868384 /openssl/crypto/aes | |
| parent | 8cd59857a99c534c560f58c931f5c2466d4c1f9b (diff) | |
Updated to openssl-1.0.0c
Diffstat (limited to 'openssl/crypto/aes')
| -rw-r--r-- | openssl/crypto/aes/aes_wrap.c | 12 |
| -rw-r--r-- | openssl/crypto/aes/asm/aes-armv4.pl | 397 |
2 files changed, 204 insertions, 205 deletions
diff --git a/openssl/crypto/aes/aes_wrap.c b/openssl/crypto/aes/aes_wrap.c
index 9feacd65d..e2d73d37c 100644
--- a/openssl/crypto/aes/aes_wrap.c
+++ b/openssl/crypto/aes/aes_wrap.c
@@ -85,9 +85,9 @@ int AES_wrap_key(AES_KEY *key, const unsigned char *iv,
 			A[7] ^= (unsigned char)(t & 0xff);
 			if (t > 0xff)
 				{
-				A[6] ^= (unsigned char)((t & 0xff) >> 8);
-				A[5] ^= (unsigned char)((t & 0xff) >> 16);
-				A[4] ^= (unsigned char)((t & 0xff) >> 24);
+				A[6] ^= (unsigned char)((t >> 8) & 0xff);
+				A[5] ^= (unsigned char)((t >> 16) & 0xff);
+				A[4] ^= (unsigned char)((t >> 24) & 0xff);
 				}
 			memcpy(R, B + 8, 8);
 		}
@@ -119,9 +119,9 @@ int AES_unwrap_key(AES_KEY *key, const unsigned char *iv,
 			A[7] ^= (unsigned char)(t & 0xff);
 			if (t > 0xff)
 				{
-				A[6] ^= (unsigned char)((t & 0xff) >> 8);
-				A[5] ^= (unsigned char)((t & 0xff) >> 16);
-				A[4] ^= (unsigned char)((t & 0xff) >> 24);
+				A[6] ^= (unsigned char)((t >> 8) & 0xff);
+				A[5] ^= (unsigned char)((t >> 16) & 0xff);
+				A[4] ^= (unsigned char)((t >> 24) & 0xff);
 				}
 			memcpy(B + 8, R, 8);
 			AES_decrypt(B, B, key);
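The aes_wrap.c hunks are a genuine bug fix, not just a version bump: the old code masked the wrap counter `t` down to its low byte *before* shifting, so `(t & 0xff) >> 8` and the two lines below it always evaluated to zero, and `A[6]`..`A[4]` were never XORed once `t` exceeded 0xff. A minimal C sketch of the corrected shift-then-mask pattern (the helper name and the `uint64_t` type are illustrative, not OpenSSL's):

```c
#include <stdint.h>

/* Extract byte n of a counter: shift first, then mask.
 * The pre-fix order, (t & 0xff) >> (8*n), zeroes every bit above
 * bit 7 before shifting, so it returns 0 for all n > 0. */
static unsigned char counter_byte(uint64_t t, unsigned n)
{
    return (unsigned char)((t >> (8 * n)) & 0xff);
}
```

In these terms, the fixed `A[6]` update corresponds to `counter_byte(t, 1)`, `A[5]` to `counter_byte(t, 2)`, and `A[4]` to `counter_byte(t, 3)`, matching the replacement lines.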
diff --git a/openssl/crypto/aes/asm/aes-armv4.pl b/openssl/crypto/aes/asm/aes-armv4.pl
index 690244111..c51ee1fbf 100644
--- a/openssl/crypto/aes/asm/aes-armv4.pl
+++ b/openssl/crypto/aes/asm/aes-armv4.pl
@@ -16,12 +16,20 @@
 # allows to merge logical or arithmetic operation with shift or rotate
 # in one instruction and emit combined result every cycle. The module
 # is endian-neutral. The performance is ~42 cycles/byte for 128-bit
-# key.
+# key [on single-issue Xscale PXA250 core].
 
 # May 2007.
 #
 # AES_set_[en|de]crypt_key is added.
 
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 12% improvement on
+# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
 $s0="r0";
 $s1="r1";
 $s2="r2";
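The July 2010 note is the theme of every hunk that follows: table loads are moved in between `and`/`eor` arithmetic so a dual-issue core such as the Cortex A8 can issue one of each per cycle instead of stalling on back-to-back dependent instructions. As a rough C analogy only (not the module's code), splitting one dependency chain into two independent ones exposes the same kind of instruction-level parallelism:

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical illustration: two independent accumulators let the
 * CPU overlap additions that a single chain would serialize. */
static uint32_t sum_two_chains(const uint32_t *p, size_t n)
{
    uint32_t a = 0, b = 0;
    size_t i;
    for (i = 0; i + 1 < n; i += 2) {
        a += p[i];          /* chain 1 */
        b += p[i + 1];      /* chain 2: no dependency on chain 1 */
    }
    if (i < n)
        a += p[i];
    return a + b;
}
```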
@@ -164,24 +172,24 @@ AES_encrypt:
 	ldrb	$t2,[$rounds,#1]
 	ldrb	$t3,[$rounds,#0]
 	orr	$s0,$s0,$t1,lsl#8
-	orr	$s0,$s0,$t2,lsl#16
-	orr	$s0,$s0,$t3,lsl#24
 	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
 	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
 	ldrb	$t2,[$rounds,#5]
 	ldrb	$t3,[$rounds,#4]
 	orr	$s1,$s1,$t1,lsl#8
-	orr	$s1,$s1,$t2,lsl#16
-	orr	$s1,$s1,$t3,lsl#24
 	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
 	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
 	ldrb	$t2,[$rounds,#9]
 	ldrb	$t3,[$rounds,#8]
 	orr	$s2,$s2,$t1,lsl#8
-	orr	$s2,$s2,$t2,lsl#16
-	orr	$s2,$s2,$t3,lsl#24
 	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
 	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
 	ldrb	$t2,[$rounds,#13]
 	ldrb	$t3,[$rounds,#12]
 	orr	$s3,$s3,$t1,lsl#8
@@ -196,24 +204,24 @@ AES_encrypt:
 	mov	$t3,$s0,lsr#8
 	strb	$t1,[$rounds,#0]
 	strb	$t2,[$rounds,#1]
-	strb	$t3,[$rounds,#2]
-	strb	$s0,[$rounds,#3]
 	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
 	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
 	mov	$t3,$s1,lsr#8
 	strb	$t1,[$rounds,#4]
 	strb	$t2,[$rounds,#5]
-	strb	$t3,[$rounds,#6]
-	strb	$s1,[$rounds,#7]
 	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
 	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
 	mov	$t3,$s2,lsr#8
 	strb	$t1,[$rounds,#8]
 	strb	$t2,[$rounds,#9]
-	strb	$t3,[$rounds,#10]
-	strb	$s2,[$rounds,#11]
 	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
 	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
 	mov	$t3,$s3,lsr#8
 	strb	$t1,[$rounds,#12]
 	strb	$t2,[$rounds,#13]
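The two hunks above only reorder the byte-at-a-time load and store of the 16-byte block; the `ldrb`/`orr` and `lsr`/`strb` sequences themselves are what makes the module endian-neutral, as the header comment says. In C terms the idiom is the following (a hedged sketch, not the module's source):

```c
#include <stdint.h>

/* Read/write a 32-bit word in big-endian byte order one byte at a
 * time, so the result is independent of the CPU's native endianness. */
static uint32_t load_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}

static void store_be32(unsigned char *p, uint32_t w)
{
    p[0] = (unsigned char)(w >> 24);
    p[1] = (unsigned char)(w >> 16);
    p[2] = (unsigned char)(w >>  8);
    p[3] = (unsigned char)w;
}
```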
@@ -230,141 +238,137 @@ AES_encrypt:
 .align	2
 _armv4_AES_encrypt:
 	str	lr,[sp,#-4]!		@ push lr
-	ldr	$t1,[$key],#16
-	ldr	$t2,[$key,#-12]
-	ldr	$t3,[$key,#-8]
-	ldr	$i1,[$key,#-4]
-	ldr	$rounds,[$key,#240-16]
+	ldmia	$key!,{$t1-$i1}
 	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
 	eor	$s1,$s1,$t2
 	eor	$s2,$s2,$t3
 	eor	$s3,$s3,$i1
 	sub	$rounds,$rounds,#1
 	mov	lr,#255
 
-.Lenc_loop:
+	and	$i1,lr,$s0
 	and	$i2,lr,$s0,lsr#8
 	and	$i3,lr,$s0,lsr#16
-	and	$i1,lr,$s0
 	mov	$s0,$s0,lsr#24
+.Lenc_loop:
 	ldr	$t1,[$tbl,$i1,lsl#2]	@ Te3[s0>>0]
-	ldr	$s0,[$tbl,$s0,lsl#2]	@ Te0[s0>>24]
-	ldr	$t2,[$tbl,$i2,lsl#2]	@ Te2[s0>>8]
-	ldr	$t3,[$tbl,$i3,lsl#2]	@ Te1[s0>>16]
-
 	and	$i1,lr,$s1,lsr#16	@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Te2[s0>>8]
 	and	$i2,lr,$s1
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Te1[s0>>16]
 	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Te0[s0>>24]
 	mov	$s1,$s1,lsr#24
+
 	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te1[s1>>16]
-	ldr	$s1,[$tbl,$s1,lsl#2]	@ Te0[s1>>24]
 	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te3[s1>>0]
 	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te2[s1>>8]
 	eor	$s0,$s0,$i1,ror#8
-	eor	$s1,$s1,$t1,ror#24
-	eor	$t2,$t2,$i2,ror#8
-	eor	$t3,$t3,$i3,ror#8
-
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Te0[s1>>24]
 	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,ror#8
 	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$t3,$i3,ror#8
 	and	$i3,lr,$s2
-	mov	$s2,$s2,lsr#24
+	eor	$s1,$s1,$t1,ror#24
 	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te2[s2>>8]
+	mov	$s2,$s2,lsr#24
+
 	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te1[s2>>16]
-	ldr	$s2,[$tbl,$s2,lsl#2]	@ Te0[s2>>24]
 	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te3[s2>>0]
 	eor	$s0,$s0,$i1,ror#16
-	eor	$s1,$s1,$i2,ror#8
-	eor	$s2,$s2,$t2,ror#16
-	eor	$t3,$t3,$i3,ror#16
-
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Te0[s2>>24]
 	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,ror#8
 	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$t3,$i3,ror#16
 	and	$i3,lr,$s3,lsr#16	@ i2
-	mov	$s3,$s3,lsr#24
+	eor	$s2,$s2,$t2,ror#16
 	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te3[s3>>0]
+	mov	$s3,$s3,lsr#24
+
 	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te2[s3>>8]
 	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te1[s3>>16]
-	ldr	$s3,[$tbl,$s3,lsl#2]	@ Te0[s3>>24]
 	eor	$s0,$s0,$i1,ror#24
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Te0[s3>>24]
 	eor	$s1,$s1,$i2,ror#16
+	ldr	$i1,[$key],#16
 	eor	$s2,$s2,$i3,ror#8
+	ldr	$t1,[$key,#-12]
 	eor	$s3,$s3,$t3,ror#8
 
-	ldr	$t1,[$key],#16
-	ldr	$t2,[$key,#-12]
-	ldr	$t3,[$key,#-8]
-	ldr	$i1,[$key,#-4]
-	eor	$s0,$s0,$t1
-	eor	$s1,$s1,$t2
-	eor	$s2,$s2,$t3
-	eor	$s3,$s3,$i1
+	ldr	$t2,[$key,#-8]
+	eor	$s0,$s0,$i1
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0,lsr#16
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
 
 	subs	$rounds,$rounds,#1
 	bne	.Lenc_loop
 
 	add	$tbl,$tbl,#2
-	and	$i1,lr,$s0
-	and	$i2,lr,$s0,lsr#8
-	and	$i3,lr,$s0,lsr#16
-	mov	$s0,$s0,lsr#24
 	ldrb	$t1,[$tbl,$i1,lsl#2]	@ Te4[s0>>0]
-	ldrb	$s0,[$tbl,$s0,lsl#2]	@ Te4[s0>>24]
-	ldrb	$t2,[$tbl,$i2,lsl#2]	@ Te4[s0>>8]
-	ldrb	$t3,[$tbl,$i3,lsl#2]	@ Te4[s0>>16]
-
 	and	$i1,lr,$s1,lsr#16	@ i0
+	ldrb	$t2,[$tbl,$i2,lsl#2]	@ Te4[s0>>8]
 	and	$i2,lr,$s1
+	ldrb	$t3,[$tbl,$i3,lsl#2]	@ Te4[s0>>16]
 	and	$i3,lr,$s1,lsr#8
+	ldrb	$s0,[$tbl,$s0,lsl#2]	@ Te4[s0>>24]
 	mov	$s1,$s1,lsr#24
+
 	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s1>>16]
-	ldrb	$s1,[$tbl,$s1,lsl#2]	@ Te4[s1>>24]
 	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s1>>0]
 	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s1>>8]
 	eor	$s0,$i1,$s0,lsl#8
-	eor	$s1,$t1,$s1,lsl#24
-	eor	$t2,$i2,$t2,lsl#8
-	eor	$t3,$i3,$t3,lsl#8
-
+	ldrb	$s1,[$tbl,$s1,lsl#2]	@ Te4[s1>>24]
 	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,lsl#8
 	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$i3,$t3,lsl#8
 	and	$i3,lr,$s2
-	mov	$s2,$s2,lsr#24
+	eor	$s1,$t1,$s1,lsl#24
 	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s2>>8]
+	mov	$s2,$s2,lsr#24
+
 	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s2>>16]
-	ldrb	$s2,[$tbl,$s2,lsl#2]	@ Te4[s2>>24]
 	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s2>>0]
 	eor	$s0,$i1,$s0,lsl#8
-	eor	$s1,$s1,$i2,lsl#16
-	eor	$s2,$t2,$s2,lsl#24
-	eor	$t3,$i3,$t3,lsl#8
-
+	ldrb	$s2,[$tbl,$s2,lsl#2]	@ Te4[s2>>24]
 	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,lsl#16
 	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,lsl#8
 	and	$i3,lr,$s3,lsr#16	@ i2
-	mov	$s3,$s3,lsr#24
+	eor	$s2,$t2,$s2,lsl#24
 	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s3>>0]
+	mov	$s3,$s3,lsr#24
+
 	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s3>>8]
 	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s3>>16]
-	ldrb	$s3,[$tbl,$s3,lsl#2]	@ Te4[s3>>24]
 	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s3,[$tbl,$s3,lsl#2]	@ Te4[s3>>24]
+	ldr	$i1,[$key,#0]
 	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
 	eor	$s2,$s2,$i3,lsl#16
+	ldr	$t2,[$key,#8]
 	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
 
-	ldr	lr,[sp],#4		@ pop lr
-	ldr	$t1,[$key,#0]
-	ldr	$t2,[$key,#4]
-	ldr	$t3,[$key,#8]
-	ldr	$i1,[$key,#12]
-	eor	$s0,$s0,$t1
-	eor	$s1,$s1,$t2
-	eor	$s2,$s2,$t3
-	eor	$s3,$s3,$i1
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
 
 	sub	$tbl,$tbl,#2
-	mov	pc,lr			@ return
+	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
 .global	AES_set_encrypt_key
@@ -399,31 +403,31 @@ AES_set_encrypt_key:
 	ldrb	$t2,[$rounds,#1]
 	ldrb	$t3,[$rounds,#0]
 	orr	$s0,$s0,$t1,lsl#8
-	orr	$s0,$s0,$t2,lsl#16
-	orr	$s0,$s0,$t3,lsl#24
 	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
 	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
 	ldrb	$t2,[$rounds,#5]
 	ldrb	$t3,[$rounds,#4]
 	orr	$s1,$s1,$t1,lsl#8
-	orr	$s1,$s1,$t2,lsl#16
-	orr	$s1,$s1,$t3,lsl#24
 	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
 	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
 	ldrb	$t2,[$rounds,#9]
 	ldrb	$t3,[$rounds,#8]
 	orr	$s2,$s2,$t1,lsl#8
-	orr	$s2,$s2,$t2,lsl#16
-	orr	$s2,$s2,$t3,lsl#24
 	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
 	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
 	ldrb	$t2,[$rounds,#13]
 	ldrb	$t3,[$rounds,#12]
 	orr	$s3,$s3,$t1,lsl#8
-	orr	$s3,$s3,$t2,lsl#16
-	orr	$s3,$s3,$t3,lsl#24
 	str	$s0,[$key],#16
+	orr	$s3,$s3,$t2,lsl#16
 	str	$s1,[$key,#-12]
+	orr	$s3,$s3,$t3,lsl#24
 	str	$s2,[$key,#-8]
 	str	$s3,[$key,#-4]
@@ -437,27 +441,26 @@ AES_set_encrypt_key:
 .L128_loop:
 	and	$t2,lr,$s3,lsr#24
 	and	$i1,lr,$s3,lsr#16
-	and	$i2,lr,$s3,lsr#8
-	and	$i3,lr,$s3
 	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#8
 	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3
 	ldrb	$i2,[$tbl,$i2]
-	ldrb	$i3,[$tbl,$i3]
-	ldr	$t1,[$t3],#4			@ rcon[i++]
 	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
 	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
 	orr	$t2,$t2,$i3,lsl#8
 	eor	$t2,$t2,$t1
 	eor	$s0,$s0,$t2			@ rk[4]=rk[0]^...
 	eor	$s1,$s1,$s0			@ rk[5]=rk[1]^rk[4]
-	eor	$s2,$s2,$s1			@ rk[6]=rk[2]^rk[5]
-	eor	$s3,$s3,$s2			@ rk[7]=rk[3]^rk[6]
 	str	$s0,[$key],#16
+	eor	$s2,$s2,$s1			@ rk[6]=rk[2]^rk[5]
 	str	$s1,[$key,#-12]
+	eor	$s3,$s3,$s2			@ rk[7]=rk[3]^rk[6]
 	str	$s2,[$key,#-8]
-	str	$s3,[$key,#-4]
-
 	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-4]
 	bne	.L128_loop
 	sub	r2,$key,#176
 	b	.Ldone
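The `.L128_loop` hunk reschedules one AES-128 key-expansion step; the `rk[4]=rk[0]^...` comments carried along in the diff describe what each `eor` computes. For reference, a hedged C sketch of that step (standard AES key expansion; `sbox` and `rcon` are assumed stand-ins for the module's lookup tables):

```c
#include <stdint.h>

extern const uint8_t  sbox[256];   /* assumed S-box table */
extern const uint32_t rcon[10];    /* assumed round constants */

/* One AES-128 key-schedule step: derive rk[4..7] from rk[0..3],
 * mirroring the eor chain commented in .L128_loop. */
static void key_expand_step(uint32_t rk[8], int i)
{
    uint32_t w = rk[3];
    uint32_t t = ((uint32_t)sbox[(w >> 16) & 0xff] << 24) |
                 ((uint32_t)sbox[(w >>  8) & 0xff] << 16) |
                 ((uint32_t)sbox[ w        & 0xff] <<  8) |
                  (uint32_t)sbox[(w >> 24) & 0xff];   /* SubWord(RotWord(w)) */
    rk[4] = rk[0] ^ t ^ rcon[i];   /* rk[4]=rk[0]^... */
    rk[5] = rk[1] ^ rk[4];         /* rk[5]=rk[1]^rk[4] */
    rk[6] = rk[2] ^ rk[5];         /* rk[6]=rk[2]^rk[5] */
    rk[7] = rk[3] ^ rk[6];         /* rk[7]=rk[3]^rk[6] */
}
```

The 192- and 256-bit loops below follow the same pattern with wider strides, which is why their hunks read almost identically.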
@@ -468,16 +471,16 @@ AES_set_encrypt_key:
 	ldrb	$t2,[$rounds,#17]
 	ldrb	$t3,[$rounds,#16]
 	orr	$i2,$i2,$t1,lsl#8
-	orr	$i2,$i2,$t2,lsl#16
-	orr	$i2,$i2,$t3,lsl#24
 	ldrb	$i3,[$rounds,#23]
+	orr	$i2,$i2,$t2,lsl#16
 	ldrb	$t1,[$rounds,#22]
+	orr	$i2,$i2,$t3,lsl#24
 	ldrb	$t2,[$rounds,#21]
 	ldrb	$t3,[$rounds,#20]
 	orr	$i3,$i3,$t1,lsl#8
 	orr	$i3,$i3,$t2,lsl#16
-	orr	$i3,$i3,$t3,lsl#24
 	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
 	str	$i3,[$key,#-4]
 
 	teq	lr,#192
@@ -491,27 +494,26 @@ AES_set_encrypt_key:
 .L192_loop:
 	and	$t2,lr,$i3,lsr#24
 	and	$i1,lr,$i3,lsr#16
-	and	$i2,lr,$i3,lsr#8
-	and	$i3,lr,$i3
 	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
 	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
 	ldrb	$i2,[$tbl,$i2]
-	ldrb	$i3,[$tbl,$i3]
-	ldr	$t1,[$t3],#4			@ rcon[i++]
 	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
 	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
 	orr	$t2,$t2,$i3,lsl#8
 	eor	$i3,$t2,$t1
 	eor	$s0,$s0,$i3			@ rk[6]=rk[0]^...
 	eor	$s1,$s1,$s0			@ rk[7]=rk[1]^rk[6]
-	eor	$s2,$s2,$s1			@ rk[8]=rk[2]^rk[7]
-	eor	$s3,$s3,$s2			@ rk[9]=rk[3]^rk[8]
 	str	$s0,[$key],#24
+	eor	$s2,$s2,$s1			@ rk[8]=rk[2]^rk[7]
 	str	$s1,[$key,#-20]
+	eor	$s3,$s3,$s2			@ rk[9]=rk[3]^rk[8]
 	str	$s2,[$key,#-16]
-	str	$s3,[$key,#-12]
-
 	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-12]
 	subeq	r2,$key,#216
 	beq	.Ldone
@@ -529,16 +531,16 @@ AES_set_encrypt_key:
 	ldrb	$t2,[$rounds,#25]
 	ldrb	$t3,[$rounds,#24]
 	orr	$i2,$i2,$t1,lsl#8
-	orr	$i2,$i2,$t2,lsl#16
-	orr	$i2,$i2,$t3,lsl#24
 	ldrb	$i3,[$rounds,#31]
+	orr	$i2,$i2,$t2,lsl#16
 	ldrb	$t1,[$rounds,#30]
+	orr	$i2,$i2,$t3,lsl#24
 	ldrb	$t2,[$rounds,#29]
 	ldrb	$t3,[$rounds,#28]
 	orr	$i3,$i3,$t1,lsl#8
 	orr	$i3,$i3,$t2,lsl#16
-	orr	$i3,$i3,$t3,lsl#24
 	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
 	str	$i3,[$key,#-4]
 
 	mov	$rounds,#14
@@ -550,52 +552,51 @@ AES_set_encrypt_key:
 .L256_loop:
 	and	$t2,lr,$i3,lsr#24
 	and	$i1,lr,$i3,lsr#16
-	and	$i2,lr,$i3,lsr#8
-	and	$i3,lr,$i3
 	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
 	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
 	ldrb	$i2,[$tbl,$i2]
-	ldrb	$i3,[$tbl,$i3]
-	ldr	$t1,[$t3],#4			@ rcon[i++]
 	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
 	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
 	orr	$t2,$t2,$i3,lsl#8
 	eor	$i3,$t2,$t1
 	eor	$s0,$s0,$i3			@ rk[8]=rk[0]^...
 	eor	$s1,$s1,$s0			@ rk[9]=rk[1]^rk[8]
-	eor	$s2,$s2,$s1			@ rk[10]=rk[2]^rk[9]
-	eor	$s3,$s3,$s2			@ rk[11]=rk[3]^rk[10]
 	str	$s0,[$key],#32
+	eor	$s2,$s2,$s1			@ rk[10]=rk[2]^rk[9]
 	str	$s1,[$key,#-28]
+	eor	$s3,$s3,$s2			@ rk[11]=rk[3]^rk[10]
 	str	$s2,[$key,#-24]
-	str	$s3,[$key,#-20]
-
 	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-20]
 	subeq	r2,$key,#256
 	beq	.Ldone
 
 	and	$t2,lr,$s3
 	and	$i1,lr,$s3,lsr#8
-	and	$i2,lr,$s3,lsr#16
-	and	$i3,lr,$s3,lsr#24
 	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#16
 	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3,lsr#24
 	ldrb	$i2,[$tbl,$i2]
-	ldrb	$i3,[$tbl,$i3]
 	orr	$t2,$t2,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]
 	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$key,#-48]
 	orr	$t2,$t2,$i3,lsl#24
-	ldr	$t1,[$key,#-48]
 	ldr	$i1,[$key,#-44]
 	ldr	$i2,[$key,#-40]
-	ldr	$i3,[$key,#-36]
 	eor	$t1,$t1,$t2			@ rk[12]=rk[4]^...
+	ldr	$i3,[$key,#-36]
 	eor	$i1,$i1,$t1			@ rk[13]=rk[5]^rk[12]
-	eor	$i2,$i2,$i1			@ rk[14]=rk[6]^rk[13]
-	eor	$i3,$i3,$i2			@ rk[15]=rk[7]^rk[14]
 	str	$t1,[$key,#-16]
+	eor	$i2,$i2,$i1			@ rk[14]=rk[6]^rk[13]
 	str	$i1,[$key,#-12]
+	eor	$i3,$i3,$i2			@ rk[15]=rk[7]^rk[14]
 	str	$i2,[$key,#-8]
 	str	$i3,[$key,#-4]
 	b	.L256_loop
@@ -816,24 +817,24 @@ AES_decrypt:
 	ldrb	$t2,[$rounds,#1]
 	ldrb	$t3,[$rounds,#0]
 	orr	$s0,$s0,$t1,lsl#8
-	orr	$s0,$s0,$t2,lsl#16
-	orr	$s0,$s0,$t3,lsl#24
 	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
 	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
 	ldrb	$t2,[$rounds,#5]
 	ldrb	$t3,[$rounds,#4]
 	orr	$s1,$s1,$t1,lsl#8
-	orr	$s1,$s1,$t2,lsl#16
-	orr	$s1,$s1,$t3,lsl#24
 	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
 	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
 	ldrb	$t2,[$rounds,#9]
 	ldrb	$t3,[$rounds,#8]
 	orr	$s2,$s2,$t1,lsl#8
-	orr	$s2,$s2,$t2,lsl#16
-	orr	$s2,$s2,$t3,lsl#24
 	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
 	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
 	ldrb	$t2,[$rounds,#13]
 	ldrb	$t3,[$rounds,#12]
 	orr	$s3,$s3,$t1,lsl#8
@@ -848,24 +849,24 @@ AES_decrypt:
 	mov	$t3,$s0,lsr#8
 	strb	$t1,[$rounds,#0]
 	strb	$t2,[$rounds,#1]
-	strb	$t3,[$rounds,#2]
-	strb	$s0,[$rounds,#3]
 	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
 	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
 	mov	$t3,$s1,lsr#8
 	strb	$t1,[$rounds,#4]
 	strb	$t2,[$rounds,#5]
-	strb	$t3,[$rounds,#6]
-	strb	$s1,[$rounds,#7]
 	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
 	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
 	mov	$t3,$s2,lsr#8
 	strb	$t1,[$rounds,#8]
 	strb	$t2,[$rounds,#9]
-	strb	$t3,[$rounds,#10]
-	strb	$s2,[$rounds,#11]
 	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
 	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
 	mov	$t3,$s3,lsr#8
 	strb	$t1,[$rounds,#12]
 	strb	$t2,[$rounds,#13]
@@ -882,146 +883,143 @@
 .align	2
 _armv4_AES_decrypt:
 	str	lr,[sp,#-4]!		@ push lr
-	ldr	$t1,[$key],#16
-	ldr	$t2,[$key,#-12]
-	ldr	$t3,[$key,#-8]
-	ldr	$i1,[$key,#-4]
-	ldr	$rounds,[$key,#240-16]
+	ldmia	$key!,{$t1-$i1}
 	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
 	eor	$s1,$s1,$t2
 	eor	$s2,$s2,$t3
 	eor	$s3,$s3,$i1
 	sub	$rounds,$rounds,#1
 	mov	lr,#255
 
-.Ldec_loop:
 	and	$i1,lr,$s0,lsr#16
 	and	$i2,lr,$s0,lsr#8
 	and	$i3,lr,$s0
 	mov	$s0,$s0,lsr#24
+.Ldec_loop:
 	ldr	$t1,[$tbl,$i1,lsl#2]	@ Td1[s0>>16]
-	ldr	$s0,[$tbl,$s0,lsl#2]	@ Td0[s0>>24]
-	ldr	$t2,[$tbl,$i2,lsl#2]	@ Td2[s0>>8]
-	ldr	$t3,[$tbl,$i3,lsl#2]	@ Td3[s0>>0]
-
 	and	$i1,lr,$s1		@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Td2[s0>>8]
 	and	$i2,lr,$s1,lsr#16
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Td3[s0>>0]
 	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Td0[s0>>24]
 	mov	$s1,$s1,lsr#24
+
 	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td3[s1>>0]
-	ldr	$s1,[$tbl,$s1,lsl#2]	@ Td0[s1>>24]
 	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td1[s1>>16]
 	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td2[s1>>8]
 	eor	$s0,$s0,$i1,ror#24
-	eor	$s1,$s1,$t1,ror#8
-	eor	$t2,$i2,$t2,ror#8
-	eor	$t3,$i3,$t3,ror#8
-
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Td0[s1>>24]
 	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,ror#8
 	and	$i2,lr,$s2		@ i1
+	eor	$t3,$i3,$t3,ror#8
 	and	$i3,lr,$s2,lsr#16
-	mov	$s2,$s2,lsr#24
+	eor	$s1,$s1,$t1,ror#8
 	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td2[s2>>8]
+	mov	$s2,$s2,lsr#24
+
 	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td3[s2>>0]
-	ldr	$s2,[$tbl,$s2,lsl#2]	@ Td0[s2>>24]
 	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td1[s2>>16]
 	eor	$s0,$s0,$i1,ror#16
-	eor	$s1,$s1,$i2,ror#24
-	eor	$s2,$s2,$t2,ror#8
-	eor	$t3,$i3,$t3,ror#8
-
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Td0[s2>>24]
 	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s1,$s1,$i2,ror#24
 	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,ror#8
 	and	$i3,lr,$s3		@ i2
-	mov	$s3,$s3,lsr#24
+	eor	$s2,$s2,$t2,ror#8
 	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td1[s3>>16]
+	mov	$s3,$s3,lsr#24
+
 	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td2[s3>>8]
 	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td3[s3>>0]
-	ldr	$s3,[$tbl,$s3,lsl#2]	@ Td0[s3>>24]
 	eor	$s0,$s0,$i1,ror#8
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Td0[s3>>24]
 	eor	$s1,$s1,$i2,ror#16
 	eor	$s2,$s2,$i3,ror#24
+	ldr	$i1,[$key],#16
 	eor	$s3,$s3,$t3,ror#8
 
-	ldr	$t1,[$key],#16
-	ldr	$t2,[$key,#-12]
-	ldr	$t3,[$key,#-8]
-	ldr	$i1,[$key,#-4]
-	eor	$s0,$s0,$t1
-	eor	$s1,$s1,$t2
-	eor	$s2,$s2,$t3
-	eor	$s3,$s3,$i1
+	ldr	$t1,[$key,#-12]
+	ldr	$t2,[$key,#-8]
+	eor	$s0,$s0,$i1
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0,lsr#16
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
 
 	subs	$rounds,$rounds,#1
 	bne	.Ldec_loop
 
 	add	$tbl,$tbl,#1024
-	ldr	$t1,[$tbl,#0]		@ prefetch Td4
-	ldr	$t2,[$tbl,#32]
-	ldr	$t3,[$tbl,#64]
-	ldr	$i1,[$tbl,#96]
-	ldr	$i2,[$tbl,#128]
-	ldr	$i3,[$tbl,#160]
-	ldr	$t1,[$tbl,#192]
-	ldr	$t2,[$tbl,#224]
+	ldr	$t2,[$tbl,#0]		@ prefetch Td4
+	ldr	$t3,[$tbl,#32]
+	ldr	$t1,[$tbl,#64]
+	ldr	$t2,[$tbl,#96]
+	ldr	$t3,[$tbl,#128]
+	ldr	$t1,[$tbl,#160]
+	ldr	$t2,[$tbl,#192]
+	ldr	$t3,[$tbl,#224]
 
-	and	$i1,lr,$s0,lsr#16
-	and	$i2,lr,$s0,lsr#8
-	and	$i3,lr,$s0
-	ldrb	$s0,[$tbl,$s0,lsr#24]	@ Td4[s0>>24]
+	ldrb	$s0,[$tbl,$s0]		@ Td4[s0>>24]
 	ldrb	$t1,[$tbl,$i1]		@ Td4[s0>>16]
-	ldrb	$t2,[$tbl,$i2]		@ Td4[s0>>8]
-	ldrb	$t3,[$tbl,$i3]		@ Td4[s0>>0]
-
 	and	$i1,lr,$s1		@ i0
+	ldrb	$t2,[$tbl,$i2]		@ Td4[s0>>8]
 	and	$i2,lr,$s1,lsr#16
+	ldrb	$t3,[$tbl,$i3]		@ Td4[s0>>0]
 	and	$i3,lr,$s1,lsr#8
+
 	ldrb	$i1,[$tbl,$i1]		@ Td4[s1>>0]
 	ldrb	$s1,[$tbl,$s1,lsr#24]	@ Td4[s1>>24]
 	ldrb	$i2,[$tbl,$i2]		@ Td4[s1>>16]
-	ldrb	$i3,[$tbl,$i3]		@ Td4[s1>>8]
 	eor	$s0,$i1,$s0,lsl#24
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s1>>8]
 	eor	$s1,$t1,$s1,lsl#8
-	eor	$t2,$t2,$i2,lsl#8
-	eor	$t3,$t3,$i3,lsl#8
-
 	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,lsl#8
 	and	$i2,lr,$s2		@ i1
-	and	$i3,lr,$s2,lsr#16
+	eor	$t3,$t3,$i3,lsl#8
 	ldrb	$i1,[$tbl,$i1]		@ Td4[s2>>8]
+	and	$i3,lr,$s2,lsr#16
+
 	ldrb	$i2,[$tbl,$i2]		@ Td4[s2>>0]
 	ldrb	$s2,[$tbl,$s2,lsr#24]	@ Td4[s2>>24]
-	ldrb	$i3,[$tbl,$i3]		@ Td4[s2>>16]
 	eor	$s0,$s0,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s2>>16]
 	eor	$s1,$i2,$s1,lsl#16
-	eor	$s2,$t2,$s2,lsl#16
-	eor	$t3,$t3,$i3,lsl#16
-
 	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s2,$t2,$s2,lsl#16
 	and	$i2,lr,$s3,lsr#8	@ i1
-	and	$i3,lr,$s3		@ i2
+	eor	$t3,$t3,$i3,lsl#16
 	ldrb	$i1,[$tbl,$i1]		@ Td4[s3>>16]
+	and	$i3,lr,$s3		@ i2
+
 	ldrb	$i2,[$tbl,$i2]		@ Td4[s3>>8]
 	ldrb	$i3,[$tbl,$i3]		@ Td4[s3>>0]
 	ldrb	$s3,[$tbl,$s3,lsr#24]	@ Td4[s3>>24]
 	eor	$s0,$s0,$i1,lsl#16
+	ldr	$i1,[$key,#0]
 	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
 	eor	$s2,$i3,$s2,lsl#8
+	ldr	$t2,[$key,#8]
 	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
 
-	ldr	lr,[sp],#4		@ pop lr
-	ldr	$t1,[$key,#0]
-	ldr	$t2,[$key,#4]
-	ldr	$t3,[$key,#8]
-	ldr	$i1,[$key,#12]
-	eor	$s0,$s0,$t1
-	eor	$s1,$s1,$t2
-	eor	$s2,$s2,$t3
-	eor	$s3,$s3,$i1
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
 
 	sub	$tbl,$tbl,#1024
-	mov	pc,lr			@ return
+	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_decrypt,.-_armv4_AES_decrypt
 .asciz	"AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
 .align	2
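One detail worth noting in the decrypt hunk: the `@ prefetch Td4` block changes which scratch registers it clobbers but keeps the same idea, touching the 256-byte Td4 table once every 32 bytes so it is already cache-resident before the byte-wide final round. A hedged C analogue (the `td4` table and the stride matching the asm are assumptions of this sketch):

```c
#include <stdint.h>

extern const uint8_t td4[256];   /* assumed final-round table */

/* Touch the table at 32-byte steps, as the asm's ldr sequence does,
 * to pull it into the data cache; the volatile sink keeps the
 * otherwise dead reads from being optimized away. */
static void prefetch_td4(void)
{
    volatile uint8_t sink;
    for (int i = 0; i < 256; i += 32)
        sink = td4[i];
    (void)sink;
}
```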
@@ -1029,3 +1027,4 @@ ___
 
 $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
 print $code;
+close STDOUT;	# enforce flush