From 3562e78743202e43aec8727005182a2558117eca Mon Sep 17 00:00:00 2001
From: marha
Date: Sun, 28 Jun 2009 22:07:26 +0000
Subject: Checked in the following released items:
 xkeyboard-config-1.4.tar.gz
 ttf-bitstream-vera-1.10.tar.gz
 font-alias-1.0.1.tar.gz
 font-sun-misc-1.0.0.tar.gz
 font-sony-misc-1.0.0.tar.gz
 font-schumacher-misc-1.0.0.tar.gz
 font-mutt-misc-1.0.0.tar.gz
 font-misc-misc-1.0.0.tar.gz
 font-misc-meltho-1.0.0.tar.gz
 font-micro-misc-1.0.0.tar.gz
 font-jis-misc-1.0.0.tar.gz
 font-isas-misc-1.0.0.tar.gz
 font-dec-misc-1.0.0.tar.gz
 font-daewoo-misc-1.0.0.tar.gz
 font-cursor-misc-1.0.0.tar.gz
 font-arabic-misc-1.0.0.tar.gz
 font-winitzki-cyrillic-1.0.0.tar.gz
 font-misc-cyrillic-1.0.0.tar.gz
 font-cronyx-cyrillic-1.0.0.tar.gz
 font-screen-cyrillic-1.0.1.tar.gz
 font-xfree86-type1-1.0.1.tar.gz
 font-adobe-utopia-type1-1.0.1.tar.gz
 font-ibm-type1-1.0.0.tar.gz
 font-bitstream-type1-1.0.0.tar.gz
 font-bitstream-speedo-1.0.0.tar.gz
 font-bh-ttf-1.0.0.tar.gz
 font-bh-type1-1.0.0.tar.gz
 font-bitstream-100dpi-1.0.0.tar.gz
 font-bh-lucidatypewriter-100dpi-1.0.0.tar.gz
 font-bh-100dpi-1.0.0.tar.gz
 font-adobe-utopia-100dpi-1.0.1.tar.gz
 font-adobe-100dpi-1.0.0.tar.gz
 font-util-1.0.1.tar.gz
 font-bitstream-75dpi-1.0.0.tar.gz
 font-bh-lucidatypewriter-75dpi-1.0.0.tar.gz
 font-adobe-utopia-75dpi-1.0.1.tar.gz
 font-bh-75dpi-1.0.0.tar.gz
 bdftopcf-1.0.1.tar.gz
 font-adobe-75dpi-1.0.0.tar.gz
 mkfontscale-1.0.6.tar.gz
 openssl-0.9.8k.tar.gz
 bigreqsproto-1.0.2.tar.gz
 xtrans-1.2.2.tar.gz
 resourceproto-1.0.2.tar.gz
 inputproto-1.4.4.tar.gz
 compositeproto-0.4.tar.gz
 damageproto-1.1.0.tar.gz
 zlib-1.2.3.tar.gz
 xkbcomp-1.0.5.tar.gz
 freetype-2.3.9.tar.gz
 pthreads-w32-2-8-0-release.tar.gz
 pixman-0.12.0.tar.gz
 kbproto-1.0.3.tar.gz
 evieext-1.0.2.tar.gz
 fixesproto-4.0.tar.gz
 recordproto-1.13.2.tar.gz
 randrproto-1.2.2.tar.gz
 scrnsaverproto-1.1.0.tar.gz
 renderproto-0.9.3.tar.gz
 xcmiscproto-1.1.2.tar.gz
 fontsproto-2.0.2.tar.gz
 xextproto-7.0.3.tar.gz
 xproto-7.0.14.tar.gz
 libXdmcp-1.0.2.tar.gz
 libxkbfile-1.0.5.tar.gz
 libfontenc-1.0.4.tar.gz
 libXfont-1.3.4.tar.gz
 libX11-1.1.5.tar.gz
 libXau-1.0.4.tar.gz
 libxcb-1.1.tar.gz
 xorg-server-1.5.3.tar.gz
---
 openssl/crypto/rc4/asm/rc4-586.pl    | 230 ++++++++++++++++++++++
 openssl/crypto/rc4/asm/rc4-ia64.S    | 159 +++++++++++++++
 openssl/crypto/rc4/asm/rc4-x86_64.pl | 366 +++++++++++++++++++++++++++++++++++
 3 files changed, 755 insertions(+)
 create mode 100644 openssl/crypto/rc4/asm/rc4-586.pl
 create mode 100644 openssl/crypto/rc4/asm/rc4-ia64.S
 create mode 100644 openssl/crypto/rc4/asm/rc4-x86_64.pl

diff --git a/openssl/crypto/rc4/asm/rc4-586.pl b/openssl/crypto/rc4/asm/rc4-586.pl
new file mode 100644
index 000000000..ef7eee766
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-586.pl
@@ -0,0 +1,230 @@
+#!/usr/local/bin/perl
+
+# At some point it became apparent that the original SSLeay RC4
+# assembler implementation performs suboptimally on the latest IA-32
+# microarchitectures. After re-tuning, performance has changed as
+# follows:
+#
+#	Pentium		+0%
+#	Pentium III	+17%
+#	AMD		+52%(*)
+#	P4		+180%(**)
+#
+# (*)	This number is actually a trade-off:-) It's possible to
+#	achieve +72%, but at the cost of -48% off PIII performance.
+#	In other words, code performing a further 13% faster on AMD
+#	would perform almost 2 times slower on Intel PIII...
+#	For reference! This code delivers ~80% of rc4-amd64.pl
+#	performance on the same Opteron machine.
+# (**)	This number requires compressed key schedule set up by
+#	RC4_set_key and therefore doesn't apply to 0.9.7 [option for
+#	compressed key schedule is implemented in 0.9.8 and later,
+#	see commentary section in rc4_skey.c for further details].
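+#
+# For orientation, a C model of the byte-at-a-time RC4 round that the
+# code below unrolls eight-fold (an illustrative sketch, not part of
+# this module; S stands for the expanded key->data schedule, x and y
+# for key->x and key->y):
+#
+#	unsigned char rc4_round(unsigned int S[256], unsigned int *x,
+#	                        unsigned int *y, unsigned char in)
+#	{
+#		unsigned int tx, ty;
+#		*x = (*x + 1) & 0xff;
+#		tx = S[*x];
+#		*y = (*y + tx) & 0xff;
+#		ty = S[*y];
+#		S[*y] = tx; S[*x] = ty;		/* swap S[x], S[y] */
+#		return in ^ (unsigned char)S[(tx + ty) & 0xff];
+#	}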
+#
+#
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"rc4-586.pl");
+
+$x="eax";
+$y="ebx";
+$tx="ecx";
+$ty="edx";
+$in="esi";
+$out="edi";
+$d="ebp";
+
+&RC4("RC4");
+
+&asm_finish();
+
+sub RC4_loop
+	{
+	local($n,$p,$char)=@_;
+
+	&comment("Round $n");
+
+	if ($char)
+		{
+		if ($p >= 0)
+			{
+			&mov($ty, &swtmp(2));
+			&cmp($ty, $in);
+			&jbe(&label("finished"));
+			&inc($in);
+			}
+		else
+			{
+			&add($ty, 8);
+			&inc($in);
+			&cmp($ty, $in);
+			&jb(&label("finished"));
+			&mov(&swtmp(2), $ty);
+			}
+		}
+	# Moved out
+	# &mov( $tx, &DWP(0,$d,$x,4)) if $p < 0;
+
+	&add( &LB($y), &LB($tx));
+	&mov( $ty, &DWP(0,$d,$y,4));
+	# XXX
+	&mov( &DWP(0,$d,$x,4),$ty);
+	&add( $ty, $tx);
+	&mov( &DWP(0,$d,$y,4),$tx);
+	&and( $ty, 0xff);
+	&inc( &LB($x));				# NEXT ROUND
+	&mov( $tx, &DWP(0,$d,$x,4)) if $p < 1;	# NEXT ROUND
+	&mov( $ty, &DWP(0,$d,$ty,4));
+
+	if (!$char)
+		{
+		#moved up into last round
+		if ($p >= 1)
+			{
+			&add( $out, 8)
+			}
+		&movb( &BP($n,"esp","",0), &LB($ty));
+		}
+	else
+		{
+		# Note in+=8 has occurred
+		&movb( &HB($ty), &BP(-1,$in,"",0));
+		# XXX
+		&xorb(&LB($ty), &HB($ty));
+		# XXX
+		&movb(&BP($n,$out,"",0),&LB($ty));
+		}
+	}
+
+
+sub RC4
+	{
+	local($name)=@_;
+
+	&function_begin_B($name,"");
+
+	&mov($ty,&wparam(1));		# len
+	&cmp($ty,0);
+	&jne(&label("proceed"));
+	&ret();
+	&set_label("proceed");
+
+	&comment("");
+
+	&push("ebp");
+	&push("ebx");
+	&push("esi");
+	&xor( $x, $x);			# avoid partial register stalls
+	&push("edi");
+	&xor( $y, $y);			# avoid partial register stalls
+	&mov( $d, &wparam(0));		# key
+	&mov( $in, &wparam(2));
+
+	&movb( &LB($x), &BP(0,$d,"",1));
+	&movb( &LB($y), &BP(4,$d,"",1));
+
+	&mov( $out, &wparam(3));
+	&inc( &LB($x));
+
+	&stack_push(3);			# 3 temp variables
+	&add( $d, 8);
+
+	# detect compressed schedule, see commentary section in rc4_skey.c...
+	# in 0.9.7 context ~50 bytes below RC4_CHAR label remain redundant,
+	# as compressed key schedule is set up in 0.9.8 and later.
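+	# As a C model (sketch only), the test below reads the dword just
+	# past byte offset 256 of key->data: RC4_set_key plants a -1 there
+	# for the compressed char schedule (see "movl $-1,256($dat)" in
+	# rc4-x86_64.pl), while with a 4-byte RC4_INT schedule that dword
+	# is key->data[64], i.e. a value in 0..255:
+	#
+	#	if (*(int *)((char *)key->data + 256) == -1)
+	#		goto RC4_CHAR;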
+	&cmp(&DWP(256,$d),-1);
+	&je(&label("RC4_CHAR"));
+
+	&lea( $ty, &DWP(-8,$ty,$in));
+
+	# check for 0 length input
+
+	&mov( &swtmp(2), $ty);		# this is now address to exit at
+	&mov( $tx, &DWP(0,$d,$x,4));
+
+	&cmp( $ty, $in);
+	&jb( &label("end"));		# less than 8 bytes
+
+	&set_label("start");
+
+	# filling DELAY SLOT
+	&add( $in, 8);
+
+	&RC4_loop(0,-1,0);
+	&RC4_loop(1,0,0);
+	&RC4_loop(2,0,0);
+	&RC4_loop(3,0,0);
+	&RC4_loop(4,0,0);
+	&RC4_loop(5,0,0);
+	&RC4_loop(6,0,0);
+	&RC4_loop(7,1,0);
+
+	&comment("apply the cipher text");
+	# xor the cipher data with input
+
+	#&add( $out, 8); #moved up into last round
+
+	&mov( $tx, &swtmp(0));
+	&mov( $ty, &DWP(-8,$in,"",0));
+	&xor( $tx, $ty);
+	&mov( $ty, &DWP(-4,$in,"",0));
+	&mov( &DWP(-8,$out,"",0), $tx);
+	&mov( $tx, &swtmp(1));
+	&xor( $tx, $ty);
+	&mov( $ty, &swtmp(2));		# load end ptr;
+	&mov( &DWP(-4,$out,"",0), $tx);
+	&mov( $tx, &DWP(0,$d,$x,4));
+	&cmp($in, $ty);
+	&jbe(&label("start"));
+
+	&set_label("end");
+
+	# There is quite a bit of extra crap in RC4_loop() for this
+	# first round
+	&RC4_loop(0,-1,1);
+	&RC4_loop(1,0,1);
+	&RC4_loop(2,0,1);
+	&RC4_loop(3,0,1);
+	&RC4_loop(4,0,1);
+	&RC4_loop(5,0,1);
+	&RC4_loop(6,1,1);
+
+	&jmp(&label("finished"));
+
+	&align(16);
+	# this is essentially Intel P4 specific codepath, see rc4_skey.c,
+	# and is engaged in 0.9.8 and later context...
+	&set_label("RC4_CHAR");
+
+	&lea ($ty,&DWP(0,$in,$ty));
+	&mov (&swtmp(2),$ty);
+	&movz ($tx,&BP(0,$d,$x));
+
+	# strangely enough unrolled loop performs over 20% slower...
+	&set_label("RC4_CHAR_loop");
+	&add (&LB($y),&LB($tx));
+	&movz ($ty,&BP(0,$d,$y));
+	&movb (&BP(0,$d,$y),&LB($tx));
+	&movb (&BP(0,$d,$x),&LB($ty));
+	&add (&LB($ty),&LB($tx));
+	&movz ($ty,&BP(0,$d,$ty));
+	&add (&LB($x),1);
+	&xorb (&LB($ty),&BP(0,$in));
+	&lea ($in,&DWP(1,$in));
+	&movz ($tx,&BP(0,$d,$x));
+	&cmp ($in,&swtmp(2));
+	&movb (&BP(0,$out),&LB($ty));
+	&lea ($out,&DWP(1,$out));
+	&jb (&label("RC4_CHAR_loop"));
+
+	&set_label("finished");
+	&dec( $x);
+	&stack_pop(3);
+	&movb( &BP(-4,$d,"",0),&LB($y));
+	&movb( &BP(-8,$d,"",0),&LB($x));
+
+	&function_end($name);
+	}
+
diff --git a/openssl/crypto/rc4/asm/rc4-ia64.S b/openssl/crypto/rc4/asm/rc4-ia64.S
new file mode 100644
index 000000000..8210c47d0
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-ia64.S
@@ -0,0 +1,159 @@
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project.
+//
+// Rights for redistribution and usage in source and binary forms are
+// granted according to the OpenSSL license. Warranty of any kind is
+// disclaimed.
+// ====================================================================
+
+.ident "rc4-ia64.S, Version 2.0"
+.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+// What's wrong with compiler generated code? Because of the nature of
+// the C language, the compiler doesn't [dare to] reorder loads and
+// stores. But being memory-bound, RC4 should benefit from reordering
+// [on an in-order-execution core such as IA-64]. But what can we
+// reorder? At the very least we can safely reorder references to the
+// key schedule with respect to input and output streams. Secondly,
+// from the first [close] glance it appeared that it's possible to
+// pull up some references to elements of the key schedule itself.
+// The original rationale ["prior loads are not safe only for a
+// "degenerated" key schedule, when some elements are equal to the
+// same value"] was kind of sloppy.
+// I should have formulated it as it really was: if we assume that
+// pulling up a reference to key[x+1] is not safe, then it would mean
+// that the key schedule would "degenerate," which is never the case.
+// The problem is that this holds true with respect to references to
+// key[x], but not to key[y]. Legitimate "collisions" do occur within
+// every 256^2 bytes window. Fortunately there're enough free
+// instruction slots to keep the prior reference to key[x+1], detect
+// the "collision" and compensate for it. All this without sacrificing
+// a single clock cycle:-) Throughput is ~210MBps on 900MHz CPU, which
+// is >3x faster than gcc generated code and +30% if compared to
+// HP-UX C. Unrolling the loop below should give >30% on top of
+// that...
+
+.text
+.explicit
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+# define ADDP	addp4
+#else
+# define ADDP	add
+#endif
+
+#ifndef SZ
+#define SZ	4	// this is set to sizeof(RC4_INT)
+#endif
+// SZ==4 seems to be optimal. At least SZ==8 is not any faster, not for
+// assembler implementation, while SZ==1 code is ~30% slower.
+#if SZ==1	// RC4_INT is unsigned char
+# define LDKEY	ld1
+# define STKEY	st1
+# define OFF	0
+#elif SZ==4	// RC4_INT is unsigned int
+# define LDKEY	ld4
+# define STKEY	st4
+# define OFF	2
+#elif SZ==8	// RC4_INT is unsigned long
+# define LDKEY	ld8
+# define STKEY	st8
+# define OFF	3
+#endif
+
+out=r8;		// [expanded] output pointer
+inp=r9;		// [expanded] input pointer
+prsave=r10;
+key=r28;	// [expanded] pointer to RC4_KEY
+ksch=r29;	// (key->data+255)&~(sizeof(key->data)-1)
+xx=r30;
+yy=r31;
+
+// void RC4(RC4_KEY *key,size_t len,const void *inp,void *out);
+.global	RC4#
+.proc	RC4#
+.align	32
+.skip	16
+RC4:
+	.prologue
+	.save	ar.pfs,r2
+{ .mii;	alloc	r2=ar.pfs,4,12,0,16
+	.save	pr,prsave
+	mov	prsave=pr
+	ADDP	key=0,in0 };;
+{ .mib;	cmp.eq	p6,p0=0,in1		// len==0?
+	.save	ar.lc,r3
+	mov	r3=ar.lc
+(p6)	br.ret.spnt.many	b0 };;	// emergency exit
+
+	.body
+	.rotr	dat[4],key_x[4],tx[2],rnd[2],key_y[2],ty[1];
+
+{ .mib;	LDKEY	xx=[key],SZ		// load key->x
+	add	in1=-1,in1		// adjust len for loop counter
+	nop.b	0 }
+{ .mib;	ADDP	inp=0,in2
+	ADDP	out=0,in3
+	brp.loop.imp	.Ltop,.Lexit-16 };;
+{ .mmi;	LDKEY	yy=[key]		// load key->y
+	add	ksch=SZ,key
+	mov	ar.lc=in1 }
+{ .mmi;	mov	key_y[1]=r0		// guarantee inequality
+					// in first iteration
+	add	xx=1,xx
+	mov	pr.rot=1<<16 };;
+{ .mii;	nop.m	0
+	dep	key_x[1]=xx,r0,OFF,8
+	mov	ar.ec=3 };;		// note that epilogue counter
+					// is off by 1. I compensate
+					// for this at exit...
+.Ltop:
+// The loop is scheduled for 4*(n+2) spin-rate on Itanium 2, which
+// theoretically gives asymptotic performance of clock frequency
+// divided by 4 in bytes per second, or 400MBps on a 1.6GHz CPU. This
+// is for sizeof(RC4_INT)==4. For smaller RC4_INT STKEY inadvertently
+// splits the last bundle and you end up with 5*n spin-rate:-(
+// Originally the loop was scheduled for 3*n and relied on the key
+// schedule to be aligned at 256*sizeof(RC4_INT) boundary. But
+// *(out++)=dat, which maps to st1, had the same effect [inadvertent
+// bundle split] and held the loop back. Rescheduling for 4*n
+// made it possible to eliminate dependence on specific alignment
+// and allow OpenSSH to keep "abusing" our API. Reaching for 3*n would
+// require unrolling, sticking to variable shift instruction for
+// collecting output [to avoid starvation for integer shifter] and
+// copying of key schedule to controlled place in stack [so that
+// deposit instruction can serve as substitute for whole
+// key->data+((x&255)<<log2(sizeof(key->data[0])))]...
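+// To make the reordering rationale above concrete, here is a C model
+// of one reordered round (an illustrative sketch, not part of this
+// file): key[x+1] is loaded before the swap of key[x] and key[y] has
+// landed, so when the incremented x collides with y the prefetched
+// value is stale and must be replaced with tx -- exactly the
+// compensation performed below (rc4-x86_64.pl does the same via cmove):
+//
+//	x = (x+1) & 255;  tx = key[x];		// tx was prefetched
+//	y = (y+tx) & 255; ty = key[y];
+//	next = key[(x+1) & 255];		// pulled-up load
+//	key[y] = tx; key[x] = ty;		// swap
+//	if (((x+1) & 255) == y) next = tx;	// "collision" fixup
+//	*out++ = *in++ ^ key[(tx+ty) & 255];	// next becomes tx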
+{ .mmi;	(p19)	st1	[out]=dat[3],1		// *(out++)=dat
+	(p16)	add	xx=1,xx			// x++
+	(p18)	dep	rnd[1]=rnd[1],r0,OFF,8 }	// ((tx+ty)&255)<<OFF
+// [the remaining software-pipelined bundles of .Ltop, including the
+// br.ctop back-branch, did not survive in this copy of the patch]
+.Lexit:
+{ .mib;	STKEY	[key]=yy,-SZ		// save key->y
+	mov	pr=prsave,0x1ffff
+	nop.b	0 }
+{ .mib;	st1	[out]=dat[3],1		// compensate for truncated
+					// epilogue counter
+	add	xx=-1,xx
+	nop.b	0 };;
+{ .mib;	STKEY	[key]=xx		// save key->x
+	mov	ar.lc=r3
+	br.ret.sptk.many	b0 };;
+.endp	RC4#
diff --git a/openssl/crypto/rc4/asm/rc4-x86_64.pl b/openssl/crypto/rc4/asm/rc4-x86_64.pl
new file mode 100644
index 000000000..00c6fa28a
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-x86_64.pl
@@ -0,0 +1,366 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
+# "hand-coded assembler"] doesn't stand for the whole improvement
+# coefficient. It turned out that eliminating RC4_CHAR from the config
+# line results in ~40% improvement (yes, even for C implementation).
+# Presumably it has everything to do with AMD cache architecture and
+# RAW or whatever penalties. Once again! The module *requires* config
+# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
+# register arithmetics. For example, instead of 'inc %r8; and $255,%r8'
+# I simply 'inc %r8b'. Even though the optimization manual discourages
+# operating on partial registers, it turned out to be the best bet.
+# At least for AMD... How IA32E would perform remains to be seen...
+
+# As was shown by Marc Bevand, reordering of a couple of load
+# operations results in an even higher performance gain of 3.3x:-) At
+# least on Opteron... For reference, 1x in this case is RC4_CHAR C-code
+# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
+# The latter means that if you want to *estimate* what to expect from
+# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
+
+# Intel P4 EM64T core was found to run the AMD64 code really slow...
+# The only way to achieve comparable performance on P4 was to keep
+# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
+# compose blended code, which would perform even within a 30% margin
+# on either AMD or Intel platforms, I implement both cases. See
+# rc4_skey.c for further details...
+
+# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
+# those with add/sub results in 50% performance improvement of folded
+# loop...
+
+# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
+# performance by >30% [unlike the P4 32-bit case that is]. But this is
+# provided that loads are reordered even more aggressively! Both code
+# paths, AMD64 and EM64T, reorder loads in essentially the same manner
+# as my IA-64 implementation. On Opteron this resulted in a modest 5%
+# improvement [I had to test it], while final Intel P4 performance
+# achieves a respectable 432MBps on a 2.8GHz processor now. For
+# reference. If executed on Xeon, the current RC4_CHAR code-path is
+# 2.7x faster than the RC4_INT code-path. While if executed on Opteron,
+# it's only 25% slower than the RC4_INT one [meaning that if CPU µ-arch
+# detection is not implemented, then this final RC4_CHAR code-path
+# should be preferred, as it provides better *all-round* performance].
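+
+# In C terms the partial-register trick above amounts to keeping the
+# RC4 indices in 8-bit storage, so the "& 255" wrap costs nothing (an
+# illustrative sketch, not part of this module):
+#
+#	unsigned char x = 0, y = 0;	/* wraps at 256 for free  */
+#	x = x + 1;			/* 'inc %r8b'             */
+#	y = y + tx;			/* no 'and $255,%r..'     */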
+
+# Intel Core2 was observed to perform poorly on both code paths:-( It
+# apparently suffers from some kind of partial register stall, which
+# occurs in 64-bit mode only [as virtually identical 32-bit loop was
+# observed to outperform 64-bit one by almost 50%]. Adding two movzb to
+# cloop1 boosts its performance by 80%! This loop appears to be optimal
+# fit for Core2 and therefore the code was modified to skip cloop8 on
+# this CPU.
+
+$output=shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $output";
+
+$dat="%rdi";	# arg1
+$len="%rsi";	# arg2
+$inp="%rdx";	# arg3
+$out="%rcx";	# arg4
+
+@XX=("%r8","%r10");
+@TX=("%r9","%r11");
+$YY="%r12";
+$TY="%r13";
+
+$code=<<___;
+.text
+
+.globl	RC4
+.type	RC4,\@function,4
+.align	16
+RC4:	or	$len,$len
+	jne	.Lentry
+	ret
+.Lentry:
+	push	%r12
+	push	%r13
+
+	add	\$8,$dat
+	movl	-8($dat),$XX[0]#d
+	movl	-4($dat),$YY#d
+	cmpl	\$-1,256($dat)
+	je	.LRC4_CHAR
+	inc	$XX[0]#b
+	movl	($dat,$XX[0],4),$TX[0]#d
+	test	\$-8,$len
+	jz	.Lloop1
+	jmp	.Lloop8
+.align	16
+.Lloop8:
+___
+for ($i=0;$i<8;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	mov	$XX[0],$XX[1]
+	movl	($dat,$YY,4),$TY#d
+	ror	\$8,%rax		# ror is redundant when $i=0
+	inc	$XX[1]#b
+	movl	($dat,$XX[1],4),$TX[1]#d
+	cmp	$XX[1],$YY
+	movl	$TX[0]#d,($dat,$YY,4)
+	cmove	$TX[0],$TX[1]
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TX[0]#b,$TY#b
+	movb	($dat,$TY,4),%al
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+$code.=<<___;
+	ror	\$8,%rax
+	sub	\$8,$len
+
+	xor	($inp),%rax
+	add	\$8,$inp
+	mov	%rax,($out)
+	add	\$8,$out
+
+	test	\$-8,$len
+	jnz	.Lloop8
+	cmp	\$0,$len
+	jne	.Lloop1
+___
+$code.=<<___;
+.Lexit:
+	sub	\$1,$XX[0]#b
+	movl	$XX[0]#d,-8($dat)
+	movl	$YY#d,-4($dat)
+
+	pop	%r13
+	pop	%r12
+	ret
+.align	16
+.Lloop1:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($inp),$TY#b
+	inc	$inp
+	movb	$TY#b,($out)
+	inc	$out
+	dec	$len
+	jnz	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.LRC4_CHAR:
+	add	\$1,$XX[0]#b
+	movzb	($dat,$XX[0]),$TX[0]#d
+	test	\$-8,$len
+	jz	.Lcloop1
+	cmp	\$0,260($dat)
+	jnz	.Lcloop1
+	push	%rbx
+	jmp	.Lcloop8
+.align	16
+.Lcloop8:
+	mov	($inp),%eax
+	mov	4($inp),%ebx
+___
+# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
+for ($i=0;$i<4;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	lea	1($XX[0]),$XX[1]
+	movzb	($dat,$YY),$TY#d
+	movzb	$XX[1]#b,$XX[1]#d
+	movzb	($dat,$XX[1]),$TX[1]#d
+	movb	$TX[0]#b,($dat,$YY)
+	cmp	$XX[1],$YY
+	movb	$TY#b,($dat,$XX[0])
+	jne	.Lcmov$i		# Intel cmov is sloooow...
+	mov	$TX[0],$TX[1]
+.Lcmov$i:
+	add	$TX[0]#b,$TY#b
+	xor	($dat,$TY),%al
+	ror	\$8,%eax
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+for ($i=4;$i<8;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	lea	1($XX[0]),$XX[1]
+	movzb	($dat,$YY),$TY#d
+	movzb	$XX[1]#b,$XX[1]#d
+	movzb	($dat,$XX[1]),$TX[1]#d
+	movb	$TX[0]#b,($dat,$YY)
+	cmp	$XX[1],$YY
+	movb	$TY#b,($dat,$XX[0])
+	jne	.Lcmov$i		# Intel cmov is sloooow...
+	mov	$TX[0],$TX[1]
+.Lcmov$i:
+	add	$TX[0]#b,$TY#b
+	xor	($dat,$TY),%bl
+	ror	\$8,%ebx
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+$code.=<<___;
+	lea	-8($len),$len
+	mov	%eax,($out)
+	lea	8($inp),$inp
+	mov	%ebx,4($out)
+	lea	8($out),$out
+
+	test	\$-8,$len
+	jnz	.Lcloop8
+	pop	%rbx
+	cmp	\$0,$len
+	jne	.Lcloop1
+	jmp	.Lexit
+___
+$code.=<<___;
+.align	16
+.Lcloop1:
+	add	$TX[0]#b,$YY#b
+	movzb	($dat,$YY),$TY#d
+	movb	$TX[0]#b,($dat,$YY)
+	movb	$TY#b,($dat,$XX[0])
+	add	$TX[0]#b,$TY#b
+	add	\$1,$XX[0]#b
+	movzb	$TY#b,$TY#d
+	movzb	$XX[0]#b,$XX[0]#d
+	movzb	($dat,$TY),$TY#d
+	movzb	($dat,$XX[0]),$TX[0]#d
+	xorb	($inp),$TY#b
+	lea	1($inp),$inp
+	movb	$TY#b,($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lcloop1
+	jmp	.Lexit
+.size	RC4,.-RC4
+___
+
+$idx="%r8";
+$ido="%r9";
+
+$code.=<<___;
+.extern	OPENSSL_ia32cap_P
+.globl	RC4_set_key
+.type	RC4_set_key,\@function,3
+.align	16
+RC4_set_key:
+	lea	8($dat),$dat
+	lea	($inp,$len),$inp
+	neg	$len
+	mov	$len,%rcx
+	xor	%eax,%eax
+	xor	$ido,$ido
+	xor	%r10,%r10
+	xor	%r11,%r11
+
+	mov	OPENSSL_ia32cap_P(%rip),$idx#d
+	bt	\$20,$idx#d
+	jnc	.Lw1stloop
+	bt	\$30,$idx#d
+	setc	$ido#b
+	mov	$ido#d,260($dat)
+	jmp	.Lc1stloop
+
+.align	16
+.Lw1stloop:
+	mov	%eax,($dat,%rax,4)
+	add	\$1,%al
+	jnc	.Lw1stloop
+
+	xor	$ido,$ido
+	xor	$idx,$idx
+.align	16
+.Lw2ndloop:
+	mov	($dat,$ido,4),%r10d
+	add	($inp,$len,1),$idx#b
+	add	%r10b,$idx#b
+	add	\$1,$len
+	mov	($dat,$idx,4),%r11d
+	cmovz	%rcx,$len
+	mov	%r10d,($dat,$idx,4)
+	mov	%r11d,($dat,$ido,4)
+	add	\$1,$ido#b
+	jnc	.Lw2ndloop
+	jmp	.Lexit_key
+
+.align	16
+.Lc1stloop:
+	mov	%al,($dat,%rax)
+	add	\$1,%al
+	jnc	.Lc1stloop
+
+	xor	$ido,$ido
+	xor	$idx,$idx
+.align	16
+.Lc2ndloop:
+	mov	($dat,$ido),%r10b
+	add	($inp,$len),$idx#b
+	add	%r10b,$idx#b
+	add	\$1,$len
+	mov	($dat,$idx),%r11b
+	jnz	.Lcnowrap
+	mov	%rcx,$len
+.Lcnowrap:
+	mov	%r10b,($dat,$idx)
+	mov	%r11b,($dat,$ido)
+	add	\$1,$ido#b
+	jnc	.Lc2ndloop
+	movl	\$-1,256($dat)
+
+.align	16
+.Lexit_key:
+	xor	%eax,%eax
+	mov	%eax,-8($dat)
+	mov	%eax,-4($dat)
+	ret
+.size	RC4_set_key,.-RC4_set_key
+
+.globl	RC4_options
+.type	RC4_options,\@function,0
+.align	16
+RC4_options:
+	.picmeup %rax
+	lea	.Lopts-.(%rax),%rax
+	mov	OPENSSL_ia32cap_P(%rip),%edx
+	bt	\$20,%edx
+	jnc	.Ldone
+	add	\$12,%rax
+	bt	\$30,%edx
+	jnc	.Ldone
+	add	\$13,%rax
+.Ldone:
+	ret
+.align	64
+.Lopts:
+.asciz	"rc4(8x,int)"
+.asciz	"rc4(8x,char)"
+.asciz	"rc4(1x,char)"
+.asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+.size	RC4_options,.-RC4_options
+___
+
+$code =~ s/#([bwd])/$1/gm;
+
+$code =~ s/RC4_set_key/private_RC4_set_key/g if ($ENV{FIPSCANLIB} ne "");
+
+print $code;
+
+close STDOUT;
-- 
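For reference, a compact C model of the key-schedule convention the
three files above share (a minimal sketch assuming the 0.9.8 layout
described in the comments; the names MODEL_RC4_KEY and model_set_key
are hypothetical -- OpenSSL's real RC4_set_key lives in rc4_skey.c):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint32_t x, y;
        uint32_t data[256 + 1];   /* +1 leaves room for the -1 marker */
    } MODEL_RC4_KEY;

    void model_set_key(MODEL_RC4_KEY *key, size_t len,
                       const unsigned char *k, int compressed)
    {
        unsigned char *c = (unsigned char *)key->data;
        unsigned int i, j = 0, tmp;

        key->x = key->y = 0;
        if (compressed) {             /* byte schedule, RC4_CHAR-style */
            for (i = 0; i < 256; i++) c[i] = (unsigned char)i;
            for (i = 0; i < 256; i++) {
                j = (j + c[i] + k[i % len]) & 0xff;
                tmp = c[i]; c[i] = c[j]; c[j] = (unsigned char)tmp;
            }
            /* the marker the "cmpl $-1,256($dat)" test looks for */
            *(int32_t *)(c + 256) = -1;
        } else {                      /* expanded 32-bit schedule */
            for (i = 0; i < 256; i++) key->data[i] = i;
            for (i = 0; i < 256; i++) {
                j = (j + key->data[i] + k[i % len]) & 0xff;
                tmp = key->data[i];
                key->data[i] = key->data[j];
                key->data[j] = tmp;
            }
        }
    }

With the expanded schedule the dword at byte offset 256 is key->data[64],
a value in 0..255, so it can never equal the -1 marker; that is what lets
the encryption entry points pick the right code path at run time.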