# At some point it became apparent that the original SSLeay RC4
# assembler implementation performs suboptimally on latest IA-32
# microarchitectures. After re-tuning performance has changed as
# (*)	This number is actually a trade-off:-) It's possible to
#	achieve +72%, but at the cost of -48% off PIII performance.
#	In other words code performing further 13% faster on AMD
#	would perform almost 2 times slower on Intel PIII...
#	For reference! This code delivers ~80% of rc4-amd64.pl
#	performance on the same Opteron machine.
# (**)	This number requires compressed key schedule set up by
#	RC4_set_key and therefore doesn't apply to 0.9.7 [option for
#	compressed key schedule is implemented in 0.9.8 and later,
#	see commentary section in rc4_skey.c for further details].
#
# <appro@fy.chalmers.se>
# Make the perlasm framework reachable from both in-tree and
# sibling-directory layouts before initializing the x86 assembler
# back-end for this script.
push @INC, "perlasm", "../../perlasm";

&asm_init($ARGV[0], "rc4-586.pl");
# Interior of the RC4_loop round generator. The enclosing "sub" line
# and a number of original lines are not visible in this chunk, so the
# comments below are hedged where omitted code matters.
#   $n    - byte position within the current 8-byte chunk
#   $p    - round-position selector (first/middle/last-round variants)
#   $char - true when generating for the byte-wise compressed schedule
# NOTE(review): parameter roles inferred from usage below - confirm
# against the call sites in the full file.
local($n,$p,$char)=@_;

# Exit to the common epilogue when input is exhausted (the compares
# feeding these branches are on lines omitted from this chunk).
&jbe(&label("finished"));

&jb(&label("finished"));

# &mov( $tx, &DWP(0,$d,$x,4)) if $p < 0;

# RC4 round on the word-per-entry schedule; $tx holds S[x] on entry:
#   y += S[x]; then swap S[x] and S[y].
&add(&LB($y),&LB($tx));
&mov($ty,&DWP(0,$d,$y,4));
&mov(&DWP(0,$d,$x,4),$ty);
&mov(&DWP(0,$d,$y,4),$tx);

# Advance x (byte register wraps mod 256) and preload the next S[x],
# except in the final-round variant.
&inc(&LB($x));				# NEXT ROUND
&mov($tx,&DWP(0,$d,$x,4))	if $p < 1;	# NEXT ROUND

# Fetch keystream byte S[t]; the add forming the combined index t is
# on a line omitted from this chunk - NOTE(review): confirm.
&mov($ty,&DWP(0,$d,$ty,4));

#moved up into last round

# Stash keystream byte $n in the on-stack buffer.
&movb(&BP($n,"esp","",0),&LB($ty));

# Note in+=8 has occurred
# Pick up the matching input byte (hence the -1 offset) and xor it
# with the keystream byte.
&movb(&HB($ty),&BP(-1,$in,"",0));

&xorb(&LB($ty),&HB($ty));

# Store cipher byte $n to the output buffer.
&movb(&BP($n,$out,"",0),&LB($ty));
# Interior of the RC4($name) generator: emits the public RC4 entry
# point. This chunk is incomplete (the enclosing "sub" line and a
# number of original lines are missing), so flow comments are hedged.
&function_begin_B($name,"");

&mov($ty,&wparam(1));		# len

# Zero-length early-out; the compare feeding this jne is on an
# omitted line - NOTE(review): confirm against the full file.
&jne(&label("proceed"));

&set_label("proceed");

# Clear the S-box indices with full-width xor so the byte-register
# writes below do not incur partial register stalls.
&xor($x,$x);			# avoid partial register stalls
&xor($y,$y);			# avoid partial register stalls

# Remaining arguments: key schedule, input and output pointers.
&mov($d,&wparam(0));		# key
&mov($in,&wparam(2));

# Restore the saved x and y indices from the head of the key structure.
&movb(&LB($x),&BP(0,$d,"",1));
&movb(&LB($y),&BP(4,$d,"",1));

&mov($out,&wparam(3));

&stack_push(3);			# 3 temp variables

# detect compressed schedule, see commentary section in rc4_skey.c...
# in 0.9.7 context ~50 bytes below RC4_CHAR label remain redundant,
# as compressed key schedule is set up in 0.9.8 and later.
&cmp(&DWP(256,$d),-1);
&je(&label("RC4_CHAR"));

# Word path: end-of-input pointer minus 8 lets the unrolled loop test
# "at least 8 bytes left" with a single compare.
&lea($ty,&DWP(-8,$ty,$in));

# check for 0 length input
&mov(&swtmp(2),$ty);		# this is now address to exit at
&mov($tx,&DWP(0,$d,$x,4));	# prime $tx with S[x] for the first round

&jb(&label("end"));		# less than 8 bytes

&comment("apply the cipher text");
# xor the cipher data with input
#&add( $out, 8); #moved up into last round

# Combine the stacked keystream bytes with the input words and store
# the result; the xor instructions pairing these loads are on lines
# omitted from this chunk.
&mov($tx,&swtmp(0));
&mov($ty,&DWP(-8,$in,"",0));
&mov($ty,&DWP(-4,$in,"",0));
&mov(&DWP(-8,$out,"",0),$tx);
&mov($tx,&swtmp(1));
&mov($ty,&swtmp(2));		# load end ptr;
&mov(&DWP(-4,$out,"",0),$tx);
&mov($tx,&DWP(0,$d,$x,4));
&jbe(&label("start"));

# There is quite a bit of extra crap in RC4_loop() for this
&jmp(&label("finished"));

# this is essentially Intel P4 specific codepath, see rc4_skey.c,
# and is engaged in 0.9.8 and later context...
&set_label("RC4_CHAR");

# Byte path for the compressed (one byte per entry) key schedule:
# compute the end-of-input pointer and prime $tx with S[x].
&lea($ty,&DWP(0,$in,$ty));
&mov(&swtmp(2),$ty);
&movz($tx,&BP(0,$d,$x));

# strangely enough unrolled loop performs over 20% slower...
&set_label("RC4_CHAR_loop");

# Classic RC4 round on the byte-wide schedule:
#   y += S[x]; swap S[x],S[y]; emit S[S[x]+S[y]] ^ *in++.
&add(&LB($y),&LB($tx));
&movz($ty,&BP(0,$d,$y));
&movb(&BP(0,$d,$y),&LB($tx));
&movb(&BP(0,$d,$x),&LB($ty));
&add(&LB($ty),&LB($tx));
&movz($ty,&BP(0,$d,$ty));

&xorb(&LB($ty),&BP(0,$in));
&lea($in,&BP(1,$in));		# lea advances $in without touching flags
&movz($tx,&BP(0,$d,$x));
&cmp($in,&swtmp(2));
&movb(&BP(0,$out),&LB($ty));	# mov/lea preserve flags for the jb below
&lea($out,&BP(1,$out));
&jb(&label("RC4_CHAR_loop"));

&set_label("finished");

# Save the updated x and y indices back into the key structure; the
# negative offsets imply $d was advanced on an omitted line -
# NOTE(review): confirm.
&movb(&BP(-4,$d,"",0),&LB($y));
&movb(&BP(-8,$d,"",0),&LB($x));

&function_end($name);