OpenSSL: update to 1.0.2a
[tomato.git] / release / src / router / openssl / crypto / aes / asm / aesni-x86_64.pl
blob 5f6174635f68f58a1f1533c2d8188a88831e82b0
1 #!/usr/bin/env perl
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # This module implements support for Intel AES-NI extension. In
11 # OpenSSL context it's used with Intel engine, but can also be used as
12 # drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below for
13 # details].
15 # Performance.
17 # Given aes(enc|dec) instructions' latency asymptotic performance for
18 # non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
19 # processed with 128-bit key. And given their throughput asymptotic
20 # performance for parallelizable modes is 1.25 cycles per byte. Being
21 # asymptotic limit it's not something you commonly achieve in reality,
22 # but how close does one get? Below are results collected for
23 # different modes and block sizes. Pairs of numbers are for en-/
24 # decryption.
26 # 16-byte 64-byte 256-byte 1-KB 8-KB
27 # ECB 4.25/4.25 1.38/1.38 1.28/1.28 1.26/1.26 1.26/1.26
28 # CTR 5.42/5.42 1.92/1.92 1.44/1.44 1.28/1.28 1.26/1.26
29 # CBC 4.38/4.43 4.15/1.43 4.07/1.32 4.07/1.29 4.06/1.28
30 # CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07
31 # OFB 5.42/5.42 4.64/4.64 4.44/4.44 4.39/4.39 4.38/4.38
32 # CFB 5.73/5.85 5.56/5.62 5.48/5.56 5.47/5.55 5.47/5.55
34 # ECB, CTR, CBC and CCM results are free from EVP overhead. This means
35 # that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
36 # [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
37 # The results were collected with specially crafted speed.c benchmark
38 # in order to compare them with results reported in "Intel Advanced
39 # Encryption Standard (AES) New Instruction Set" White Paper Revision
40 # 3.0 dated May 2010. All above results are consistently better. This
41 # module also provides better performance for block sizes smaller than
42 # 128 bytes in points *not* represented in the above table.
44 # Looking at the results for 8-KB buffer.
46 # CFB and OFB results are far from the limit, because implementation
47 # uses "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
48 # single-block aesni_encrypt, which is not the most optimal way to go.
49 # CBC encrypt result is unexpectedly high and there is no documented
50 # explanation for it. Seemingly there is a small penalty for feeding
51 # the result back to AES unit the way it's done in CBC mode. There is
52 # nothing one can do and the result appears optimal. CCM result is
53 # identical to CBC, because CBC-MAC is essentially CBC encrypt without
54 # saving output. CCM CTR "stays invisible," because it's neatly
55 # interleaved with CBC-MAC. This provides ~30% improvement over
56 # "straightforward" CCM implementation with CTR and CBC-MAC performed
57 # disjointly. Parallelizable modes practically achieve the theoretical
58 # limit.
60 # Looking at how results vary with buffer size.
62 # Curves are practically saturated at 1-KB buffer size. In most cases
63 # "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
64 # CTR curve doesn't follow this pattern and is "slowest" changing one
65 # with "256-byte" result being 87% of "8-KB." This is because overhead
66 # in CTR mode is most computationally intensive. Small-block CCM
67 # decrypt is slower than encrypt, because first CTR and last CBC-MAC
68 # iterations can't be interleaved.
70 # Results for 192- and 256-bit keys.
72 # EVP-free results were observed to scale perfectly with number of
73 # rounds for larger block sizes, i.e. 192-bit result being 10/12 times
74 # lower and 256-bit one - 10/14. Well, in CBC encrypt case differences
75 # are a tad smaller, because the above mentioned penalty biases all
76 # results by same constant value. In similar way function call
77 # overhead affects small-block performance, as well as OFB and CFB
78 # results. Differences are not large, most common coefficients are
79 # 10/11.7 and 10/13.4 (as opposite to 10/12.0 and 10/14.0), but one
80 # observe even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
82 # January 2011
84 # While Westmere processor features 6 cycles latency for aes[enc|dec]
85 # instructions, which can be scheduled every second cycle, Sandy
86 # Bridge spends 8 cycles per instruction, but it can schedule them
87 # every cycle. This means that code targeting Westmere would perform
88 # suboptimally on Sandy Bridge. Therefore this update.
90 # In addition, non-parallelizable CBC encrypt (as well as CCM) is
91 # optimized. Relative improvement might appear modest, 8% on Westmere,
92 # but in absolute terms it's 3.77 cycles per byte encrypted with
93 # 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
94 # should be compared to asymptotic limits of 3.75 for Westmere and
95 # 5.00 for Sandy Bridge. Actually, the fact that they get this close
96 # to asymptotic limits is quite amazing. Indeed, the limit is
97 # calculated as latency times number of rounds, 10 for 128-bit key,
98 # and divided by 16, the number of bytes in block, or in other words
99 # it accounts *solely* for aesenc instructions. But there are extra
100 # instructions, and numbers so close to the asymptotic limits mean
101 # that it's as if it takes as little as *one* additional cycle to
102 # execute all of them. How is it possible? It is possible thanks to
103 # out-of-order execution logic, which manages to overlap post-
104 # processing of previous block, things like saving the output, with
105 # actual encryption of current block, as well as pre-processing of
106 # current block, things like fetching input and xor-ing it with
107 # 0-round element of the key schedule, with actual encryption of
108 # previous block. Keep this in mind...
110 # For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
111 # performance is achieved by interleaving instructions working on
112 # independent blocks. In which case asymptotic limit for such modes
113 # can be obtained by dividing above mentioned numbers by AES
114 # instructions' interleave factor. Westmere can execute at most 3
115 # instructions at a time, meaning that optimal interleave factor is 3,
116 # and that's where the "magic" number of 1.25 come from. "Optimal
117 # interleave factor" means that increase of interleave factor does
118 # not improve performance. The formula has proven to reflect reality
119 # pretty well on Westmere... Sandy Bridge on the other hand can
120 # execute up to 8 AES instructions at a time, so how does varying
121 # interleave factor affect the performance? Here is table for ECB
122 # (numbers are cycles per byte processed with 128-bit key):
124 # instruction interleave factor 3x 6x 8x
125 # theoretical asymptotic limit 1.67 0.83 0.625
126 # measured performance for 8KB block 1.05 0.86 0.84
128 # "as if" interleave factor 4.7x 5.8x 6.0x
130 # Further data for other parallelizable modes:
132 # CBC decrypt 1.16 0.93 0.74
133 # CTR 1.14 0.91 0.74
135 # Well, given 3x column it's probably inappropriate to call the limit
136 # asymptotic, if it can be surpassed, isn't it? What happens there?
137 # Rewind to CBC paragraph for the answer. Yes, out-of-order execution
138 # magic is responsible for this. Processor overlaps not only the
139 # additional instructions with AES ones, but even AES instructions
140 # processing adjacent triplets of independent blocks. In the 6x case
141 # additional instructions still claim disproportionally small amount
142 # of additional cycles, but in 8x case number of instructions must be
143 # a tad too high for out-of-order logic to cope with, and AES unit
144 # remains underutilized... As you can see 8x interleave is hardly
145 # justifiable, so there is no need to feel bad that 32-bit aesni-x86.pl
146 # utilizes 6x interleave because of limited register bank capacity.
148 # Higher interleave factors do have negative impact on Westmere
149 # performance. While for ECB mode it's negligible ~1.5%, other
150 # parallelizables perform ~5% worse, which is outweighed by ~25%
151 # improvement on Sandy Bridge. To balance regression on Westmere
152 # CTR mode was implemented with 6x aesenc interleave factor.
154 # April 2011
156 # Add aesni_xts_[en|de]crypt. Westmere spends 1.25 cycles processing
157 # one byte out of 8KB with 128-bit key, Sandy Bridge - 0.90. Just like
158 # in CTR mode AES instruction interleave factor was chosen to be 6x.
160 ######################################################################
161 # Current large-block performance in cycles per byte processed with
162 # 128-bit key (less is better).
164 # CBC en-/decrypt CTR XTS ECB
165 # Westmere 3.77/1.25 1.25 1.25 1.26
166 # * Bridge 5.07/0.74 0.75 0.90 0.85
167 # Haswell 4.44/0.63 0.63 0.73 0.63
168 # Atom 5.75/3.54 3.56 4.12 3.87(*)
169 # Bulldozer 5.77/0.70 0.72 0.90 0.70
171 # (*) Atom ECB result is suboptimal because of penalties incurred
172 # by operations on %xmm8-15. As ECB is not considered
173 # critical, nothing was done to mitigate the problem.
$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
			# generates drop-in replacement for
			# crypto/aes/asm/aes-x86_64.pl:-)

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Pipe everything we print through the perlasm translator. Fail loudly
# if the translator cannot be started, rather than silently generating
# an empty output file.
open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

$code=".text\n";
$code.=".extern	OPENSSL_ia32cap_P\n";

$rounds="%eax";	# input to and changed by aesni_[en|de]cryptN !!!
# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
$inp="%rdi";
$out="%rsi";
$len="%rdx";
$key="%rcx";	# input to and changed by aesni_[en|de]cryptN !!!
$ivp="%r8";	# cbc, ctr, ...

$rnds_="%r10d";	# backup copy for $rounds
$key_="%r11";	# backup copy for $key

# %xmm register layout
$rndkey0="%xmm0";	$rndkey1="%xmm1";
$inout0="%xmm2";	$inout1="%xmm3";
$inout2="%xmm4";	$inout3="%xmm5";
$inout4="%xmm6";	$inout5="%xmm7";
$inout6="%xmm8";	$inout7="%xmm9";

$in2="%xmm6";		$in1="%xmm7";	# used in CBC decrypt, CTR, ...
$in0="%xmm8";		$iv="%xmm9";
# Inline version of internal aesni_[en|de]crypt1.
#
# Why folded loop? Because aes[enc|dec] is slow enough to accommodate
# cycles which take care of loop variables...
#
# $p is "enc" or "dec"; $inout defaults to $inout0; when $ivec is given
# it is xor-ed into $inout through the 0-round key (CBC-style chaining).
{ my $sn;
sub aesni_generate1 {
my ($p,$key,$rounds,$inout,$ivec)=@_;	$inout=$inout0 if (!defined($inout));
++$sn;	# unique suffix so the local loop label never collides
$code.=<<___;
	$movkey	($key),$rndkey0
	$movkey	16($key),$rndkey1
___
$code.=<<___ if (defined($ivec));
	xorps	$rndkey0,$ivec
	lea	32($key),$key
	xorps	$ivec,$inout
___
$code.=<<___ if (!defined($ivec));
	lea	32($key),$key
	xorps	$rndkey0,$inout
___
$code.=<<___;
.Loop_${p}1_$sn:
	aes${p}	$rndkey1,$inout
	dec	$rounds
	$movkey	($key),$rndkey1
	lea	16($key),$key
	jnz	.Loop_${p}1_$sn	# loop body is 16 bytes
	aes${p}last	$rndkey1,$inout
___
}}
# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
#
{ my ($inp,$out,$key) = @_4args;

$code.=<<___;
.globl	${PREFIX}_encrypt
.type	${PREFIX}_encrypt,\@abi-omnipotent
.align	16
${PREFIX}_encrypt:
	movups	($inp),$inout0		# load input
	mov	240($key),$rounds	# key->rounds
___
	&aesni_generate1("enc",$key,$rounds);
$code.=<<___;
	movups	$inout0,($out)		# output
	ret
.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt

.globl	${PREFIX}_decrypt
.type	${PREFIX}_decrypt,\@abi-omnipotent
.align	16
${PREFIX}_decrypt:
	movups	($inp),$inout0		# load input
	mov	240($key),$rounds	# key->rounds
___
	&aesni_generate1("dec",$key,$rounds);
$code.=<<___;
	movups	$inout0,($out)		# output
	ret
.size	${PREFIX}_decrypt, .-${PREFIX}_decrypt
___
}
# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
# factor. Why 3x subroutine were originally used in loops? Even though
# aes[enc|dec] latency was originally 6, it could be scheduled only
# every *2nd* cycle. Thus 3x interleave was the one providing optimal
# utilization, i.e. when subroutine's throughput is virtually same as
# of non-interleaved subroutine [for number of input blocks up to 3].
# This is why it originally made no sense to implement 2x subroutine.
# But times change and it became appropriate to spend extra 192 bytes
# on 2x subroutine on Atom Silvermont account. For processors that
# can schedule aes[enc|dec] every cycle optimal interleave factor
# equals to corresponding instructions latency. 8x is optimal for
# * Bridge and "super-optimal" for other Intel CPUs...

sub aesni_generate2 {
my $dir=shift;
# As already mentioned it takes in $key and $rounds, which are *not*
# preserved. $inout[0-1] is cipher/clear text...
$code.=<<___;
.type	_aesni_${dir}rypt2,\@abi-omnipotent
.align	16
_aesni_${dir}rypt2:
	$movkey	($key),$rndkey0
	shl	\$4,$rounds
	$movkey	16($key),$rndkey1
	xorps	$rndkey0,$inout0
	xorps	$rndkey0,$inout1
	$movkey	32($key),$rndkey0
	lea	32($key,$rounds),$key
	neg	%rax				# $rounds
	add	\$16,%rax

.L${dir}_loop2:
	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	$movkey		($key,%rax),$rndkey1
	add		\$32,%rax
	aes${dir}	$rndkey0,$inout0
	aes${dir}	$rndkey0,$inout1
	$movkey		-16($key,%rax),$rndkey0
	jnz		.L${dir}_loop2

	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}last	$rndkey0,$inout0
	aes${dir}last	$rndkey0,$inout1
	ret
.size	_aesni_${dir}rypt2,.-_aesni_${dir}rypt2
___
}
sub aesni_generate3 {
my $dir=shift;
# As already mentioned it takes in $key and $rounds, which are *not*
# preserved. $inout[0-2] is cipher/clear text...
$code.=<<___;
.type	_aesni_${dir}rypt3,\@abi-omnipotent
.align	16
_aesni_${dir}rypt3:
	$movkey	($key),$rndkey0
	shl	\$4,$rounds
	$movkey	16($key),$rndkey1
	xorps	$rndkey0,$inout0
	xorps	$rndkey0,$inout1
	xorps	$rndkey0,$inout2
	$movkey	32($key),$rndkey0
	lea	32($key,$rounds),$key
	neg	%rax				# $rounds
	add	\$16,%rax

.L${dir}_loop3:
	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	$movkey		($key,%rax),$rndkey1
	add		\$32,%rax
	aes${dir}	$rndkey0,$inout0
	aes${dir}	$rndkey0,$inout1
	aes${dir}	$rndkey0,$inout2
	$movkey		-16($key,%rax),$rndkey0
	jnz		.L${dir}_loop3

	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}last	$rndkey0,$inout0
	aes${dir}last	$rndkey0,$inout1
	aes${dir}last	$rndkey0,$inout2
	ret
.size	_aesni_${dir}rypt3,.-_aesni_${dir}rypt3
___
}
# 4x interleave is implemented to improve small block performance,
# most notably [and naturally] 4 block by ~30%. One can argue that one
# should have implemented 5x as well, but improvement would be <20%,
# so it's not worth it...
sub aesni_generate4 {
my $dir=shift;
# As already mentioned it takes in $key and $rounds, which are *not*
# preserved. $inout[0-3] is cipher/clear text...
$code.=<<___;
.type	_aesni_${dir}rypt4,\@abi-omnipotent
.align	16
_aesni_${dir}rypt4:
	$movkey	($key),$rndkey0
	shl	\$4,$rounds
	$movkey	16($key),$rndkey1
	xorps	$rndkey0,$inout0
	xorps	$rndkey0,$inout1
	xorps	$rndkey0,$inout2
	xorps	$rndkey0,$inout3
	$movkey	32($key),$rndkey0
	lea	32($key,$rounds),$key
	neg	%rax				# $rounds
	.byte	0x0f,0x1f,0x00
	add	\$16,%rax

.L${dir}_loop4:
	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	$movkey		($key,%rax),$rndkey1
	add		\$32,%rax
	aes${dir}	$rndkey0,$inout0
	aes${dir}	$rndkey0,$inout1
	aes${dir}	$rndkey0,$inout2
	aes${dir}	$rndkey0,$inout3
	$movkey		-16($key,%rax),$rndkey0
	jnz		.L${dir}_loop4

	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	aes${dir}last	$rndkey0,$inout0
	aes${dir}last	$rndkey0,$inout1
	aes${dir}last	$rndkey0,$inout2
	aes${dir}last	$rndkey0,$inout3
	ret
.size	_aesni_${dir}rypt4,.-_aesni_${dir}rypt4
___
}
sub aesni_generate6 {
my $dir=shift;
# As already mentioned it takes in $key and $rounds, which are *not*
# preserved. $inout[0-5] is cipher/clear text...
$code.=<<___;
.type	_aesni_${dir}rypt6,\@abi-omnipotent
.align	16
_aesni_${dir}rypt6:
	$movkey	($key),$rndkey0
	shl	\$4,$rounds
	$movkey	16($key),$rndkey1
	xorps	$rndkey0,$inout0
	pxor	$rndkey0,$inout1
	pxor	$rndkey0,$inout2
	aes${dir}	$rndkey1,$inout0
	lea	32($key,$rounds),$key
	neg	%rax				# $rounds
	aes${dir}	$rndkey1,$inout1
	pxor	$rndkey0,$inout3
	pxor	$rndkey0,$inout4
	aes${dir}	$rndkey1,$inout2
	pxor	$rndkey0,$inout5
	add	\$16,%rax
	aes${dir}	$rndkey1,$inout3
	aes${dir}	$rndkey1,$inout4
	aes${dir}	$rndkey1,$inout5
	$movkey		-16($key,%rax),$rndkey0
	jmp		.L${dir}_loop6_enter
.align	16
.L${dir}_loop6:
	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	aes${dir}	$rndkey1,$inout4
	aes${dir}	$rndkey1,$inout5
.L${dir}_loop6_enter:
	$movkey		($key,%rax),$rndkey1
	add		\$32,%rax
	aes${dir}	$rndkey0,$inout0
	aes${dir}	$rndkey0,$inout1
	aes${dir}	$rndkey0,$inout2
	aes${dir}	$rndkey0,$inout3
	aes${dir}	$rndkey0,$inout4
	aes${dir}	$rndkey0,$inout5
	$movkey		-16($key,%rax),$rndkey0
	jnz		.L${dir}_loop6

	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	aes${dir}	$rndkey1,$inout4
	aes${dir}	$rndkey1,$inout5
	aes${dir}last	$rndkey0,$inout0
	aes${dir}last	$rndkey0,$inout1
	aes${dir}last	$rndkey0,$inout2
	aes${dir}last	$rndkey0,$inout3
	aes${dir}last	$rndkey0,$inout4
	aes${dir}last	$rndkey0,$inout5
	ret
.size	_aesni_${dir}rypt6,.-_aesni_${dir}rypt6
___
}
sub aesni_generate8 {
my $dir=shift;
# As already mentioned it takes in $key and $rounds, which are *not*
# preserved. $inout[0-7] is cipher/clear text...
$code.=<<___;
.type	_aesni_${dir}rypt8,\@abi-omnipotent
.align	16
_aesni_${dir}rypt8:
	$movkey	($key),$rndkey0
	shl	\$4,$rounds
	$movkey	16($key),$rndkey1
	xorps	$rndkey0,$inout0
	xorps	$rndkey0,$inout1
	pxor	$rndkey0,$inout2
	pxor	$rndkey0,$inout3
	pxor	$rndkey0,$inout4
	lea	32($key,$rounds),$key
	neg	%rax				# $rounds
	aes${dir}	$rndkey1,$inout0
	add	\$16,%rax
	pxor	$rndkey0,$inout5
	aes${dir}	$rndkey1,$inout1
	pxor	$rndkey0,$inout6
	pxor	$rndkey0,$inout7
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	aes${dir}	$rndkey1,$inout4
	aes${dir}	$rndkey1,$inout5
	aes${dir}	$rndkey1,$inout6
	aes${dir}	$rndkey1,$inout7
	$movkey		-16($key,%rax),$rndkey0
	jmp		.L${dir}_loop8_enter
.align	16
.L${dir}_loop8:
	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	aes${dir}	$rndkey1,$inout4
	aes${dir}	$rndkey1,$inout5
	aes${dir}	$rndkey1,$inout6
	aes${dir}	$rndkey1,$inout7
.L${dir}_loop8_enter:
	$movkey		($key,%rax),$rndkey1
	add		\$32,%rax
	aes${dir}	$rndkey0,$inout0
	aes${dir}	$rndkey0,$inout1
	aes${dir}	$rndkey0,$inout2
	aes${dir}	$rndkey0,$inout3
	aes${dir}	$rndkey0,$inout4
	aes${dir}	$rndkey0,$inout5
	aes${dir}	$rndkey0,$inout6
	aes${dir}	$rndkey0,$inout7
	$movkey		-16($key,%rax),$rndkey0
	jnz		.L${dir}_loop8

	aes${dir}	$rndkey1,$inout0
	aes${dir}	$rndkey1,$inout1
	aes${dir}	$rndkey1,$inout2
	aes${dir}	$rndkey1,$inout3
	aes${dir}	$rndkey1,$inout4
	aes${dir}	$rndkey1,$inout5
	aes${dir}	$rndkey1,$inout6
	aes${dir}	$rndkey1,$inout7
	aes${dir}last	$rndkey0,$inout0
	aes${dir}last	$rndkey0,$inout1
	aes${dir}last	$rndkey0,$inout2
	aes${dir}last	$rndkey0,$inout3
	aes${dir}last	$rndkey0,$inout4
	aes${dir}last	$rndkey0,$inout5
	aes${dir}last	$rndkey0,$inout6
	aes${dir}last	$rndkey0,$inout7
	ret
.size	_aesni_${dir}rypt8,.-_aesni_${dir}rypt8
___
}
# Instantiate the N-way interleaved private helpers. The "enc" variants
# are only emitted for the "aesni" build; the aes-x86_64.pl drop-in
# replacement build needs only the "dec" helpers.
&aesni_generate2("enc") if ($PREFIX eq "aesni");
&aesni_generate2("dec");
&aesni_generate3("enc") if ($PREFIX eq "aesni");
&aesni_generate3("dec");
&aesni_generate4("enc") if ($PREFIX eq "aesni");
&aesni_generate4("dec");
&aesni_generate6("enc") if ($PREFIX eq "aesni");
&aesni_generate6("dec");
&aesni_generate8("enc") if ($PREFIX eq "aesni");
&aesni_generate8("dec");
if ($PREFIX eq "aesni") {
########################################################################
# void aesni_ecb_encrypt (const void *in, void *out,
#			  size_t length, const AES_KEY *key,
#			  int enc);
$code.=<<___;
.globl	aesni_ecb_encrypt
.type	aesni_ecb_encrypt,\@function,5
.align	16
aesni_ecb_encrypt:
___
$code.=<<___ if ($win64);
	lea	-0x58(%rsp),%rsp
	movaps	%xmm6,(%rsp)
	movaps	%xmm7,0x10(%rsp)
	movaps	%xmm8,0x20(%rsp)
	movaps	%xmm9,0x30(%rsp)
.Lecb_enc_body:
___
$code.=<<___;
	and	\$-16,$len
	jz	.Lecb_ret

	mov	240($key),$rounds		# key->rounds
	$movkey	($key),$rndkey0
	mov	$key,$key_			# backup $key
	mov	$rounds,$rnds_			# backup $rounds
	test	%r8d,%r8d			# 5th argument
	jz	.Lecb_decrypt
#--------------------------- ECB ENCRYPT ------------------------------#
	cmp	\$0x80,$len
	jb	.Lecb_enc_tail

	movdqu	($inp),$inout0
	movdqu	0x10($inp),$inout1
	movdqu	0x20($inp),$inout2
	movdqu	0x30($inp),$inout3
	movdqu	0x40($inp),$inout4
	movdqu	0x50($inp),$inout5
	movdqu	0x60($inp),$inout6
	movdqu	0x70($inp),$inout7
	lea	0x80($inp),$inp
	sub	\$0x80,$len
	jmp	.Lecb_enc_loop8_enter
.align 16
.Lecb_enc_loop8:
	movups	$inout0,($out)
	mov	$key_,$key			# restore $key
	movdqu	($inp),$inout0
	mov	$rnds_,$rounds			# restore $rounds
	movups	$inout1,0x10($out)
	movdqu	0x10($inp),$inout1
	movups	$inout2,0x20($out)
	movdqu	0x20($inp),$inout2
	movups	$inout3,0x30($out)
	movdqu	0x30($inp),$inout3
	movups	$inout4,0x40($out)
	movdqu	0x40($inp),$inout4
	movups	$inout5,0x50($out)
	movdqu	0x50($inp),$inout5
	movups	$inout6,0x60($out)
	movdqu	0x60($inp),$inout6
	movups	$inout7,0x70($out)
	lea	0x80($out),$out
	movdqu	0x70($inp),$inout7
	lea	0x80($inp),$inp
.Lecb_enc_loop8_enter:

	call	_aesni_encrypt8

	sub	\$0x80,$len
	jnc	.Lecb_enc_loop8

	movups	$inout0,($out)
	mov	$key_,$key			# restore $key
	movups	$inout1,0x10($out)
	mov	$rnds_,$rounds			# restore $rounds
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	movups	$inout5,0x50($out)
	movups	$inout6,0x60($out)
	movups	$inout7,0x70($out)
	lea	0x80($out),$out
	add	\$0x80,$len
	jz	.Lecb_ret

.Lecb_enc_tail:
	movups	($inp),$inout0
	cmp	\$0x20,$len
	jb	.Lecb_enc_one
	movups	0x10($inp),$inout1
	je	.Lecb_enc_two
	movups	0x20($inp),$inout2
	cmp	\$0x40,$len
	jb	.Lecb_enc_three
	movups	0x30($inp),$inout3
	je	.Lecb_enc_four
	movups	0x40($inp),$inout4
	cmp	\$0x60,$len
	jb	.Lecb_enc_five
	movups	0x50($inp),$inout5
	je	.Lecb_enc_six
	movdqu	0x60($inp),$inout6
	call	_aesni_encrypt8
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	movups	$inout5,0x50($out)
	movups	$inout6,0x60($out)
	jmp	.Lecb_ret
.align	16
.Lecb_enc_one:
___
	&aesni_generate1("enc",$key,$rounds);
$code.=<<___;
	movups	$inout0,($out)
	jmp	.Lecb_ret
.align	16
.Lecb_enc_two:
	call	_aesni_encrypt2
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	jmp	.Lecb_ret
.align	16
.Lecb_enc_three:
	call	_aesni_encrypt3
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	jmp	.Lecb_ret
.align	16
.Lecb_enc_four:
	call	_aesni_encrypt4
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	jmp	.Lecb_ret
.align	16
.Lecb_enc_five:
	xorps	$inout5,$inout5
	call	_aesni_encrypt6
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	jmp	.Lecb_ret
.align	16
.Lecb_enc_six:
	call	_aesni_encrypt6
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	movups	$inout5,0x50($out)
	jmp	.Lecb_ret
#--------------------------- ECB DECRYPT ------------------------------#
.align	16
.Lecb_decrypt:
	cmp	\$0x80,$len
	jb	.Lecb_dec_tail

	movdqu	($inp),$inout0
	movdqu	0x10($inp),$inout1
	movdqu	0x20($inp),$inout2
	movdqu	0x30($inp),$inout3
	movdqu	0x40($inp),$inout4
	movdqu	0x50($inp),$inout5
	movdqu	0x60($inp),$inout6
	movdqu	0x70($inp),$inout7
	lea	0x80($inp),$inp
	sub	\$0x80,$len
	jmp	.Lecb_dec_loop8_enter
.align	16
.Lecb_dec_loop8:
	movups	$inout0,($out)
	mov	$key_,$key			# restore $key
	movdqu	($inp),$inout0
	mov	$rnds_,$rounds			# restore $rounds
	movups	$inout1,0x10($out)
	movdqu	0x10($inp),$inout1
	movups	$inout2,0x20($out)
	movdqu	0x20($inp),$inout2
	movups	$inout3,0x30($out)
	movdqu	0x30($inp),$inout3
	movups	$inout4,0x40($out)
	movdqu	0x40($inp),$inout4
	movups	$inout5,0x50($out)
	movdqu	0x50($inp),$inout5
	movups	$inout6,0x60($out)
	movdqu	0x60($inp),$inout6
	movups	$inout7,0x70($out)
	lea	0x80($out),$out
	movdqu	0x70($inp),$inout7
	lea	0x80($inp),$inp
.Lecb_dec_loop8_enter:

	call	_aesni_decrypt8

	$movkey	($key_),$rndkey0
	sub	\$0x80,$len
	jnc	.Lecb_dec_loop8

	movups	$inout0,($out)
	mov	$key_,$key			# restore $key
	movups	$inout1,0x10($out)
	mov	$rnds_,$rounds			# restore $rounds
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	movups	$inout5,0x50($out)
	movups	$inout6,0x60($out)
	movups	$inout7,0x70($out)
	lea	0x80($out),$out
	add	\$0x80,$len
	jz	.Lecb_ret

.Lecb_dec_tail:
	movups	($inp),$inout0
	cmp	\$0x20,$len
	jb	.Lecb_dec_one
	movups	0x10($inp),$inout1
	je	.Lecb_dec_two
	movups	0x20($inp),$inout2
	cmp	\$0x40,$len
	jb	.Lecb_dec_three
	movups	0x30($inp),$inout3
	je	.Lecb_dec_four
	movups	0x40($inp),$inout4
	cmp	\$0x60,$len
	jb	.Lecb_dec_five
	movups	0x50($inp),$inout5
	je	.Lecb_dec_six
	movups	0x60($inp),$inout6
	$movkey	($key),$rndkey0
	call	_aesni_decrypt8
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	movups	$inout5,0x50($out)
	movups	$inout6,0x60($out)
	jmp	.Lecb_ret
.align	16
.Lecb_dec_one:
___
	&aesni_generate1("dec",$key,$rounds);
$code.=<<___;
	movups	$inout0,($out)
	jmp	.Lecb_ret
.align	16
.Lecb_dec_two:
	call	_aesni_decrypt2
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	jmp	.Lecb_ret
.align	16
.Lecb_dec_three:
	call	_aesni_decrypt3
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	jmp	.Lecb_ret
.align	16
.Lecb_dec_four:
	call	_aesni_decrypt4
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	jmp	.Lecb_ret
.align	16
.Lecb_dec_five:
	xorps	$inout5,$inout5
	call	_aesni_decrypt6
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	jmp	.Lecb_ret
.align	16
.Lecb_dec_six:
	call	_aesni_decrypt6
	movups	$inout0,($out)
	movups	$inout1,0x10($out)
	movups	$inout2,0x20($out)
	movups	$inout3,0x30($out)
	movups	$inout4,0x40($out)
	movups	$inout5,0x50($out)

.Lecb_ret:
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	lea	0x58(%rsp),%rsp
.Lecb_enc_ret:
___
$code.=<<___;
	ret
.size	aesni_ecb_encrypt,.-aesni_ecb_encrypt
___
######################################################################
# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
#                         size_t blocks, const AES_KEY *key,
#                         const char *ivec,char *cmac);
#
# Handles only complete blocks, operates on 64-bit counter and
# does not update *ivec! Nor does it finalize CMAC value
# (see engine/eng_aesni.c for details)
#
{
my $cmac="%r9";			# 6th argument

my $increment="%xmm9";
my $iv="%xmm6";
my $bswap_mask="%xmm7";

$code.=<<___;
.globl	aesni_ccm64_encrypt_blocks
.type	aesni_ccm64_encrypt_blocks,\@function,6
.align	16
aesni_ccm64_encrypt_blocks:
___
$code.=<<___ if ($win64);
	lea	-0x58(%rsp),%rsp
	movaps	%xmm6,(%rsp)
	movaps	%xmm7,0x10(%rsp)
	movaps	%xmm8,0x20(%rsp)
	movaps	%xmm9,0x30(%rsp)
.Lccm64_enc_body:
___
$code.=<<___;
	mov	240($key),$rounds		# key->rounds
	movdqu	($ivp),$iv
	movdqa	.Lincrement64(%rip),$increment
	movdqa	.Lbswap_mask(%rip),$bswap_mask

	shl	\$4,$rounds
	mov	\$16,$rnds_
	lea	0($key),$key_
	movdqu	($cmac),$inout1
	movdqa	$iv,$inout0
	lea	32($key,$rounds),$key		# end of key schedule
	pshufb	$bswap_mask,$iv
	sub	%rax,%r10			# twisted $rounds
	jmp	.Lccm64_enc_outer
.align	16
.Lccm64_enc_outer:
	$movkey	($key_),$rndkey0
	mov	%r10,%rax
	movups	($inp),$in0			# load inp

	xorps	$rndkey0,$inout0		# counter
	$movkey	16($key_),$rndkey1
	xorps	$in0,$rndkey0
	xorps	$rndkey0,$inout1		# cmac^=inp
	$movkey	32($key_),$rndkey0

.Lccm64_enc2_loop:
	aesenc	$rndkey1,$inout0
	aesenc	$rndkey1,$inout1
	$movkey	($key,%rax),$rndkey1
	add	\$32,%rax
	aesenc	$rndkey0,$inout0
	aesenc	$rndkey0,$inout1
	$movkey	-16($key,%rax),$rndkey0
	jnz	.Lccm64_enc2_loop
	aesenc	$rndkey1,$inout0
	aesenc	$rndkey1,$inout1
	paddq	$increment,$iv
	dec	$len
	aesenclast	$rndkey0,$inout0
	aesenclast	$rndkey0,$inout1

	lea	16($inp),$inp
	xorps	$inout0,$in0			# inp ^= E(iv)
	movdqa	$iv,$inout0
	movups	$in0,($out)			# save output
	pshufb	$bswap_mask,$inout0
	lea	16($out),$out
	jnz	.Lccm64_enc_outer

	movups	$inout1,($cmac)
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	lea	0x58(%rsp),%rsp
.Lccm64_enc_ret:
___
$code.=<<___;
	ret
.size	aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
___
985 ######################################################################
# aesni_ccm64_decrypt_blocks: decryption differs from the encrypt path in
# that the CBC-MAC absorbs the *plaintext* output (cmac^=out), so E(iv) for
# the first block is produced up front with aesni_generate1 and each
# subsequent MAC update is folded into the next iteration's two-block
# encryption (.Lccm64_dec2_loop).
986 $code.=<<___;
987 .globl aesni_ccm64_decrypt_blocks
988 .type aesni_ccm64_decrypt_blocks,\@function,6
989 .align 16
990 aesni_ccm64_decrypt_blocks:
992 $code.=<<___ if ($win64);
# Win64 ABI: save non-volatile xmm6-xmm9
993 lea -0x58(%rsp),%rsp
994 movaps %xmm6,(%rsp)
995 movaps %xmm7,0x10(%rsp)
996 movaps %xmm8,0x20(%rsp)
997 movaps %xmm9,0x30(%rsp)
998 .Lccm64_dec_body:
1000 $code.=<<___;
1001 mov 240($key),$rounds # key->rounds
1002 movups ($ivp),$iv
1003 movdqu ($cmac),$inout1
1004 movdqa .Lincrement64(%rip),$increment
1005 movdqa .Lbswap_mask(%rip),$bswap_mask
1007 movaps $iv,$inout0
1008 mov $rounds,$rnds_
1009 mov $key,$key_
1010 pshufb $bswap_mask,$iv
# encrypt the first counter block standalone (fills $inout0 with E(iv))
1012 &aesni_generate1("enc",$key,$rounds);
1013 $code.=<<___;
1014 shl \$4,$rnds_
1015 mov \$16,$rounds
1016 movups ($inp),$in0 # load inp
1017 paddq $increment,$iv
1018 lea 16($inp),$inp
1019 sub %r10,%rax # twisted $rounds
1020 lea 32($key_,$rnds_),$key # end of key schedule
1021 mov %rax,%r10
1022 jmp .Lccm64_dec_outer
1023 .align 16
# per-iteration: release previous plaintext, then encrypt next counter and
# fold plaintext into the MAC in one interleaved pass
1024 .Lccm64_dec_outer:
1025 xorps $inout0,$in0 # inp ^= E(iv)
1026 movdqa $iv,$inout0
1027 movups $in0,($out) # save output
1028 lea 16($out),$out
1029 pshufb $bswap_mask,$inout0
1031 sub \$1,$len
1032 jz .Lccm64_dec_break
1034 $movkey ($key_),$rndkey0
1035 mov %r10,%rax
1036 $movkey 16($key_),$rndkey1
1037 xorps $rndkey0,$in0
1038 xorps $rndkey0,$inout0
1039 xorps $in0,$inout1 # cmac^=out
1040 $movkey 32($key_),$rndkey0
1041 jmp .Lccm64_dec2_loop
1042 .align 16
1043 .Lccm64_dec2_loop:
1044 aesenc $rndkey1,$inout0
1045 aesenc $rndkey1,$inout1
1046 $movkey ($key,%rax),$rndkey1
1047 add \$32,%rax
1048 aesenc $rndkey0,$inout0
1049 aesenc $rndkey0,$inout1
1050 $movkey -16($key,%rax),$rndkey0
1051 jnz .Lccm64_dec2_loop
1052 movups ($inp),$in0 # load inp
1053 paddq $increment,$iv
1054 aesenc $rndkey1,$inout0
1055 aesenc $rndkey1,$inout1
1056 aesenclast $rndkey0,$inout0
1057 aesenclast $rndkey0,$inout1
1058 lea 16($inp),$inp
1059 jmp .Lccm64_dec_outer
1061 .align 32
# last block: absorb the final plaintext and close the CBC-MAC with a single
# extra block encryption (aesni_generate1 with $in0 XORed in)
1062 .Lccm64_dec_break:
1063 #xorps $in0,$inout1 # cmac^=out
1064 mov 240($key_),$rounds
1066 &aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
1067 $code.=<<___;
1068 movups $inout1,($cmac)
1070 $code.=<<___ if ($win64);
# Win64 ABI: restore xmm6-xmm9
1071 movaps (%rsp),%xmm6
1072 movaps 0x10(%rsp),%xmm7
1073 movaps 0x20(%rsp),%xmm8
1074 movaps 0x30(%rsp),%xmm9
1075 lea 0x58(%rsp),%rsp
1076 .Lccm64_dec_ret:
1078 $code.=<<___;
1080 .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
1083 ######################################################################
1084 # void aesni_ctr32_encrypt_blocks (const void *in, void *out,
1085 # size_t blocks, const AES_KEY *key,
1086 # const char *ivec);
1088 # Handles only complete blocks, operates on 32-bit counter and
1089 # does not update *ivec! (see crypto/modes/ctr128.c for details)
1091 # Overhaul based on suggestions from Shay Gueron and Vlad Krasnov,
1092 # http://rt.openssl.org/Ticket/Display.html?id=3021&user=guest&pass=guest.
1093 # Keywords are full unroll and modulo-schedule counter calculations
1094 # with zero-round key xor.
1096 my ($in0,$in1,$in2,$in3,$in4,$in5)=map("%xmm$_",(10..15));
1097 my ($key0,$ctr)=("${key_}d","${ivp}d");
1098 my $frame_size = 0x80 + ($win64?160:0);
1100 $code.=<<___;
1101 .globl aesni_ctr32_encrypt_blocks
1102 .type aesni_ctr32_encrypt_blocks,\@function,5
1103 .align 16
1104 aesni_ctr32_encrypt_blocks:
1105 lea (%rsp),%rax
1106 push %rbp
1107 sub \$$frame_size,%rsp
1108 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1110 $code.=<<___ if ($win64);
# Win64 ABI: save non-volatile xmm6-xmm15 below the incoming stack pointer
1111 movaps %xmm6,-0xa8(%rax)
1112 movaps %xmm7,-0x98(%rax)
1113 movaps %xmm8,-0x88(%rax)
1114 movaps %xmm9,-0x78(%rax)
1115 movaps %xmm10,-0x68(%rax)
1116 movaps %xmm11,-0x58(%rax)
1117 movaps %xmm12,-0x48(%rax)
1118 movaps %xmm13,-0x38(%rax)
1119 movaps %xmm14,-0x28(%rax)
1120 movaps %xmm15,-0x18(%rax)
1121 .Lctr32_body:
1123 $code.=<<___;
1124 lea -8(%rax),%rbp
1126 cmp \$1,$len
1127 je .Lctr32_one_shortcut
# Pre-materialize up to 8 counter blocks at 0x00-0x70(%rsp), each already
# XORed with the 0-round key; only the last (counter) dword differs per
# block, patched in via pinsrd / direct dword stores ("zero-round key xor").
1129 movdqu ($ivp),$inout0
1130 movdqu ($key),$rndkey0
1131 mov 12($ivp),$ctr # counter LSB
1132 pxor $rndkey0,$inout0
1133 mov 12($key),$key0 # 0-round key LSB
1134 movdqa $inout0,0x00(%rsp) # populate counter block
1135 bswap $ctr
1136 movdqa $inout0,$inout1
1137 movdqa $inout0,$inout2
1138 movdqa $inout0,$inout3
1139 movdqa $inout0,0x40(%rsp)
1140 movdqa $inout0,0x50(%rsp)
1141 movdqa $inout0,0x60(%rsp)
1142 mov %rdx,%r10 # borrow %rdx
1143 movdqa $inout0,0x70(%rsp)
1145 lea 1($ctr),%rax
1146 lea 2($ctr),%rdx
1147 bswap %eax
1148 bswap %edx
1149 xor $key0,%eax
1150 xor $key0,%edx
1151 pinsrd \$3,%eax,$inout1
1152 lea 3($ctr),%rax
1153 movdqa $inout1,0x10(%rsp)
1154 pinsrd \$3,%edx,$inout2
1155 bswap %eax
1156 mov %r10,%rdx # restore %rdx
1157 lea 4($ctr),%r10
1158 movdqa $inout2,0x20(%rsp)
1159 xor $key0,%eax
1160 bswap %r10d
1161 pinsrd \$3,%eax,$inout3
1162 xor $key0,%r10d
1163 movdqa $inout3,0x30(%rsp)
1164 lea 5($ctr),%r9
1165 mov %r10d,0x40+12(%rsp)
1166 bswap %r9d
1167 lea 6($ctr),%r10
1168 mov 240($key),$rounds # key->rounds
1169 xor $key0,%r9d
1170 bswap %r10d
1171 mov %r9d,0x50+12(%rsp)
1172 xor $key0,%r10d
1173 lea 7($ctr),%r9
1174 mov %r10d,0x60+12(%rsp)
1175 bswap %r9d
# capability word: mask keeps XSAVE (1<<26) and MOVBE (1<<22) bits, used
# below to pick the 6x (MOVBE-only) vs 8x path
1176 mov OPENSSL_ia32cap_P+4(%rip),%r10d
1177 xor $key0,%r9d
1178 and \$`1<<26|1<<22`,%r10d # isolate XSAVE+MOVBE
1179 mov %r9d,0x70+12(%rsp)
1181 $movkey 0x10($key),$rndkey1
1183 movdqa 0x40(%rsp),$inout4
1184 movdqa 0x50(%rsp),$inout5
1186 cmp \$8,$len
1187 jb .Lctr32_tail
1189 sub \$6,$len
1190 cmp \$`1<<22`,%r10d # check for MOVBE without XSAVE
1191 je .Lctr32_6x
# 8x path: bias $key by 0x80 so all round keys are reachable with disp8
1193 lea 0x80($key),$key # size optimization
1194 sub \$2,$len
1195 jmp .Lctr32_loop8
1197 .align 16
# 6x interleaved path, taken when the CPU reports MOVBE but not XSAVE
# (per the capability test above — presumably targets Atom-class cores,
# TODO confirm). Counter updates are stored big-endian with movbe,
# modulo-scheduled between AES rounds.
1198 .Lctr32_6x:
1199 shl \$4,$rounds
1200 mov \$48,$rnds_
1201 bswap $key0
1202 lea 32($key,$rounds),$key # end of key schedule
1203 sub %rax,%r10 # twisted $rounds
1204 jmp .Lctr32_loop6
1206 .align 16
1207 .Lctr32_loop6:
# interleave the first two AES rounds of 6 blocks with computing and
# storing the next 6 counter values into the stack blocks
1208 add \$6,$ctr
1209 $movkey -48($key,$rnds_),$rndkey0
1210 aesenc $rndkey1,$inout0
1211 mov $ctr,%eax
1212 xor $key0,%eax
1213 aesenc $rndkey1,$inout1
1214 movbe %eax,`0x00+12`(%rsp)
1215 lea 1($ctr),%eax
1216 aesenc $rndkey1,$inout2
1217 xor $key0,%eax
1218 movbe %eax,`0x10+12`(%rsp)
1219 aesenc $rndkey1,$inout3
1220 lea 2($ctr),%eax
1221 xor $key0,%eax
1222 aesenc $rndkey1,$inout4
1223 movbe %eax,`0x20+12`(%rsp)
1224 lea 3($ctr),%eax
1225 aesenc $rndkey1,$inout5
1226 $movkey -32($key,$rnds_),$rndkey1
1227 xor $key0,%eax
1229 aesenc $rndkey0,$inout0
1230 movbe %eax,`0x30+12`(%rsp)
1231 lea 4($ctr),%eax
1232 aesenc $rndkey0,$inout1
1233 xor $key0,%eax
1234 movbe %eax,`0x40+12`(%rsp)
1235 aesenc $rndkey0,$inout2
1236 lea 5($ctr),%eax
1237 xor $key0,%eax
1238 aesenc $rndkey0,$inout3
1239 movbe %eax,`0x50+12`(%rsp)
1240 mov %r10,%rax # mov $rnds_,$rounds
1241 aesenc $rndkey0,$inout4
1242 aesenc $rndkey0,$inout5
1243 $movkey -16($key,$rnds_),$rndkey0
# finish the remaining rounds via the shared 6-block round loop
1245 call .Lenc_loop6
1247 movdqu ($inp),$inout6
1248 movdqu 0x10($inp),$inout7
1249 movdqu 0x20($inp),$in0
1250 movdqu 0x30($inp),$in1
1251 movdqu 0x40($inp),$in2
1252 movdqu 0x50($inp),$in3
1253 lea 0x60($inp),$inp
1254 $movkey -64($key,$rnds_),$rndkey1
# XOR keystream with input while reloading the next 6 pre-keyed counter
# blocks from the stack
1255 pxor $inout0,$inout6
1256 movaps 0x00(%rsp),$inout0
1257 pxor $inout1,$inout7
1258 movaps 0x10(%rsp),$inout1
1259 pxor $inout2,$in0
1260 movaps 0x20(%rsp),$inout2
1261 pxor $inout3,$in1
1262 movaps 0x30(%rsp),$inout3
1263 pxor $inout4,$in2
1264 movaps 0x40(%rsp),$inout4
1265 pxor $inout5,$in3
1266 movaps 0x50(%rsp),$inout5
1267 movdqu $inout6,($out)
1268 movdqu $inout7,0x10($out)
1269 movdqu $in0,0x20($out)
1270 movdqu $in1,0x30($out)
1271 movdqu $in2,0x40($out)
1272 movdqu $in3,0x50($out)
1273 lea 0x60($out),$out
1275 sub \$6,$len
1276 jnc .Lctr32_loop6
1278 add \$6,$len
1279 jz .Lctr32_done
# undo the 6x setup so the shared tail code sees original $key/$rounds
1281 lea -48($rnds_),$rounds
1282 lea -80($key,$rnds_),$key # restore $key
1283 neg $rounds
1284 shr \$4,$rounds # restore $rounds
1285 jmp .Lctr32_tail
1287 .align 32
# Main 8x interleaved CTR loop. $key was biased by +0x80 above, so round
# keys live at `0x10*i - 0x80`($key) with short displacements. The next 8
# counter dwords are computed in %r9d and patched into the stack blocks
# between AES rounds (modulo scheduling).
1288 .Lctr32_loop8:
1289 add \$8,$ctr
1290 movdqa 0x60(%rsp),$inout6
1291 aesenc $rndkey1,$inout0
1292 mov $ctr,%r9d
1293 movdqa 0x70(%rsp),$inout7
1294 aesenc $rndkey1,$inout1
1295 bswap %r9d
1296 $movkey 0x20-0x80($key),$rndkey0
1297 aesenc $rndkey1,$inout2
1298 xor $key0,%r9d
1300 aesenc $rndkey1,$inout3
1301 mov %r9d,0x00+12(%rsp)
1302 lea 1($ctr),%r9
1303 aesenc $rndkey1,$inout4
1304 aesenc $rndkey1,$inout5
1305 aesenc $rndkey1,$inout6
1306 aesenc $rndkey1,$inout7
1307 $movkey 0x30-0x80($key),$rndkey1
# Perl-side unroll: rounds 2..7 of the 8-block interleave, alternating
# $rndkey1/$rndkey0 and patching counter block $i-1 each round
1309 for($i=2;$i<8;$i++) {
1310 my $rndkeyx = ($i&1)?$rndkey1:$rndkey0;
1311 $code.=<<___;
1312 bswap %r9d
1313 aesenc $rndkeyx,$inout0
1314 aesenc $rndkeyx,$inout1
1315 xor $key0,%r9d
1316 .byte 0x66,0x90
1317 aesenc $rndkeyx,$inout2
1318 aesenc $rndkeyx,$inout3
1319 mov %r9d,`0x10*($i-1)`+12(%rsp)
1320 lea $i($ctr),%r9
1321 aesenc $rndkeyx,$inout4
1322 aesenc $rndkeyx,$inout5
1323 aesenc $rndkeyx,$inout6
1324 aesenc $rndkeyx,$inout7
1325 $movkey `0x20+0x10*$i`-0x80($key),$rndkeyx
1328 $code.=<<___;
1329 bswap %r9d
1330 aesenc $rndkey0,$inout0
1331 aesenc $rndkey0,$inout1
1332 aesenc $rndkey0,$inout2
1333 xor $key0,%r9d
1334 movdqu 0x00($inp),$in0
1335 aesenc $rndkey0,$inout3
1336 mov %r9d,0x70+12(%rsp)
# branch on rounds: 10 (128-bit) ends here, 12/14 take extra round pairs
1337 cmp \$11,$rounds
1338 aesenc $rndkey0,$inout4
1339 aesenc $rndkey0,$inout5
1340 aesenc $rndkey0,$inout6
1341 aesenc $rndkey0,$inout7
1342 $movkey 0xa0-0x80($key),$rndkey0
1344 jb .Lctr32_enc_done
1346 aesenc $rndkey1,$inout0
1347 aesenc $rndkey1,$inout1
1348 aesenc $rndkey1,$inout2
1349 aesenc $rndkey1,$inout3
1350 aesenc $rndkey1,$inout4
1351 aesenc $rndkey1,$inout5
1352 aesenc $rndkey1,$inout6
1353 aesenc $rndkey1,$inout7
1354 $movkey 0xb0-0x80($key),$rndkey1
1356 aesenc $rndkey0,$inout0
1357 aesenc $rndkey0,$inout1
1358 aesenc $rndkey0,$inout2
1359 aesenc $rndkey0,$inout3
1360 aesenc $rndkey0,$inout4
1361 aesenc $rndkey0,$inout5
1362 aesenc $rndkey0,$inout6
1363 aesenc $rndkey0,$inout7
1364 $movkey 0xc0-0x80($key),$rndkey0
1365 je .Lctr32_enc_done
1367 aesenc $rndkey1,$inout0
1368 aesenc $rndkey1,$inout1
1369 aesenc $rndkey1,$inout2
1370 aesenc $rndkey1,$inout3
1371 aesenc $rndkey1,$inout4
1372 aesenc $rndkey1,$inout5
1373 aesenc $rndkey1,$inout6
1374 aesenc $rndkey1,$inout7
1375 $movkey 0xd0-0x80($key),$rndkey1
1377 aesenc $rndkey0,$inout0
1378 aesenc $rndkey0,$inout1
1379 aesenc $rndkey0,$inout2
1380 aesenc $rndkey0,$inout3
1381 aesenc $rndkey0,$inout4
1382 aesenc $rndkey0,$inout5
1383 aesenc $rndkey0,$inout6
1384 aesenc $rndkey0,$inout7
1385 $movkey 0xe0-0x80($key),$rndkey0
1386 jmp .Lctr32_enc_done
1388 .align 16
# last round: XOR input into the last-round key operand so aesenclast
# produces ciphertext directly (input ^ last-round-key folded together)
1389 .Lctr32_enc_done:
1390 movdqu 0x10($inp),$in1
1391 pxor $rndkey0,$in0
1392 movdqu 0x20($inp),$in2
1393 pxor $rndkey0,$in1
1394 movdqu 0x30($inp),$in3
1395 pxor $rndkey0,$in2
1396 movdqu 0x40($inp),$in4
1397 pxor $rndkey0,$in3
1398 movdqu 0x50($inp),$in5
1399 pxor $rndkey0,$in4
1400 pxor $rndkey0,$in5
1401 aesenc $rndkey1,$inout0
1402 aesenc $rndkey1,$inout1
1403 aesenc $rndkey1,$inout2
1404 aesenc $rndkey1,$inout3
1405 aesenc $rndkey1,$inout4
1406 aesenc $rndkey1,$inout5
1407 aesenc $rndkey1,$inout6
1408 aesenc $rndkey1,$inout7
1409 movdqu 0x60($inp),$rndkey1
1410 lea 0x80($inp),$inp
1412 aesenclast $in0,$inout0
1413 pxor $rndkey0,$rndkey1
1414 movdqu 0x70-0x80($inp),$in0
1415 aesenclast $in1,$inout1
1416 pxor $rndkey0,$in0
1417 movdqa 0x00(%rsp),$in1 # load next counter block
1418 aesenclast $in2,$inout2
1419 aesenclast $in3,$inout3
1420 movdqa 0x10(%rsp),$in2
1421 movdqa 0x20(%rsp),$in3
1422 aesenclast $in4,$inout4
1423 aesenclast $in5,$inout5
1424 movdqa 0x30(%rsp),$in4
1425 movdqa 0x40(%rsp),$in5
1426 aesenclast $rndkey1,$inout6
1427 movdqa 0x50(%rsp),$rndkey0
1428 $movkey 0x10-0x80($key),$rndkey1
1429 aesenclast $in0,$inout7
1431 movups $inout0,($out) # store output
1432 movdqa $in1,$inout0
1433 movups $inout1,0x10($out)
1434 movdqa $in2,$inout1
1435 movups $inout2,0x20($out)
1436 movdqa $in3,$inout2
1437 movups $inout3,0x30($out)
1438 movdqa $in4,$inout3
1439 movups $inout4,0x40($out)
1440 movdqa $in5,$inout4
1441 movups $inout5,0x50($out)
1442 movdqa $rndkey0,$inout5
1443 movups $inout6,0x60($out)
1444 movups $inout7,0x70($out)
1445 lea 0x80($out),$out
1447 sub \$8,$len
1448 jnc .Lctr32_loop8
1450 add \$8,$len
1451 jz .Lctr32_done
# undo the +0x80 key bias before falling into the shared tail
1452 lea -0x80($key),$key
# Tail: 1..7 remaining blocks. <4 → 3-block loop, ==4 → 4-block loop,
# 5..7 → one pass through the 8-block round subroutine with surplus lanes
# ignored on output.
1454 .Lctr32_tail:
1455 lea 16($key),$key
1456 cmp \$4,$len
1457 jb .Lctr32_loop3
1458 je .Lctr32_loop4
1460 shl \$4,$rounds
1461 movdqa 0x60(%rsp),$inout6
1462 pxor $inout7,$inout7
1464 $movkey 16($key),$rndkey0
1465 aesenc $rndkey1,$inout0
1466 aesenc $rndkey1,$inout1
1467 lea 32-16($key,$rounds),$key
1468 neg %rax
1469 aesenc $rndkey1,$inout2
1470 add \$16,%rax
1471 movups ($inp),$in0
1472 aesenc $rndkey1,$inout3
1473 aesenc $rndkey1,$inout4
1474 movups 0x10($inp),$in1
1475 movups 0x20($inp),$in2
1476 aesenc $rndkey1,$inout5
1477 aesenc $rndkey1,$inout6
1479 call .Lenc_loop8_enter
1481 movdqu 0x30($inp),$in3
1482 pxor $in0,$inout0
1483 movdqu 0x40($inp),$in0
1484 pxor $in1,$inout1
1485 movdqu $inout0,($out)
1486 pxor $in2,$inout2
1487 movdqu $inout1,0x10($out)
1488 pxor $in3,$inout3
1489 movdqu $inout2,0x20($out)
1490 pxor $in0,$inout4
1491 movdqu $inout3,0x30($out)
1492 movdqu $inout4,0x40($out)
1493 cmp \$6,$len
1494 jb .Lctr32_done
1496 movups 0x50($inp),$in1
1497 xorps $in1,$inout5
1498 movups $inout5,0x50($out)
1499 je .Lctr32_done
1501 movups 0x60($inp),$in2
1502 xorps $in2,$inout6
1503 movups $inout6,0x60($out)
1504 jmp .Lctr32_done
1506 .align 32
# exactly 4 blocks: simple round loop over 4 lanes
1507 .Lctr32_loop4:
1508 aesenc $rndkey1,$inout0
1509 lea 16($key),$key
1510 dec $rounds
1511 aesenc $rndkey1,$inout1
1512 aesenc $rndkey1,$inout2
1513 aesenc $rndkey1,$inout3
1514 $movkey ($key),$rndkey1
1515 jnz .Lctr32_loop4
1516 aesenclast $rndkey1,$inout0
1517 aesenclast $rndkey1,$inout1
1518 movups ($inp),$in0
1519 movups 0x10($inp),$in1
1520 aesenclast $rndkey1,$inout2
1521 aesenclast $rndkey1,$inout3
1522 movups 0x20($inp),$in2
1523 movups 0x30($inp),$in3
1525 xorps $in0,$inout0
1526 movups $inout0,($out)
1527 xorps $in1,$inout1
1528 movups $inout1,0x10($out)
1529 pxor $in2,$inout2
1530 movdqu $inout2,0x20($out)
1531 pxor $in3,$inout3
1532 movdqu $inout3,0x30($out)
1533 jmp .Lctr32_done
1535 .align 32
# 1..3 blocks: 3 lanes are always encrypted, but only $len outputs stored
1536 .Lctr32_loop3:
1537 aesenc $rndkey1,$inout0
1538 lea 16($key),$key
1539 dec $rounds
1540 aesenc $rndkey1,$inout1
1541 aesenc $rndkey1,$inout2
1542 $movkey ($key),$rndkey1
1543 jnz .Lctr32_loop3
1544 aesenclast $rndkey1,$inout0
1545 aesenclast $rndkey1,$inout1
1546 aesenclast $rndkey1,$inout2
1548 movups ($inp),$in0
1549 xorps $in0,$inout0
1550 movups $inout0,($out)
1551 cmp \$2,$len
1552 jb .Lctr32_done
1554 movups 0x10($inp),$in1
1555 xorps $in1,$inout1
1556 movups $inout1,0x10($out)
1557 je .Lctr32_done
1559 movups 0x20($inp),$in2
1560 xorps $in2,$inout2
1561 movups $inout2,0x20($out)
1562 jmp .Lctr32_done
1564 .align 16
# single-block fast path: skip all counter-block materialization
1565 .Lctr32_one_shortcut:
1566 movups ($ivp),$inout0
1567 movups ($inp),$in0
1568 mov 240($key),$rounds # key->rounds
1570 &aesni_generate1("enc",$key,$rounds);
1571 $code.=<<___;
1572 xorps $in0,$inout0
1573 movups $inout0,($out)
1574 jmp .Lctr32_done
1576 .align 16
1577 .Lctr32_done:
1579 $code.=<<___ if ($win64);
# Win64 ABI: restore xmm6-xmm15 (saved relative to the frame pointer)
1580 movaps -0xa0(%rbp),%xmm6
1581 movaps -0x90(%rbp),%xmm7
1582 movaps -0x80(%rbp),%xmm8
1583 movaps -0x70(%rbp),%xmm9
1584 movaps -0x60(%rbp),%xmm10
1585 movaps -0x50(%rbp),%xmm11
1586 movaps -0x40(%rbp),%xmm12
1587 movaps -0x30(%rbp),%xmm13
1588 movaps -0x20(%rbp),%xmm14
1589 movaps -0x10(%rbp),%xmm15
1591 $code.=<<___;
1592 lea (%rbp),%rsp
1593 pop %rbp
1594 .Lctr32_epilogue:
1596 .size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
1600 ######################################################################
1601 # void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
1602 # const AES_KEY *key1, const AES_KEY *key2
1603 # const unsigned char iv[16]);
1606 my @tweak=map("%xmm$_",(10..15));
1607 my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
1608 my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
1609 my $frame_size = 0x70 + ($win64?160:0);
1611 $code.=<<___;
1612 .globl aesni_xts_encrypt
1613 .type aesni_xts_encrypt,\@function,6
1614 .align 16
1615 aesni_xts_encrypt:
1616 lea (%rsp),%rax
1617 push %rbp
1618 sub \$$frame_size,%rsp
1619 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1621 $code.=<<___ if ($win64);
# Win64 ABI: save non-volatile xmm6-xmm15
1622 movaps %xmm6,-0xa8(%rax)
1623 movaps %xmm7,-0x98(%rax)
1624 movaps %xmm8,-0x88(%rax)
1625 movaps %xmm9,-0x78(%rax)
1626 movaps %xmm10,-0x68(%rax)
1627 movaps %xmm11,-0x58(%rax)
1628 movaps %xmm12,-0x48(%rax)
1629 movaps %xmm13,-0x38(%rax)
1630 movaps %xmm14,-0x28(%rax)
1631 movaps %xmm15,-0x18(%rax)
1632 .Lxts_enc_body:
1634 $code.=<<___;
1635 lea -8(%rax),%rbp
1636 movups ($ivp),$inout0 # load clear-text tweak
1637 mov 240(%r8),$rounds # key2->rounds
1638 mov 240($key),$rnds_ # key1->rounds
1640 # generate the tweak
1641 &aesni_generate1("enc",$key2,$rounds,$inout0);
1642 $code.=<<___;
1643 $movkey ($key),$rndkey0 # zero round key
1644 mov $key,$key_ # backup $key
1645 mov $rnds_,$rounds # backup $rounds
1646 shl \$4,$rnds_
1647 mov $len,$len_ # backup $len
1648 and \$-16,$len
1650 $movkey 16($key,$rnds_),$rndkey1 # last round key
1652 movdqa .Lxts_magic(%rip),$twmask
1653 movdqa $inout0,@tweak[5]
1654 pshufd \$0x5f,$inout0,$twres
1655 pxor $rndkey0,$rndkey1
1657 # alternative tweak calculation algorithm is based on suggestions
1658 # by Shay Gueron. psrad doesn't conflict with AES-NI instructions
1659 # and should help in the future...
# Perl-side unroll: derive tweak[0..3] from tweak[5], each pre-XORed with
# the zero-round key; $twres carries the sign-broadcast lanes for the
# GF(2^128) doubling (psrad/pand/pxor against .Lxts_magic)
1660 for ($i=0;$i<4;$i++) {
1661 $code.=<<___;
1662 movdqa $twres,$twtmp
1663 paddd $twres,$twres
1664 movdqa @tweak[5],@tweak[$i]
1665 psrad \$31,$twtmp # broadcast upper bits
1666 paddq @tweak[5],@tweak[5]
1667 pand $twmask,$twtmp
1668 pxor $rndkey0,@tweak[$i]
1669 pxor $twtmp,@tweak[5]
1672 $code.=<<___;
1673 movdqa @tweak[5],@tweak[4]
1674 psrad \$31,$twres
1675 paddq @tweak[5],@tweak[5]
1676 pand $twmask,$twres
1677 pxor $rndkey0,@tweak[4]
1678 pxor $twres,@tweak[5]
# round[0]^round[last] is cached on the stack for the aesenclast fold
1679 movaps $rndkey1,0x60(%rsp) # save round[0]^round[last]
1681 sub \$16*6,$len
1682 jc .Lxts_enc_short
1684 mov \$16+96,$rounds
1685 lea 32($key_,$rnds_),$key # end of key schedule
1686 sub %r10,%rax # twisted $rounds
1687 $movkey 16($key_),$rndkey1
1688 mov %rax,%r10 # backup twisted $rounds
1689 lea .Lxts_magic(%rip),%r8
1690 jmp .Lxts_enc_grandloop
1692 .align 32
# Grand loop: 6 blocks per iteration. Tweaks (pre-XORed with the last round
# key) are parked on the stack for the final aesenclast; the next six tweaks
# are computed modulo-scheduled between AES rounds.
1693 .Lxts_enc_grandloop:
1694 movdqu `16*0`($inp),$inout0 # load input
1695 movdqa $rndkey0,$twmask
1696 movdqu `16*1`($inp),$inout1
1697 pxor @tweak[0],$inout0
1698 movdqu `16*2`($inp),$inout2
1699 pxor @tweak[1],$inout1
1700 aesenc $rndkey1,$inout0
1701 movdqu `16*3`($inp),$inout3
1702 pxor @tweak[2],$inout2
1703 aesenc $rndkey1,$inout1
1704 movdqu `16*4`($inp),$inout4
1705 pxor @tweak[3],$inout3
1706 aesenc $rndkey1,$inout2
1707 movdqu `16*5`($inp),$inout5
1708 pxor @tweak[5],$twmask # round[0]^=tweak[5]
1709 movdqa 0x60(%rsp),$twres # load round[0]^round[last]
1710 pxor @tweak[4],$inout4
1711 aesenc $rndkey1,$inout3
1712 $movkey 32($key_),$rndkey0
1713 lea `16*6`($inp),$inp
1714 pxor $twmask,$inout5
1716 pxor $twres,@tweak[0]
1717 aesenc $rndkey1,$inout4
1718 pxor $twres,@tweak[1]
1719 movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
1720 aesenc $rndkey1,$inout5
1721 $movkey 48($key_),$rndkey1
1722 pxor $twres,@tweak[2]
1724 aesenc $rndkey0,$inout0
1725 pxor $twres,@tweak[3]
1726 movdqa @tweak[1],`16*1`(%rsp)
1727 aesenc $rndkey0,$inout1
1728 pxor $twres,@tweak[4]
1729 movdqa @tweak[2],`16*2`(%rsp)
1730 aesenc $rndkey0,$inout2
1731 aesenc $rndkey0,$inout3
1732 pxor $twres,$twmask
1733 movdqa @tweak[4],`16*4`(%rsp)
1734 aesenc $rndkey0,$inout4
1735 aesenc $rndkey0,$inout5
1736 $movkey 64($key_),$rndkey0
1737 movdqa $twmask,`16*5`(%rsp)
1738 pshufd \$0x5f,@tweak[5],$twres
1739 jmp .Lxts_enc_loop6
1740 .align 32
# inner round loop: two rounds per iteration, %rax walks the schedule
1741 .Lxts_enc_loop6:
1742 aesenc $rndkey1,$inout0
1743 aesenc $rndkey1,$inout1
1744 aesenc $rndkey1,$inout2
1745 aesenc $rndkey1,$inout3
1746 aesenc $rndkey1,$inout4
1747 aesenc $rndkey1,$inout5
1748 $movkey -64($key,%rax),$rndkey1
1749 add \$32,%rax
1751 aesenc $rndkey0,$inout0
1752 aesenc $rndkey0,$inout1
1753 aesenc $rndkey0,$inout2
1754 aesenc $rndkey0,$inout3
1755 aesenc $rndkey0,$inout4
1756 aesenc $rndkey0,$inout5
1757 $movkey -80($key,%rax),$rndkey0
1758 jnz .Lxts_enc_loop6
# final rounds interleaved with computing tweak[0..4] and tweak[5] for the
# NEXT grandloop iteration (GF doubling via psrad/pand/.Lxts_magic)
1760 movdqa (%r8),$twmask
1761 movdqa $twres,$twtmp
1762 paddd $twres,$twres
1763 aesenc $rndkey1,$inout0
1764 paddq @tweak[5],@tweak[5]
1765 psrad \$31,$twtmp
1766 aesenc $rndkey1,$inout1
1767 pand $twmask,$twtmp
1768 $movkey ($key_),@tweak[0] # load round[0]
1769 aesenc $rndkey1,$inout2
1770 aesenc $rndkey1,$inout3
1771 aesenc $rndkey1,$inout4
1772 pxor $twtmp,@tweak[5]
1773 movaps @tweak[0],@tweak[1] # copy round[0]
1774 aesenc $rndkey1,$inout5
1775 $movkey -64($key),$rndkey1
1777 movdqa $twres,$twtmp
1778 aesenc $rndkey0,$inout0
1779 paddd $twres,$twres
1780 pxor @tweak[5],@tweak[0]
1781 aesenc $rndkey0,$inout1
1782 psrad \$31,$twtmp
1783 paddq @tweak[5],@tweak[5]
1784 aesenc $rndkey0,$inout2
1785 aesenc $rndkey0,$inout3
1786 pand $twmask,$twtmp
1787 movaps @tweak[1],@tweak[2]
1788 aesenc $rndkey0,$inout4
1789 pxor $twtmp,@tweak[5]
1790 movdqa $twres,$twtmp
1791 aesenc $rndkey0,$inout5
1792 $movkey -48($key),$rndkey0
1794 paddd $twres,$twres
1795 aesenc $rndkey1,$inout0
1796 pxor @tweak[5],@tweak[1]
1797 psrad \$31,$twtmp
1798 aesenc $rndkey1,$inout1
1799 paddq @tweak[5],@tweak[5]
1800 pand $twmask,$twtmp
1801 aesenc $rndkey1,$inout2
1802 aesenc $rndkey1,$inout3
1803 movdqa @tweak[3],`16*3`(%rsp)
1804 pxor $twtmp,@tweak[5]
1805 aesenc $rndkey1,$inout4
1806 movaps @tweak[2],@tweak[3]
1807 movdqa $twres,$twtmp
1808 aesenc $rndkey1,$inout5
1809 $movkey -32($key),$rndkey1
1811 paddd $twres,$twres
1812 aesenc $rndkey0,$inout0
1813 pxor @tweak[5],@tweak[2]
1814 psrad \$31,$twtmp
1815 aesenc $rndkey0,$inout1
1816 paddq @tweak[5],@tweak[5]
1817 pand $twmask,$twtmp
1818 aesenc $rndkey0,$inout2
1819 aesenc $rndkey0,$inout3
1820 aesenc $rndkey0,$inout4
1821 pxor $twtmp,@tweak[5]
1822 movaps @tweak[3],@tweak[4]
1823 aesenc $rndkey0,$inout5
1825 movdqa $twres,$rndkey0
1826 paddd $twres,$twres
1827 aesenc $rndkey1,$inout0
1828 pxor @tweak[5],@tweak[3]
1829 psrad \$31,$rndkey0
1830 aesenc $rndkey1,$inout1
1831 paddq @tweak[5],@tweak[5]
1832 pand $twmask,$rndkey0
1833 aesenc $rndkey1,$inout2
1834 aesenc $rndkey1,$inout3
1835 pxor $rndkey0,@tweak[5]
1836 $movkey ($key_),$rndkey0
1837 aesenc $rndkey1,$inout4
1838 aesenc $rndkey1,$inout5
1839 $movkey 16($key_),$rndkey1
1841 pxor @tweak[5],@tweak[4]
# aesenclast folds tweak^last-round-key (parked on the stack) in one shot
1842 aesenclast `16*0`(%rsp),$inout0
1843 psrad \$31,$twres
1844 paddq @tweak[5],@tweak[5]
1845 aesenclast `16*1`(%rsp),$inout1
1846 aesenclast `16*2`(%rsp),$inout2
1847 pand $twmask,$twres
1848 mov %r10,%rax # restore $rounds
1849 aesenclast `16*3`(%rsp),$inout3
1850 aesenclast `16*4`(%rsp),$inout4
1851 aesenclast `16*5`(%rsp),$inout5
1852 pxor $twres,@tweak[5]
1854 lea `16*6`($out),$out
1855 movups $inout0,`-16*6`($out) # write output
1856 movups $inout1,`-16*5`($out)
1857 movups $inout2,`-16*4`($out)
1858 movups $inout3,`-16*3`($out)
1859 movups $inout4,`-16*2`($out)
1860 movups $inout5,`-16*1`($out)
1861 sub \$16*6,$len
1862 jnc .Lxts_enc_grandloop
# restore $rounds/$key mangled by the grandloop setup
1864 mov \$16+96,$rounds
1865 sub $rnds_,$rounds
1866 mov $key_,$key # restore $key
1867 shr \$4,$rounds # restore original value
# Short tail: 0..5 whole blocks remain ($len was biased by -16*6). Tweaks
# still carry the zero-round-key XOR from the prologue, hence the pxor
# "un-bias" before each multi-block sub-path.
1869 .Lxts_enc_short:
1870 mov $rounds,$rnds_ # backup $rounds
1871 pxor $rndkey0,@tweak[0]
1872 add \$16*6,$len
1873 jz .Lxts_enc_done
1875 pxor $rndkey0,@tweak[1]
1876 cmp \$0x20,$len
1877 jb .Lxts_enc_one
1878 pxor $rndkey0,@tweak[2]
1879 je .Lxts_enc_two
1881 pxor $rndkey0,@tweak[3]
1882 cmp \$0x40,$len
1883 jb .Lxts_enc_three
1884 pxor $rndkey0,@tweak[4]
1885 je .Lxts_enc_four
# five blocks: use the shared 6-lane encryptor, sixth lane unused
1887 movdqu ($inp),$inout0
1888 movdqu 16*1($inp),$inout1
1889 movdqu 16*2($inp),$inout2
1890 pxor @tweak[0],$inout0
1891 movdqu 16*3($inp),$inout3
1892 pxor @tweak[1],$inout1
1893 movdqu 16*4($inp),$inout4
1894 lea 16*5($inp),$inp
1895 pxor @tweak[2],$inout2
1896 pxor @tweak[3],$inout3
1897 pxor @tweak[4],$inout4
1899 call _aesni_encrypt6
1901 xorps @tweak[0],$inout0
1902 movdqa @tweak[5],@tweak[0]
1903 xorps @tweak[1],$inout1
1904 xorps @tweak[2],$inout2
1905 movdqu $inout0,($out)
1906 xorps @tweak[3],$inout3
1907 movdqu $inout1,16*1($out)
1908 xorps @tweak[4],$inout4
1909 movdqu $inout2,16*2($out)
1910 movdqu $inout3,16*3($out)
1911 movdqu $inout4,16*4($out)
1912 lea 16*5($out),$out
1913 jmp .Lxts_enc_done
1915 .align 16
1916 .Lxts_enc_one:
1917 movups ($inp),$inout0
1918 lea 16*1($inp),$inp
1919 xorps @tweak[0],$inout0
1921 &aesni_generate1("enc",$key,$rounds);
1922 $code.=<<___;
1923 xorps @tweak[0],$inout0
# advance the saved tweak so the stealing code below sees the right one
1924 movdqa @tweak[1],@tweak[0]
1925 movups $inout0,($out)
1926 lea 16*1($out),$out
1927 jmp .Lxts_enc_done
1929 .align 16
1930 .Lxts_enc_two:
1931 movups ($inp),$inout0
1932 movups 16($inp),$inout1
1933 lea 32($inp),$inp
1934 xorps @tweak[0],$inout0
1935 xorps @tweak[1],$inout1
1937 call _aesni_encrypt2
1939 xorps @tweak[0],$inout0
1940 movdqa @tweak[2],@tweak[0]
1941 xorps @tweak[1],$inout1
1942 movups $inout0,($out)
1943 movups $inout1,16*1($out)
1944 lea 16*2($out),$out
1945 jmp .Lxts_enc_done
1947 .align 16
1948 .Lxts_enc_three:
1949 movups ($inp),$inout0
1950 movups 16*1($inp),$inout1
1951 movups 16*2($inp),$inout2
1952 lea 16*3($inp),$inp
1953 xorps @tweak[0],$inout0
1954 xorps @tweak[1],$inout1
1955 xorps @tweak[2],$inout2
1957 call _aesni_encrypt3
1959 xorps @tweak[0],$inout0
1960 movdqa @tweak[3],@tweak[0]
1961 xorps @tweak[1],$inout1
1962 xorps @tweak[2],$inout2
1963 movups $inout0,($out)
1964 movups $inout1,16*1($out)
1965 movups $inout2,16*2($out)
1966 lea 16*3($out),$out
1967 jmp .Lxts_enc_done
1969 .align 16
1970 .Lxts_enc_four:
1971 movups ($inp),$inout0
1972 movups 16*1($inp),$inout1
1973 movups 16*2($inp),$inout2
1974 xorps @tweak[0],$inout0
1975 movups 16*3($inp),$inout3
1976 lea 16*4($inp),$inp
1977 xorps @tweak[1],$inout1
1978 xorps @tweak[2],$inout2
1979 xorps @tweak[3],$inout3
1981 call _aesni_encrypt4
1983 pxor @tweak[0],$inout0
1984 movdqa @tweak[4],@tweak[0]
1985 pxor @tweak[1],$inout1
1986 pxor @tweak[2],$inout2
1987 movdqu $inout0,($out)
1988 pxor @tweak[3],$inout3
1989 movdqu $inout1,16*1($out)
1990 movdqu $inout2,16*2($out)
1991 movdqu $inout3,16*3($out)
1992 lea 16*4($out),$out
1993 jmp .Lxts_enc_done
1995 .align 16
# ciphertext stealing: if len%16 != 0, byte-swap the final partial block
# with the tail of the previous ciphertext block, then re-encrypt it
1996 .Lxts_enc_done:
1997 and \$15,$len_
1998 jz .Lxts_enc_ret
1999 mov $len_,$len
2001 .Lxts_enc_steal:
2002 movzb ($inp),%eax # borrow $rounds ...
2003 movzb -16($out),%ecx # ... and $key
2004 lea 1($inp),$inp
2005 mov %al,-16($out)
2006 mov %cl,0($out)
2007 lea 1($out),$out
2008 sub \$1,$len
2009 jnz .Lxts_enc_steal
2011 sub $len_,$out # rewind $out
2012 mov $key_,$key # restore $key
2013 mov $rnds_,$rounds # restore $rounds
2015 movups -16($out),$inout0
2016 xorps @tweak[0],$inout0
2018 &aesni_generate1("enc",$key,$rounds);
2019 $code.=<<___;
2020 xorps @tweak[0],$inout0
2021 movups $inout0,-16($out)
2023 .Lxts_enc_ret:
2025 $code.=<<___ if ($win64);
# Win64 ABI: restore xmm6-xmm15
2026 movaps -0xa0(%rbp),%xmm6
2027 movaps -0x90(%rbp),%xmm7
2028 movaps -0x80(%rbp),%xmm8
2029 movaps -0x70(%rbp),%xmm9
2030 movaps -0x60(%rbp),%xmm10
2031 movaps -0x50(%rbp),%xmm11
2032 movaps -0x40(%rbp),%xmm12
2033 movaps -0x30(%rbp),%xmm13
2034 movaps -0x20(%rbp),%xmm14
2035 movaps -0x10(%rbp),%xmm15
2037 $code.=<<___;
2038 lea (%rbp),%rsp
2039 pop %rbp
2040 .Lxts_enc_epilogue:
2042 .size aesni_xts_encrypt,.-aesni_xts_encrypt
# aesni_xts_decrypt: mirrors the encrypt prologue. One extra step: when the
# total length is not a multiple of 16, the last complete block is held back
# ($len -= 16) so ciphertext stealing can run with the next-to-last tweak.
2045 $code.=<<___;
2046 .globl aesni_xts_decrypt
2047 .type aesni_xts_decrypt,\@function,6
2048 .align 16
2049 aesni_xts_decrypt:
2050 lea (%rsp),%rax
2051 push %rbp
2052 sub \$$frame_size,%rsp
2053 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
2055 $code.=<<___ if ($win64);
# Win64 ABI: save non-volatile xmm6-xmm15
2056 movaps %xmm6,-0xa8(%rax)
2057 movaps %xmm7,-0x98(%rax)
2058 movaps %xmm8,-0x88(%rax)
2059 movaps %xmm9,-0x78(%rax)
2060 movaps %xmm10,-0x68(%rax)
2061 movaps %xmm11,-0x58(%rax)
2062 movaps %xmm12,-0x48(%rax)
2063 movaps %xmm13,-0x38(%rax)
2064 movaps %xmm14,-0x28(%rax)
2065 movaps %xmm15,-0x18(%rax)
2066 .Lxts_dec_body:
2068 $code.=<<___;
2069 lea -8(%rax),%rbp
2070 movups ($ivp),$inout0 # load clear-text tweak
2071 mov 240($key2),$rounds # key2->rounds
2072 mov 240($key),$rnds_ # key1->rounds
2074 # generate the tweak
2075 &aesni_generate1("enc",$key2,$rounds,$inout0);
2076 $code.=<<___;
2077 xor %eax,%eax # if ($len%16) len-=16;
2078 test \$15,$len
2079 setnz %al
2080 shl \$4,%rax
2081 sub %rax,$len
2083 $movkey ($key),$rndkey0 # zero round key
2084 mov $key,$key_ # backup $key
2085 mov $rnds_,$rounds # backup $rounds
2086 shl \$4,$rnds_
2087 mov $len,$len_ # backup $len
2088 and \$-16,$len
2090 $movkey 16($key,$rnds_),$rndkey1 # last round key
2092 movdqa .Lxts_magic(%rip),$twmask
2093 movdqa $inout0,@tweak[5]
2094 pshufd \$0x5f,$inout0,$twres
2095 pxor $rndkey0,$rndkey1
# Perl-side unroll: derive tweak[0..3] pre-XORed with the zero-round key,
# same GF(2^128) doubling scheme as the encrypt side
2097 for ($i=0;$i<4;$i++) {
2098 $code.=<<___;
2099 movdqa $twres,$twtmp
2100 paddd $twres,$twres
2101 movdqa @tweak[5],@tweak[$i]
2102 psrad \$31,$twtmp # broadcast upper bits
2103 paddq @tweak[5],@tweak[5]
2104 pand $twmask,$twtmp
2105 pxor $rndkey0,@tweak[$i]
2106 pxor $twtmp,@tweak[5]
2109 $code.=<<___;
2110 movdqa @tweak[5],@tweak[4]
2111 psrad \$31,$twres
2112 paddq @tweak[5],@tweak[5]
2113 pand $twmask,$twres
2114 pxor $rndkey0,@tweak[4]
2115 pxor $twres,@tweak[5]
2116 movaps $rndkey1,0x60(%rsp) # save round[0]^round[last]
2118 sub \$16*6,$len
2119 jc .Lxts_dec_short
2121 mov \$16+96,$rounds
2122 lea 32($key_,$rnds_),$key # end of key schedule
2123 sub %r10,%rax # twisted $rounds
2124 $movkey 16($key_),$rndkey1
2125 mov %rax,%r10 # backup twisted $rounds
2126 lea .Lxts_magic(%rip),%r8
2127 jmp .Lxts_dec_grandloop
2129 .align 32
# Decrypt grand loop: byte-for-byte mirror of .Lxts_enc_grandloop with
# aesdec/aesdeclast in place of aesenc/aesenclast; tweak schedule is
# identical (tweaks always advance with the *encrypting* doubling).
# NOTE(review): this region continues past the end of the visible chunk.
2130 .Lxts_dec_grandloop:
2131 movdqu `16*0`($inp),$inout0 # load input
2132 movdqa $rndkey0,$twmask
2133 movdqu `16*1`($inp),$inout1
2134 pxor @tweak[0],$inout0
2135 movdqu `16*2`($inp),$inout2
2136 pxor @tweak[1],$inout1
2137 aesdec $rndkey1,$inout0
2138 movdqu `16*3`($inp),$inout3
2139 pxor @tweak[2],$inout2
2140 aesdec $rndkey1,$inout1
2141 movdqu `16*4`($inp),$inout4
2142 pxor @tweak[3],$inout3
2143 aesdec $rndkey1,$inout2
2144 movdqu `16*5`($inp),$inout5
2145 pxor @tweak[5],$twmask # round[0]^=tweak[5]
2146 movdqa 0x60(%rsp),$twres # load round[0]^round[last]
2147 pxor @tweak[4],$inout4
2148 aesdec $rndkey1,$inout3
2149 $movkey 32($key_),$rndkey0
2150 lea `16*6`($inp),$inp
2151 pxor $twmask,$inout5
2153 pxor $twres,@tweak[0]
2154 aesdec $rndkey1,$inout4
2155 pxor $twres,@tweak[1]
2156 movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
2157 aesdec $rndkey1,$inout5
2158 $movkey 48($key_),$rndkey1
2159 pxor $twres,@tweak[2]
2161 aesdec $rndkey0,$inout0
2162 pxor $twres,@tweak[3]
2163 movdqa @tweak[1],`16*1`(%rsp)
2164 aesdec $rndkey0,$inout1
2165 pxor $twres,@tweak[4]
2166 movdqa @tweak[2],`16*2`(%rsp)
2167 aesdec $rndkey0,$inout2
2168 aesdec $rndkey0,$inout3
2169 pxor $twres,$twmask
2170 movdqa @tweak[4],`16*4`(%rsp)
2171 aesdec $rndkey0,$inout4
2172 aesdec $rndkey0,$inout5
2173 $movkey 64($key_),$rndkey0
2174 movdqa $twmask,`16*5`(%rsp)
2175 pshufd \$0x5f,@tweak[5],$twres
2176 jmp .Lxts_dec_loop6
2177 .align 32
# inner round loop: two rounds per iteration, %rax walks the schedule
2178 .Lxts_dec_loop6:
2179 aesdec $rndkey1,$inout0
2180 aesdec $rndkey1,$inout1
2181 aesdec $rndkey1,$inout2
2182 aesdec $rndkey1,$inout3
2183 aesdec $rndkey1,$inout4
2184 aesdec $rndkey1,$inout5
2185 $movkey -64($key,%rax),$rndkey1
2186 add \$32,%rax
2188 aesdec $rndkey0,$inout0
2189 aesdec $rndkey0,$inout1
2190 aesdec $rndkey0,$inout2
2191 aesdec $rndkey0,$inout3
2192 aesdec $rndkey0,$inout4
2193 aesdec $rndkey0,$inout5
2194 $movkey -80($key,%rax),$rndkey0
2195 jnz .Lxts_dec_loop6
# final rounds interleaved with next-iteration tweak computation
2197 movdqa (%r8),$twmask
2198 movdqa $twres,$twtmp
2199 paddd $twres,$twres
2200 aesdec $rndkey1,$inout0
2201 paddq @tweak[5],@tweak[5]
2202 psrad \$31,$twtmp
2203 aesdec $rndkey1,$inout1
2204 pand $twmask,$twtmp
2205 $movkey ($key_),@tweak[0] # load round[0]
2206 aesdec $rndkey1,$inout2
2207 aesdec $rndkey1,$inout3
2208 aesdec $rndkey1,$inout4
2209 pxor $twtmp,@tweak[5]
2210 movaps @tweak[0],@tweak[1] # copy round[0]
2211 aesdec $rndkey1,$inout5
2212 $movkey -64($key),$rndkey1
2214 movdqa $twres,$twtmp
2215 aesdec $rndkey0,$inout0
2216 paddd $twres,$twres
2217 pxor @tweak[5],@tweak[0]
2218 aesdec $rndkey0,$inout1
2219 psrad \$31,$twtmp
2220 paddq @tweak[5],@tweak[5]
2221 aesdec $rndkey0,$inout2
2222 aesdec $rndkey0,$inout3
2223 pand $twmask,$twtmp
2224 movaps @tweak[1],@tweak[2]
2225 aesdec $rndkey0,$inout4
2226 pxor $twtmp,@tweak[5]
2227 movdqa $twres,$twtmp
2228 aesdec $rndkey0,$inout5
2229 $movkey -48($key),$rndkey0
2231 paddd $twres,$twres
2232 aesdec $rndkey1,$inout0
2233 pxor @tweak[5],@tweak[1]
2234 psrad \$31,$twtmp
2235 aesdec $rndkey1,$inout1
2236 paddq @tweak[5],@tweak[5]
2237 pand $twmask,$twtmp
2238 aesdec $rndkey1,$inout2
2239 aesdec $rndkey1,$inout3
2240 movdqa @tweak[3],`16*3`(%rsp)
2241 pxor $twtmp,@tweak[5]
2242 aesdec $rndkey1,$inout4
2243 movaps @tweak[2],@tweak[3]
2244 movdqa $twres,$twtmp
2245 aesdec $rndkey1,$inout5
2246 $movkey -32($key),$rndkey1
2248 paddd $twres,$twres
2249 aesdec $rndkey0,$inout0
2250 pxor @tweak[5],@tweak[2]
2251 psrad \$31,$twtmp
2252 aesdec $rndkey0,$inout1
2253 paddq @tweak[5],@tweak[5]
2254 pand $twmask,$twtmp
2255 aesdec $rndkey0,$inout2
2256 aesdec $rndkey0,$inout3
2257 aesdec $rndkey0,$inout4
2258 pxor $twtmp,@tweak[5]
2259 movaps @tweak[3],@tweak[4]
2260 aesdec $rndkey0,$inout5
2262 movdqa $twres,$rndkey0
2263 paddd $twres,$twres
2264 aesdec $rndkey1,$inout0
2265 pxor @tweak[5],@tweak[3]
2266 psrad \$31,$rndkey0
2267 aesdec $rndkey1,$inout1
2268 paddq @tweak[5],@tweak[5]
2269 pand $twmask,$rndkey0
2270 aesdec $rndkey1,$inout2
2271 aesdec $rndkey1,$inout3
2272 pxor $rndkey0,@tweak[5]
2273 $movkey ($key_),$rndkey0
2274 aesdec $rndkey1,$inout4
2275 aesdec $rndkey1,$inout5
2276 $movkey 16($key_),$rndkey1
2278 pxor @tweak[5],@tweak[4]
# aesdeclast folds tweak^last-round-key (parked on the stack) in one shot
2279 aesdeclast `16*0`(%rsp),$inout0
2280 psrad \$31,$twres
2281 paddq @tweak[5],@tweak[5]
2282 aesdeclast `16*1`(%rsp),$inout1
2283 aesdeclast `16*2`(%rsp),$inout2
2284 pand $twmask,$twres
2285 mov %r10,%rax # restore $rounds
2286 aesdeclast `16*3`(%rsp),$inout3
2287 aesdeclast `16*4`(%rsp),$inout4
2288 aesdeclast `16*5`(%rsp),$inout5
2289 pxor $twres,@tweak[5]
2291 lea `16*6`($out),$out
2292 movups $inout0,`-16*6`($out) # write output
2293 movups $inout1,`-16*5`($out)
2294 movups $inout2,`-16*4`($out)
2295 movups $inout3,`-16*3`($out)
2296 movups $inout4,`-16*2`($out)
2297 movups $inout5,`-16*1`($out)
2298 sub \$16*6,$len
2299 jnc .Lxts_dec_grandloop
2301 mov \$16+96,$rounds
2302 sub $rnds_,$rounds
2303 mov $key_,$key # restore $key
2304 shr \$4,$rounds # restore original value
2306 .Lxts_dec_short:
2307 mov $rounds,$rnds_ # backup $rounds
2308 pxor $rndkey0,@tweak[0]
2309 pxor $rndkey0,@tweak[1]
2310 add \$16*6,$len
2311 jz .Lxts_dec_done
2313 pxor $rndkey0,@tweak[2]
2314 cmp \$0x20,$len
2315 jb .Lxts_dec_one
2316 pxor $rndkey0,@tweak[3]
2317 je .Lxts_dec_two
2319 pxor $rndkey0,@tweak[4]
2320 cmp \$0x40,$len
2321 jb .Lxts_dec_three
2322 je .Lxts_dec_four
2324 movdqu ($inp),$inout0
2325 movdqu 16*1($inp),$inout1
2326 movdqu 16*2($inp),$inout2
2327 pxor @tweak[0],$inout0
2328 movdqu 16*3($inp),$inout3
2329 pxor @tweak[1],$inout1
2330 movdqu 16*4($inp),$inout4
2331 lea 16*5($inp),$inp
2332 pxor @tweak[2],$inout2
2333 pxor @tweak[3],$inout3
2334 pxor @tweak[4],$inout4
2336 call _aesni_decrypt6
2338 xorps @tweak[0],$inout0
2339 xorps @tweak[1],$inout1
2340 xorps @tweak[2],$inout2
2341 movdqu $inout0,($out)
2342 xorps @tweak[3],$inout3
2343 movdqu $inout1,16*1($out)
2344 xorps @tweak[4],$inout4
2345 movdqu $inout2,16*2($out)
2346 pxor $twtmp,$twtmp
2347 movdqu $inout3,16*3($out)
2348 pcmpgtd @tweak[5],$twtmp
2349 movdqu $inout4,16*4($out)
2350 lea 16*5($out),$out
2351 pshufd \$0x13,$twtmp,@tweak[1] # $twres
2352 and \$15,$len_
2353 jz .Lxts_dec_ret
2355 movdqa @tweak[5],@tweak[0]
2356 paddq @tweak[5],@tweak[5] # psllq 1,$tweak
2357 pand $twmask,@tweak[1] # isolate carry and residue
2358 pxor @tweak[5],@tweak[1]
2359 jmp .Lxts_dec_done2
2361 .align 16
2362 .Lxts_dec_one:
2363 movups ($inp),$inout0
2364 lea 16*1($inp),$inp
2365 xorps @tweak[0],$inout0
2367 &aesni_generate1("dec",$key,$rounds);
2368 $code.=<<___;
2369 xorps @tweak[0],$inout0
2370 movdqa @tweak[1],@tweak[0]
2371 movups $inout0,($out)
2372 movdqa @tweak[2],@tweak[1]
2373 lea 16*1($out),$out
2374 jmp .Lxts_dec_done
2376 .align 16
2377 .Lxts_dec_two:
2378 movups ($inp),$inout0
2379 movups 16($inp),$inout1
2380 lea 32($inp),$inp
2381 xorps @tweak[0],$inout0
2382 xorps @tweak[1],$inout1
2384 call _aesni_decrypt2
2386 xorps @tweak[0],$inout0
2387 movdqa @tweak[2],@tweak[0]
2388 xorps @tweak[1],$inout1
2389 movdqa @tweak[3],@tweak[1]
2390 movups $inout0,($out)
2391 movups $inout1,16*1($out)
2392 lea 16*2($out),$out
2393 jmp .Lxts_dec_done
2395 .align 16
2396 .Lxts_dec_three:
2397 movups ($inp),$inout0
2398 movups 16*1($inp),$inout1
2399 movups 16*2($inp),$inout2
2400 lea 16*3($inp),$inp
2401 xorps @tweak[0],$inout0
2402 xorps @tweak[1],$inout1
2403 xorps @tweak[2],$inout2
2405 call _aesni_decrypt3
2407 xorps @tweak[0],$inout0
2408 movdqa @tweak[3],@tweak[0]
2409 xorps @tweak[1],$inout1
2410 movdqa @tweak[4],@tweak[1]
2411 xorps @tweak[2],$inout2
2412 movups $inout0,($out)
2413 movups $inout1,16*1($out)
2414 movups $inout2,16*2($out)
2415 lea 16*3($out),$out
2416 jmp .Lxts_dec_done
2418 .align 16
2419 .Lxts_dec_four:
2420 movups ($inp),$inout0
2421 movups 16*1($inp),$inout1
2422 movups 16*2($inp),$inout2
2423 xorps @tweak[0],$inout0
2424 movups 16*3($inp),$inout3
2425 lea 16*4($inp),$inp
2426 xorps @tweak[1],$inout1
2427 xorps @tweak[2],$inout2
2428 xorps @tweak[3],$inout3
2430 call _aesni_decrypt4
2432 pxor @tweak[0],$inout0
2433 movdqa @tweak[4],@tweak[0]
2434 pxor @tweak[1],$inout1
2435 movdqa @tweak[5],@tweak[1]
2436 pxor @tweak[2],$inout2
2437 movdqu $inout0,($out)
2438 pxor @tweak[3],$inout3
2439 movdqu $inout1,16*1($out)
2440 movdqu $inout2,16*2($out)
2441 movdqu $inout3,16*3($out)
2442 lea 16*4($out),$out
2443 jmp .Lxts_dec_done
2445 .align 16
2446 .Lxts_dec_done:
2447 and \$15,$len_
2448 jz .Lxts_dec_ret
2449 .Lxts_dec_done2:
2450 mov $len_,$len
2451 mov $key_,$key # restore $key
2452 mov $rnds_,$rounds # restore $rounds
2454 movups ($inp),$inout0
2455 xorps @tweak[1],$inout0
2457 &aesni_generate1("dec",$key,$rounds);
2458 $code.=<<___;
2459 xorps @tweak[1],$inout0
2460 movups $inout0,($out)
2462 .Lxts_dec_steal:
2463 movzb 16($inp),%eax # borrow $rounds ...
2464 movzb ($out),%ecx # ... and $key
2465 lea 1($inp),$inp
2466 mov %al,($out)
2467 mov %cl,16($out)
2468 lea 1($out),$out
2469 sub \$1,$len
2470 jnz .Lxts_dec_steal
2472 sub $len_,$out # rewind $out
2473 mov $key_,$key # restore $key
2474 mov $rnds_,$rounds # restore $rounds
2476 movups ($out),$inout0
2477 xorps @tweak[0],$inout0
2479 &aesni_generate1("dec",$key,$rounds);
2480 $code.=<<___;
2481 xorps @tweak[0],$inout0
2482 movups $inout0,($out)
2484 .Lxts_dec_ret:
2486 $code.=<<___ if ($win64);
2487 movaps -0xa0(%rbp),%xmm6
2488 movaps -0x90(%rbp),%xmm7
2489 movaps -0x80(%rbp),%xmm8
2490 movaps -0x70(%rbp),%xmm9
2491 movaps -0x60(%rbp),%xmm10
2492 movaps -0x50(%rbp),%xmm11
2493 movaps -0x40(%rbp),%xmm12
2494 movaps -0x30(%rbp),%xmm13
2495 movaps -0x20(%rbp),%xmm14
2496 movaps -0x10(%rbp),%xmm15
2498 $code.=<<___;
2499 lea (%rbp),%rsp
2500 pop %rbp
2501 .Lxts_dec_epilogue:
2503 .size aesni_xts_decrypt,.-aesni_xts_decrypt
2505 } }}
2507 ########################################################################
2508 # void $PREFIX_cbc_encrypt (const void *inp, void *out,
2509 # size_t length, const AES_KEY *key,
2510 # unsigned char *ivp,const int enc);
2512 my $frame_size = 0x10 + ($win64?0xa0:0); # used in decrypt
2513 my ($iv,$in0,$in1,$in2,$in3,$in4)=map("%xmm$_",(10..15));
2514 my $inp_=$key_;
2516 $code.=<<___;
2517 .globl ${PREFIX}_cbc_encrypt
2518 .type ${PREFIX}_cbc_encrypt,\@function,6
2519 .align 16
2520 ${PREFIX}_cbc_encrypt:
2521 test $len,$len # check length
2522 jz .Lcbc_ret
2524 mov 240($key),$rnds_ # key->rounds
2525 mov $key,$key_ # backup $key
2526 test %r9d,%r9d # 6th argument
2527 jz .Lcbc_decrypt
2528 #--------------------------- CBC ENCRYPT ------------------------------#
2529 movups ($ivp),$inout0 # load iv as initial state
2530 mov $rnds_,$rounds
2531 cmp \$16,$len
2532 jb .Lcbc_enc_tail
2533 sub \$16,$len
2534 jmp .Lcbc_enc_loop
2535 .align 16
2536 .Lcbc_enc_loop:
2537 movups ($inp),$inout1 # load input
2538 lea 16($inp),$inp
2539 #xorps $inout1,$inout0
2541 &aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
2542 $code.=<<___;
2543 mov $rnds_,$rounds # restore $rounds
2544 mov $key_,$key # restore $key
2545 movups $inout0,0($out) # store output
2546 lea 16($out),$out
2547 sub \$16,$len
2548 jnc .Lcbc_enc_loop
2549 add \$16,$len
2550 jnz .Lcbc_enc_tail
2551 movups $inout0,($ivp)
2552 jmp .Lcbc_ret
2554 .Lcbc_enc_tail:
2555 mov $len,%rcx # zaps $key
2556 xchg $inp,$out # $inp is %rsi and $out is %rdi now
2557 .long 0x9066A4F3 # rep movsb
2558 mov \$16,%ecx # zero tail
2559 sub $len,%rcx
2560 xor %eax,%eax
2561 .long 0x9066AAF3 # rep stosb
2562 lea -16(%rdi),%rdi # rewind $out by 1 block
2563 mov $rnds_,$rounds # restore $rounds
2564 mov %rdi,%rsi # $inp and $out are the same
2565 mov $key_,$key # restore $key
2566 xor $len,$len # len=16
2567 jmp .Lcbc_enc_loop # one more spin
2568 \f#--------------------------- CBC DECRYPT ------------------------------#
2569 .align 16
2570 .Lcbc_decrypt:
2571 lea (%rsp),%rax
2572 push %rbp
2573 sub \$$frame_size,%rsp
2574 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
2576 $code.=<<___ if ($win64);
2577 movaps %xmm6,0x10(%rsp)
2578 movaps %xmm7,0x20(%rsp)
2579 movaps %xmm8,0x30(%rsp)
2580 movaps %xmm9,0x40(%rsp)
2581 movaps %xmm10,0x50(%rsp)
2582 movaps %xmm11,0x60(%rsp)
2583 movaps %xmm12,0x70(%rsp)
2584 movaps %xmm13,0x80(%rsp)
2585 movaps %xmm14,0x90(%rsp)
2586 movaps %xmm15,0xa0(%rsp)
2587 .Lcbc_decrypt_body:
2589 $code.=<<___;
2590 lea -8(%rax),%rbp
2591 movups ($ivp),$iv
2592 mov $rnds_,$rounds
2593 cmp \$0x50,$len
2594 jbe .Lcbc_dec_tail
2596 $movkey ($key),$rndkey0
2597 movdqu 0x00($inp),$inout0 # load input
2598 movdqu 0x10($inp),$inout1
2599 movdqa $inout0,$in0
2600 movdqu 0x20($inp),$inout2
2601 movdqa $inout1,$in1
2602 movdqu 0x30($inp),$inout3
2603 movdqa $inout2,$in2
2604 movdqu 0x40($inp),$inout4
2605 movdqa $inout3,$in3
2606 movdqu 0x50($inp),$inout5
2607 movdqa $inout4,$in4
2608 mov OPENSSL_ia32cap_P+4(%rip),%r9d
2609 cmp \$0x70,$len
2610 jbe .Lcbc_dec_six_or_seven
2612 and \$`1<<26|1<<22`,%r9d # isolate XSAVE+MOVBE
2613 sub \$0x50,$len
2614 cmp \$`1<<22`,%r9d # check for MOVBE without XSAVE
2615 je .Lcbc_dec_loop6_enter
2616 sub \$0x20,$len
2617 lea 0x70($key),$key # size optimization
2618 jmp .Lcbc_dec_loop8_enter
2619 .align 16
2620 .Lcbc_dec_loop8:
2621 movups $inout7,($out)
2622 lea 0x10($out),$out
2623 .Lcbc_dec_loop8_enter:
2624 movdqu 0x60($inp),$inout6
2625 pxor $rndkey0,$inout0
2626 movdqu 0x70($inp),$inout7
2627 pxor $rndkey0,$inout1
2628 $movkey 0x10-0x70($key),$rndkey1
2629 pxor $rndkey0,$inout2
2630 xor $inp_,$inp_
2631 cmp \$0x70,$len # is there at least 0x60 bytes ahead?
2632 pxor $rndkey0,$inout3
2633 pxor $rndkey0,$inout4
2634 pxor $rndkey0,$inout5
2635 pxor $rndkey0,$inout6
2637 aesdec $rndkey1,$inout0
2638 pxor $rndkey0,$inout7
2639 $movkey 0x20-0x70($key),$rndkey0
2640 aesdec $rndkey1,$inout1
2641 aesdec $rndkey1,$inout2
2642 aesdec $rndkey1,$inout3
2643 aesdec $rndkey1,$inout4
2644 aesdec $rndkey1,$inout5
2645 aesdec $rndkey1,$inout6
2646 setnc ${inp_}b
2647 shl \$7,$inp_
2648 aesdec $rndkey1,$inout7
2649 add $inp,$inp_
2650 $movkey 0x30-0x70($key),$rndkey1
2652 for($i=1;$i<12;$i++) {
2653 my $rndkeyx = ($i&1)?$rndkey0:$rndkey1;
2654 $code.=<<___ if ($i==7);
2655 cmp \$11,$rounds
2657 $code.=<<___;
2658 aesdec $rndkeyx,$inout0
2659 aesdec $rndkeyx,$inout1
2660 aesdec $rndkeyx,$inout2
2661 aesdec $rndkeyx,$inout3
2662 aesdec $rndkeyx,$inout4
2663 aesdec $rndkeyx,$inout5
2664 aesdec $rndkeyx,$inout6
2665 aesdec $rndkeyx,$inout7
2666 $movkey `0x30+0x10*$i`-0x70($key),$rndkeyx
2668 $code.=<<___ if ($i<6 || (!($i&1) && $i>7));
2671 $code.=<<___ if ($i==7);
2672 jb .Lcbc_dec_done
2674 $code.=<<___ if ($i==9);
2675 je .Lcbc_dec_done
2677 $code.=<<___ if ($i==11);
2678 jmp .Lcbc_dec_done
2681 $code.=<<___;
2682 .align 16
2683 .Lcbc_dec_done:
2684 aesdec $rndkey1,$inout0
2685 aesdec $rndkey1,$inout1
2686 pxor $rndkey0,$iv
2687 pxor $rndkey0,$in0
2688 aesdec $rndkey1,$inout2
2689 aesdec $rndkey1,$inout3
2690 pxor $rndkey0,$in1
2691 pxor $rndkey0,$in2
2692 aesdec $rndkey1,$inout4
2693 aesdec $rndkey1,$inout5
2694 pxor $rndkey0,$in3
2695 pxor $rndkey0,$in4
2696 aesdec $rndkey1,$inout6
2697 aesdec $rndkey1,$inout7
2698 movdqu 0x50($inp),$rndkey1
2700 aesdeclast $iv,$inout0
2701 movdqu 0x60($inp),$iv # borrow $iv
2702 pxor $rndkey0,$rndkey1
2703 aesdeclast $in0,$inout1
2704 pxor $rndkey0,$iv
2705 movdqu 0x70($inp),$rndkey0 # next IV
2706 aesdeclast $in1,$inout2
2707 lea 0x80($inp),$inp
2708 movdqu 0x00($inp_),$in0
2709 aesdeclast $in2,$inout3
2710 aesdeclast $in3,$inout4
2711 movdqu 0x10($inp_),$in1
2712 movdqu 0x20($inp_),$in2
2713 aesdeclast $in4,$inout5
2714 aesdeclast $rndkey1,$inout6
2715 movdqu 0x30($inp_),$in3
2716 movdqu 0x40($inp_),$in4
2717 aesdeclast $iv,$inout7
2718 movdqa $rndkey0,$iv # return $iv
2719 movdqu 0x50($inp_),$rndkey1
2720 $movkey -0x70($key),$rndkey0
2722 movups $inout0,($out) # store output
2723 movdqa $in0,$inout0
2724 movups $inout1,0x10($out)
2725 movdqa $in1,$inout1
2726 movups $inout2,0x20($out)
2727 movdqa $in2,$inout2
2728 movups $inout3,0x30($out)
2729 movdqa $in3,$inout3
2730 movups $inout4,0x40($out)
2731 movdqa $in4,$inout4
2732 movups $inout5,0x50($out)
2733 movdqa $rndkey1,$inout5
2734 movups $inout6,0x60($out)
2735 lea 0x70($out),$out
2737 sub \$0x80,$len
2738 ja .Lcbc_dec_loop8
2740 movaps $inout7,$inout0
2741 lea -0x70($key),$key
2742 add \$0x70,$len
2743 jle .Lcbc_dec_tail_collected
2744 movups $inout7,($out)
2745 lea 0x10($out),$out
2746 cmp \$0x50,$len
2747 jbe .Lcbc_dec_tail
2749 movaps $in0,$inout0
2750 .Lcbc_dec_six_or_seven:
2751 cmp \$0x60,$len
2752 ja .Lcbc_dec_seven
2754 movaps $inout5,$inout6
2755 call _aesni_decrypt6
2756 pxor $iv,$inout0 # ^= IV
2757 movaps $inout6,$iv
2758 pxor $in0,$inout1
2759 movdqu $inout0,($out)
2760 pxor $in1,$inout2
2761 movdqu $inout1,0x10($out)
2762 pxor $in2,$inout3
2763 movdqu $inout2,0x20($out)
2764 pxor $in3,$inout4
2765 movdqu $inout3,0x30($out)
2766 pxor $in4,$inout5
2767 movdqu $inout4,0x40($out)
2768 lea 0x50($out),$out
2769 movdqa $inout5,$inout0
2770 jmp .Lcbc_dec_tail_collected
2772 .align 16
2773 .Lcbc_dec_seven:
2774 movups 0x60($inp),$inout6
2775 xorps $inout7,$inout7
2776 call _aesni_decrypt8
2777 movups 0x50($inp),$inout7
2778 pxor $iv,$inout0 # ^= IV
2779 movups 0x60($inp),$iv
2780 pxor $in0,$inout1
2781 movdqu $inout0,($out)
2782 pxor $in1,$inout2
2783 movdqu $inout1,0x10($out)
2784 pxor $in2,$inout3
2785 movdqu $inout2,0x20($out)
2786 pxor $in3,$inout4
2787 movdqu $inout3,0x30($out)
2788 pxor $in4,$inout5
2789 movdqu $inout4,0x40($out)
2790 pxor $inout7,$inout6
2791 movdqu $inout5,0x50($out)
2792 lea 0x60($out),$out
2793 movdqa $inout6,$inout0
2794 jmp .Lcbc_dec_tail_collected
2796 .align 16
2797 .Lcbc_dec_loop6:
2798 movups $inout5,($out)
2799 lea 0x10($out),$out
2800 movdqu 0x00($inp),$inout0 # load input
2801 movdqu 0x10($inp),$inout1
2802 movdqa $inout0,$in0
2803 movdqu 0x20($inp),$inout2
2804 movdqa $inout1,$in1
2805 movdqu 0x30($inp),$inout3
2806 movdqa $inout2,$in2
2807 movdqu 0x40($inp),$inout4
2808 movdqa $inout3,$in3
2809 movdqu 0x50($inp),$inout5
2810 movdqa $inout4,$in4
2811 .Lcbc_dec_loop6_enter:
2812 lea 0x60($inp),$inp
2813 movdqa $inout5,$inout6
2815 call _aesni_decrypt6
2817 pxor $iv,$inout0 # ^= IV
2818 movdqa $inout6,$iv
2819 pxor $in0,$inout1
2820 movdqu $inout0,($out)
2821 pxor $in1,$inout2
2822 movdqu $inout1,0x10($out)
2823 pxor $in2,$inout3
2824 movdqu $inout2,0x20($out)
2825 pxor $in3,$inout4
2826 mov $key_,$key
2827 movdqu $inout3,0x30($out)
2828 pxor $in4,$inout5
2829 mov $rnds_,$rounds
2830 movdqu $inout4,0x40($out)
2831 lea 0x50($out),$out
2832 sub \$0x60,$len
2833 ja .Lcbc_dec_loop6
2835 movdqa $inout5,$inout0
2836 add \$0x50,$len
2837 jle .Lcbc_dec_tail_collected
2838 movups $inout5,($out)
2839 lea 0x10($out),$out
2841 .Lcbc_dec_tail:
2842 movups ($inp),$inout0
2843 sub \$0x10,$len
2844 jbe .Lcbc_dec_one
2846 movups 0x10($inp),$inout1
2847 movaps $inout0,$in0
2848 sub \$0x10,$len
2849 jbe .Lcbc_dec_two
2851 movups 0x20($inp),$inout2
2852 movaps $inout1,$in1
2853 sub \$0x10,$len
2854 jbe .Lcbc_dec_three
2856 movups 0x30($inp),$inout3
2857 movaps $inout2,$in2
2858 sub \$0x10,$len
2859 jbe .Lcbc_dec_four
2861 movups 0x40($inp),$inout4
2862 movaps $inout3,$in3
2863 movaps $inout4,$in4
2864 xorps $inout5,$inout5
2865 call _aesni_decrypt6
2866 pxor $iv,$inout0
2867 movaps $in4,$iv
2868 pxor $in0,$inout1
2869 movdqu $inout0,($out)
2870 pxor $in1,$inout2
2871 movdqu $inout1,0x10($out)
2872 pxor $in2,$inout3
2873 movdqu $inout2,0x20($out)
2874 pxor $in3,$inout4
2875 movdqu $inout3,0x30($out)
2876 lea 0x40($out),$out
2877 movdqa $inout4,$inout0
2878 sub \$0x10,$len
2879 jmp .Lcbc_dec_tail_collected
2881 .align 16
2882 .Lcbc_dec_one:
2883 movaps $inout0,$in0
2885 &aesni_generate1("dec",$key,$rounds);
2886 $code.=<<___;
2887 xorps $iv,$inout0
2888 movaps $in0,$iv
2889 jmp .Lcbc_dec_tail_collected
2890 .align 16
2891 .Lcbc_dec_two:
2892 movaps $inout1,$in1
2893 call _aesni_decrypt2
2894 pxor $iv,$inout0
2895 movaps $in1,$iv
2896 pxor $in0,$inout1
2897 movdqu $inout0,($out)
2898 movdqa $inout1,$inout0
2899 lea 0x10($out),$out
2900 jmp .Lcbc_dec_tail_collected
2901 .align 16
2902 .Lcbc_dec_three:
2903 movaps $inout2,$in2
2904 call _aesni_decrypt3
2905 pxor $iv,$inout0
2906 movaps $in2,$iv
2907 pxor $in0,$inout1
2908 movdqu $inout0,($out)
2909 pxor $in1,$inout2
2910 movdqu $inout1,0x10($out)
2911 movdqa $inout2,$inout0
2912 lea 0x20($out),$out
2913 jmp .Lcbc_dec_tail_collected
2914 .align 16
2915 .Lcbc_dec_four:
2916 movaps $inout3,$in3
2917 call _aesni_decrypt4
2918 pxor $iv,$inout0
2919 movaps $in3,$iv
2920 pxor $in0,$inout1
2921 movdqu $inout0,($out)
2922 pxor $in1,$inout2
2923 movdqu $inout1,0x10($out)
2924 pxor $in2,$inout3
2925 movdqu $inout2,0x20($out)
2926 movdqa $inout3,$inout0
2927 lea 0x30($out),$out
2928 jmp .Lcbc_dec_tail_collected
2930 .align 16
2931 .Lcbc_dec_tail_collected:
2932 movups $iv,($ivp)
2933 and \$15,$len
2934 jnz .Lcbc_dec_tail_partial
2935 movups $inout0,($out)
2936 jmp .Lcbc_dec_ret
2937 .align 16
2938 .Lcbc_dec_tail_partial:
2939 movaps $inout0,(%rsp)
2940 mov \$16,%rcx
2941 mov $out,%rdi
2942 sub $len,%rcx
2943 lea (%rsp),%rsi
2944 .long 0x9066A4F3 # rep movsb
2946 .Lcbc_dec_ret:
2948 $code.=<<___ if ($win64);
2949 movaps 0x10(%rsp),%xmm6
2950 movaps 0x20(%rsp),%xmm7
2951 movaps 0x30(%rsp),%xmm8
2952 movaps 0x40(%rsp),%xmm9
2953 movaps 0x50(%rsp),%xmm10
2954 movaps 0x60(%rsp),%xmm11
2955 movaps 0x70(%rsp),%xmm12
2956 movaps 0x80(%rsp),%xmm13
2957 movaps 0x90(%rsp),%xmm14
2958 movaps 0xa0(%rsp),%xmm15
2960 $code.=<<___;
2961 lea (%rbp),%rsp
2962 pop %rbp
2963 .Lcbc_ret:
2965 .size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
2967 } \f
2968 # int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
2969 # int bits, AES_KEY *key)
2970 { my ($inp,$bits,$key) = @_4args;
2971 $bits =~ s/%r/%e/;
2973 $code.=<<___;
2974 .globl ${PREFIX}_set_decrypt_key
2975 .type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
2976 .align 16
2977 ${PREFIX}_set_decrypt_key:
2978 .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
2979 call __aesni_set_encrypt_key
2980 shl \$4,$bits # rounds-1 after _aesni_set_encrypt_key
2981 test %eax,%eax
2982 jnz .Ldec_key_ret
2983 lea 16($key,$bits),$inp # points at the end of key schedule
2985 $movkey ($key),%xmm0 # just swap
2986 $movkey ($inp),%xmm1
2987 $movkey %xmm0,($inp)
2988 $movkey %xmm1,($key)
2989 lea 16($key),$key
2990 lea -16($inp),$inp
2992 .Ldec_key_inverse:
2993 $movkey ($key),%xmm0 # swap and inverse
2994 $movkey ($inp),%xmm1
2995 aesimc %xmm0,%xmm0
2996 aesimc %xmm1,%xmm1
2997 lea 16($key),$key
2998 lea -16($inp),$inp
2999 $movkey %xmm0,16($inp)
3000 $movkey %xmm1,-16($key)
3001 cmp $key,$inp
3002 ja .Ldec_key_inverse
3004 $movkey ($key),%xmm0 # inverse middle
3005 aesimc %xmm0,%xmm0
3006 $movkey %xmm0,($inp)
3007 .Ldec_key_ret:
3008 add \$8,%rsp
3010 .LSEH_end_set_decrypt_key:
3011 .size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
3014 # This is based on submission by
3016 # Huang Ying <ying.huang@intel.com>
3017 # Vinodh Gopal <vinodh.gopal@intel.com>
3018 # Kahraman Akdemir
3020 # Aggressively optimized with respect to aeskeygenassist's critical path
3021 # and is contained in %xmm0-5 to meet Win64 ABI requirement.
3023 $code.=<<___;
3024 .globl ${PREFIX}_set_encrypt_key
3025 .type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
3026 .align 16
3027 ${PREFIX}_set_encrypt_key:
3028 __aesni_set_encrypt_key:
3029 .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
3030 mov \$-1,%rax
3031 test $inp,$inp
3032 jz .Lenc_key_ret
3033 test $key,$key
3034 jz .Lenc_key_ret
3036 movups ($inp),%xmm0 # pull first 128 bits of *userKey
3037 xorps %xmm4,%xmm4 # low dword of xmm4 is assumed 0
3038 lea 16($key),%rax
3039 cmp \$256,$bits
3040 je .L14rounds
3041 cmp \$192,$bits
3042 je .L12rounds
3043 cmp \$128,$bits
3044 jne .Lbad_keybits
3046 .L10rounds:
3047 mov \$9,$bits # 10 rounds for 128-bit key
3048 $movkey %xmm0,($key) # round 0
3049 aeskeygenassist \$0x1,%xmm0,%xmm1 # round 1
3050 call .Lkey_expansion_128_cold
3051 aeskeygenassist \$0x2,%xmm0,%xmm1 # round 2
3052 call .Lkey_expansion_128
3053 aeskeygenassist \$0x4,%xmm0,%xmm1 # round 3
3054 call .Lkey_expansion_128
3055 aeskeygenassist \$0x8,%xmm0,%xmm1 # round 4
3056 call .Lkey_expansion_128
3057 aeskeygenassist \$0x10,%xmm0,%xmm1 # round 5
3058 call .Lkey_expansion_128
3059 aeskeygenassist \$0x20,%xmm0,%xmm1 # round 6
3060 call .Lkey_expansion_128
3061 aeskeygenassist \$0x40,%xmm0,%xmm1 # round 7
3062 call .Lkey_expansion_128
3063 aeskeygenassist \$0x80,%xmm0,%xmm1 # round 8
3064 call .Lkey_expansion_128
3065 aeskeygenassist \$0x1b,%xmm0,%xmm1 # round 9
3066 call .Lkey_expansion_128
3067 aeskeygenassist \$0x36,%xmm0,%xmm1 # round 10
3068 call .Lkey_expansion_128
3069 $movkey %xmm0,(%rax)
3070 mov $bits,80(%rax) # 240(%rdx)
3071 xor %eax,%eax
3072 jmp .Lenc_key_ret
3074 .align 16
3075 .L12rounds:
3076 movq 16($inp),%xmm2 # remaining 1/3 of *userKey
3077 mov \$11,$bits # 12 rounds for 192
3078 $movkey %xmm0,($key) # round 0
3079 aeskeygenassist \$0x1,%xmm2,%xmm1 # round 1,2
3080 call .Lkey_expansion_192a_cold
3081 aeskeygenassist \$0x2,%xmm2,%xmm1 # round 2,3
3082 call .Lkey_expansion_192b
3083 aeskeygenassist \$0x4,%xmm2,%xmm1 # round 4,5
3084 call .Lkey_expansion_192a
3085 aeskeygenassist \$0x8,%xmm2,%xmm1 # round 5,6
3086 call .Lkey_expansion_192b
3087 aeskeygenassist \$0x10,%xmm2,%xmm1 # round 7,8
3088 call .Lkey_expansion_192a
3089 aeskeygenassist \$0x20,%xmm2,%xmm1 # round 8,9
3090 call .Lkey_expansion_192b
3091 aeskeygenassist \$0x40,%xmm2,%xmm1 # round 10,11
3092 call .Lkey_expansion_192a
3093 aeskeygenassist \$0x80,%xmm2,%xmm1 # round 11,12
3094 call .Lkey_expansion_192b
3095 $movkey %xmm0,(%rax)
3096 mov $bits,48(%rax) # 240(%rdx)
3097 xor %rax, %rax
3098 jmp .Lenc_key_ret
3100 .align 16
3101 .L14rounds:
3102 	movups	16($inp),%xmm2			# remaining half of *userKey
3103 mov \$13,$bits # 14 rounds for 256
3104 lea 16(%rax),%rax
3105 $movkey %xmm0,($key) # round 0
3106 $movkey %xmm2,16($key) # round 1
3107 aeskeygenassist \$0x1,%xmm2,%xmm1 # round 2
3108 call .Lkey_expansion_256a_cold
3109 aeskeygenassist \$0x1,%xmm0,%xmm1 # round 3
3110 call .Lkey_expansion_256b
3111 aeskeygenassist \$0x2,%xmm2,%xmm1 # round 4
3112 call .Lkey_expansion_256a
3113 aeskeygenassist \$0x2,%xmm0,%xmm1 # round 5
3114 call .Lkey_expansion_256b
3115 aeskeygenassist \$0x4,%xmm2,%xmm1 # round 6
3116 call .Lkey_expansion_256a
3117 aeskeygenassist \$0x4,%xmm0,%xmm1 # round 7
3118 call .Lkey_expansion_256b
3119 aeskeygenassist \$0x8,%xmm2,%xmm1 # round 8
3120 call .Lkey_expansion_256a
3121 aeskeygenassist \$0x8,%xmm0,%xmm1 # round 9
3122 call .Lkey_expansion_256b
3123 aeskeygenassist \$0x10,%xmm2,%xmm1 # round 10
3124 call .Lkey_expansion_256a
3125 aeskeygenassist \$0x10,%xmm0,%xmm1 # round 11
3126 call .Lkey_expansion_256b
3127 aeskeygenassist \$0x20,%xmm2,%xmm1 # round 12
3128 call .Lkey_expansion_256a
3129 aeskeygenassist \$0x20,%xmm0,%xmm1 # round 13
3130 call .Lkey_expansion_256b
3131 aeskeygenassist \$0x40,%xmm2,%xmm1 # round 14
3132 call .Lkey_expansion_256a
3133 $movkey %xmm0,(%rax)
3134 mov $bits,16(%rax) # 240(%rdx)
3135 xor %rax,%rax
3136 jmp .Lenc_key_ret
3138 .align 16
3139 .Lbad_keybits:
3140 mov \$-2,%rax
3141 .Lenc_key_ret:
3142 add \$8,%rsp
3144 .LSEH_end_set_encrypt_key:
3146 .align 16
3147 .Lkey_expansion_128:
3148 $movkey %xmm0,(%rax)
3149 lea 16(%rax),%rax
3150 .Lkey_expansion_128_cold:
3151 shufps \$0b00010000,%xmm0,%xmm4
3152 xorps %xmm4, %xmm0
3153 shufps \$0b10001100,%xmm0,%xmm4
3154 xorps %xmm4, %xmm0
3155 shufps \$0b11111111,%xmm1,%xmm1 # critical path
3156 xorps %xmm1,%xmm0
3159 .align 16
3160 .Lkey_expansion_192a:
3161 $movkey %xmm0,(%rax)
3162 lea 16(%rax),%rax
3163 .Lkey_expansion_192a_cold:
3164 movaps %xmm2, %xmm5
3165 .Lkey_expansion_192b_warm:
3166 shufps \$0b00010000,%xmm0,%xmm4
3167 movdqa %xmm2,%xmm3
3168 xorps %xmm4,%xmm0
3169 shufps \$0b10001100,%xmm0,%xmm4
3170 pslldq \$4,%xmm3
3171 xorps %xmm4,%xmm0
3172 pshufd \$0b01010101,%xmm1,%xmm1 # critical path
3173 pxor %xmm3,%xmm2
3174 pxor %xmm1,%xmm0
3175 pshufd \$0b11111111,%xmm0,%xmm3
3176 pxor %xmm3,%xmm2
3179 .align 16
3180 .Lkey_expansion_192b:
3181 movaps %xmm0,%xmm3
3182 shufps \$0b01000100,%xmm0,%xmm5
3183 $movkey %xmm5,(%rax)
3184 shufps \$0b01001110,%xmm2,%xmm3
3185 $movkey %xmm3,16(%rax)
3186 lea 32(%rax),%rax
3187 jmp .Lkey_expansion_192b_warm
3189 .align 16
3190 .Lkey_expansion_256a:
3191 $movkey %xmm2,(%rax)
3192 lea 16(%rax),%rax
3193 .Lkey_expansion_256a_cold:
3194 shufps \$0b00010000,%xmm0,%xmm4
3195 xorps %xmm4,%xmm0
3196 shufps \$0b10001100,%xmm0,%xmm4
3197 xorps %xmm4,%xmm0
3198 shufps \$0b11111111,%xmm1,%xmm1 # critical path
3199 xorps %xmm1,%xmm0
3202 .align 16
3203 .Lkey_expansion_256b:
3204 $movkey %xmm0,(%rax)
3205 lea 16(%rax),%rax
3207 shufps \$0b00010000,%xmm2,%xmm4
3208 xorps %xmm4,%xmm2
3209 shufps \$0b10001100,%xmm2,%xmm4
3210 xorps %xmm4,%xmm2
3211 shufps \$0b10101010,%xmm1,%xmm1 # critical path
3212 xorps %xmm1,%xmm2
3214 .size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
3215 .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
3219 $code.=<<___;
3220 .align 64
3221 .Lbswap_mask:
3222 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
3223 .Lincrement32:
3224 .long 6,6,6,0
3225 .Lincrement64:
3226 .long 1,0,0,0
3227 .Lxts_magic:
3228 .long 0x87,0,1,0
3229 .Lincrement1:
3230 .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
3232 .asciz "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
3233 .align 64
3236 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3237 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
3238 if ($win64) {
3239 $rec="%rcx";
3240 $frame="%rdx";
3241 $context="%r8";
3242 $disp="%r9";
3244 $code.=<<___;
3245 .extern __imp_RtlVirtualUnwind
3247 $code.=<<___ if ($PREFIX eq "aesni");
3248 .type ecb_ccm64_se_handler,\@abi-omnipotent
3249 .align 16
3250 ecb_ccm64_se_handler:
3251 push %rsi
3252 push %rdi
3253 push %rbx
3254 push %rbp
3255 push %r12
3256 push %r13
3257 push %r14
3258 push %r15
3259 pushfq
3260 sub \$64,%rsp
3262 mov 120($context),%rax # pull context->Rax
3263 mov 248($context),%rbx # pull context->Rip
3265 mov 8($disp),%rsi # disp->ImageBase
3266 mov 56($disp),%r11 # disp->HandlerData
3268 mov 0(%r11),%r10d # HandlerData[0]
3269 lea (%rsi,%r10),%r10 # prologue label
3270 cmp %r10,%rbx # context->Rip<prologue label
3271 jb .Lcommon_seh_tail
3273 mov 152($context),%rax # pull context->Rsp
3275 mov 4(%r11),%r10d # HandlerData[1]
3276 lea (%rsi,%r10),%r10 # epilogue label
3277 cmp %r10,%rbx # context->Rip>=epilogue label
3278 jae .Lcommon_seh_tail
3280 lea 0(%rax),%rsi # %xmm save area
3281 lea 512($context),%rdi # &context.Xmm6
3282 mov \$8,%ecx # 4*sizeof(%xmm0)/sizeof(%rax)
3283 .long 0xa548f3fc # cld; rep movsq
3284 lea 0x58(%rax),%rax # adjust stack pointer
3286 jmp .Lcommon_seh_tail
3287 .size ecb_ccm64_se_handler,.-ecb_ccm64_se_handler
3289 .type ctr_xts_se_handler,\@abi-omnipotent
3290 .align 16
3291 ctr_xts_se_handler:
3292 push %rsi
3293 push %rdi
3294 push %rbx
3295 push %rbp
3296 push %r12
3297 push %r13
3298 push %r14
3299 push %r15
3300 pushfq
3301 sub \$64,%rsp
3303 mov 120($context),%rax # pull context->Rax
3304 mov 248($context),%rbx # pull context->Rip
3306 mov 8($disp),%rsi # disp->ImageBase
3307 mov 56($disp),%r11 # disp->HandlerData
3309 mov 0(%r11),%r10d # HandlerData[0]
3310 	lea	(%rsi,%r10),%r10		# prologue label
3311 cmp %r10,%rbx # context->Rip<prologue label
3312 jb .Lcommon_seh_tail
3314 mov 152($context),%rax # pull context->Rsp
3316 mov 4(%r11),%r10d # HandlerData[1]
3317 lea (%rsi,%r10),%r10 # epilogue label
3318 cmp %r10,%rbx # context->Rip>=epilogue label
3319 jae .Lcommon_seh_tail
3321 mov 160($context),%rax # pull context->Rbp
3322 lea -0xa0(%rax),%rsi # %xmm save area
3323 lea 512($context),%rdi # & context.Xmm6
3324 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
3325 .long 0xa548f3fc # cld; rep movsq
3327 jmp .Lcommon_rbp_tail
3328 .size ctr_xts_se_handler,.-ctr_xts_se_handler
3330 $code.=<<___;
3331 .type cbc_se_handler,\@abi-omnipotent
3332 .align 16
3333 cbc_se_handler:
3334 push %rsi
3335 push %rdi
3336 push %rbx
3337 push %rbp
3338 push %r12
3339 push %r13
3340 push %r14
3341 push %r15
3342 pushfq
3343 sub \$64,%rsp
3345 mov 152($context),%rax # pull context->Rsp
3346 mov 248($context),%rbx # pull context->Rip
3348 lea .Lcbc_decrypt(%rip),%r10
3349 cmp %r10,%rbx # context->Rip<"prologue" label
3350 jb .Lcommon_seh_tail
3352 lea .Lcbc_decrypt_body(%rip),%r10
3353 cmp %r10,%rbx # context->Rip<cbc_decrypt_body
3354 jb .Lrestore_cbc_rax
3356 lea .Lcbc_ret(%rip),%r10
3357 cmp %r10,%rbx # context->Rip>="epilogue" label
3358 jae .Lcommon_seh_tail
3360 lea 16(%rax),%rsi # %xmm save area
3361 lea 512($context),%rdi # &context.Xmm6
3362 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
3363 .long 0xa548f3fc # cld; rep movsq
3365 .Lcommon_rbp_tail:
3366 mov 160($context),%rax # pull context->Rbp
3367 mov (%rax),%rbp # restore saved %rbp
3368 lea 8(%rax),%rax # adjust stack pointer
3369 mov %rbp,160($context) # restore context->Rbp
3370 jmp .Lcommon_seh_tail
3372 .Lrestore_cbc_rax:
3373 mov 120($context),%rax
3375 .Lcommon_seh_tail:
3376 mov 8(%rax),%rdi
3377 mov 16(%rax),%rsi
3378 mov %rax,152($context) # restore context->Rsp
3379 mov %rsi,168($context) # restore context->Rsi
3380 mov %rdi,176($context) # restore context->Rdi
3382 mov 40($disp),%rdi # disp->ContextRecord
3383 mov $context,%rsi # context
3384 mov \$154,%ecx # sizeof(CONTEXT)
3385 .long 0xa548f3fc # cld; rep movsq
3387 mov $disp,%rsi
3388 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3389 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3390 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3391 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3392 mov 40(%rsi),%r10 # disp->ContextRecord
3393 lea 56(%rsi),%r11 # &disp->HandlerData
3394 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3395 mov %r10,32(%rsp) # arg5
3396 mov %r11,40(%rsp) # arg6
3397 mov %r12,48(%rsp) # arg7
3398 mov %rcx,56(%rsp) # arg8, (NULL)
3399 call *__imp_RtlVirtualUnwind(%rip)
3401 mov \$1,%eax # ExceptionContinueSearch
3402 add \$64,%rsp
3403 popfq
3404 pop %r15
3405 pop %r14
3406 pop %r13
3407 pop %r12
3408 pop %rbp
3409 pop %rbx
3410 pop %rdi
3411 pop %rsi
3413 .size cbc_se_handler,.-cbc_se_handler
3415 .section .pdata
3416 .align 4
3418 $code.=<<___ if ($PREFIX eq "aesni");
3419 .rva .LSEH_begin_aesni_ecb_encrypt
3420 .rva .LSEH_end_aesni_ecb_encrypt
3421 .rva .LSEH_info_ecb
3423 .rva .LSEH_begin_aesni_ccm64_encrypt_blocks
3424 .rva .LSEH_end_aesni_ccm64_encrypt_blocks
3425 .rva .LSEH_info_ccm64_enc
3427 .rva .LSEH_begin_aesni_ccm64_decrypt_blocks
3428 .rva .LSEH_end_aesni_ccm64_decrypt_blocks
3429 .rva .LSEH_info_ccm64_dec
3431 .rva .LSEH_begin_aesni_ctr32_encrypt_blocks
3432 .rva .LSEH_end_aesni_ctr32_encrypt_blocks
3433 .rva .LSEH_info_ctr32
3435 .rva .LSEH_begin_aesni_xts_encrypt
3436 .rva .LSEH_end_aesni_xts_encrypt
3437 .rva .LSEH_info_xts_enc
3439 .rva .LSEH_begin_aesni_xts_decrypt
3440 .rva .LSEH_end_aesni_xts_decrypt
3441 .rva .LSEH_info_xts_dec
# Win64 .pdata entries shared by both builds (cbc_encrypt and the
# key-schedule routines), plus the opening of the .xdata section.
$code.=<<___;
	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
	.rva	.LSEH_info_cbc

	.rva	${PREFIX}_set_decrypt_key
	.rva	.LSEH_end_set_decrypt_key
	.rva	.LSEH_info_key

	.rva	${PREFIX}_set_encrypt_key
	.rva	.LSEH_end_set_encrypt_key
	.rva	.LSEH_info_key
.section	.xdata
.align	8
___
# Win64 .xdata: UNWIND_INFO records for the aesni_-prefixed routines.
# ".byte 9,0,0,0" selects a language-specific handler (flag 9) whose
# address and HandlerData[] (body/epilogue bounds) follow.
$code.=<<___ if ($PREFIX eq "aesni");
.LSEH_info_ecb:
	.byte	9,0,0,0
	.rva	ecb_ccm64_se_handler
	.rva	.Lecb_enc_body,.Lecb_enc_ret		# HandlerData[]
.LSEH_info_ccm64_enc:
	.byte	9,0,0,0
	.rva	ecb_ccm64_se_handler
	.rva	.Lccm64_enc_body,.Lccm64_enc_ret	# HandlerData[]
.LSEH_info_ccm64_dec:
	.byte	9,0,0,0
	.rva	ecb_ccm64_se_handler
	.rva	.Lccm64_dec_body,.Lccm64_dec_ret	# HandlerData[]
.LSEH_info_ctr32:
	.byte	9,0,0,0
	.rva	ctr_xts_se_handler
	.rva	.Lctr32_body,.Lctr32_epilogue		# HandlerData[]
.LSEH_info_xts_enc:
	.byte	9,0,0,0
	.rva	ctr_xts_se_handler
	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
.LSEH_info_xts_dec:
	.byte	9,0,0,0
	.rva	ctr_xts_se_handler
	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
___
# Win64 .xdata records shared by both builds: cbc uses its dedicated
# language-specific handler; the key-schedule routines use a plain
# prologue descriptor (version 1, 4-byte prologue, one unwind code
# describing "sub rsp,8").
$code.=<<___;
.LSEH_info_cbc:
	.byte	9,0,0,0
	.rva	cbc_se_handler
.LSEH_info_key:
	.byte	0x01,0x04,0x01,0x00
	.byte	0x04,0x02,0x00,0x00	# sub rsp,8
___
# Append a REX prefix byte to the caller's opcode list when either
# xmm register number is >= 8 and therefore needs a register-extension
# bit.  Called as rex(\@opcode, $dst, $src); the array reference is
# aliased to @opcode via the glob assignment.  Pushes nothing when no
# extension bit is required (the plain 0x40 REX would be redundant).
sub rex {
  local *opcode=shift;
  my ($dst,$src)=@_;
  my $rex=0;

    $rex|=0x04			if($dst>=8);	# REX.R: ModR/M reg field
    $rex|=0x01			if($src>=8);	# REX.B: ModR/M r/m field
    push @opcode,$rex|0x40	if($rex);	# 0x40 is the REX base value
}
# Hand-assemble one AES-NI instruction line into a ".byte ..." directive
# so the output assembles even with tools that pre-date AES-NI.
# Recognizes three shapes:
#   aeskeygenassist $imm,%xmmN,%xmmM        (0F 3A DF /r ib)
#   aes{imc,enc,enclast,dec,declast} reg,reg (0F 38 DB..DF /r)
#   aes{enc,enclast,dec,declast} OFF(%rsp),%xmmN (memory operand, SIB)
# Returns the ".byte" string, undef for an unknown aes* mnemonic, or
# the input line unchanged if it does not match any shape.
sub aesni {
  my $line=shift;
  my @opcode=(0x66);			# mandatory 66 prefix

    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
	rex(\@opcode,$4,$3);
	push @opcode,0x0f,0x3a,0xdf;
	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
	my $c=$2;
	push @opcode,$c=~/^0/?oct($c):$c;	# oct() handles 0x.. and 0.. forms
	return ".byte\t".join(',',@opcode);
    }
    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
	my %opcodelet = (
		"aesimc" => 0xdb,
		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
		"aesdec" => 0xde,	"aesdeclast" => 0xdf
	);
	return undef if (!defined($opcodelet{$1}));
	rex(\@opcode,$3,$2);
	push @opcode,0x0f,0x38,$opcodelet{$1};
	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
	return ".byte\t".join(',',@opcode);
    }
    elsif ($line=~/(aes[a-z]+)\s+([0x1-9a-fA-F]*)\(%rsp\),\s*%xmm([0-9]+)/) {
	my %opcodelet = (
		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
		"aesdec" => 0xde,	"aesdeclast" => 0xdf
	);
	return undef if (!defined($opcodelet{$1}));
	my $off = $2;
	push @opcode,0x44 if ($3>=8);		# REX.R for xmm8-xmm15
	push @opcode,0x0f,0x38,$opcodelet{$1};
	push @opcode,0x44|(($3&7)<<3),0x24;	# ModR/M + SIB for (%rsp)
	push @opcode,($off=~/^0/?oct($off):$off)&0xff;	# disp8
	return ".byte\t".join(',',@opcode);
    }
    return $line;
}
# Hand-assemble "movbe %eax,OFF(%rsp)" (0F 38 F1 /r, ModR/M 0x44 with
# SIB 0x24 for the %rsp base) for assemblers that lack the mnemonic.
# The single argument is the 8-bit displacement, appended verbatim.
sub movbe {
	".byte 0x0f,0x38,0xf1,0x44,0x24,".shift;
}
# Post-process the accumulated assembly before emitting it.  The
# substitutions are order-dependent and use /e to call the helper
# subs above on each matched instruction.

# Expand `...` constructs by evaluating their Perl content.
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
# Replace AES-NI mnemonics with ".byte" encodings via aesni() so the
# output assembles on tools that pre-date the instructions.
$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
#$code =~ s/\bmovbe\s+%eax/bswap %eax; mov %eax/gm; # debugging artefact
# Likewise hand-encode "movbe %eax,OFF(%rsp)" via movbe().
$code =~ s/\bmovbe\s+%eax,\s*([0-9]+)\(%rsp\)/movbe($1)/gem;

print $code;

close STDOUT;