# OpenSSL: update to 1.0.2a
# [tomato.git] / release / src / router / openssl / crypto / sha / asm / sha1-mb-x86_64.pl
# blob a8ee075eaaa0a037a599aca20362172d8fdbd644
1 #!/usr/bin/env perl
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # Multi-buffer SHA1 procedure processes n buffers in parallel by
11 # placing buffer data to designated lane of SIMD register. n is
12 # naturally limited to 4 on pre-AVX2 processors and to 8 on
13 # AVX2-capable processors such as Haswell.
15 # this +aesni(i) sha1 aesni-sha1 gain(iv)
16 # -------------------------------------------------------------------
17 # Westmere(ii) 10.7/n +1.28=3.96(n=4) 5.30 6.66 +68%
18 # Atom(ii) 18.1/n +3.93=8.46(n=4) 9.37 12.8 +51%
19 # Sandy Bridge (8.16 +5.15=13.3)/n 4.99 5.98 +80%
20 # Ivy Bridge (8.08 +5.14=13.2)/n 4.60 5.54 +68%
21 # Haswell(iii) (8.96 +5.00=14.0)/n 3.57 4.55 +160%
22 # Bulldozer (9.76 +5.76=15.5)/n 5.95 6.37 +64%
24 # (i) multi-block CBC encrypt with 128-bit key;
25 # (ii) (HASH+AES)/n does not apply to Westmere for n>3 and Atom,
26 # because of lower AES-NI instruction throughput;
27 # (iii) "this" is for n=8, when we gather twice as much data, result
28 # for n=4 is 8.00+4.44=12.4;
29 # (iv) presented improvement coefficients are asymptotic limits and
30 # in real-life application are somewhat lower, e.g. for 2KB
31 # fragments they range from 30% to 100% (on Haswell);
33 $flavour = shift;
34 $output = shift;
35 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
37 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
39 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
40 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
41 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
42 die "can't locate x86_64-xlate.pl";
44 $avx=0;
46 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
47 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
48 $avx = ($1>=2.19) + ($1>=2.22);
51 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
52 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
53 $avx = ($1>=2.09) + ($1>=2.10);
56 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
57 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
58 $avx = ($1>=10) + ($1>=11);
61 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
62 $avx = ($2>=3.0) + ($2>3.0);
65 open OUT,"| \"$^X\" $xlate $flavour $output";
66 *STDOUT=*OUT;
68 # void sha1_multi_block (
69 # struct { unsigned int A[8];
70 # unsigned int B[8];
71 # unsigned int C[8];
72 # unsigned int D[8];
73 # unsigned int E[8]; } *ctx,
74 # struct { void *ptr; int blocks; } inp[8],
75 # int num); /* 1 or 2 */
77 $ctx="%rdi"; # 1st arg
78 $inp="%rsi"; # 2nd arg
79 $num="%edx";
80 @ptr=map("%r$_",(8..11));
81 $Tbl="%rbp";
83 @V=($A,$B,$C,$D,$E)=map("%xmm$_",(0..4));
84 ($t0,$t1,$t2,$t3,$tx)=map("%xmm$_",(5..9));
85 @Xi=map("%xmm$_",(10..14));
86 $K="%xmm15";
88 if (1) {
89 # Atom-specific optimization aiming to eliminate pshufb with high
90 # registers [and thus get rid of 48 cycles accumulated penalty]
91 @Xi=map("%xmm$_",(0..4));
92 ($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9));
93 @V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14));
96 $REG_SZ=16;
98 sub Xi_off {
99 my $off = shift;
101 $off %= 16; $off *= $REG_SZ;
102 $off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
105 sub BODY_00_19 {
106 my ($i,$a,$b,$c,$d,$e)=@_;
107 my $j=$i+1;
108 my $k=$i+2;
110 # Loads are performed 2+3/4 iterations in advance. 3/4 means that out
111 # of 4 words you would expect to be loaded per given iteration one is
112 # spilled to next iteration. In other words indices in four input
113 # streams are distributed as following:
115 # $i==0: 0,0,0,0,1,1,1,1,2,2,2,
116 # $i==1: 2,3,3,3,
117 # $i==2: 3,4,4,4,
118 # ...
119 # $i==13: 14,15,15,15,
120 # $i==14: 15
122 # Then at $i==15 Xupdate is applied one iteration in advance...
123 $code.=<<___ if ($i==0);
124 movd (@ptr[0]),@Xi[0]
125 lea `16*4`(@ptr[0]),@ptr[0]
126 movd (@ptr[1]),@Xi[2] # borrow @Xi[2]
127 lea `16*4`(@ptr[1]),@ptr[1]
128 movd (@ptr[2]),@Xi[3] # borrow @Xi[3]
129 lea `16*4`(@ptr[2]),@ptr[2]
130 movd (@ptr[3]),@Xi[4] # borrow @Xi[4]
131 lea `16*4`(@ptr[3]),@ptr[3]
132 punpckldq @Xi[3],@Xi[0]
133 movd `4*$j-16*4`(@ptr[0]),@Xi[1]
134 punpckldq @Xi[4],@Xi[2]
135 movd `4*$j-16*4`(@ptr[1]),$t3
136 punpckldq @Xi[2],@Xi[0]
137 movd `4*$j-16*4`(@ptr[2]),$t2
138 pshufb $tx,@Xi[0]
140 $code.=<<___ if ($i<14); # just load input
141 movd `4*$j-16*4`(@ptr[3]),$t1
142 punpckldq $t2,@Xi[1]
143 movdqa $a,$t2
144 paddd $K,$e # e+=K_00_19
145 punpckldq $t1,$t3
146 movdqa $b,$t1
147 movdqa $b,$t0
148 pslld \$5,$t2
149 pandn $d,$t1
150 pand $c,$t0
151 punpckldq $t3,@Xi[1]
152 movdqa $a,$t3
154 movdqa @Xi[0],`&Xi_off($i)`
155 paddd @Xi[0],$e # e+=X[i]
156 movd `4*$k-16*4`(@ptr[0]),@Xi[2]
157 psrld \$27,$t3
158 pxor $t1,$t0 # Ch(b,c,d)
159 movdqa $b,$t1
161 por $t3,$t2 # rol(a,5)
162 movd `4*$k-16*4`(@ptr[1]),$t3
163 pslld \$30,$t1
164 paddd $t0,$e # e+=Ch(b,c,d)
166 psrld \$2,$b
167 paddd $t2,$e # e+=rol(a,5)
168 pshufb $tx,@Xi[1]
169 movd `4*$k-16*4`(@ptr[2]),$t2
170 por $t1,$b # b=rol(b,30)
172 $code.=<<___ if ($i==14); # just load input
173 movd `4*$j-16*4`(@ptr[3]),$t1
174 punpckldq $t2,@Xi[1]
175 movdqa $a,$t2
176 paddd $K,$e # e+=K_00_19
177 punpckldq $t1,$t3
178 movdqa $b,$t1
179 movdqa $b,$t0
180 pslld \$5,$t2
181 prefetcht0 63(@ptr[0])
182 pandn $d,$t1
183 pand $c,$t0
184 punpckldq $t3,@Xi[1]
185 movdqa $a,$t3
187 movdqa @Xi[0],`&Xi_off($i)`
188 paddd @Xi[0],$e # e+=X[i]
189 psrld \$27,$t3
190 pxor $t1,$t0 # Ch(b,c,d)
191 movdqa $b,$t1
192 prefetcht0 63(@ptr[1])
194 por $t3,$t2 # rol(a,5)
195 pslld \$30,$t1
196 paddd $t0,$e # e+=Ch(b,c,d)
197 prefetcht0 63(@ptr[2])
199 psrld \$2,$b
200 paddd $t2,$e # e+=rol(a,5)
201 pshufb $tx,@Xi[1]
202 prefetcht0 63(@ptr[3])
203 por $t1,$b # b=rol(b,30)
205 $code.=<<___ if ($i>=13 && $i<15);
206 movdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
208 $code.=<<___ if ($i>=15); # apply Xupdate
209 pxor @Xi[-2],@Xi[1] # "X[13]"
210 movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
212 movdqa $a,$t2
213 pxor `&Xi_off($j+8)`,@Xi[1]
214 paddd $K,$e # e+=K_00_19
215 movdqa $b,$t1
216 pslld \$5,$t2
217 pxor @Xi[3],@Xi[1]
218 movdqa $b,$t0
219 pandn $d,$t1
220 movdqa @Xi[1],$tx
221 pand $c,$t0
222 movdqa $a,$t3
223 psrld \$31,$tx
224 paddd @Xi[1],@Xi[1]
226 movdqa @Xi[0],`&Xi_off($i)`
227 paddd @Xi[0],$e # e+=X[i]
228 psrld \$27,$t3
229 pxor $t1,$t0 # Ch(b,c,d)
231 movdqa $b,$t1
232 por $t3,$t2 # rol(a,5)
233 pslld \$30,$t1
234 paddd $t0,$e # e+=Ch(b,c,d)
236 psrld \$2,$b
237 paddd $t2,$e # e+=rol(a,5)
238 por $tx,@Xi[1] # rol \$1,@Xi[1]
239 por $t1,$b # b=rol(b,30)
241 push(@Xi,shift(@Xi));
244 sub BODY_20_39 {
245 my ($i,$a,$b,$c,$d,$e)=@_;
246 my $j=$i+1;
248 $code.=<<___ if ($i<79);
249 pxor @Xi[-2],@Xi[1] # "X[13]"
250 movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
252 movdqa $a,$t2
253 movdqa $d,$t0
254 pxor `&Xi_off($j+8)`,@Xi[1]
255 paddd $K,$e # e+=K_20_39
256 pslld \$5,$t2
257 pxor $b,$t0
259 movdqa $a,$t3
261 $code.=<<___ if ($i<72);
262 movdqa @Xi[0],`&Xi_off($i)`
264 $code.=<<___ if ($i<79);
265 paddd @Xi[0],$e # e+=X[i]
266 pxor @Xi[3],@Xi[1]
267 psrld \$27,$t3
268 pxor $c,$t0 # Parity(b,c,d)
269 movdqa $b,$t1
271 pslld \$30,$t1
272 movdqa @Xi[1],$tx
273 por $t3,$t2 # rol(a,5)
274 psrld \$31,$tx
275 paddd $t0,$e # e+=Parity(b,c,d)
276 paddd @Xi[1],@Xi[1]
278 psrld \$2,$b
279 paddd $t2,$e # e+=rol(a,5)
280 por $tx,@Xi[1] # rol(@Xi[1],1)
281 por $t1,$b # b=rol(b,30)
283 $code.=<<___ if ($i==79);
284 movdqa $a,$t2
285 paddd $K,$e # e+=K_20_39
286 movdqa $d,$t0
287 pslld \$5,$t2
288 pxor $b,$t0
290 movdqa $a,$t3
291 paddd @Xi[0],$e # e+=X[i]
292 psrld \$27,$t3
293 movdqa $b,$t1
294 pxor $c,$t0 # Parity(b,c,d)
296 pslld \$30,$t1
297 por $t3,$t2 # rol(a,5)
298 paddd $t0,$e # e+=Parity(b,c,d)
300 psrld \$2,$b
301 paddd $t2,$e # e+=rol(a,5)
302 por $t1,$b # b=rol(b,30)
304 push(@Xi,shift(@Xi));
307 sub BODY_40_59 {
308 my ($i,$a,$b,$c,$d,$e)=@_;
309 my $j=$i+1;
311 $code.=<<___;
312 pxor @Xi[-2],@Xi[1] # "X[13]"
313 movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
315 movdqa $a,$t2
316 movdqa $d,$t1
317 pxor `&Xi_off($j+8)`,@Xi[1]
318 pxor @Xi[3],@Xi[1]
319 paddd $K,$e # e+=K_40_59
320 pslld \$5,$t2
321 movdqa $a,$t3
322 pand $c,$t1
324 movdqa $d,$t0
325 movdqa @Xi[1],$tx
326 psrld \$27,$t3
327 paddd $t1,$e
328 pxor $c,$t0
330 movdqa @Xi[0],`&Xi_off($i)`
331 paddd @Xi[0],$e # e+=X[i]
332 por $t3,$t2 # rol(a,5)
333 psrld \$31,$tx
334 pand $b,$t0
335 movdqa $b,$t1
337 pslld \$30,$t1
338 paddd @Xi[1],@Xi[1]
339 paddd $t0,$e # e+=Maj(b,d,c)
341 psrld \$2,$b
342 paddd $t2,$e # e+=rol(a,5)
343 por $tx,@Xi[1] # rol(@X[1],1)
344 por $t1,$b # b=rol(b,30)
346 push(@Xi,shift(@Xi));
349 $code.=<<___;
350 .text
352 .extern OPENSSL_ia32cap_P
354 .globl sha1_multi_block
355 .type sha1_multi_block,\@function,3
356 .align 32
357 sha1_multi_block:
358 mov OPENSSL_ia32cap_P+4(%rip),%rcx
359 bt \$61,%rcx # check SHA bit
360 jc _shaext_shortcut
362 $code.=<<___ if ($avx);
363 test \$`1<<28`,%ecx
364 jnz _avx_shortcut
366 $code.=<<___;
367 mov %rsp,%rax
368 push %rbx
369 push %rbp
371 $code.=<<___ if ($win64);
372 lea -0xa8(%rsp),%rsp
373 movaps %xmm6,(%rsp)
374 movaps %xmm7,0x10(%rsp)
375 movaps %xmm8,0x20(%rsp)
376 movaps %xmm9,0x30(%rsp)
377 movaps %xmm10,-0x78(%rax)
378 movaps %xmm11,-0x68(%rax)
379 movaps %xmm12,-0x58(%rax)
380 movaps %xmm13,-0x48(%rax)
381 movaps %xmm14,-0x38(%rax)
382 movaps %xmm15,-0x28(%rax)
384 $code.=<<___;
385 sub \$`$REG_SZ*18`,%rsp
386 and \$-256,%rsp
387 mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
388 .Lbody:
389 lea K_XX_XX(%rip),$Tbl
390 lea `$REG_SZ*16`(%rsp),%rbx
392 .Loop_grande:
393 mov $num,`$REG_SZ*17+8`(%rsp) # original $num
394 xor $num,$num
396 for($i=0;$i<4;$i++) {
397 $code.=<<___;
398 mov `16*$i+0`($inp),@ptr[$i] # input pointer
399 mov `16*$i+8`($inp),%ecx # number of blocks
400 cmp $num,%ecx
401 cmovg %ecx,$num # find maximum
402 test %ecx,%ecx
403 mov %ecx,`4*$i`(%rbx) # initialize counters
404 cmovle $Tbl,@ptr[$i] # cancel input
407 $code.=<<___;
408 test $num,$num
409 jz .Ldone
411 movdqu 0x00($ctx),$A # load context
412 lea 128(%rsp),%rax
413 movdqu 0x20($ctx),$B
414 movdqu 0x40($ctx),$C
415 movdqu 0x60($ctx),$D
416 movdqu 0x80($ctx),$E
417 movdqa 0x60($Tbl),$tx # pbswap_mask
418 movdqa -0x20($Tbl),$K # K_00_19
419 jmp .Loop
421 .align 32
422 .Loop:
424 for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
425 $code.=" movdqa 0x00($Tbl),$K\n"; # K_20_39
426 for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
427 $code.=" movdqa 0x20($Tbl),$K\n"; # K_40_59
428 for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
429 $code.=" movdqa 0x40($Tbl),$K\n"; # K_60_79
430 for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
431 $code.=<<___;
432 movdqa (%rbx),@Xi[0] # pull counters
433 mov \$1,%ecx
434 cmp 4*0(%rbx),%ecx # examinte counters
435 pxor $t2,$t2
436 cmovge $Tbl,@ptr[0] # cancel input
437 cmp 4*1(%rbx),%ecx
438 movdqa @Xi[0],@Xi[1]
439 cmovge $Tbl,@ptr[1]
440 cmp 4*2(%rbx),%ecx
441 pcmpgtd $t2,@Xi[1] # mask value
442 cmovge $Tbl,@ptr[2]
443 cmp 4*3(%rbx),%ecx
444 paddd @Xi[1],@Xi[0] # counters--
445 cmovge $Tbl,@ptr[3]
447 movdqu 0x00($ctx),$t0
448 pand @Xi[1],$A
449 movdqu 0x20($ctx),$t1
450 pand @Xi[1],$B
451 paddd $t0,$A
452 movdqu 0x40($ctx),$t2
453 pand @Xi[1],$C
454 paddd $t1,$B
455 movdqu 0x60($ctx),$t3
456 pand @Xi[1],$D
457 paddd $t2,$C
458 movdqu 0x80($ctx),$tx
459 pand @Xi[1],$E
460 movdqu $A,0x00($ctx)
461 paddd $t3,$D
462 movdqu $B,0x20($ctx)
463 paddd $tx,$E
464 movdqu $C,0x40($ctx)
465 movdqu $D,0x60($ctx)
466 movdqu $E,0x80($ctx)
468 movdqa @Xi[0],(%rbx) # save counters
469 movdqa 0x60($Tbl),$tx # pbswap_mask
470 movdqa -0x20($Tbl),$K # K_00_19
471 dec $num
472 jnz .Loop
474 mov `$REG_SZ*17+8`(%rsp),$num
475 lea $REG_SZ($ctx),$ctx
476 lea `16*$REG_SZ/4`($inp),$inp
477 dec $num
478 jnz .Loop_grande
480 .Ldone:
481 mov `$REG_SZ*17`(%rsp),%rax # orignal %rsp
483 $code.=<<___ if ($win64);
484 movaps -0xb8(%rax),%xmm6
485 movaps -0xa8(%rax),%xmm7
486 movaps -0x98(%rax),%xmm8
487 movaps -0x88(%rax),%xmm9
488 movaps -0x78(%rax),%xmm10
489 movaps -0x68(%rax),%xmm11
490 movaps -0x58(%rax),%xmm12
491 movaps -0x48(%rax),%xmm13
492 movaps -0x38(%rax),%xmm14
493 movaps -0x28(%rax),%xmm15
495 $code.=<<___;
496 mov -16(%rax),%rbp
497 mov -8(%rax),%rbx
498 lea (%rax),%rsp
499 .Lepilogue:
501 .size sha1_multi_block,.-sha1_multi_block
504 my ($ABCD0,$E0,$E0_,$BSWAP,$ABCD1,$E1,$E1_)=map("%xmm$_",(0..3,8..10));
505 my @MSG0=map("%xmm$_",(4..7));
506 my @MSG1=map("%xmm$_",(11..14));
508 $code.=<<___;
509 .type sha1_multi_block_shaext,\@function,3
510 .align 32
511 sha1_multi_block_shaext:
512 _shaext_shortcut:
513 mov %rsp,%rax
514 push %rbx
515 push %rbp
517 $code.=<<___ if ($win64);
518 lea -0xa8(%rsp),%rsp
519 movaps %xmm6,(%rsp)
520 movaps %xmm7,0x10(%rsp)
521 movaps %xmm8,0x20(%rsp)
522 movaps %xmm9,0x30(%rsp)
523 movaps %xmm10,-0x78(%rax)
524 movaps %xmm11,-0x68(%rax)
525 movaps %xmm12,-0x58(%rax)
526 movaps %xmm13,-0x48(%rax)
527 movaps %xmm14,-0x38(%rax)
528 movaps %xmm15,-0x28(%rax)
530 $code.=<<___;
531 sub \$`$REG_SZ*18`,%rsp
532 shl \$1,$num # we process pair at a time
533 and \$-256,%rsp
534 lea 0x40($ctx),$ctx # size optimization
535 mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
536 .Lbody_shaext:
537 lea `$REG_SZ*16`(%rsp),%rbx
538 movdqa K_XX_XX+0x80(%rip),$BSWAP # byte-n-word swap
540 .Loop_grande_shaext:
541 mov $num,`$REG_SZ*17+8`(%rsp) # orignal $num
542 xor $num,$num
544 for($i=0;$i<2;$i++) {
545 $code.=<<___;
546 mov `16*$i+0`($inp),@ptr[$i] # input pointer
547 mov `16*$i+8`($inp),%ecx # number of blocks
548 cmp $num,%ecx
549 cmovg %ecx,$num # find maximum
550 test %ecx,%ecx
551 mov %ecx,`4*$i`(%rbx) # initialize counters
552 cmovle %rsp,@ptr[$i] # cancel input
555 $code.=<<___;
556 test $num,$num
557 jz .Ldone_shaext
559 movq 0x00-0x40($ctx),$ABCD0 # a1.a0
560 movq 0x20-0x40($ctx),@MSG0[0]# b1.b0
561 movq 0x40-0x40($ctx),@MSG0[1]# c1.c0
562 movq 0x60-0x40($ctx),@MSG0[2]# d1.d0
563 movq 0x80-0x40($ctx),@MSG0[3]# e1.e0
565 punpckldq @MSG0[0],$ABCD0 # b1.a1.b0.a0
566 punpckldq @MSG0[2],@MSG0[1] # d1.c1.d0.c0
568 movdqa $ABCD0,$ABCD1
569 punpcklqdq @MSG0[1],$ABCD0 # d0.c0.b0.a0
570 punpckhqdq @MSG0[1],$ABCD1 # d1.c1.b1.a1
572 pshufd \$0b00111111,@MSG0[3],$E0
573 pshufd \$0b01111111,@MSG0[3],$E1
574 pshufd \$0b00011011,$ABCD0,$ABCD0
575 pshufd \$0b00011011,$ABCD1,$ABCD1
576 jmp .Loop_shaext
578 .align 32
579 .Loop_shaext:
580 movdqu 0x00(@ptr[0]),@MSG0[0]
581 movdqu 0x00(@ptr[1]),@MSG1[0]
582 movdqu 0x10(@ptr[0]),@MSG0[1]
583 movdqu 0x10(@ptr[1]),@MSG1[1]
584 movdqu 0x20(@ptr[0]),@MSG0[2]
585 pshufb $BSWAP,@MSG0[0]
586 movdqu 0x20(@ptr[1]),@MSG1[2]
587 pshufb $BSWAP,@MSG1[0]
588 movdqu 0x30(@ptr[0]),@MSG0[3]
589 lea 0x40(@ptr[0]),@ptr[0]
590 pshufb $BSWAP,@MSG0[1]
591 movdqu 0x30(@ptr[1]),@MSG1[3]
592 lea 0x40(@ptr[1]),@ptr[1]
593 pshufb $BSWAP,@MSG1[1]
595 movdqa $E0,0x50(%rsp) # offload
596 paddd @MSG0[0],$E0
597 movdqa $E1,0x70(%rsp)
598 paddd @MSG1[0],$E1
599 movdqa $ABCD0,0x40(%rsp) # offload
600 movdqa $ABCD0,$E0_
601 movdqa $ABCD1,0x60(%rsp)
602 movdqa $ABCD1,$E1_
603 sha1rnds4 \$0,$E0,$ABCD0 # 0-3
604 sha1nexte @MSG0[1],$E0_
605 sha1rnds4 \$0,$E1,$ABCD1 # 0-3
606 sha1nexte @MSG1[1],$E1_
607 pshufb $BSWAP,@MSG0[2]
608 prefetcht0 127(@ptr[0])
609 sha1msg1 @MSG0[1],@MSG0[0]
610 pshufb $BSWAP,@MSG1[2]
611 prefetcht0 127(@ptr[1])
612 sha1msg1 @MSG1[1],@MSG1[0]
614 pshufb $BSWAP,@MSG0[3]
615 movdqa $ABCD0,$E0
616 pshufb $BSWAP,@MSG1[3]
617 movdqa $ABCD1,$E1
618 sha1rnds4 \$0,$E0_,$ABCD0 # 4-7
619 sha1nexte @MSG0[2],$E0
620 sha1rnds4 \$0,$E1_,$ABCD1 # 4-7
621 sha1nexte @MSG1[2],$E1
622 pxor @MSG0[2],@MSG0[0]
623 sha1msg1 @MSG0[2],@MSG0[1]
624 pxor @MSG1[2],@MSG1[0]
625 sha1msg1 @MSG1[2],@MSG1[1]
627 for($i=2;$i<20-4;$i++) {
628 $code.=<<___;
629 movdqa $ABCD0,$E0_
630 movdqa $ABCD1,$E1_
631 sha1rnds4 \$`int($i/5)`,$E0,$ABCD0 # 8-11
632 sha1nexte @MSG0[3],$E0_
633 sha1rnds4 \$`int($i/5)`,$E1,$ABCD1 # 8-11
634 sha1nexte @MSG1[3],$E1_
635 sha1msg2 @MSG0[3],@MSG0[0]
636 sha1msg2 @MSG1[3],@MSG1[0]
637 pxor @MSG0[3],@MSG0[1]
638 sha1msg1 @MSG0[3],@MSG0[2]
639 pxor @MSG1[3],@MSG1[1]
640 sha1msg1 @MSG1[3],@MSG1[2]
642 ($E0,$E0_)=($E0_,$E0); ($E1,$E1_)=($E1_,$E1);
643 push(@MSG0,shift(@MSG0)); push(@MSG1,shift(@MSG1));
645 $code.=<<___;
646 movdqa $ABCD0,$E0_
647 movdqa $ABCD1,$E1_
648 sha1rnds4 \$3,$E0,$ABCD0 # 64-67
649 sha1nexte @MSG0[3],$E0_
650 sha1rnds4 \$3,$E1,$ABCD1 # 64-67
651 sha1nexte @MSG1[3],$E1_
652 sha1msg2 @MSG0[3],@MSG0[0]
653 sha1msg2 @MSG1[3],@MSG1[0]
654 pxor @MSG0[3],@MSG0[1]
655 pxor @MSG1[3],@MSG1[1]
657 mov \$1,%ecx
658 pxor @MSG0[2],@MSG0[2] # zero
659 cmp 4*0(%rbx),%ecx # examine counters
660 cmovge %rsp,@ptr[0] # cancel input
662 movdqa $ABCD0,$E0
663 movdqa $ABCD1,$E1
664 sha1rnds4 \$3,$E0_,$ABCD0 # 68-71
665 sha1nexte @MSG0[0],$E0
666 sha1rnds4 \$3,$E1_,$ABCD1 # 68-71
667 sha1nexte @MSG1[0],$E1
668 sha1msg2 @MSG0[0],@MSG0[1]
669 sha1msg2 @MSG1[0],@MSG1[1]
671 cmp 4*1(%rbx),%ecx
672 cmovge %rsp,@ptr[1]
673 movq (%rbx),@MSG0[0] # pull counters
675 movdqa $ABCD0,$E0_
676 movdqa $ABCD1,$E1_
677 sha1rnds4 \$3,$E0,$ABCD0 # 72-75
678 sha1nexte @MSG0[1],$E0_
679 sha1rnds4 \$3,$E1,$ABCD1 # 72-75
680 sha1nexte @MSG1[1],$E1_
682 pshufd \$0x00,@MSG0[0],@MSG1[2]
683 pshufd \$0x55,@MSG0[0],@MSG1[3]
684 movdqa @MSG0[0],@MSG0[1]
685 pcmpgtd @MSG0[2],@MSG1[2]
686 pcmpgtd @MSG0[2],@MSG1[3]
688 movdqa $ABCD0,$E0
689 movdqa $ABCD1,$E1
690 sha1rnds4 \$3,$E0_,$ABCD0 # 76-79
691 sha1nexte $MSG0[2],$E0
692 sha1rnds4 \$3,$E1_,$ABCD1 # 76-79
693 sha1nexte $MSG0[2],$E1
695 pcmpgtd @MSG0[2],@MSG0[1] # counter mask
696 pand @MSG1[2],$ABCD0
697 pand @MSG1[2],$E0
698 pand @MSG1[3],$ABCD1
699 pand @MSG1[3],$E1
700 paddd @MSG0[1],@MSG0[0] # counters--
702 paddd 0x40(%rsp),$ABCD0
703 paddd 0x50(%rsp),$E0
704 paddd 0x60(%rsp),$ABCD1
705 paddd 0x70(%rsp),$E1
707 movq @MSG0[0],(%rbx) # save counters
708 dec $num
709 jnz .Loop_shaext
711 mov `$REG_SZ*17+8`(%rsp),$num
713 pshufd \$0b00011011,$ABCD0,$ABCD0
714 pshufd \$0b00011011,$ABCD1,$ABCD1
716 movdqa $ABCD0,@MSG0[0]
717 punpckldq $ABCD1,$ABCD0 # b1.b0.a1.a0
718 punpckhdq $ABCD1,@MSG0[0] # d1.d0.c1.c0
719 punpckhdq $E1,$E0 # e1.e0.xx.xx
720 movq $ABCD0,0x00-0x40($ctx) # a1.a0
721 psrldq \$8,$ABCD0
722 movq @MSG0[0],0x40-0x40($ctx)# c1.c0
723 psrldq \$8,@MSG0[0]
724 movq $ABCD0,0x20-0x40($ctx) # b1.b0
725 psrldq \$8,$E0
726 movq @MSG0[0],0x60-0x40($ctx)# d1.d0
727 movq $E0,0x80-0x40($ctx) # e1.e0
729 lea `$REG_SZ/2`($ctx),$ctx
730 lea `16*2`($inp),$inp
731 dec $num
732 jnz .Loop_grande_shaext
734 .Ldone_shaext:
735 #mov `$REG_SZ*17`(%rsp),%rax # original %rsp
737 $code.=<<___ if ($win64);
738 movaps -0xb8(%rax),%xmm6
739 movaps -0xa8(%rax),%xmm7
740 movaps -0x98(%rax),%xmm8
741 movaps -0x88(%rax),%xmm9
742 movaps -0x78(%rax),%xmm10
743 movaps -0x68(%rax),%xmm11
744 movaps -0x58(%rax),%xmm12
745 movaps -0x48(%rax),%xmm13
746 movaps -0x38(%rax),%xmm14
747 movaps -0x28(%rax),%xmm15
749 $code.=<<___;
750 mov -16(%rax),%rbp
751 mov -8(%rax),%rbx
752 lea (%rax),%rsp
753 .Lepilogue_shaext:
755 .size sha1_multi_block_shaext,.-sha1_multi_block_shaext
759 if ($avx) {{{
760 sub BODY_00_19_avx {
761 my ($i,$a,$b,$c,$d,$e)=@_;
762 my $j=$i+1;
763 my $k=$i+2;
764 my $vpack = $REG_SZ==16 ? "vpunpckldq" : "vinserti128";
765 my $ptr_n = $REG_SZ==16 ? @ptr[1] : @ptr[4];
767 $code.=<<___ if ($i==0 && $REG_SZ==16);
768 vmovd (@ptr[0]),@Xi[0]
769 lea `16*4`(@ptr[0]),@ptr[0]
770 vmovd (@ptr[1]),@Xi[2] # borrow Xi[2]
771 lea `16*4`(@ptr[1]),@ptr[1]
772 vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
773 lea `16*4`(@ptr[2]),@ptr[2]
774 vpinsrd \$1,(@ptr[3]),@Xi[2],@Xi[2]
775 lea `16*4`(@ptr[3]),@ptr[3]
776 vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
777 vpunpckldq @Xi[2],@Xi[0],@Xi[0]
778 vmovd `4*$j-16*4`($ptr_n),$t3
779 vpshufb $tx,@Xi[0],@Xi[0]
781 $code.=<<___ if ($i<15 && $REG_SZ==16); # just load input
782 vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
783 vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t3,$t3
785 $code.=<<___ if ($i==0 && $REG_SZ==32);
786 vmovd (@ptr[0]),@Xi[0]
787 lea `16*4`(@ptr[0]),@ptr[0]
788 vmovd (@ptr[4]),@Xi[2] # borrow Xi[2]
789 lea `16*4`(@ptr[4]),@ptr[4]
790 vmovd (@ptr[1]),$t2
791 lea `16*4`(@ptr[1]),@ptr[1]
792 vmovd (@ptr[5]),$t1
793 lea `16*4`(@ptr[5]),@ptr[5]
794 vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
795 lea `16*4`(@ptr[2]),@ptr[2]
796 vpinsrd \$1,(@ptr[6]),@Xi[2],@Xi[2]
797 lea `16*4`(@ptr[6]),@ptr[6]
798 vpinsrd \$1,(@ptr[3]),$t2,$t2
799 lea `16*4`(@ptr[3]),@ptr[3]
800 vpunpckldq $t2,@Xi[0],@Xi[0]
801 vpinsrd \$1,(@ptr[7]),$t1,$t1
802 lea `16*4`(@ptr[7]),@ptr[7]
803 vpunpckldq $t1,@Xi[2],@Xi[2]
804 vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
805 vinserti128 @Xi[2],@Xi[0],@Xi[0]
806 vmovd `4*$j-16*4`($ptr_n),$t3
807 vpshufb $tx,@Xi[0],@Xi[0]
809 $code.=<<___ if ($i<15 && $REG_SZ==32); # just load input
810 vmovd `4*$j-16*4`(@ptr[1]),$t2
811 vmovd `4*$j-16*4`(@ptr[5]),$t1
812 vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
813 vpinsrd \$1,`4*$j-16*4`(@ptr[6]),$t3,$t3
814 vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t2,$t2
815 vpunpckldq $t2,@Xi[1],@Xi[1]
816 vpinsrd \$1,`4*$j-16*4`(@ptr[7]),$t1,$t1
817 vpunpckldq $t1,$t3,$t3
819 $code.=<<___ if ($i<14);
820 vpaddd $K,$e,$e # e+=K_00_19
821 vpslld \$5,$a,$t2
822 vpandn $d,$b,$t1
823 vpand $c,$b,$t0
825 vmovdqa @Xi[0],`&Xi_off($i)`
826 vpaddd @Xi[0],$e,$e # e+=X[i]
827 $vpack $t3,@Xi[1],@Xi[1]
828 vpsrld \$27,$a,$t3
829 vpxor $t1,$t0,$t0 # Ch(b,c,d)
830 vmovd `4*$k-16*4`(@ptr[0]),@Xi[2]
832 vpslld \$30,$b,$t1
833 vpor $t3,$t2,$t2 # rol(a,5)
834 vmovd `4*$k-16*4`($ptr_n),$t3
835 vpaddd $t0,$e,$e # e+=Ch(b,c,d)
837 vpsrld \$2,$b,$b
838 vpaddd $t2,$e,$e # e+=rol(a,5)
839 vpshufb $tx,@Xi[1],@Xi[1]
840 vpor $t1,$b,$b # b=rol(b,30)
842 $code.=<<___ if ($i==14);
843 vpaddd $K,$e,$e # e+=K_00_19
844 prefetcht0 63(@ptr[0])
845 vpslld \$5,$a,$t2
846 vpandn $d,$b,$t1
847 vpand $c,$b,$t0
849 vmovdqa @Xi[0],`&Xi_off($i)`
850 vpaddd @Xi[0],$e,$e # e+=X[i]
851 $vpack $t3,@Xi[1],@Xi[1]
852 vpsrld \$27,$a,$t3
853 prefetcht0 63(@ptr[1])
854 vpxor $t1,$t0,$t0 # Ch(b,c,d)
856 vpslld \$30,$b,$t1
857 vpor $t3,$t2,$t2 # rol(a,5)
858 prefetcht0 63(@ptr[2])
859 vpaddd $t0,$e,$e # e+=Ch(b,c,d)
861 vpsrld \$2,$b,$b
862 vpaddd $t2,$e,$e # e+=rol(a,5)
863 prefetcht0 63(@ptr[3])
864 vpshufb $tx,@Xi[1],@Xi[1]
865 vpor $t1,$b,$b # b=rol(b,30)
867 $code.=<<___ if ($i>=13 && $i<15);
868 vmovdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
870 $code.=<<___ if ($i>=15); # apply Xupdate
871 vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
872 vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
874 vpaddd $K,$e,$e # e+=K_00_19
875 vpslld \$5,$a,$t2
876 vpandn $d,$b,$t1
877 `"prefetcht0 63(@ptr[4])" if ($i==15 && $REG_SZ==32)`
878 vpand $c,$b,$t0
880 vmovdqa @Xi[0],`&Xi_off($i)`
881 vpaddd @Xi[0],$e,$e # e+=X[i]
882 vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
883 vpsrld \$27,$a,$t3
884 vpxor $t1,$t0,$t0 # Ch(b,c,d)
885 vpxor @Xi[3],@Xi[1],@Xi[1]
886 `"prefetcht0 63(@ptr[5])" if ($i==15 && $REG_SZ==32)`
888 vpslld \$30,$b,$t1
889 vpor $t3,$t2,$t2 # rol(a,5)
890 vpaddd $t0,$e,$e # e+=Ch(b,c,d)
891 `"prefetcht0 63(@ptr[6])" if ($i==15 && $REG_SZ==32)`
892 vpsrld \$31,@Xi[1],$tx
893 vpaddd @Xi[1],@Xi[1],@Xi[1]
895 vpsrld \$2,$b,$b
896 `"prefetcht0 63(@ptr[7])" if ($i==15 && $REG_SZ==32)`
897 vpaddd $t2,$e,$e # e+=rol(a,5)
898 vpor $tx,@Xi[1],@Xi[1] # rol \$1,@Xi[1]
899 vpor $t1,$b,$b # b=rol(b,30)
901 push(@Xi,shift(@Xi));
904 sub BODY_20_39_avx {
905 my ($i,$a,$b,$c,$d,$e)=@_;
906 my $j=$i+1;
908 $code.=<<___ if ($i<79);
909 vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
910 vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
912 vpslld \$5,$a,$t2
913 vpaddd $K,$e,$e # e+=K_20_39
914 vpxor $b,$d,$t0
916 $code.=<<___ if ($i<72);
917 vmovdqa @Xi[0],`&Xi_off($i)`
919 $code.=<<___ if ($i<79);
920 vpaddd @Xi[0],$e,$e # e+=X[i]
921 vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
922 vpsrld \$27,$a,$t3
923 vpxor $c,$t0,$t0 # Parity(b,c,d)
924 vpxor @Xi[3],@Xi[1],@Xi[1]
926 vpslld \$30,$b,$t1
927 vpor $t3,$t2,$t2 # rol(a,5)
928 vpaddd $t0,$e,$e # e+=Parity(b,c,d)
929 vpsrld \$31,@Xi[1],$tx
930 vpaddd @Xi[1],@Xi[1],@Xi[1]
932 vpsrld \$2,$b,$b
933 vpaddd $t2,$e,$e # e+=rol(a,5)
934 vpor $tx,@Xi[1],@Xi[1] # rol(@Xi[1],1)
935 vpor $t1,$b,$b # b=rol(b,30)
937 $code.=<<___ if ($i==79);
938 vpslld \$5,$a,$t2
939 vpaddd $K,$e,$e # e+=K_20_39
940 vpxor $b,$d,$t0
942 vpsrld \$27,$a,$t3
943 vpaddd @Xi[0],$e,$e # e+=X[i]
944 vpxor $c,$t0,$t0 # Parity(b,c,d)
946 vpslld \$30,$b,$t1
947 vpor $t3,$t2,$t2 # rol(a,5)
948 vpaddd $t0,$e,$e # e+=Parity(b,c,d)
950 vpsrld \$2,$b,$b
951 vpaddd $t2,$e,$e # e+=rol(a,5)
952 vpor $t1,$b,$b # b=rol(b,30)
954 push(@Xi,shift(@Xi));
957 sub BODY_40_59_avx {
958 my ($i,$a,$b,$c,$d,$e)=@_;
959 my $j=$i+1;
961 $code.=<<___;
962 vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
963 vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
965 vpaddd $K,$e,$e # e+=K_40_59
966 vpslld \$5,$a,$t2
967 vpand $c,$d,$t1
968 vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
970 vpaddd $t1,$e,$e
971 vpsrld \$27,$a,$t3
972 vpxor $c,$d,$t0
973 vpxor @Xi[3],@Xi[1],@Xi[1]
975 vmovdqu @Xi[0],`&Xi_off($i)`
976 vpaddd @Xi[0],$e,$e # e+=X[i]
977 vpor $t3,$t2,$t2 # rol(a,5)
978 vpsrld \$31,@Xi[1],$tx
979 vpand $b,$t0,$t0
980 vpaddd @Xi[1],@Xi[1],@Xi[1]
982 vpslld \$30,$b,$t1
983 vpaddd $t0,$e,$e # e+=Maj(b,d,c)
985 vpsrld \$2,$b,$b
986 vpaddd $t2,$e,$e # e+=rol(a,5)
987 vpor $tx,@Xi[1],@Xi[1] # rol(@X[1],1)
988 vpor $t1,$b,$b # b=rol(b,30)
990 push(@Xi,shift(@Xi));
993 $code.=<<___;
994 .type sha1_multi_block_avx,\@function,3
995 .align 32
996 sha1_multi_block_avx:
997 _avx_shortcut:
999 $code.=<<___ if ($avx>1);
1000 shr \$32,%rcx
1001 cmp \$2,$num
1002 jb .Lavx
1003 test \$`1<<5`,%ecx
1004 jnz _avx2_shortcut
1005 jmp .Lavx
1006 .align 32
1007 .Lavx:
1009 $code.=<<___;
1010 mov %rsp,%rax
1011 push %rbx
1012 push %rbp
1014 $code.=<<___ if ($win64);
1015 lea -0xa8(%rsp),%rsp
1016 movaps %xmm6,(%rsp)
1017 movaps %xmm7,0x10(%rsp)
1018 movaps %xmm8,0x20(%rsp)
1019 movaps %xmm9,0x30(%rsp)
1020 movaps %xmm10,-0x78(%rax)
1021 movaps %xmm11,-0x68(%rax)
1022 movaps %xmm12,-0x58(%rax)
1023 movaps %xmm13,-0x48(%rax)
1024 movaps %xmm14,-0x38(%rax)
1025 movaps %xmm15,-0x28(%rax)
1027 $code.=<<___;
1028 sub \$`$REG_SZ*18`, %rsp
1029 and \$-256,%rsp
1030 mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
1031 .Lbody_avx:
1032 lea K_XX_XX(%rip),$Tbl
1033 lea `$REG_SZ*16`(%rsp),%rbx
1035 vzeroupper
1036 .Loop_grande_avx:
1037 mov $num,`$REG_SZ*17+8`(%rsp) # original $num
1038 xor $num,$num
1040 for($i=0;$i<4;$i++) {
1041 $code.=<<___;
1042 mov `16*$i+0`($inp),@ptr[$i] # input pointer
1043 mov `16*$i+8`($inp),%ecx # number of blocks
1044 cmp $num,%ecx
1045 cmovg %ecx,$num # find maximum
1046 test %ecx,%ecx
1047 mov %ecx,`4*$i`(%rbx) # initialize counters
1048 cmovle $Tbl,@ptr[$i] # cancel input
1051 $code.=<<___;
1052 test $num,$num
1053 jz .Ldone_avx
1055 vmovdqu 0x00($ctx),$A # load context
1056 lea 128(%rsp),%rax
1057 vmovdqu 0x20($ctx),$B
1058 vmovdqu 0x40($ctx),$C
1059 vmovdqu 0x60($ctx),$D
1060 vmovdqu 0x80($ctx),$E
1061 vmovdqu 0x60($Tbl),$tx # pbswap_mask
1062 jmp .Loop_avx
1064 .align 32
1065 .Loop_avx:
1067 $code.=" vmovdqa -0x20($Tbl),$K\n"; # K_00_19
1068 for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
1069 $code.=" vmovdqa 0x00($Tbl),$K\n"; # K_20_39
1070 for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1071 $code.=" vmovdqa 0x20($Tbl),$K\n"; # K_40_59
1072 for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
1073 $code.=" vmovdqa 0x40($Tbl),$K\n"; # K_60_79
1074 for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1075 $code.=<<___;
1076 mov \$1,%ecx
1078 for($i=0;$i<4;$i++) {
1079 $code.=<<___;
1080 cmp `4*$i`(%rbx),%ecx # examine counters
1081 cmovge $Tbl,@ptr[$i] # cancel input
1084 $code.=<<___;
1085 vmovdqu (%rbx),$t0 # pull counters
1086 vpxor $t2,$t2,$t2
1087 vmovdqa $t0,$t1
1088 vpcmpgtd $t2,$t1,$t1 # mask value
1089 vpaddd $t1,$t0,$t0 # counters--
1091 vpand $t1,$A,$A
1092 vpand $t1,$B,$B
1093 vpaddd 0x00($ctx),$A,$A
1094 vpand $t1,$C,$C
1095 vpaddd 0x20($ctx),$B,$B
1096 vpand $t1,$D,$D
1097 vpaddd 0x40($ctx),$C,$C
1098 vpand $t1,$E,$E
1099 vpaddd 0x60($ctx),$D,$D
1100 vpaddd 0x80($ctx),$E,$E
1101 vmovdqu $A,0x00($ctx)
1102 vmovdqu $B,0x20($ctx)
1103 vmovdqu $C,0x40($ctx)
1104 vmovdqu $D,0x60($ctx)
1105 vmovdqu $E,0x80($ctx)
1107 vmovdqu $t0,(%rbx) # save counters
1108 vmovdqu 0x60($Tbl),$tx # pbswap_mask
1109 dec $num
1110 jnz .Loop_avx
1112 mov `$REG_SZ*17+8`(%rsp),$num
1113 lea $REG_SZ($ctx),$ctx
1114 lea `16*$REG_SZ/4`($inp),$inp
1115 dec $num
1116 jnz .Loop_grande_avx
1118 .Ldone_avx:
1119 mov `$REG_SZ*17`(%rsp),%rax # orignal %rsp
1120 vzeroupper
1122 $code.=<<___ if ($win64);
1123 movaps -0xb8(%rax),%xmm6
1124 movaps -0xa8(%rax),%xmm7
1125 movaps -0x98(%rax),%xmm8
1126 movaps -0x88(%rax),%xmm9
1127 movaps -0x78(%rax),%xmm10
1128 movaps -0x68(%rax),%xmm11
1129 movaps -0x58(%rax),%xmm12
1130 movaps -0x48(%rax),%xmm13
1131 movaps -0x38(%rax),%xmm14
1132 movaps -0x28(%rax),%xmm15
1134 $code.=<<___;
1135 mov -16(%rax),%rbp
1136 mov -8(%rax),%rbx
1137 lea (%rax),%rsp
1138 .Lepilogue_avx:
1140 .size sha1_multi_block_avx,.-sha1_multi_block_avx
1143 if ($avx>1) {
1144 $code =~ s/\`([^\`]*)\`/eval $1/gem;
1146 $REG_SZ=32;
1148 @ptr=map("%r$_",(12..15,8..11));
1150 @V=($A,$B,$C,$D,$E)=map("%ymm$_",(0..4));
1151 ($t0,$t1,$t2,$t3,$tx)=map("%ymm$_",(5..9));
1152 @Xi=map("%ymm$_",(10..14));
1153 $K="%ymm15";
1155 $code.=<<___;
1156 .type sha1_multi_block_avx2,\@function,3
1157 .align 32
1158 sha1_multi_block_avx2:
1159 _avx2_shortcut:
1160 mov %rsp,%rax
1161 push %rbx
1162 push %rbp
1163 push %r12
1164 push %r13
1165 push %r14
1166 push %r15
1168 $code.=<<___ if ($win64);
1169 lea -0xa8(%rsp),%rsp
1170 movaps %xmm6,(%rsp)
1171 movaps %xmm7,0x10(%rsp)
1172 movaps %xmm8,0x20(%rsp)
1173 movaps %xmm9,0x30(%rsp)
1174 movaps %xmm10,0x40(%rsp)
1175 movaps %xmm11,0x50(%rsp)
1176 movaps %xmm12,-0x78(%rax)
1177 movaps %xmm13,-0x68(%rax)
1178 movaps %xmm14,-0x58(%rax)
1179 movaps %xmm15,-0x48(%rax)
1181 $code.=<<___;
1182 sub \$`$REG_SZ*18`, %rsp
1183 and \$-256,%rsp
1184 mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
1185 .Lbody_avx2:
1186 lea K_XX_XX(%rip),$Tbl
1187 shr \$1,$num
1189 vzeroupper
1190 .Loop_grande_avx2:
1191 mov $num,`$REG_SZ*17+8`(%rsp) # original $num
1192 xor $num,$num
1193 lea `$REG_SZ*16`(%rsp),%rbx
1195 for($i=0;$i<8;$i++) {
1196 $code.=<<___;
1197 mov `16*$i+0`($inp),@ptr[$i] # input pointer
1198 mov `16*$i+8`($inp),%ecx # number of blocks
1199 cmp $num,%ecx
1200 cmovg %ecx,$num # find maximum
1201 test %ecx,%ecx
1202 mov %ecx,`4*$i`(%rbx) # initialize counters
1203 cmovle $Tbl,@ptr[$i] # cancel input
1206 $code.=<<___;
1207 vmovdqu 0x00($ctx),$A # load context
1208 lea 128(%rsp),%rax
1209 vmovdqu 0x20($ctx),$B
1210 lea 256+128(%rsp),%rbx
1211 vmovdqu 0x40($ctx),$C
1212 vmovdqu 0x60($ctx),$D
1213 vmovdqu 0x80($ctx),$E
1214 vmovdqu 0x60($Tbl),$tx # pbswap_mask
1215 jmp .Loop_avx2
1217 .align 32
1218 .Loop_avx2:
1220 $code.=" vmovdqa -0x20($Tbl),$K\n"; # K_00_19
1221 for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
1222 $code.=" vmovdqa 0x00($Tbl),$K\n"; # K_20_39
1223 for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1224 $code.=" vmovdqa 0x20($Tbl),$K\n"; # K_40_59
1225 for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
1226 $code.=" vmovdqa 0x40($Tbl),$K\n"; # K_60_79
1227 for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1228 $code.=<<___;
1229 mov \$1,%ecx
1230 lea `$REG_SZ*16`(%rsp),%rbx
1232 for($i=0;$i<8;$i++) {
1233 $code.=<<___;
1234 cmp `4*$i`(%rbx),%ecx # examine counters
1235 cmovge $Tbl,@ptr[$i] # cancel input
1238 $code.=<<___;
1239 vmovdqu (%rbx),$t0 # pull counters
1240 vpxor $t2,$t2,$t2
1241 vmovdqa $t0,$t1
1242 vpcmpgtd $t2,$t1,$t1 # mask value
1243 vpaddd $t1,$t0,$t0 # counters--
1245 vpand $t1,$A,$A
1246 vpand $t1,$B,$B
1247 vpaddd 0x00($ctx),$A,$A
1248 vpand $t1,$C,$C
1249 vpaddd 0x20($ctx),$B,$B
1250 vpand $t1,$D,$D
1251 vpaddd 0x40($ctx),$C,$C
1252 vpand $t1,$E,$E
1253 vpaddd 0x60($ctx),$D,$D
1254 vpaddd 0x80($ctx),$E,$E
1255 vmovdqu $A,0x00($ctx)
1256 vmovdqu $B,0x20($ctx)
1257 vmovdqu $C,0x40($ctx)
1258 vmovdqu $D,0x60($ctx)
1259 vmovdqu $E,0x80($ctx)
1261 vmovdqu $t0,(%rbx) # save counters
1262 lea 256+128(%rsp),%rbx
1263 vmovdqu 0x60($Tbl),$tx # pbswap_mask
1264 dec $num
1265 jnz .Loop_avx2
1267 #mov `$REG_SZ*17+8`(%rsp),$num
1268 #lea $REG_SZ($ctx),$ctx
1269 #lea `16*$REG_SZ/4`($inp),$inp
1270 #dec $num
1271 #jnz .Loop_grande_avx2
1273 .Ldone_avx2:
1274 mov `$REG_SZ*17`(%rsp),%rax # original %rsp
1275 vzeroupper
1277 $code.=<<___ if ($win64);
1278 movaps -0xd8(%rax),%xmm6
1279 movaps -0xc8(%rax),%xmm7
1280 movaps -0xb8(%rax),%xmm8
1281 movaps -0xa8(%rax),%xmm9
1282 movaps -0x98(%rax),%xmm10
1283 movaps -0x88(%rax),%xmm11
1284 movaps -0x78(%rax),%xmm12
1285 movaps -0x68(%rax),%xmm13
1286 movaps -0x58(%rax),%xmm14
1287 movaps -0x48(%rax),%xmm15
1289 $code.=<<___;
1290 mov -48(%rax),%r15
1291 mov -40(%rax),%r14
1292 mov -32(%rax),%r13
1293 mov -24(%rax),%r12
1294 mov -16(%rax),%rbp
1295 mov -8(%rax),%rbx
1296 lea (%rax),%rsp
1297 .Lepilogue_avx2:
1299 .size sha1_multi_block_avx2,.-sha1_multi_block_avx2
1301 } }}}
1302 $code.=<<___;
1304 .align 256
1305 .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
1306 .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
1307 K_XX_XX:
1308 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
1309 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
1310 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
1311 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
1312 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
1313 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
1314 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
1315 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
1316 .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
1317 .asciz "SHA1 multi-block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
if ($win64) {
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
# Bind the four SEH-handler arguments to the registers they arrive in
# under the Win64 calling convention; the heredoc assembly below refers
# to them through these Perl variables.
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
1328 $code.=<<___;
1329 .extern __imp_RtlVirtualUnwind
1330 .type se_handler,\@abi-omnipotent
1331 .align 16
1332 se_handler:
1333 push %rsi
1334 push %rdi
1335 push %rbx
1336 push %rbp
1337 push %r12
1338 push %r13
1339 push %r14
1340 push %r15
1341 pushfq
1342 sub \$64,%rsp
1344 mov 120($context),%rax # pull context->Rax
1345 mov 248($context),%rbx # pull context->Rip
1347 mov 8($disp),%rsi # disp->ImageBase
1348 mov 56($disp),%r11 # disp->HandlerData
1350 mov 0(%r11),%r10d # HandlerData[0]
1351 lea (%rsi,%r10),%r10 # end of prologue label
1352 cmp %r10,%rbx # context->Rip<.Lbody
1353 jb .Lin_prologue
1355 mov 152($context),%rax # pull context->Rsp
1357 mov 4(%r11),%r10d # HandlerData[1]
1358 lea (%rsi,%r10),%r10 # epilogue label
1359 cmp %r10,%rbx # context->Rip>=.Lepilogue
1360 jae .Lin_prologue
1362 mov `16*17`(%rax),%rax # pull saved stack pointer
1364 mov -8(%rax),%rbx
1365 mov -16(%rax),%rbp
1366 mov %rbx,144($context) # restore context->Rbx
1367 mov %rbp,160($context) # restore context->Rbp
1369 lea -24-10*16(%rax),%rsi
1370 lea 512($context),%rdi # &context.Xmm6
1371 mov \$20,%ecx
1372 .long 0xa548f3fc # cld; rep movsq
1374 .Lin_prologue:
1375 mov 8(%rax),%rdi
1376 mov 16(%rax),%rsi
1377 mov %rax,152($context) # restore context->Rsp
1378 mov %rsi,168($context) # restore context->Rsi
1379 mov %rdi,176($context) # restore context->Rdi
1381 mov 40($disp),%rdi # disp->ContextRecord
1382 mov $context,%rsi # context
1383 mov \$154,%ecx # sizeof(CONTEXT)
1384 .long 0xa548f3fc # cld; rep movsq
1386 mov $disp,%rsi
1387 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1388 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1389 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1390 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1391 mov 40(%rsi),%r10 # disp->ContextRecord
1392 lea 56(%rsi),%r11 # &disp->HandlerData
1393 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1394 mov %r10,32(%rsp) # arg5
1395 mov %r11,40(%rsp) # arg6
1396 mov %r12,48(%rsp) # arg7
1397 mov %rcx,56(%rsp) # arg8, (NULL)
1398 call *__imp_RtlVirtualUnwind(%rip)
1400 mov \$1,%eax # ExceptionContinueSearch
1401 add \$64,%rsp
1402 popfq
1403 pop %r15
1404 pop %r14
1405 pop %r13
1406 pop %r12
1407 pop %rbp
1408 pop %rbx
1409 pop %rdi
1410 pop %rsi
1412 .size se_handler,.-se_handler
1414 $code.=<<___ if ($avx>1);
1415 .type avx2_handler,\@abi-omnipotent
1416 .align 16
1417 avx2_handler:
1418 push %rsi
1419 push %rdi
1420 push %rbx
1421 push %rbp
1422 push %r12
1423 push %r13
1424 push %r14
1425 push %r15
1426 pushfq
1427 sub \$64,%rsp
1429 mov 120($context),%rax # pull context->Rax
1430 mov 248($context),%rbx # pull context->Rip
1432 mov 8($disp),%rsi # disp->ImageBase
1433 mov 56($disp),%r11 # disp->HandlerData
1435 mov 0(%r11),%r10d # HandlerData[0]
1436 lea (%rsi,%r10),%r10 # end of prologue label
1437 cmp %r10,%rbx # context->Rip<body label
1438 jb .Lin_prologue
1440 mov 152($context),%rax # pull context->Rsp
1442 mov 4(%r11),%r10d # HandlerData[1]
1443 lea (%rsi,%r10),%r10 # epilogue label
1444 cmp %r10,%rbx # context->Rip>=epilogue label
1445 jae .Lin_prologue
1447 mov `32*17`($context),%rax # pull saved stack pointer
1449 mov -8(%rax),%rbx
1450 mov -16(%rax),%rbp
1451 mov -24(%rax),%r12
1452 mov -32(%rax),%r13
1453 mov -40(%rax),%r14
1454 mov -48(%rax),%r15
1455 mov %rbx,144($context) # restore context->Rbx
1456 mov %rbp,160($context) # restore context->Rbp
1457 mov %r12,216($context) # restore context->R12
1458 mov %r13,224($context) # restore context->R13
1459 mov %r14,232($context) # restore context->R14
1460 mov %r15,240($context) # restore context->R15
1462 lea -56-10*16(%rax),%rsi
1463 lea 512($context),%rdi # &context.Xmm6
1464 mov \$20,%ecx
1465 .long 0xa548f3fc # cld; rep movsq
1467 jmp .Lin_prologue
1468 .size avx2_handler,.-avx2_handler
1470 $code.=<<___;
1471 .section .pdata
1472 .align 4
1473 .rva .LSEH_begin_sha1_multi_block
1474 .rva .LSEH_end_sha1_multi_block
1475 .rva .LSEH_info_sha1_multi_block
1476 .rva .LSEH_begin_sha1_multi_block_shaext
1477 .rva .LSEH_end_sha1_multi_block_shaext
1478 .rva .LSEH_info_sha1_multi_block_shaext
1480 $code.=<<___ if ($avx);
1481 .rva .LSEH_begin_sha1_multi_block_avx
1482 .rva .LSEH_end_sha1_multi_block_avx
1483 .rva .LSEH_info_sha1_multi_block_avx
1485 $code.=<<___ if ($avx>1);
1486 .rva .LSEH_begin_sha1_multi_block_avx2
1487 .rva .LSEH_end_sha1_multi_block_avx2
1488 .rva .LSEH_info_sha1_multi_block_avx2
1490 $code.=<<___;
1491 .section .xdata
1492 .align 8
1493 .LSEH_info_sha1_multi_block:
1494 .byte 9,0,0,0
1495 .rva se_handler
1496 .rva .Lbody,.Lepilogue # HandlerData[]
1497 .LSEH_info_sha1_multi_block_shaext:
1498 .byte 9,0,0,0
1499 .rva se_handler
1500 .rva .Lbody_shaext,.Lepilogue_shaext # HandlerData[]
1502 $code.=<<___ if ($avx);
1503 .LSEH_info_sha1_multi_block_avx:
1504 .byte 9,0,0,0
1505 .rva se_handler
1506 .rva .Lbody_avx,.Lepilogue_avx # HandlerData[]
1508 $code.=<<___ if ($avx>1);
1509 .LSEH_info_sha1_multi_block_avx2:
1510 .byte 9,0,0,0
1511 .rva avx2_handler
1512 .rva .Lbody_avx2,.Lepilogue_avx2 # HandlerData[]
1515 ####################################################################
# Prepend a REX prefix byte to the opcode byte list (passed by reference
# and aliased into @opcode via typeglob) when either operand is an
# extended register (xmm8-xmm15).  $dst goes in the ModR/M reg field,
# so it maps to REX.R (0x04); $src goes in the r/m field, mapping to
# REX.B (0x01).  No prefix byte is emitted when neither bit is needed.
sub rex {
  local *opcode=shift;		# alias caller's opcode array in place
  my ($dst,$src)=@_;
  my $rex=0;

  $rex|=0x04			if ($dst>=8);	# REX.R
  $rex|=0x01			if ($src>=8);	# REX.B
  unshift @opcode,$rex|0x40	if ($rex);	# 0x40 = REX base byte
# Hand-assemble "sha1rnds4 \$imm,%xmmN,%xmmM" as raw .byte sequences
# (0x0f 0x3a 0xcc encoding) so the output works with assemblers that
# predate the SHA extensions; any operand form that does not match the
# expected pattern is passed through verbatim.
# NOTE(review): @_[0] is a one-element slice of @_; conventional Perl
# would spell it $_[0] (behavior is identical here, left unchanged).
sub sha1rnds4 {
    if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
      my @opcode=(0x0f,0x3a,0xcc);		# sha1rnds4 opcode bytes
	rex(\@opcode,$3,$2);			# REX prefix if xmm8+ involved
      push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
      my $c=$1;
      # immediate written with a leading 0 is octal/hex — normalize via oct()
      push @opcode,$c=~/^0/?oct($c):$c;
      return ".byte\t".join(',',@opcode);
    } else {
      return "sha1rnds4\t".@_[0];
# Hand-assemble the two-operand SHA instructions in the 0x0f 0x38 opcode
# space (sha1nexte / sha1msg1 / sha1msg2) as .byte sequences for
# assemblers without SHA-extension support.  Mnemonics not in the table,
# or operand forms that do not match, are emitted verbatim.
sub sha1op38 {
    my $instr = shift;
    my %opcodelet = (
		"sha1nexte" => 0xc8,
  		"sha1msg1"  => 0xc9,
		"sha1msg2"  => 0xca	);

    if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
      my @opcode=(0x0f,0x38);			# two-byte escape
	rex(\@opcode,$2,$1);			# REX prefix if xmm8+ involved
      push @opcode,$opcodelet{$instr};		# instruction-specific opcode
      push @opcode,0xc0|($1&7)|(($2&7)<<3);	# ModR/M
      return ".byte\t".join(',',@opcode);
    } else {
      return $instr."\t".@_[0];
# Post-process the generated code line by line before printing:
# evaluate `...`-quoted Perl expressions (offset arithmetic), replace
# SHA mnemonics with .byte sequences, and rewrite %ymm operands to the
# %xmm spellings for instructions whose element-level forms only take
# xmm registers (AVX2 paths emitted with %ymm placeholders).
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;		# resolve embedded Perl expressions

	s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo		or
	s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo		or

	# %ymm -> %xmm operand rewrites (first matching rule wins via "or")
	s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go		or
	s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go		or
	s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go	or
	s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go	or
	s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go		or
	s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;

	print $_,"\n";
1574 close STDOUT;