# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Companion to x86_64-mont.pl that optimizes cache-timing attack
# countermeasures. The subroutines are produced by replacing bp[i]
# references in their x86_64-mont.pl counterparts with cache-neutral
# references to a powers table computed in BN_mod_exp_mont_consttime.
# In addition, a subroutine that scatters elements of the powers table
# is implemented, so that scatter-/gathering can be tuned without
# bn_exp.c modifications.
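#
# A minimal C sketch of the mask-based constant-time gather implemented
# below (illustrative only; the function name and table geometry are
# assumptions made for the example, not this module's interface):
#
#	/* Pick entry idx from a table spread over 4 cache lines,
#	 * touching every line so the access pattern is secret-independent. */
#	uint64_t ct_gather(const uint64_t table[4][8], unsigned idx)
#	{
#		uint64_t r = 0;
#		for (unsigned line = 0; line < 4; line++)
#			for (unsigned k = 0; k < 8; k++) {
#				/* mask is all-ones for the wanted slot only */
#				uint64_t mask = 0 - (uint64_t)(line*8 + k == idx);
#				r |= table[line][k] & mask;
#			}
#		return r;
#	}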
# Add MULX/AD*X code paths and additional interfaces to optimize for
# branch prediction unit. For input lengths that are multiples of 8
# the np argument is not just the modulus value, but one interleaved
# with 0. This is to optimize post-condition...
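#
# Illustratively, inferred from the 16*n addressing used below (stated
# here as an assumption for the reader, not a quote from bn_exp.c),
# the interleaved modulus is laid out as
#
#	np[] = { n[0], 0, n[1], 0, ..., n[num-1], 0 }
#
# so that n[j] sits at byte offset 16*j and the post-condition can walk
# n[] with the same stride as the temporary vector.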
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	`ml64 2>&1` =~ /Version ([0-9]+)\./) {
# int bn_mul_mont_gather5(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num,
		# int idx);	# 0 to 2^5-1, "index" in $bp holding
				# pre-computed powers of a', interlaced
				# in such a manner that b[0] is $bp[idx],
				# b[1] is [2^5+idx], etc.
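				# e.g. with the 2^5-entry table above,
				# b[i] is $bp[(i<<5)+idx], i.e. each power
				# is striped across the table rather than
				# stored contiguously.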
.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont_gather5
.type	bn_mul_mont_gather5,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	mov	`($win64?56:8)`(%rsp),%r10d	# load 7th argument
$code.=<<___ if ($win64);
	movaps	%xmm7,0x10(%rsp)
	lea	(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage
	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
	mov	$bp,%r12		# reassign $bp
$STRIDE=2**5*8;		# 5 is "window size"
$N=$STRIDE/4;		# should match cache line size
	shr	\$`log($N/8)/log(2)`,%r10
	lea	.Lmagic_masks(%rip),%rax
	and	\$`2**5/($N/8)-1`,%r10	# 5 is "window size"
	lea	96($bp,%r11,8),$bp	# pointer within 1st cache line
	movq	0(%rax,%r10,8),%xmm4	# set of masks denoting which
	movq	8(%rax,%r10,8),%xmm5	# cache line contains element
	movq	16(%rax,%r10,8),%xmm6	# denoted by 7th argument
	movq	24(%rax,%r10,8),%xmm7
	movq	`0*$STRIDE/4-96`($bp),%xmm0
	movq	`1*$STRIDE/4-96`($bp),%xmm1
	movq	`2*$STRIDE/4-96`($bp),%xmm2
	movq	`3*$STRIDE/4-96`($bp),%xmm3
	movq	%xmm0,$m0		# m0=bp[0]
	mov	($n0),$n0		# pull n0[0] value
	movq	`0*$STRIDE/4-96`($bp),%xmm0
	movq	`1*$STRIDE/4-96`($bp),%xmm1
	movq	`2*$STRIDE/4-96`($bp),%xmm2
	mulq	$m0			# ap[0]*bp[0]
	movq	`3*$STRIDE/4-96`($bp),%xmm3
	imulq	$lo0,$m1		# "tp[0]"*n0
	add	%rax,$lo0		# discarded
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	movq	%xmm0,$m0		# bp[1]
	mov	($ap),%rax		# ap[0]
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	movq	`0*$STRIDE/4-96`($bp),%xmm0
	movq	`1*$STRIDE/4-96`($bp),%xmm1
	movq	`2*$STRIDE/4-96`($bp),%xmm2
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	movq	`3*$STRIDE/4-96`($bp),%xmm3
	imulq	$lo0,$m1		# tp[0]*n0
	add	%rax,$lo0		# discarded
	mov	8(%rsp),$lo0		# tp[1]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	movq	%xmm0,$m0		# bp[i+1]
	mov	($ap),%rax		# ap[0]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	add	$lo0,$hi1		# pull upmost overflow bit
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	dec	$j			# doesn't affect CF!
	sbb	\$0,%rax		# handle upmost overflow bit
	or	$np,$ap			# ap=borrow?tp:rp
.Lcopy:					# copy or in-place refresh
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
$code.=<<___ if ($win64);
	movaps	-88(%rsi),%xmm6
	movaps	-72(%rsi),%xmm7
.size	bn_mul_mont_gather5,.-bn_mul_mont_gather5
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
.type	bn_mul4x_mont_gather5,\@function,6
bn_mul4x_mont_gather5:
$code.=<<___ if ($addx);
$code.=<<___ if ($win64);
	movaps	%xmm7,0x10(%rsp)
	shl	\$3+2,%r10d		# 4*$num
	##############################################################
	# ensure that stack frame doesn't alias with $aptr+4*$num
	# modulo 4096, which covers ret[num], am[num] and n[2*num]
	# (see bn_exp.c). this is done to allow memory disambiguation
	# logic to do its magic. [excessive frame is allocated in order
	# to allow bn_from_mont8x to clear it.]
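	#
	# in rough C-like terms (a hedged sketch of the intent; the
	# code below achieves this branchlessly with lea/sub/and/cmov):
	#
	#	frame = rsp - frame_size;
	#	if ((aptr - frame) mod 4096 would land inside the frame)
	#		step frame further down, so frame and aptr can
	#		never collide modulo 4096;
	#	rsp = frame;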
	lea	-64(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $ap
	lea	-64(%rsp,$num,2),%rsp	# alloca(128+num*8)
	lea	4096-64(,$num,2),%r10
	lea	-64(%rsp,$num,2),%rsp	# alloca(128+num*8)
	mov	40(%rsp),%rsi		# restore %rsp
$code.=<<___ if ($win64);
	movaps	-88(%rsi),%xmm6
	movaps	-72(%rsi),%xmm7
.size	bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
.type	mul4x_internal,\@abi-omnipotent
	mov	`($win64?56:8)`(%rax),%r10d	# load 7th argument
	lea	256(%rdx,$num),%r13
	shr	\$5,$num		# restore $num
$STRIDE=2**5*8;		# 5 is "window size"
$N=$STRIDE/4;		# should match cache line size
	shr	\$`log($N/8)/log(2)`,%r10
	lea	.Lmagic_masks(%rip),%rax
	and	\$`2**5/($N/8)-1`,%r10	# 5 is "window size"
	lea	96(%rdx,%r11,8),$bp	# pointer within 1st cache line
	movq	0(%rax,%r10,8),%xmm4	# set of masks denoting which
	movq	8(%rax,%r10,8),%xmm5	# cache line contains element
	movq	16(%rax,%r10,8),%xmm6	# denoted by 7th argument
	movq	24(%rax,%r10,8),%xmm7
	movq	`0*$STRIDE/4-96`($bp),%xmm0
	lea	$STRIDE($bp),$tp	# borrow $tp
	movq	`1*$STRIDE/4-96`($bp),%xmm1
	movq	`2*$STRIDE/4-96`($bp),%xmm2
	movq	`3*$STRIDE/4-96`($bp),%xmm3
	movq	`0*$STRIDE/4-96`($tp),%xmm1
	movq	`1*$STRIDE/4-96`($tp),%xmm2
	movq	`2*$STRIDE/4-96`($tp),%xmm3
	movq	%xmm0,$m0		# m0=bp[0]
	movq	`3*$STRIDE/4-96`($tp),%xmm0
	mov	%r13,16+8(%rsp)		# save end of b[num]
	mov	$rp, 56+8(%rsp)		# save $rp
	mov	($n0),$n0		# pull n0[0] value
	lea	($ap,$num),$ap		# end of a[num]
	mulq	$m0			# ap[0]*bp[0]
	imulq	$A[0],$m1		# "tp[0]"*n0
	##############################################################
	# $tp is chosen so that writing to the top-most element of the
	# vector occurs just "above" references to the powers table,
	# "above" modulo cache-line size, which effectively precludes
	# the possibility of memory disambiguation logic failure when
	# accessing the table.
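	#
	# Informally: the store's address bits modulo the cache-line
	# size are kept just past those of the table loads, so the
	# partial-address aliasing detector never flags a (false)
	# store-to-load conflict. (A reading of the comment above,
	# offered as an aid, not an additional guarantee.)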
	lea	64+8(%rsp,%r11,8),$tp
	lea	2*$STRIDE($bp),$bp
	add	%rax,$A[0]		# discarded
	mov	16*1($np),%rax		# interleaved with 0, therefore 16*n
	mov	16($ap,$num),%rax
	lea	4*8($num),$j		# j=4
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-8($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	($ap,$num),%rax		# ap[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16($tp)		# tp[j-1]
	movq	%xmm0,$m0		# bp[1]
	lea	($np,$num,2),$np	# rewind $np
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	movq	`0*$STRIDE/4-96`($bp),%xmm0
	movq	`1*$STRIDE/4-96`($bp),%xmm1
	movq	`2*$STRIDE/4-96`($bp),%xmm2
	movq	`3*$STRIDE/4-96`($bp),%xmm3
	imulq	$A[0],$m1		# tp[0]*n0
	mov	$N[1],($tp)		# store upmost overflow bit
	lea	($tp,$num),$tp		# rewind $tp
	add	%rax,$A[0]		# "$N[0]", discarded
	mulq	$m0			# ap[j]*bp[i]
	mov	16*1($np),%rax		# interleaved with 0, therefore 16*n
	add	8($tp),$A[1]		# +tp[1]
	mov	16($ap,$num),%rax
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4*8($num),$j		# j=4
	mulq	$m0			# ap[j]*bp[i]
	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-32($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-16($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	$N[0],-8($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-32($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	($ap,$num),%rax		# ap[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	movq	%xmm0,$m0		# bp[i+1]
	mov	$N[1],-16($tp)		# tp[j-1]
	lea	($np,$num,2),$np	# rewind $np
	add	($tp),$N[0]		# pull upmost overflow bit
	adc	\$0,$N[1]		# upmost overflow bit
	sub	$N[0],$m1		# compare top-most words
	adc	$j,$j			# $j is zero
	lea	($tp,$num),%rbx		# tptr in .sqr4x_sub
	lea	($np,$N[1],8),%rbp	# nptr in .sqr4x_sub
	sar	\$3+2,%rcx		# cf=0
	mov	56+8(%rsp),%rdi		# rptr in .sqr4x_sub
my @ri=("%rax",$bp,$m0,$m1);
	lea	($tp,$num),$tp		# rewind $tp
	lea	($np,$N[1],8),$np
	mov	56+8(%rsp),$rp		# restore $rp
.size	mul4x_internal,.-mul4x_internal
######################################################################
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# const void *table,
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
.type	bn_power5,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
$code.=<<___ if ($win64);
	movaps	%xmm7,0x10(%rsp)
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10d		# 4*$num
	##############################################################
	# ensure that stack frame doesn't alias with $aptr+4*$num
	# modulo 4096, which covers ret[num], am[num] and n[2*num]
	# (see bn_exp.c). this is done to allow memory disambiguation
	# logic to do its magic.
	lea	-64(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	##############################################################
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	mov	%rax, 40(%rsp)		# save original %rsp
	movq	$rptr,%xmm1		# save $rptr
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_power5,.-bn_power5
.globl	bn_sqr8x_internal
.hidden	bn_sqr8x_internal
.type	bn_sqr8x_internal,\@abi-omnipotent
__bn_sqr8x_internal:
	##############################################################
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	##############################################################
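	# In scalar pseudo-C the two steps amount to (a sketch with
	# carry propagation elided, not the exact schedule below):
	#
	#	for (i = 0; i < num; i++)		/* step a) */
	#		for (j = i+1; j < num; j++)
	#			t[i+j] += a[i]*a[j];
	#	t <<= 1;				/* step b) */
	#	for (i = 0; i < num; i++)
	#		t[2*i] += a[i]*a[i];
	##############################################################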
	lea	32(%r10),$i		# $i=-($num-32)
	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]
	mov	$num,$j			# $j=$num
					# comments apply to $num==8 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$A0[0]		# a[1]*a[0]
	mov	$A0[0],-24($tptr,$i)	# t[1]
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	-8($aptr,$i),$ai	# a[3]
	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr,$j)	# t[3]
	mov	($aptr,$j),$ai		# a[4]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	mov	8($aptr,$j),$ai		# a[5]
	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
	mov	16($aptr,$j),$ai	# a[6]
	add	%rax,$A1[1]		# a[5]*a[3]+t[6]
	mov	$A0[0],8($tptr,$j)	# t[5]
	add	%rax,$A0[1]		# a[6]*a[2]+a[5]*a[3]+t[6]
	mov	24($aptr,$j),$ai	# a[7]
	add	%rax,$A1[0]		# a[6]*a[5]+t[7]
	mov	$A0[1],16($tptr,$j)	# t[6]
	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[6]
	mov	$A0[0],-8($tptr,$j)	# t[7]
	mov	$A1[1],($tptr)		# t[8]
	mov	%rdx,8($tptr)		# t[9]
.Lsqr4x_outer:				# comments apply to $num==6 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	-24($tptr,$i),$A0[0]	# t[1]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
	mov	$A0[0],-24($tptr,$i)	# t[1]
	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	-8($aptr,$i),$ai	# a[3]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
	add	-8($tptr,$i),$A1[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr,$i)	# t[3]
	mov	($aptr,$j),$ai		# a[4]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	add	($tptr,$j),$A1[1]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	mov	8($aptr,$j),$ai		# a[5]
	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	add	8($tptr,$j),$A1[0]
	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below
	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
	mov	%rdx,8($tptr)		# t[7], "preloaded t[3]" below
					# comments apply to $num==4 case
	mov	-32($aptr),$a0		# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr),%rax		# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr),$ai		# a[2]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
	mov	$A0[0],-24($tptr)	# t[1]
	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
	mov	-8($aptr),$ai		# a[3]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
	mov	$A0[1],-16($tptr)	# t[2]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr)	# t[3]
	mov	-16($aptr),%rax		# a[2]
	mov	$A1[1],($tptr)		# t[4]
	mov	%rdx,8($tptr)		# t[5]
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
	sub	$num,$i			# $i=16-$num
	add	$A1[0],%rax		# t[5]
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]
	mov	-16($aptr,$i),%rax	# a[0]
	lea	48+8(%rsp),$tptr
	xor	$A0[0],$A0[0]		# t[0]
	mov	8($tptr),$A0[1]		# t[1]
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	sbb	$carry,$carry		# mov cf,$carry
	jmp	.Lsqr4x_shift_n_add
.Lsqr4x_shift_n_add:
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr),$A0[0]		# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr),$A0[1]		# t[2*i+2+1]	# prefetch
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr)
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	sbb	$carry,$carry		# mov cf,$carry
	jnz	.Lsqr4x_shift_n_add
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	$S[2],-16($tptr)
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
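#
# One reduction step in scalar form, as a sketch with carries elided
# (n0 is the usual Montgomery constant -n^-1 mod 2^64):
#
#	m = (t[0] * n0) mod 2^64;	/* the "n0*a[i]" put aside below */
#	t = t + m*n;			/* forces t[0] to zero */
#	t = t >> 64;			/* drop the zeroed limb */
#
# repeated num times, followed by the usual conditional subtraction.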
my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
	lea	($nptr,$num,2),%rcx	# end of n[]
	lea	48+8(%rsp,$num,2),%rdx	# end of t[] buffer
	lea	48+8(%rsp,$num),$tptr	# end of initial t[] window
	jmp	.L8x_reduction_loop
.L8x_reduction_loop:
	lea	($tptr,$num),$tptr	# start of current t[] window
	mov	%rax,(%rdx)		# store top-most carry bit
	lea	8*8($tptr),$tptr
	imulq	32+8(%rsp),$m0		# n0*a[0]
	mov	16*0($nptr),%rax	# n[0]
	mov	16*1($nptr),%rax	# n[1]
	mov	16*2($nptr),%rax
	mov	$m0,48-8+8(%rsp,%rcx,8)	# put aside n0*a[i]
	mov	16*3($nptr),%rax
	mov	32+8(%rsp),$carry	# pull n0, borrow $carry
	mov	16*4($nptr),%rax
	imulq	%r8,$carry		# modulo-scheduled
	mov	16*5($nptr),%rax
	mov	16*6($nptr),%rax
	mov	16*7($nptr),%rax
	mov	$carry,$m0		# n0*a[i]
	mov	16*0($nptr),%rax	# n[0]
	lea	16*8($nptr),$nptr
	mov	8+8(%rsp),%rdx		# pull end of t[]
	cmp	0+8(%rsp),$nptr		# end of n[]?
	sbb	$carry,$carry		# top carry
	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
	mov	16*0($nptr),%rax
	mov	16*1($nptr),%rax
	mov	%r8,($tptr)		# save result
	mov	16*2($nptr),%rax
	lea	8($tptr),$tptr		# $tptr++
	mov	16*3($nptr),%rax
	mov	16*4($nptr),%rax
	mov	16*5($nptr),%rax
	mov	16*6($nptr),%rax
	mov	16*7($nptr),%rax
	mov	48-16+8(%rsp,%rcx,8),$m0	# pull n0*a[i]
	mov	16*0($nptr),%rax	# pull n[0]
	lea	16*8($nptr),$nptr
	mov	8+8(%rsp),%rdx		# pull end of t[]
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.L8x_tail_done		# break out of loop
	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
	mov	8*0($nptr),%rax		# pull n[0]
	sbb	$carry,$carry		# top carry
	add	(%rdx),%r8		# can this overflow?
	adc	\$0,%rax		# top-most carry
	mov	-16($nptr),%rcx		# np[num-1]
	movq	%xmm2,$nptr		# restore $nptr
	mov	%r8,8*0($tptr)		# store top 512 bits
	movq	%xmm3,$num		# $num is %r9, can't be moved upwards
	lea	8*8($tptr),$tptr
	cmp	%rdx,$tptr		# end of t[]?
	jb	.L8x_reduction_loop
	##############################################################
	# Post-condition, 4x unrolled
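	# i.e. the standard branchless tail (sketch): compute t - n
	# four limbs at a time with sbb, then use the final borrow to
	# select whether t or t - n is copied back to rp[].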
my ($tptr,$nptr)=("%rbx","%rbp");
	#xor	%rsi,%rsi		# %rsi was $carry above
	sub	%r15,%rcx		# compare top-most words
	lea	(%rdi,$num),$tptr	# %rdi was $tptr above
	movq	%xmm1,$rptr		# restore $rptr
	movq	%xmm1,$aptr		# prepare for back-to-back call
	lea	($nptr,%rax,8),$nptr
	sar	\$3+2,%rcx		# cf=0
	sbb	16*0($nptr),%r12
	sbb	16*1($nptr),%r13
	lea	8*4($tptr),$tptr
	sbb	16*2($nptr),%r14
	sbb	16*3($nptr),%r15
	lea	16*4($nptr),$nptr
	lea	8*4($rptr),$rptr
	mov	$num,%r10		# prepare for back-to-back call
	neg	$num			# restore $num
.size	bn_sqr8x_internal,.-bn_sqr8x_internal
.globl	bn_from_montgomery
.type	bn_from_montgomery,\@abi-omnipotent
	testl	\$7,`($win64?"48(%rsp)":"%r9d")`
.size	bn_from_montgomery,.-bn_from_montgomery
.type	bn_from_mont8x,\@function,6
$code.=<<___ if ($win64);
	lea	-0x28(%rsp),%rsp
	movaps	%xmm7,0x10(%rsp)
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10d		# 4*$num
	##############################################################
	# ensure that stack frame doesn't alias with $aptr+4*$num
	# modulo 4096, which covers ret[num], am[num] and n[2*num]
	# (see bn_exp.c). this is done to allow memory disambiguation
	# logic to do its magic.
	lea	-64(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	##############################################################
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	mov	%rax, 40(%rsp)		# save original %rsp
	movdqu	($aptr),%xmm1
	movdqu	16($aptr),%xmm2
	movdqu	32($aptr),%xmm3
	movdqa	%xmm0,(%rax,$num)
	movdqu	48($aptr),%xmm4
	movdqa	%xmm0,16(%rax,$num)
	.byte	0x48,0x8d,0xb6,0x40,0x00,0x00,0x00	# lea	64($aptr),$aptr
	movdqa	%xmm0,32(%rax,$num)
	movdqa	%xmm2,16(%rax)
	movdqa	%xmm0,48(%rax,$num)
	movdqa	%xmm3,32(%rax)
	movdqa	%xmm4,48(%rax)
	movq	%r10, %xmm3		# -num
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	lea	(%rax,$num),$rptr
	call	sqrx8x_reduction
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lfrom_mont_zero
	call	sqr8x_reduction
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lfrom_mont_zero
	movdqa	%xmm0,16*0(%rax)
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	jnz	.Lfrom_mont_zero
.size	bn_from_mont8x,.-bn_from_mont8x
my $bp="%rdx";	# restore original value
.type	bn_mulx4x_mont_gather5,\@function,6
bn_mulx4x_mont_gather5:
$code.=<<___ if ($win64);
	lea	-0x28(%rsp),%rsp
	movaps	%xmm7,0x10(%rsp)
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10d		# 4*$num
	##############################################################
	# ensure that stack frame doesn't alias with $aptr+4*$num
	# modulo 4096, which covers a[num], ret[num] and n[2*num]
	# (see bn_exp.c). this is done to allow memory disambiguation
	# logic to do its magic. [excessive frame is allocated in order
	# to allow bn_from_mont8x to clear it.]
	lea	-64(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+$num)
	lea	4096-64(,$num,2),%r10	# 4096-frame-$num
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+$num)
	and	\$-64,%rsp		# ensure alignment
	##############################################################
	# +8	off-loaded &b[i]
	mov	$n0, 32(%rsp)		# save *n0
	mov	%rax,40(%rsp)		# save original %rsp
	call	mulx4x_internal
	mov	40(%rsp),%rsi		# restore %rsp
$code.=<<___ if ($win64);
	movaps	-88(%rsi),%xmm6
	movaps	-72(%rsi),%xmm7
.size	bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
.type	mulx4x_internal,\@abi-omnipotent
	.byte	0x4c,0x89,0x8c,0x24,0x08,0x00,0x00,0x00	# mov	$num,8(%rsp)	# save -$num
	neg	$num			# restore $num
	lea	256($bp,$num),%r13
	mov	`($win64?56:8)`(%rax),%r10d	# load 7th argument
	mov	%r13,16+8(%rsp)		# end of b[num]
	mov	$num,24+8(%rsp)		# inner counter
	mov	$rp, 56+8(%rsp)		# save $rp
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $STRIDE=2**5*8;		# 5 is "window size"
my $N=$STRIDE/4;		# should match cache line size
	shr	\$`log($N/8)/log(2)`,%r10
	lea	.Lmagic_masks(%rip),%rax
	and	\$`2**5/($N/8)-1`,%r10	# 5 is "window size"
	lea	96($bp,%r11,8),$bptr	# pointer within 1st cache line
	movq	0(%rax,%r10,8),%xmm4	# set of masks denoting which
	movq	8(%rax,%r10,8),%xmm5	# cache line contains element
	movq	16(%rax,%r10,8),%xmm6	# denoted by 7th argument
	movq	24(%rax,%r10,8),%xmm7
	movq	`0*$STRIDE/4-96`($bptr),%xmm0
	lea	$STRIDE($bptr),$tptr	# borrow $tptr
	movq	`1*$STRIDE/4-96`($bptr),%xmm1
	movq	`2*$STRIDE/4-96`($bptr),%xmm2
	movq	`3*$STRIDE/4-96`($bptr),%xmm3
	movq	`0*$STRIDE/4-96`($tptr),%xmm1
	movq	`1*$STRIDE/4-96`($tptr),%xmm2
	movq	`2*$STRIDE/4-96`($tptr),%xmm3
	movq	%xmm0,%rdx		# bp[0]
	movq	`3*$STRIDE/4-96`($tptr),%xmm0
	lea	2*$STRIDE($bptr),$bptr	# next &b[i]
	##############################################################
	# $tptr is chosen so that writing to the top-most element of the
	# vector occurs just "above" references to the powers table,
	# "above" modulo cache-line size, which effectively precludes
	# the possibility of memory disambiguation logic failure when
	# accessing the table.
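	##############################################################
	# Reminder on the two independent carry chains used throughout
	# this MULX/AD*X path (and why both flags keep being cleared
	# with xor below):
	#	mulx	src, lo, hi	# hi:lo = %rdx * src, flags untouched
	#	adcx	src, dst	# dst += src + CF, writes only CF
	#	adox	src, dst	# dst += src + OF, writes only OF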
	lea	64+8*4+8(%rsp,%r11,8),$tptr
	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r12	# a[1]*b[0]
	mulx	2*8($aptr),%rax,%r13	# ...
	mulx	3*8($aptr),%rax,%r14
	imulq	32+8(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0
	mov	$bptr,8+8(%rsp)		# off-load &b[i]
	.byte	0x48,0x8d,0xb6,0x20,0x00,0x00,0x00	# lea	4*8($aptr),$aptr
	adcx	$zero,%r14		# cf=0
	mulx	0*16($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*16($nptr),%rax,%r11
	mulx	2*16($nptr),%rax,%r12
	mov	24+8(%rsp),$bptr	# counter value
	mov	%r10,-8*4($tptr)
	mulx	3*16($nptr),%rax,%r15
	mov	%r11,-8*3($tptr)
	adox	$zero,%r15		# of=0
	.byte	0x48,0x8d,0x89,0x40,0x00,0x00,0x00	# lea	4*16($nptr),$nptr
	mov	%r12,-8*2($tptr)
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	mulx	2*8($aptr),%r12,%rax	# ...
	mulx	3*8($aptr),%r13,%r14
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	mulx	0*16($nptr),%rax,%r15
	mulx	1*16($nptr),%rax,%r15
	mulx	2*16($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*16($nptr),%rax,%r15
	mov	%r12,-3*8($tptr)
	lea	4*16($nptr),$nptr
	mov	%r13,-2*8($tptr)
	dec	$bptr			# of=0, pass cf
	mov	8(%rsp),$num		# load -num
	movq	%xmm0,%rdx		# bp[1]
	adc	$zero,%r15		# modulo-scheduled
	lea	($aptr,$num),$aptr	# rewind $aptr
	mov	8+8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,$zero		# top-most carry
	mov	%r14,-1*8($tptr)
	mov	$zero,($tptr)		# save top-most carry
	lea	4*8($tptr,$num),$tptr	# rewind $tptr
	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	$zero,$zero		# cf=0, of=0
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi		# +t[0]
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	mulx	3*8($aptr),%rdx,%r14
	adox	-2*8($tptr),%r12
	lea	($nptr,$num,2),$nptr	# rewind $nptr
	lea	4*8($aptr),$aptr
	adox	-1*8($tptr),%r13
	imulq	32+8(%rsp),$mi		# "t[0]"*n0
	movq	`0*$STRIDE/4-96`($bptr),%xmm0
	movq	`1*$STRIDE/4-96`($bptr),%xmm1
	movq	`2*$STRIDE/4-96`($bptr),%xmm2
	movq	`3*$STRIDE/4-96`($bptr),%xmm3
	add	\$$STRIDE,$bptr		# next &b[i]
	xor	$zero,$zero		# cf=0, of=0
	mov	$bptr,8+8(%rsp)		# off-load &b[i]
	mulx	0*16($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*16($nptr),%rax,%r11
	mulx	2*16($nptr),%rax,%r12
	mulx	3*16($nptr),%rax,%r15
	mov	24+8(%rsp),$bptr	# counter value
	mov	%r10,-8*4($tptr)
	mov	%r11,-8*3($tptr)
	adox	$zero,%r15		# of=0
	mov	%r12,-8*2($tptr)
	lea	4*16($nptr),$nptr
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	mulx	3*8($aptr),%r13,%r14
	adcx	2*8($tptr),%r12
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0
	mulx	0*16($nptr),%rax,%r15
	mulx	1*16($nptr),%rax,%r15
	mulx	2*16($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*16($nptr),%rax,%r15
	lea	4*16($nptr),$nptr
	mov	%r12,-3*8($tptr)
	mov	%r13,-2*8($tptr)
	dec	$bptr			# of=0, pass cf
	mov	0+8(%rsp),$num		# load -num
	movq	%xmm0,%rdx		# bp[i+1]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$bptr	# pull top-most carry to %cf
	mov	8+8(%rsp),$bptr		# re-load &b[i]
	lea	($aptr,$num),$aptr	# rewind $aptr
	adc	$zero,$zero		# top-most carry
	mov	%r14,-1*8($tptr)
	sub	%r14,%r10		# compare top-most words
	lea	($tptr,$num),%rdi	# rewind $tptr
	lea	($nptr,$num,2),$nptr	# rewind $nptr
	sar	\$3+2,$num		# cf=0
	lea	($nptr,$zero,8),%rbp
	mov	56+8(%rsp),%rdx		# restore rp
	jmp	.Lsqrx4x_sub		# common post-condition
.size	mulx4x_internal,.-mulx4x_internal
######################################################################
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# const void *table,
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
.type	bn_powerx5,\@function,6
$code.=<<___ if ($win64);
	lea	-0x28(%rsp),%rsp
	movaps	%xmm7,0x10(%rsp)
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10d		# 4*$num
	##############################################################
	# ensure that stack frame doesn't alias with $aptr+4*$num
	# modulo 4096, which covers ret[num], am[num] and n[2*num]
	# (see bn_exp.c). this is done to allow memory disambiguation
	# logic to do its magic.
	lea	-64(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	##############################################################
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	# +16	intermediate carry bit
	# +24	top-most carry bit, used in reduction section
	movq	$rptr,%xmm1		# save $rptr
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num
	mov	%rax, 40(%rsp)		# save original %rsp
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	mov	%r10,$num		# -num
	call	mulx4x_internal
	mov	40(%rsp),%rsi		# restore %rsp
$code.=<<___ if ($win64);
	movaps	-88(%rsi),%xmm6
	movaps	-72(%rsi),%xmm7
.size	bn_powerx5,.-bn_powerx5
.globl	bn_sqrx8x_internal
.hidden	bn_sqrx8x_internal
.type	bn_sqrx8x_internal,\@abi-omnipotent
__bn_sqrx8x_internal:
	##################################################################
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	##################################################################
	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
my ($zero,$carry)=("%rbp","%rcx");
	lea	48+8(%rsp),$tptr
	lea	($aptr,$num),$aaptr
	mov	$num,0+8(%rsp)		# save $num
	mov	$aaptr,8+8(%rsp)	# save end of $aptr
	jmp	.Lsqr8x_zero_start
	.byte	0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
	movdqa	%xmm0,0*8($tptr)
	movdqa	%xmm0,2*8($tptr)
	movdqa	%xmm0,4*8($tptr)
	movdqa	%xmm0,6*8($tptr)
.Lsqr8x_zero_start:			# aligned at 32
	movdqa	%xmm0,8*8($tptr)
	movdqa	%xmm0,10*8($tptr)
	movdqa	%xmm0,12*8($tptr)
	movdqa	%xmm0,14*8($tptr)
	lea	16*8($tptr),$tptr
	mov	0*8($aptr),%rdx		# a[0], modulo-scheduled
	#xor	%r9,%r9			# t[1], ex-$num, zero already
	lea	48+8(%rsp),$tptr
	xor	$zero,$zero		# cf=0, of=0
	jmp	.Lsqrx8x_outer_loop
.Lsqrx8x_outer_loop:
	mulx	1*8($aptr),%r8,%rax	# a[1]*a[0]
	adcx	%r9,%r8			# a[1]*a[0]+=t[1]
	mulx	2*8($aptr),%r9,%rax	# a[2]*a[0]
	.byte	0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00	# mulx	3*8($aptr),%r10,%rax	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00	# mulx	4*8($aptr),%r11,%rax
	mulx	5*8($aptr),%r12,%rax
	mulx	6*8($aptr),%r13,%rax
	mulx	7*8($aptr),%r14,%r15
	mov	1*8($aptr),%rdx		# a[1]
	mov	%r8,1*8($tptr)		# t[1]
	mov	%r9,2*8($tptr)		# t[2]
	sbb	$carry,$carry		# mov %cf,$carry
	xor	$zero,$zero		# cf=0, of=0
	mulx	2*8($aptr),%r8,%rbx	# a[2]*a[1]
	mulx	3*8($aptr),%r9,%rax	# a[3]*a[1]
	mulx	4*8($aptr),%r10,%rbx	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00	# mulx	5*8($aptr),%r11,%rax
	.byte	0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r12,%rbx
	.byte	0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r13,%r14
	mov	2*8($aptr),%rdx		# a[2]
	adox	$zero,%r14		# of=0
	adcx	$zero,%r14		# cf=0
	mov	%r8,3*8($tptr)		# t[3]
	mov	%r9,4*8($tptr)		# t[4]
	mulx	3*8($aptr),%r8,%rbx	# a[3]*a[2]
	mulx	4*8($aptr),%r9,%rax	# a[4]*a[2]
	mulx	5*8($aptr),%r10,%rbx	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r11,%rax
	.byte	0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r12,%r13
	mov	3*8($aptr),%rdx		# a[3]
	mov	%r8,5*8($tptr)		# t[5]
	mov	%r9,6*8($tptr)		# t[6]
	mulx	4*8($aptr),%r8,%rax	# a[4]*a[3]
	adox	$zero,%r13		# of=0
	adcx	$zero,%r13		# cf=0
	mulx	5*8($aptr),%r9,%rbx	# a[5]*a[3]
	mulx	6*8($aptr),%r10,%rax	# ...
	mulx	7*8($aptr),%r11,%r12
	mov	4*8($aptr),%rdx		# a[4]
	mov	5*8($aptr),%r14		# a[5]
	mov	6*8($aptr),%r15		# a[6]
	adox	$zero,%r12		# of=0
	adcx	$zero,%r12		# cf=0
	mov	%r8,7*8($tptr)		# t[7]
	mov	%r9,8*8($tptr)		# t[8]
	mulx	%r14,%r9,%rax		# a[5]*a[4]
	mov	7*8($aptr),%r8		# a[7]
	mulx	%r15,%r10,%rbx		# a[6]*a[4]
	mulx	%r8,%r11,%rax		# a[7]*a[4]
	mov	%r14,%rdx		# a[5]
	#adox	$zero,%rax		# of=0
	adcx	$zero,%rax		# cf=0
	mulx	%r15,%r14,%rbx		# a[6]*a[5]
	mulx	%r8,%r12,%r13		# a[7]*a[5]
	mov	%r15,%rdx		# a[6]
	lea	8*8($aptr),$aptr
	mulx	%r8,%r8,%r14		# a[7]*a[6]
	je	.Lsqrx8x_outer_break
	neg	$carry			# mov $carry,%cf
	adcx	9*8($tptr),%r9		# +=t[9]
	adcx	10*8($tptr),%r10	# ...
	adcx	11*8($tptr),%r11
	adc	12*8($tptr),%r12
	adc	13*8($tptr),%r13
	adc	14*8($tptr),%r14
	adc	15*8($tptr),%r15
	lea	2*64($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,$carry
	mov	-64($aptr),%rdx		# a[0]
	mov	%rax,16+8(%rsp)		# offload $carry
	mov	$tptr,24+8(%rsp)
	#lea	8*8($tptr),$tptr	# see 2*8*8($tptr) above
	xor	%eax,%eax		# cf=0, of=0
	mulx	0*8($aaptr),%rax,%r8	# a[8]*a[i]
	adcx	%rax,%rbx		# +=t[8]
	mulx	1*8($aaptr),%rax,%r9	# ...
	mulx	2*8($aaptr),%rax,%r10
	mulx	3*8($aaptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	4*8($aaptr),%rax,%r12
	mulx	5*8($aaptr),%rax,%r13
	mulx	6*8($aaptr),%rax,%r14
	mov	%rbx,($tptr,%rcx,8)	# store t[8+i]
	.byte	0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00	# mulx	7*8($aaptr),%rax,%r15
	mov	8($aptr,%rcx,8),%rdx	# a[i]
	adox	%rbx,%r15		# %rbx is 0, of=0
	adcx	%rbx,%r15		# cf=0
	lea	8*8($aaptr),$aaptr
	cmp	8+8(%rsp),$aaptr	# done?
	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,%rax
	xor	%ebx,%ebx		# cf=0, of=0
	mov	%rax,16+8(%rsp)		# offload carry
	sub	16+8(%rsp),%r8		# consume last carry
	mov	24+8(%rsp),$carry	# initial $tptr, borrow $carry
	mov	0*8($aptr),%rdx		# a[8], modulo-scheduled
	xor	%ebp,%ebp		# xor	$zero,$zero
	cmp	$carry,$tptr		# cf=0, of=0
	je	.Lsqrx8x_outer_loop
	mov	2*8($carry),%r10
	mov	3*8($carry),%r11
	mov	4*8($carry),%r12
	mov	5*8($carry),%r13
	mov	6*8($carry),%r14
	mov	7*8($carry),%r15
	jmp	.Lsqrx8x_outer_loop
.Lsqrx8x_outer_break:
	mov	%r9,9*8($tptr)		# t[9]
	movq	%xmm3,%rcx		# -$num
	mov	%r10,10*8($tptr)	# ...
	mov	%r11,11*8($tptr)
	mov	%r12,12*8($tptr)
	mov	%r13,13*8($tptr)
	mov	%r14,14*8($tptr)
	lea	48+8(%rsp),$tptr
	mov	($aptr,$i),%rdx		# a[0]
	mov	8($tptr),$A0[1]		# t[1]
	xor	$A0[0],$A0[0]		# t[0], of=0, cf=0
	mov	0+8(%rsp),$num		# restore $num
	mov	16($tptr),$A1[0]	# t[2]	# prefetch
	mov	24($tptr),$A1[1]	# t[3]	# prefetch
	#jmp	.Lsqrx4x_shift_n_add	# happens to be aligned
.Lsqrx4x_shift_n_add:
	.byte	0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00	# mov	8($aptr,$i),%rdx	# a[i+1]	# prefetch
	.byte	0x4c,0x8b,0x97,0x20,0x00,0x00,0x00	# mov	32($tptr),$A0[0]	# t[2*i+4]	# prefetch
	mov	40($tptr),$A0[1]	# t[2*i+4+1]	# prefetch
	mov	16($aptr,$i),%rdx	# a[i+2]	# prefetch
	mov	48($tptr),$A1[0]	# t[2*i+6]	# prefetch
	mov	56($tptr),$A1[1]	# t[2*i+6+1]	# prefetch
	mov	24($aptr,$i),%rdx	# a[i+3]	# prefetch
	mov	64($tptr),$A0[0]	# t[2*i+8]	# prefetch
	mov	72($tptr),$A0[1]	# t[2*i+8+1]	# prefetch
	jrcxz	.Lsqrx4x_shift_n_add_break
	.byte	0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00	# mov	0($aptr,$i),%rdx	# a[i+4]	# prefetch
	mov	80($tptr),$A1[0]	# t[2*i+10]	# prefetch
	mov	88($tptr),$A1[1]	# t[2*i+10+1]	# prefetch
	jmp	.Lsqrx4x_shift_n_add
.Lsqrx4x_shift_n_add_break:
	lea	64($tptr),$tptr		# end of t[] buffer
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
	xor	%eax,%eax		# initial top-most carry bit
	mov	32+8(%rsp),%rbx		# n0
	mov	48+8(%rsp),%rdx		# "%r8", 8*0($tptr)
	lea	-128($nptr,$num,2),%rcx	# end of n[]
	#lea	48+8(%rsp,$num,2),$tptr	# end of t[] buffer
	mov	%rcx, 0+8(%rsp)		# save end of n[]
	mov	$tptr,8+8(%rsp)		# save end of t[]
	lea	48+8(%rsp),$tptr	# initial t[] window
	jmp	.Lsqrx8x_reduction_loop
.Lsqrx8x_reduction_loop:
	imulq	%rbx,%rdx		# n0*a[i]
	mov	%rax,24+8(%rsp)		# store top-most carry bit
	lea	8*8($tptr),$tptr
	xor	$carry,$carry		# cf=0,of=0
	mulx	16*0($nptr),%rax,%r8	# n[0]
	adcx	%rbx,%rax		# discarded
	mulx	16*1($nptr),%rbx,%r9	# n[1]
	mulx	16*2($nptr),%rbx,%r10
	mulx	16*3($nptr),%rbx,%r11
	.byte	0xc4,0x62,0xe3,0xf6,0xa5,0x40,0x00,0x00,0x00	# mulx	16*4($nptr),%rbx,%r12
	mulx	32+8(%rsp),%rbx,%rdx	# %rdx discarded
	mov	%rax,64+48+8(%rsp,%rcx,8)	# put aside n0*a[i]
	mulx	16*5($nptr),%rax,%r13
	mulx	16*6($nptr),%rax,%r14
	mulx	16*7($nptr),%rax,%r15
	adox	$carry,%r15		# $carry is 0
	adcx	$carry,%r15		# cf=0
	.byte	0x67,0x67,0x67
	mov	$carry,%rax		# xor	%rax,%rax
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_no_tail
	mov	48+8(%rsp),%rdx		# pull n0*a[0]
	lea	16*8($nptr),$nptr
	adcx	8*2($tptr),%r10
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# top carry
	xor	$carry,$carry		# of=0, cf=0
	mulx	16*0($nptr),%rax,%r8
	mulx	16*1($nptr),%rax,%r9
	mulx	16*2($nptr),%rax,%r10
	mulx	16*3($nptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x40,0x00,0x00,0x00	# mulx	16*4($nptr),%rax,%r12
	mulx	16*5($nptr),%rax,%r13
	mulx	16*6($nptr),%rax,%r14
	mulx	16*7($nptr),%rax,%r15
	mov	72+48+8(%rsp,%rcx,8),%rdx	# pull n0*a[i]
	mov	%rbx,($tptr,%rcx,8)	# save result
	adcx	$carry,%r15		# cf=0
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_tail_done	# break out of loop
	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
	mov	48+8(%rsp),%rdx		# pull n0*a[0]
	lea	16*8($nptr),$nptr
	lea	8*8($tptr),$tptr
	sub	\$8,%rcx		# mov	\$-8,%rcx
	xor	$carry,$carry		# of=0, cf=0
	add	24+8(%rsp),%r8		# can this overflow?
	mov	$carry,%rax		# xor	%rax,%rax
	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
.Lsqrx8x_no_tail:			# %cf is 0 if jumped here
	mov	16*7($nptr),$carry
	movq	%xmm2,$nptr		# restore $nptr
	adc	%rax,%rax		# top-most carry
	mov	32+8(%rsp),%rbx		# n0
	mov	8*8($tptr,%rcx),%rdx	# modulo-scheduled "%r8"
	mov	%r8,8*0($tptr)		# store top 512 bits
	lea	8*8($tptr),%r8		# borrow %r8
	lea	8*8($tptr,%rcx),$tptr	# start of current t[] window
	cmp	8+8(%rsp),%r8		# end of t[]?
	jb	.Lsqrx8x_reduction_loop
	##############################################################
	# Post-condition, 4x unrolled
my ($rptr,$nptr)=("%rdx","%rbp");
my @ri=map("%r$_",(10..13));
my @ni=map("%r$_",(14..15));
	sub	%r15,%rsi		# compare top-most words
	mov	%rcx,%r10		# -$num
	mov	%rcx,%r9		# -$num
	sar	\$3+2,%rcx		# cf=0
	#lea	48+8(%rsp,%r9),$tptr
	lea	($nptr,%rax,8),$nptr
	movq	%xmm1,$rptr		# restore $rptr
	movq	%xmm1,$aptr		# prepare for back-to-back call
	sbb	16*0($nptr),%r12
	sbb	16*1($nptr),%r13
	lea	8*4($tptr),$tptr
	sbb	16*2($nptr),%r14
	sbb	16*3($nptr),%r15
	lea	16*4($nptr),$nptr
	lea	8*4($rptr),$rptr
	neg	%r9			# restore $num
.size	bn_sqrx8x_internal,.-bn_sqrx8x_internal
my ($inp,$num,$tbl,$idx)=$win64?
	("%rcx","%edx","%r8", "%r9d") :	# Win64 order
	("%rdi","%esi","%rdx","%ecx");	# Unix order
.type	bn_get_bits5,\@abi-omnipotent
	movzw	(%r10,$num,2),%eax
.size	bn_get_bits5,.-bn_get_bits5
.type	bn_scatter5,\@abi-omnipotent
	jz	.Lscatter_epilogue
	lea	($tbl,$idx,8),$tbl
.size	bn_scatter5,.-bn_scatter5
.type	bn_gather5,\@abi-omnipotent
$code.=<<___ if ($win64);
.LSEH_begin_bn_gather5:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x28		#sub	\$0x28,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
	.byte	0x0f,0x29,0x7c,0x24,0x10	#movaps	%xmm7,0x10(%rsp)
	shr	\$`log($N/8)/log(2)`,$idx
	lea	.Lmagic_masks(%rip),%rax
	and	\$`2**5/($N/8)-1`,$idx	# 5 is "window size"
	lea	128($tbl,%r11,8),$tbl	# pointer within 1st cache line
	movq	0(%rax,$idx,8),%xmm4	# set of masks denoting which
	movq	8(%rax,$idx,8),%xmm5	# cache line contains element
	movq	16(%rax,$idx,8),%xmm6	# denoted by 7th argument
	movq	24(%rax,$idx,8),%xmm7
	movq	`0*$STRIDE/4-128`($tbl),%xmm0
	movq	`1*$STRIDE/4-128`($tbl),%xmm1
	movq	`2*$STRIDE/4-128`($tbl),%xmm2
	movq	`3*$STRIDE/4-128`($tbl),%xmm3
	lea	$STRIDE($tbl),$tbl
	movq	%xmm0,($out)		# m0=bp[0]
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm7
.LSEH_end_bn_gather5:
.size	bn_gather5,.-bn_gather5
.long	0,0, 0,0, 0,0, -1,-1
.long	0,0, 0,0, 0,0, 0,0
.asciz	"Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
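#
# The handler below restores the registers saved by the function
# prologues when an exception hits between the prologue and epilogue
# labels recorded in HandlerData[], then forwards the unwind to
# RtlVirtualUnwind (see the tail of mul_handler).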
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail
	mov	152($context),%rax	# pull context->Rsp
	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
	lea	.Lmul_epilogue(%rip),%r10
	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	mov	40(%rax),%rax		# pull saved stack pointer
	movaps	-88(%rax),%xmm0
	movaps	-72(%rax),%xmm1
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	movups	%xmm0,512($context)	# restore context->Xmm6
	movups	%xmm1,528($context)	# restore context->Xmm7
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)
	mov	\$1,%eax		# ExceptionContinueSearch
.size	mul_handler,.-mul_handler
	.rva	.LSEH_begin_bn_mul_mont_gather5
	.rva	.LSEH_end_bn_mul_mont_gather5
	.rva	.LSEH_info_bn_mul_mont_gather5

	.rva	.LSEH_begin_bn_mul4x_mont_gather5
	.rva	.LSEH_end_bn_mul4x_mont_gather5
	.rva	.LSEH_info_bn_mul4x_mont_gather5

	.rva	.LSEH_begin_bn_power5
	.rva	.LSEH_end_bn_power5
	.rva	.LSEH_info_bn_power5

	.rva	.LSEH_begin_bn_from_mont8x
	.rva	.LSEH_end_bn_from_mont8x
	.rva	.LSEH_info_bn_from_mont8x
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont_gather5
	.rva	.LSEH_end_bn_mulx4x_mont_gather5
	.rva	.LSEH_info_bn_mulx4x_mont_gather5

	.rva	.LSEH_begin_bn_powerx5
	.rva	.LSEH_end_bn_powerx5
	.rva	.LSEH_info_bn_powerx5

	.rva	.LSEH_begin_bn_gather5
	.rva	.LSEH_end_bn_gather5
	.rva	.LSEH_info_bn_gather5
.LSEH_info_bn_mul_mont_gather5:
	.rva	.Lmul_body,.Lmul_epilogue		# HandlerData[]
.LSEH_info_bn_mul4x_mont_gather5:
	.rva	.Lmul4x_body,.Lmul4x_epilogue		# HandlerData[]
.LSEH_info_bn_power5:
	.rva	.Lpower5_body,.Lpower5_epilogue		# HandlerData[]
.LSEH_info_bn_from_mont8x:
	.rva	.Lfrom_body,.Lfrom_epilogue		# HandlerData[]
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont_gather5:
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue		# HandlerData[]
.LSEH_info_bn_powerx5:
	.rva	.Lpowerx5_body,.Lpowerx5_epilogue	# HandlerData[]
.LSEH_info_bn_gather5:
	.byte	0x01,0x0d,0x05,0x00
	.byte	0x0d,0x78,0x01,0x00	#movaps	0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps	(rsp),xmm6
	.byte	0x04,0x42,0x00,0x00	#sub	rsp,0x28

$code =~ s/\`([^\`]*)\`/eval($1)/gem;