#!/usr/bin/perl -w
#
# MD5 optimized for AMD64.
#
# Author: Marc Bevand <bevand_m (at) epita.fr>
# Licence: I hereby disclaim the copyright on this code and place it
# in the public domain.

use strict;

my $code;

# round1_step() does:
# dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
# %r10d = X[k_next]
# %r11d = z' (copy of z for the next step)
# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor $y, %r11d /* y ^ ... */
	lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
	and $x, %r11d /* x & ... */
	xor $z, %r11d /* z ^ ... */
	mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
	add %r11d, $dst /* dst += ... */
	rol \$$s, $dst /* dst <<< s */
	mov $y, %r11d /* (NEXT STEP) z' = $y */
	add $x, $dst /* dst += x */
EOF
}
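
# For reference only (a plain-Perl sketch, never called by the generator): the
# same round-1 step on 32-bit values.  F(x,y,z) = (x & y) | (~x & z) is
# computed as z ^ (x & (y ^ z)), which is the form the scheduled assembly
# above uses.  The sub name is illustrative.
sub ref_round1_step
{
    my ($dst, $x, $y, $z, $xk, $t, $s) = @_;
    my $f = $z ^ ($x & ($y ^ $z));				# F(x,y,z)
    $dst = ($dst + $f + $xk + $t) & 0xffffffff;			# dst + F + X[k] + T_i
    $dst = (($dst << $s) | ($dst >> (32 - $s))) & 0xffffffff;	# <<< s
    return ($dst + $x) & 0xffffffff;				# dst += x
}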

# round2_step() does:
# dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
# %r10d = X[k_next]
# %r11d = z' (copy of z for the next step)
# %r12d = z' (copy of z for the next step)
# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
    $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= " mov %edx, %r12d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	not %r11d /* not z */
	lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
	and $x, %r12d /* x & z */
	and $y, %r11d /* y & (not z) */
	mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
	or %r11d, %r12d /* (y & (not z)) | (x & z) */
	mov $y, %r11d /* (NEXT STEP) z' = $y */
	add %r12d, $dst /* dst += ... */
	mov $y, %r12d /* (NEXT STEP) z' = $y */
	rol \$$s, $dst /* dst <<< s */
	add $x, $dst /* dst += x */
EOF
}
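
# For reference only (illustrative, never called by the generator): the
# round-2 boolean function that the interleaved code above builds in
# %r11d/%r12d.
sub ref_G
{
    my ($x, $y, $z) = @_;
    # G(x,y,z) = (x & z) | (y & ~z)
    return (($x & $z) | ($y & (~$z & 0xffffffff))) & 0xffffffff;
}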

# round3_step() does:
# dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
# %r10d = X[k_next]
# %r11d = y' (copy of y for the next step)
# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
    $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
	mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
	xor $z, %r11d /* z ^ ... */
	xor $x, %r11d /* x ^ ... */
	add %r11d, $dst /* dst += ... */
	rol \$$s, $dst /* dst <<< s */
	mov $x, %r11d /* (NEXT STEP) y' = $x */
	add $x, $dst /* dst += x */
EOF
}
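
# For reference only (illustrative, never called by the generator): the
# round-3 boolean function computed above by the two xors into %r11d.
sub ref_H
{
    my ($x, $y, $z) = @_;
    # H(x,y,z) = x ^ y ^ z
    return ($x ^ $y ^ $z) & 0xffffffff;
}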

# round4_step() does:
# dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
# %r10d = X[k_next]
# %r11d = not z' (copy of not z for the next step)
# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov \$0xffffffff, %r11d\n" if ($pos == -1);
    $code .= " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n"
        if ($pos == -1);
    $code .= <<EOF;
	lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
	or $x, %r11d /* x | ... */
	xor $y, %r11d /* y ^ ... */
	add %r11d, $dst /* dst += ... */
	mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
	mov \$0xffffffff, %r11d
	rol \$$s, $dst /* dst <<< s */
	xor $y, %r11d /* (NEXT STEP) not z' = not $y */
	add $x, $dst /* dst += x */
EOF
}
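
# For reference only (illustrative, never called by the generator): the
# round-4 boolean function.  The assembly above keeps "not z" live in %r11d
# across steps so the complement is folded into the preceding step.
sub ref_I
{
    my ($x, $y, $z) = @_;
    # I(x,y,z) = y ^ (x | ~z)
    return ($y ^ ($x | (~$z & 0xffffffff))) & 0xffffffff;
}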

my $flavour = shift;
my $output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

no warnings qw(uninitialized);
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
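
# Everything printed to STDOUT from here on is piped through the perlasm
# translator opened above, which rewrites the code for the requested output
# "flavour".  A typical (illustrative) invocation is:
#
#   perl md5-x86_64.pl elf md5-x86_64.S
#
# where the first argument names the flavour (e.g. elf, macosx, mingw64)
# and the second the output file handed to x86_64-xlate.pl.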

$code .= <<EOF;
.text
.align 16

.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
	push %rbp
	push %rbx
	push %r12
	push %r14
	push %r15
.Lprologue:

	# rdi = arg #1 (ctx, MD5_CTX pointer)
	# rsi = arg #2 (ptr, data pointer)
	# rdx = arg #3 (nbr, number of 16-word blocks to process)
	mov %rdi, %rbp # rbp = ctx
	shl \$6, %rdx # rdx = nbr in bytes
	lea (%rsi,%rdx), %rdi # rdi = end
	mov 0*4(%rbp), %eax # eax = ctx->A
	mov 1*4(%rbp), %ebx # ebx = ctx->B
	mov 2*4(%rbp), %ecx # ecx = ctx->C
	mov 3*4(%rbp), %edx # edx = ctx->D
	# end is 'rdi'
	# ptr is 'rsi'
	# A is 'eax'
	# B is 'ebx'
	# C is 'ecx'
	# D is 'edx'

	cmp %rdi, %rsi # cmp end with ptr
	je .Lend # jmp if ptr == end

	# BEGIN of loop over 16-word blocks
.Lloop:	# save old values of A, B, C, D
	mov %eax, %r8d
	mov %ebx, %r9d
	mov %ecx, %r14d
	mov %edx, %r15d
EOF
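
# The 64 calls below unroll the four MD5 rounds.  Per RFC 1321, the constant
# passed to each call is T[i] = floor(2**32 * abs(sin(i))) for i = 1..64,
# e.g. T[1] = 0xd76aa478.  A plain-Perl check of that table (illustrative
# only, never called by the generator):
sub ref_T
{
    my $i = shift;
    return int(2**32 * abs(sin($i)));
}
# The message-word indices and rotate amounts follow the per-round schedules
# from the same document.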

round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');

round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');

round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');

$code .= <<EOF;
	# add old values of A, B, C, D
	add %r8d, %eax
	add %r9d, %ebx
	add %r14d, %ecx
	add %r15d, %edx

	# loop control
	add \$64, %rsi # ptr += 64
	cmp %rdi, %rsi # cmp end with ptr
	jb .Lloop # jmp if ptr < end
	# END of loop over 16-word blocks

.Lend:
	mov %eax, 0*4(%rbp) # ctx->A = A
	mov %ebx, 1*4(%rbp) # ctx->B = B
	mov %ecx, 2*4(%rbp) # ctx->C = C
	mov %edx, 3*4(%rbp) # ctx->D = D

	mov (%rsp),%r15
	mov 8(%rsp),%r14
	mov 16(%rsp),%r12
	mov 24(%rsp),%rbx
	mov 32(%rsp),%rbp
	add \$40,%rsp
.Lepilogue:
	ret
.size md5_block_asm_data_order,.-md5_block_asm_data_order
EOF
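
# From C the generated routine is typically declared along the lines of
# (assumed prototype, consistent with the three-argument .type annotation
# and the register comments above):
#
#   void md5_block_asm_data_order(MD5_CTX *c, const void *data, size_t num);
#
# where num is the number of 64-byte (16-word) blocks to process.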

print $code;

close STDOUT;